repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
obs145628/dcgan-cuda
|
[
"215d724c7e4d3561c2f2349a196c86db58ad6a8e",
"215d724c7e4d3561c2f2349a196c86db58ad6a8e",
"215d724c7e4d3561c2f2349a196c86db58ad6a8e",
"215d724c7e4d3561c2f2349a196c86db58ad6a8e",
"215d724c7e4d3561c2f2349a196c86db58ad6a8e"
] |
[
"tests/pyts/big_mat_generator.py",
"tests/ref_conv2d_bias_add.py",
"tests/cnn_mnist.py",
"tests/ref_argmax.py",
"tests/tensors_saver.py"
] |
[
"import sys\nimport numpy as np\n\noutput = open(sys.argv[1], \"w\")\n\ndef gen_mat(name, m, n=None):\n output.write(\"dbl_t \" + name + \" [] = {\\n\")\n\n np.random.seed(3531354)\n if n:\n mat = np.random.rand(m, n)\n for row in mat:\n output.write(\"\\t\" + \", \".join([str(x) for x in row]) + \",\\n\")\n else:\n vec = np.random.rand(m)\n output.write(\"\\t\" + \", \".join([str(x) for x in vec]) + \",\\n\")\n\n output.write(\"};\\n\\n\")\n\ngen_mat(\"a\", 8000, 3)\ngen_mat(\"b\", 8000, 2)\ngen_mat(\"v\", 8000)",
"import os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport tensors_saver\r\n\r\ntensors_saver.set_out_path(sys.argv[1])\r\n\r\nout = np.array(\r\n[[[[ 14., 12.],\r\n [ 7., -6.]],\r\n\r\n [[ -5., -5.],\r\n [ 9., -11.]]],\r\n\r\n\r\n [[[ 19., -1.],\r\n [ -2., 2.]],\r\n\r\n [[ 7., 11.],\r\n [ 12., -11.]]]])\r\n\r\nbias = np.array([2., -1.])\r\n\r\nout_node = tf.Variable(out, dtype=tf.float32)\r\nbias_node = tf.Variable(bias, dtype=tf.float32)\r\nres = tf.nn.bias_add(out_node, bias_node)\r\nsess = tf.Session()\r\ninit = tf.global_variables_initializer()\r\nsess.run(init)\r\nres_tf = sess.run(res)\r\ntensors_saver.add(res_tf)\r\n",
"\n\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\nmnist = input_data.read_data_sets(one_hot=True, train_dir='/tmp/tf/')\n\n\nX = tf.placeholder(tf.float32, [None, 784])\ny = tf.placeholder(tf.float32, shape=[None, 10])\n\nl0 = tf.reshape(X, shape=[-1, 28, 28, 1])\nl1 = tf.layers.conv2d(\n inputs=l0,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\nl1 = tf.layers.max_pooling2d(inputs=l1, pool_size=[2, 2], strides=2)\n\nl2 = tf.layers.conv2d(\n inputs=l1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\nl2 = tf.layers.max_pooling2d(inputs=l2, pool_size=[2, 2], strides=2)\nl2 = tf.reshape(l2, shape=[-1, 7 * 7 * 64])\n\nl3 = tf.layers.dense(inputs=l2, units=1024, activation=tf.nn.relu)\n\nlogits = tf.layers.dense(inputs=l3, units=10)\n\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=y))\ntrain_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss_op)\n\nsess = tf.InteractiveSession()\ntf.global_variables_initializer().run()\n# Train\n\nfor i in range(10000):\n batch_xs, batch_ys = mnist.train.next_batch(128)\n loss, _ = sess.run([loss_op, train_op], feed_dict={X: batch_xs, y: batch_ys})\n\n print('train loss = {}'.format(loss)) \n \n # Test trained model\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print('step {}: {}%'.format(i, sess.run(accuracy, feed_dict={X: mnist.test.images,\n y: mnist.test.labels})))\n",
"import os\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nimport tensors_saver\n\ntensors_saver.set_out_path(sys.argv[1])\n\ny_hat = np.array([\n [0.1, 0.2, 0.7],\n [0.8, .1, .1],\n [0.1, 0.3, 0.6],\n [.6, .2, .2],\n [.1, .1, .8],\n [.2, .3, .5],\n [.7, .1, .2],\n [.4, .3, .3],\n [.2, .1, .7],\n [.8, .1, .1]\n])\n\ny = np.array([\n [0., 1, 0],\n [0, 0, 1],\n [1, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0],\n [1, 0, 0]\n])\n\ny_node = tf.Variable(y, dtype=tf.float32)\ny_hat_node = tf.Variable(y_hat, dtype=tf.float32)\n\nacc_node = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_hat, 1)), tf.float32))\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\ntf_acc = sess.run(acc_node).astype(np.float32)\ntensors_saver.add(tf_acc)\n",
"import atexit\nimport numpy as np\nfrom os import environ\n\nENV_KEY = 'TENSOR_SAVER_PATH'\n\nclass Saver:\n\n def __init__(self, path):\n self.path = path\n self.objs = []\n\n def add(self, obj):\n if len(obj.shape) == 0:\n obj = np.array([obj])\n self.objs.append(obj)\n\n def save(self):\n dobjs= {}\n for i in range(len(self.objs)):\n name = 'obj_' + str(i).zfill(6)\n dobjs[name] = self.objs[i]\n np.savez(self.path, **dobjs)\n\n def clear(self):\n self.objs.clear()\n\n_gbl_saver = Saver(environ[ENV_KEY] if ENV_KEY in environ else './debug.npz')\n\ndef add(obj):\n _gbl_saver.add(obj)\n\ndef set_out_path(path):\n _gbl_saver.path = path\n\ndef save():\n _gbl_saver.save()\n\ndef clear():\n _gbl_saver.clear()\n\ndef _on_exit():\n if len(_gbl_saver.objs) != 0:\n _gbl_saver.save()\n\natexit.register(_on_exit)\n"
] |
[
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"tensorflow.nn.bias_add",
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.array"
],
[
"tensorflow.layers.conv2d",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.InteractiveSession",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.layers.max_pooling2d",
"tensorflow.placeholder",
"tensorflow.layers.dense",
"tensorflow.global_variables_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets"
],
[
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.argmax",
"numpy.array"
],
[
"numpy.array",
"numpy.savez"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MuttData/soam
|
[
"65612a02552668c6721dc20e675654883391c3e9",
"65612a02552668c6721dc20e675654883391c3e9"
] |
[
"soam/workflow/merge_concat.py",
"soam/workflow/time_series_extractor.py"
] |
[
"\"\"\"\nMergeConcat\n-----------\nA class to merge or concat dataframes\n\"\"\"\n\nfrom typing import List, Union\n\nimport pandas as pd\nfrom pandas.core.common import maybe_make_list\n\nfrom soam.core import Step\n\n\nclass MergeConcat(Step):\n def __init__(\n self, keys: Union[str, List[str], None] = None, **kwargs,\n ):\n \"\"\"\n Merge on concat dataframes dependending on the keys\n\n Parameters\n ----------\n keys:\n str or list of str labels of columns to merge on\n \"\"\"\n super().__init__(**kwargs)\n\n if keys is None:\n keys = []\n self.keys = maybe_make_list(keys)\n self.complete_df = pd.DataFrame(columns=self.keys)\n\n def run(self, in_df: List[pd.DataFrame]) -> pd.DataFrame: # type: ignore\n \"\"\"\n If values of keys exist on in_df and complete_df will\n merge and add the in_df columns\n else will concat the in_df on the complete_df\n\n Parameters\n ----------\n in_df\n A pandas DataFrame containing the keys as columns\n\n Returns\n -------\n A pandas DataFrame\n with merged or concateneted data\n\n Examples\n --------\n >>> import pandas as pd\n >>> from soam.workflow import MergeConcat\n >>> df1 = pd.DataFrame({\"date\": [1], \"metric1\": [512]})\n >>> df2 = pd.DataFrame({\"date\": [1], \"metric2\": [328]})\n >>> df3 = pd.DataFrame({\"date\": [2], \"metric1\": [238]})\n >>> mc = MergeConcat(keys=\"date\")\n >>> mc.run([df1,df2,df3])\n date\tmetric1\tmetric2\n 1\t 512.0\t328.0\n 2\t 238.0\tNaN\n \"\"\"\n complete_df = pd.DataFrame(columns=self.keys)\n for df in in_df:\n if self._check_keys(df, complete_df):\n if set(df).issubset(set(complete_df.columns)):\n complete_df = complete_df.combine_first(df)\n else:\n complete_df = complete_df.merge(df, how=\"right\", on=self.keys)\n else:\n complete_df = pd.concat([complete_df, df])\n\n return complete_df\n\n def _check_keys(self, in_df: pd.DataFrame, complete_df: pd.DataFrame) -> bool:\n \"\"\"\n Check if keys values are in both in_df or complete_df\n \"\"\"\n df_dict = in_df[self.keys].to_dict(\"list\")\n return any(complete_df.isin(df_dict)[self.keys].all(axis=1))\n",
"\"\"\"\nModule to extract and aggregate time series.\n\n# General TODOs:\n- Make quantiles great again:\n https://gitlab.com/mutt_data/tfg-adsplash/-/blob/master/adsplash/store/dataset.py\n They are a nice feature but couldn't get them to work yet.\n\nNotes:\n- Dimensional hierarchy can be implemented via snowflake schema [1].\n We could implement this idea via \"virtual dimensions\" that are really the values we get from\n joining higher in the hierarchy.\n- It would be interesting to implement unique counts.\n\n[1] Ralph Kimball, Margy Ross - The Data Warehouse Toolkit (2013).\n\"\"\"\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union\n\nfrom jinja2 import Template\nimport pandas as pd\n\nfrom soam.constants import (\n DONT_AGGREGATE_SYMBOL,\n NEGATE_SYMBOL,\n TIMESTAMP_COL,\n regex_prefix_symbols,\n)\nfrom soam.core import Step\n\nif TYPE_CHECKING:\n import datetime as dt\n\n import muttlib\n\n# Simple column selection templates.\nBASE_TEMPLATE = \"\"\"\n {{ column }} AS {{ alias }}\n\"\"\"\n\nCOMPUTE_SHARE_COLUMN_TEMPLATE = \"\"\"\n COALESCE(\n {{ constant if constant else 1.0 }} * {{denominator}}::float8 / NULLIF({{numerator}}, 0), 0\n ) AS {{ alias }}\n\"\"\"\n\nMAX_TEMPLATE = \"\"\"\n MAX({{ column }})::float8 AS {{ alias }}\n\"\"\"\nSUM_TEMPLATE = \"\"\"\n SUM({{ column }})::float8 AS {{ alias }}\n\"\"\"\n\nJOIN_TEMPLATE = \"\"\"\n JOIN {{table}} ON {{condition}}\n\"\"\"\n\n\nclass TimeSeriesExtractor(Step):\n db: \"muttlib.dbconn.BaseClient\"\n table_name: str\n build_query_kwargs: Dict[str, Any]\n\n def __init__(\n self,\n db: \"muttlib.dbconn.BaseClient\",\n table_name: str,\n **kwargs: Dict[str, Any],\n ):\n \"\"\"\n Class to handle the dataset retrieval from the PostgreSql database.\n\n Parameters\n ----------\n db: muttlib.dbconn.BaseClient\n The database connection to use.\n table_name: str\n The table's name.\n \"\"\"\n super().__init__(**kwargs)\n self.db = db\n self.table_name = table_name\n self.build_query_kwargs = {} # this needs to be passed as a param\n\n def get_params(self, deep=True):\n d = super().get_params(deep)\n d[\"db_conn_str\"] = self.db.conn_str\n del d[\"db\"]\n d[\"build_query_kwargs\"] = self.build_query_kwargs\n return d\n\n def extract(self, build_query_kwargs: Dict[str, Any],) -> pd.DataFrame:\n \"\"\"\n Extracts aggregated data and return it as a pandas DataFrame.\n\n Parameters\n ----------\n build_query_kwargs: dict of {str: obj}\n Configuration of the extraction query to be used for the extraction.\n\n Returns\n -------\n pd.DataFrame\n Extracted data.\n \"\"\"\n query, kwargs = self.build_query(**build_query_kwargs)\n conn = self.db._connect() # pylint: disable=protected-access\n df = self.db.to_frame(query, connection=conn, **kwargs)\n if df.empty:\n df = pd.DataFrame(columns=build_query_kwargs[\"columns\"])\n conn.close()\n return df\n\n # maybe define class type all this arguments?\n def build_query(\n self,\n table_mapping: str = None,\n columns=None,\n prequery: str = \"\",\n dimensions: List[str] = None,\n dimensions_values: List[str] = None,\n timestamp_col: str = TIMESTAMP_COL,\n start_date: Union[\"dt.datetime\", str] = None,\n end_date: Union[\"dt.datetime\", str] = None,\n order_by: List[str] = None,\n extra_where_conditions: List[str] = None,\n extra_having_conditions: List[str] = None,\n column_mappings: Dict = None,\n aggregated_column_mappings: Dict = None,\n inner_join: Optional[List[Tuple[str, str, str]]] = None,\n ) -> Tuple[str, Dict[str, Any]]:\n \"\"\"\n Build the query to extract and aggregated 
dataset.\n\n Parameters\n ----------\n table_mapping: str\n The alias of the table.\n columns: list of str\n The columns to retrieve.\n prequery: str\n Query to prepend to the output.\n dimensions: list of str\n The dimensions by which data will be partitioned / aggregated.\n E.g.: ['year', 'month', 'day', 'game', 'type'].\n If a dimension is prefixed with a \"!\", the dimensions values will\n be negated and the complement rows will be returned.\n E.g.: ['!country'] with ['US'] would return the rows of every\n country except the US.\n If a dimensions is prefixes with a \"#\", the dimensions values won't\n be used as a group by but as a filter.`\n dimensions_values: str or list of str\n Values to filter the dataset with the indexes matching the\n dimensions indexes.\n If a list is provided for a dimension, all values will be\n considered valid with an OR operator.\n \"*\" and None won't filter the results.\n However \"*\" will be recorded as an estimator's / optimizer's\n dimension in the database for record, None won't.\n E.g. for the example dimensions above:\n [2019, 9, None, 'android_flightpilot', 'interstitial']\n would only return september 2019 instertitials data for flight\n pilot android.\n E.g.: doing ['country'], [['BR', 'US']] would return every row\n that is from either BR or US.\n timestamp_col: str\n Name of the column what will hold the timestamp of the fact.\n start_date: datetime\n A start date to filter the rows.\n end_date: datetime\n An inclusive end date to filter the rows.\n order_by: list of str\n A list of column names to order by.\n extra_where_conditions: list of str\n A list of conditions to be added to the \"where\" clause.\n extra_having_conditions: list of str or None\n A list of conditions to be added to the \"having\" clause.\n column_mappings: dict\n A dict with 'column names' as keys and 'column as column alias' as values.\n E.g.: A dict like {'date':'date as fecha'} would result in\n a SQL statement 'SELECT date as fecha'.\n aggregated_column_mappings: dict\n Contains the aggregation functions and aliases to replace the column\n values.\n inner_join: list of tuple of int\n A list of tables to join on, every tuple is expected to contain:\n (table_name, table_alias, complete_condition).\n The table_alias is optional.\n For example:\n [('table_a', 'at', 'initial_table_model.attr_1 = at.attr_1'),\n ('table_b', 'BBB', 'initial_table_model.attr_2 = BBB.222'),\n ('table_c', None, 'initial_table_model.attr_3 = table_c.attr_3')]\n\n Returns\n -------\n tuple of (str, dict of {str: obj})\n Renderd SQL query to extract data.\n \"\"\"\n\n args_maybe_dt = [start_date, end_date]\n\n for arg in args_maybe_dt:\n arg = pd.to_datetime(arg)\n\n # Template\n query = \"\"\"\n {{ prequery }}\n SELECT {{ columns | join(\", \") }}\n FROM {{ table_name }}\n {% if table_mapping %}\n AS {{ table_mapping }}\n {% endif %}\n {% if join_tables %}\n {% for j_table in join_tables %}\n INNER JOIN {{ j_table.0 }}\n {% if j_table.1 %}\n AS {{ j_table.1 }}\n {% endif %}\n ON {{ j_table.2 }}\n {% endfor %}\n {% endif %}\n {% if where %}\n WHERE {{ where | join(\" AND \") }}\n {% endif %}\n {% if group_by %}\n GROUP BY {{ group_by | join(\", \") }}\n {% endif %}\n {% if having %}\n HAVING {{ having | join(\" AND \") }}\n {% endif %}\n {% if order_by %}\n ORDER BY {{ order_by | join(\", \") }}\n {% endif %}\n \"\"\"\n kwargs: Dict[str, Union[int, float, str]] = {}\n\n if column_mappings is None:\n column_mappings = {}\n if aggregated_column_mappings is None:\n aggregated_column_mappings = 
{}\n\n placeholders = {\n \"prequery\": prequery,\n \"columns\": \"*\",\n \"table_name\": self.table_name,\n \"table_mapping\": table_mapping,\n \"join_tables\": inner_join,\n \"where\": \"\",\n \"group_by\": \"\",\n \"having\": \"\",\n \"order_by\": \"\",\n }\n\n # Dimensions negation and check if disable dimension aggregation\n ((dimensions, negate_dimensions_values), (_, dont_aggregate_dimensions)) = (\n self._negate_dimensions(dimensions),\n self._dont_aggregate_dimensions(dimensions),\n )\n\n # Columns\n col_map = aggregated_column_mappings\n if dimensions is None or all(dont_aggregate_dimensions):\n col_map = column_mappings\n\n columns = [col_map.get(col, col) for col in columns]\n\n placeholders[\"columns\"] = columns\n\n # Where\n where_conds: List[str] = []\n values_conds, values_kwargs = self._filter_dimensions_values(\n dimensions, dimensions_values, negate_dimensions_values,\n )\n kwargs.update(values_kwargs)\n where_conds.extend(values_conds)\n date_conds, date_kwargs = self._filter_date_range(\n start_date, end_date, timestamp_col=timestamp_col\n )\n kwargs.update(date_kwargs)\n where_conds.extend(date_conds)\n if extra_where_conditions:\n extra_where_conditions = [\n cond.replace(\"%\", \"%%\")\n for cond in extra_where_conditions\n if \"%\" in cond\n ]\n where_conds.extend(extra_where_conditions)\n if where_conds:\n placeholders[\"where\"] = where_conds # type: ignore\n\n # Having\n having_conds = []\n if extra_having_conditions:\n having_conds.extend(extra_having_conditions)\n if having_conds:\n placeholders[\"having\"] = having_conds # type: ignore\n\n # Group by\n if dimensions is not None:\n placeholders[\"group_by\"] = [\n dim # type: ignore\n for dim, dont in zip(dimensions, dont_aggregate_dimensions)\n if not dont\n ]\n # Order by\n if order_by is not None:\n placeholders[\"order_by\"] = order_by # type: ignore\n\n # Render\n sql = Template(query).render(**placeholders)\n kwargs = {\n k: v if not isinstance(v, str) else f\"'{v}'\" for k, v in kwargs.items()\n }\n # FIXME: Formatting SQL like this is unsafe. 
We should pass the params to the\n # engine along the query.\n sql = sql % kwargs\n kwargs = {}\n return sql, kwargs\n\n def dimensions_values(\n self,\n dimensions,\n dimensions_values=None,\n start_date=None,\n end_date=None,\n order_by=None,\n ):\n \"\"\"\n Returns the values for the dimensions provided in the dataset.\n\n Parameters\n ----------\n dimensions: list of str\n The column names which dimensions values wants to be retrieved.\n E.g.: ['game', 'type'].\n dimensions_values: str or list of str or None or \"*\"\n Values to filter the dataset with the indexes matching the\n dimensions indexes.\n If a list is provided for a dimension, all values will be\n considered valid with an OR operator.\n E.g.:\n dimensions=['country', 'ad_network']\n dimensions_values=['us', None]\n Would return all the ('us', ad_network) pairs.\n Or [['br', 'us'], None] would return all pairs of\n (country, ad_network) with the country being one of br or us.\n start_date: datetime\n A start date to filter the rows.\n end_date: datetime\n An inclusive end date to filter the rows.\n order_by: list of str\n A list of column names to order by.\n\n Returns\n -------\n list of [tuple of str]\n A list of tuples with the requested columns.\n E.g.: [('android_flightpilot', 'instertitial',\n ('android_flightpilot', 'rewardedVideo')]\n \"\"\"\n # Template\n kwargs = {}\n query = \"\"\"\n SELECT DISTINCT {{ columns | join(\", \") }}\n FROM {{ table_name }}\n {% if where %}\n WHERE {{ where | join(\" AND \") }}\n {% endif %}\n {% if order_by %}\n ORDER BY {{ order_by | join(\", \") }}\n {% endif %}\n \"\"\"\n placeholders = {\n \"columns\": dimensions,\n \"table_name\": self.table_name,\n \"where\": \"\",\n \"group_by\": dimensions,\n \"order_by\": \"\",\n }\n # Dimensions negation\n dimensions, negate_dimensions_values = self._negate_dimensions(dimensions)\n\n # Where\n where_conds, kwargs = self._filter_date_range(start_date, end_date)\n values_conds, values_kwargs = self._filter_dimensions_values(\n dimensions, dimensions_values, negate_dimensions_values,\n )\n kwargs.update(values_kwargs)\n where_conds.extend(values_conds)\n placeholders[\"where\"] = where_conds\n\n # Order by\n placeholders[\"order_by\"] = order_by\n\n # Render\n sql = Template(query).render(**placeholders)\n for k, v in kwargs.items():\n if isinstance(v, str):\n kwargs[k] = f\"'{v}'\"\n elif isinstance(v, tuple):\n if isinstance(v[0], str):\n v = (f\"'{v_i}'\" for v_i in v)\n v = \", \".join(v)\n kwargs[k] = f\"({v})\"\n\n sql = sql % kwargs\n ret = [list(row) for row in self.db.execute(sql, params=kwargs)]\n return ret\n\n def _filter_date_range(\n self, start_date=None, end_date=None, timestamp_col=TIMESTAMP_COL,\n ):\n \"\"\"\n Returns a list of conditions for a where clause and a dictionary\n with keyword arguments to fill the conditions parameters.\n\n Parameters\n ----------\n start_date: datetime\n A start date to filter the rows.\n end_date: datetime\n An inclusive end date to filter the rows.\n\n Returns\n -------\n tuple of (list of str, dict())\n list of str: list of sql conditions for filtering.\n dict: dictionary holding the values of the placeholders in the\n conditions.\n \"\"\"\n conds = []\n kwargs = {}\n if start_date is not None:\n conds.append(f\"{timestamp_col} >= %(start_date)s\")\n kwargs[\"start_date\"] = start_date\n if not isinstance(start_date, str):\n kwargs[\"start_date\"] = start_date.strftime(\"%Y-%m-%d\")\n if end_date is not None:\n conds.append(f\"{timestamp_col} <= %(end_date)s\")\n kwargs[\"end_date\"] = 
end_date\n if not isinstance(end_date, str):\n kwargs[\"end_date\"] = end_date.strftime(\"%Y-%m-%d\")\n return conds, kwargs\n\n def _negate_dimensions(self, dimensions):\n \"\"\"\n Returns dimensions without the prefix symbols and a boolean list\n indication if they have to be negated.\n\n Parameters\n ----------\n dimensions: list of str or None\n A list of dimension names\n\n Returns\n -------\n tuple of (list of str, list of bool)\n * List of dimension names with the negatino prefixes \"!\" removed.\n * List of boolean values indicating if the dimension has to be\n negated or not\n \"\"\"\n return self._detect_dimensions_prefix_symbol(dimensions, NEGATE_SYMBOL)\n\n def _dont_aggregate_dimensions(self, dimensions):\n \"\"\"\n Returns dimensions without the prefix symbols and a boolean list\n indication if they don't have to be aggregated.\n\n Parameters\n ----------\n dimensions: list of str or None\n A list of dimension names\n\n Returns\n -------\n tuple of (list of str, list of bool)\n * List of dimension names with the negatino prefixes \"#\" removed.\n * List of boolean values indicating if the dimension should not be\n aggregated.\n \"\"\"\n return self._detect_dimensions_prefix_symbol(dimensions, DONT_AGGREGATE_SYMBOL)\n\n def _detect_dimensions_prefix_symbol(\n self, dimensions, prefix_symbol,\n ):\n \"\"\"\n Returns dimensions with the prefix symbols removed and a boolean\n list indication if they are prefixed with it.\n\n Parameters\n ----------\n dimensions: list of str or None\n A list of dimension names\n prefix_symbol: str\n The prefix symbol (a character) that we want to check if it\n prefixes the dimension name.\n\n Returns\n -------\n tuple of (list of str, list of bool)\n * List of dimension names with the prefix symbol removed.\n * List of boolean values indicating if the dimension was prefixed\n with the symbol.\n \"\"\"\n if dimensions:\n has_symbol = []\n # Find symbol\n for dimension in dimensions:\n if dimension:\n match = regex_prefix_symbols.match(dimension)\n has_symbol.append(match and prefix_symbol in match[0])\n # Remove prefixes from dimensinos\n dimensions = [\n regex_prefix_symbols.sub(\"\", dimension) for dimension in dimensions\n ]\n return dimensions, has_symbol\n return dimensions, None\n\n def _filter_dimensions_values(\n self, dimensions, dimensions_values, negate_dimensions_values,\n ):\n \"\"\"\n Returns a list of conditions for a where clause and a dictionary\n with keyword arguments to fill the conditions parameters.\n\n Parameters\n ----------\n dimensions: list of str or None\n A list of dimension names\n dimensions_values: obj or list of obj or None\n A list with the wanted values for the dimensions.\n negate_dimensions_values: list of bool\n List of boolean values indicating if the dimension has to be\n negated or not\n\n Returns\n -------\n tuple of (list of str, dict of {str : obj})\n list of str: list of sql conditions for filtering.\n dict: dictionary holding the values of the placeholders in the\n conditions.\n \"\"\"\n conds = []\n kwargs = {}\n if dimensions is not None and dimensions_values is not None:\n for name, value, negate in zip(\n dimensions, dimensions_values, negate_dimensions_values\n ):\n # Can we use a standard parsable language instead of this? 
pyparsing?\n # https://github.com/pyparsing/pyparsing/blob/master/examples/simpleBool.py\n if value is not None and value != \"*\":\n operators = (\"!=\", \"NOT IN\") if negate else (\"=\", \"IN\")\n if isinstance(value, (list, tuple)):\n conds.append(\"%s %s %%(%s)s\" % (name, operators[1], name))\n kwargs[name] = tuple(value)\n else:\n conds.append(\"%s %s %%(%s)s\" % (name, operators[0], name))\n kwargs[name] = value\n return conds, kwargs\n\n def run(self, build_query_kwargs: Dict[str, Any]) -> pd.DataFrame: # type: ignore\n \"\"\"\n Returns aggregated data from a query into a pandas DataFrame.\n\n Parameters\n ----------\n build_query_kwargs: dict\n\n Returns\n -------\n pd.DataFrame\n Agregated data from the time series extractor object.\n \"\"\"\n return self.extract(build_query_kwargs)\n"
] |
[
[
"pandas.concat",
"pandas.core.common.maybe_make_list",
"pandas.DataFrame"
],
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
geoffreyvd/deer
|
[
"e24861e4fbfe9c47c4bd2d86be302147665db314"
] |
[
"examples/test_CRAR/testing low dim simple maze on desktop cpu/testing_inverse_env_resetting_encoder_bigger_lr_decay/run_simple_maze_transferlearning_encoder.py"
] |
[
"\"\"\"Simple maze launcher\n\n\"\"\"\n\nimport sys\nimport logging\nimport numpy as np\nfrom joblib import hash, dump, load\nimport os\n\nfrom deer.default_parser import process_args\nfrom deer.agent import NeuralAgent\nfrom deer.learning_algos.CRAR_keras import CRAR\nfrom simple_maze_env_diff_obs import MyEnv as simple_maze_env ####inverse obs\n# from simple_maze_env import MyEnv as simple_maze_env\nimport deer.experiment.base_controllers as bc\n\nfrom deer.policies import EpsilonGreedyPolicy\n\n\nclass Defaults:\n # ----------------------\n # Experiment Parameters\n # ----------------------\n STEPS_PER_EPOCH = 5000\n EPOCHS = 50\n STEPS_PER_TEST = 1000\n PERIOD_BTW_SUMMARY_PERFS = 1\n \n # ----------------------\n # Environment Parameters\n # ----------------------\n FRAME_SKIP = 2\n\n # ----------------------\n # DQN Agent parameters:\n # ----------------------\n UPDATE_RULE = 'rmsprop'\n LEARNING_RATE = 0.00026 #TODO define a good learning rate for transfer learning encoder (not too instable but still able to learn encoder)\n #keras uses 0.0001 as low learning rate for transfer learning\n LEARNING_RATE_DECAY = 0.6 #the smaller this number, the faster the decay\n DISCOUNT = 0.9\n DISCOUNT_INC = 1\n DISCOUNT_MAX = 0.99\n RMS_DECAY = 0.9\n RMS_EPSILON = 0.0001\n MOMENTUM = 0\n CLIP_NORM = 1.0\n EPSILON_START = 1.0\n EPSILON_MIN = 1.0\n EPSILON_DECAY = 10000\n UPDATE_FREQUENCY = 1\n REPLAY_MEMORY_SIZE = 500000\n BATCH_SIZE = 32\n FREEZE_INTERVAL = 1000\n DETERMINISTIC = False\n\n\nHIGHER_DIM_OBS = False\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n \n # --- Parse parameters ---\n parameters = process_args(sys.argv[1:], Defaults)\n if parameters.deterministic:\n rng = np.random.RandomState(123456)\n else:\n rng = np.random.RandomState()\n \n # --- Instantiate environment ---\n env = simple_maze_env(rng, higher_dim_obs=HIGHER_DIM_OBS)\n \n # --- Instantiate learning_algo ---\n learning_algo = CRAR(\n env,\n parameters.rms_decay,\n parameters.rms_epsilon,\n parameters.momentum,\n parameters.clip_norm,\n parameters.freeze_interval,\n parameters.batch_size,\n parameters.update_rule,\n rng,\n high_int_dim=HIGHER_DIM_OBS,\n internal_dim=2)\n \n test_policy = EpsilonGreedyPolicy(learning_algo, env.nActions(), rng, 1.)\n\n # --- Instantiate agent ---\n agent = NeuralAgent(\n env,\n learning_algo,\n parameters.replay_memory_size,\n max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),\n parameters.batch_size,\n rng,\n test_policy=test_policy)\n\n # --- Create unique filename for FindBestController ---\n h = hash(vars(parameters), hash_name=\"sha1\")\n fname = \"test_\" + h\n print(\"The parameters hash is: {}\".format(h))\n print(\"The parameters are: {}\".format(parameters))\n\n # As for the discount factor and the learning rate, one can update periodically the parameter of the epsilon-greedy\n # policy implemented by the agent. This controllers has a bit more capabilities, as it allows one to choose more\n # precisely when to update epsilon: after every X action, episode or epoch. 
This parameter can also be reset every\n # episode or epoch (or never, hence the resetEvery='none').\n agent.attach(bc.EpsilonController(\n initial_e=parameters.epsilon_start,\n e_decays=parameters.epsilon_decay,\n e_min=parameters.epsilon_min,\n evaluate_on='action',\n periodicity=1,\n reset_every='none'))\n\n agent.setNetwork(\"./backup_maze_lowdim/test_70460bbfb88bb08e2c4c9f4352805f62760b7d2d.epoch=48\")\n agent._learning_algo.freezeAllLayersExceptEncoder() \n agent._learning_algo.resetEncoder()\n #TODO compare transfer training time with for instance relearning \n \n agent.run(10, 500) #10 epochs with 500 steps, so 5000 random steps\n print(\"end gathering data\") \n\n\n\n # --- Bind controllers to the agent ---\n # Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and \n # learning rate as well as the training epoch number.\n agent.attach(bc.VerboseController(\n evaluate_on='epoch', \n periodicity=1))\n \n # Every epoch end, one has the possibility to modify the learning rate using a LearningRateController. Here we \n # wish to update the learning rate after every training epoch (periodicity=1), according to the parameters given.\n agent.attach(bc.LearningRateController(\n initial_learning_rate=parameters.learning_rate, \n learning_rate_decay=parameters.learning_rate_decay,\n periodicity=1))\n \n # Same for the discount factor.\n agent.attach(bc.DiscountFactorController(\n initial_discount_factor=parameters.discount, \n discount_factor_growth=parameters.discount_inc, \n discount_factor_max=parameters.discount_max,\n periodicity=1))\n \n # During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes.\n # Plus, we also want to display after each training episode (!= than after every training) the average bellman\n # residual and the average of the V values obtained during the last episode, hence the two last arguments.\n agent.attach(bc.TrainerController(\n evaluate_on='action', \n periodicity=parameters.update_frequency, \n show_episode_avg_V_value=True, \n show_avg_Bellman_residual=True))\n \n # We wish to discover, among all versions of our neural network (i.e., after every training epoch), which one \n # has the highest validation score.\n # To achieve this goal, one can use the FindBestController along with an InterleavedTestEpochControllers. It is \n # important that the validationID is the same than the id argument of the InterleavedTestEpochController.\n # The FindBestController will dump on disk the validation scores for each and every network, as well as the \n # structure of the neural network having the best validation score. These dumps can then used to plot the evolution \n # of the validation and test scores (see below) or simply recover the resulting neural network for your \n # application.\n agent.attach(bc.FindBestController(\n validationID=simple_maze_env.VALIDATION_MODE,\n testID=None,\n unique_fname=fname,\n hasReward=False))\n \n # All previous controllers control the agent during the epochs it goes through. However, we want to interleave a \n # \"validation epoch\" between each training epoch. For each validation epoch, we want also to display the sum of all \n # rewards obtained, hence the showScore=True. 
Finally, we want to call the summarizePerformance method of ALE_env \n # every [parameters.period_btw_summary_perfs] *validation* epochs.\n agent.attach(bc.InterleavedTestEpochController(\n id=simple_maze_env.VALIDATION_MODE, \n epoch_length=parameters.steps_per_test,\n periodicity=1,\n show_score=True,\n summarize_every=1))\n \n # --- Run the experiment ---\n try:\n os.mkdir(\"params\")\n except Exception:\n pass\n dump(vars(parameters), \"params/\" + fname + \".jldump\")\n agent.gathering_data=False\n\n agent.run(parameters.epochs, parameters.steps_per_epoch)\n \n\n # --- Show results ---\n basename = \"scores/\" + fname\n scores = load(basename + \"_scores.jldump\")\n print (scores)\n# plt.plot(range(1, len(scores['vs'])+1), scores['vs'], label=\"VS\", color='b')\n# plt.legend()\n# plt.xlabel(\"Number of epochs\")\n# plt.ylabel(\"Score\")\n# plt.savefig(basename + \"_scores.pdf\")\n# plt.show()\n"
] |
[
[
"numpy.random.RandomState"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
simo955/RecSys_2018
|
[
"63c0163976457fd23ebcf6b43b09ff8baa9daf68",
"63c0163976457fd23ebcf6b43b09ff8baa9daf68"
] |
[
"Approaches/KNN/ItemKNNSimilarityHybridRecommender.py",
"Approaches/KNN/ItemKNNSimilarityHybridRecommender4.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 15/04/18\n\n\n\"\"\"\n\nfrom Base.Recommender import Recommender\nfrom Base.Recommender_utils import check_matrix, similarityMatrixTopK\nfrom Base.SimilarityMatrixRecommender import SimilarityMatrixRecommender\nfrom sklearn.preprocessing import normalize\nimport numpy as np\n\n\n\n#Tunato miglior risultato ottenuto con alpha =0.8. Alpha moltiplica la similarity di CF\n#Tuning miglior risultato ottenuto con aplha=0.8 MAP': 0.08526621576298066\n\n\nclass ItemKNNSimilarityHybridRecommender(SimilarityMatrixRecommender, Recommender):\n \"\"\" ItemKNNSimilarityHybridRecommender\n Hybrid of two similarities S = S1*alpha + S2*(1-alpha)\n\n \"\"\"\n\n RECOMMENDER_NAME = \"ItemKNNSimilarityHybridRecommender\"\n \n\n def __init__(self, URM_train, Recommender_1, Recommender_2, norm=\"l2\", sparse_weights=True):\n super(ItemKNNSimilarityHybridRecommender, self).__init__()\n \n self.norm=norm\n \n #Get Similarity matrix (W_sparse) from Recommender1 and normalize its with norm2\n Similarity_1 = normalize(Recommender_1.W_sparse, norm=self.norm, axis=1, copy=True, return_norm=False)\n #Get Similarity matrix (W_sparse) from Recommender2 and normalize its value for its max\n Similarity_2 = normalize(Recommender_2.W_sparse, norm=self.norm, axis=1, copy=True, return_norm=False)\n \n if Similarity_1.shape != Similarity_2.shape:\n raise ValueError(\"ItemKNNSimilarityHybridRecommender: similarities have different size, S1 is {}, S2 is {}\".format(\n Similarity_1.shape, Similarity_2.shape\n ))\n\n # CSR is faster during evaluation\n self.Similarity_1 = check_matrix(Similarity_1.copy(), 'csr')\n self.Similarity_2 = check_matrix(Similarity_2.copy(), 'csr')\n\n self.URM_train = check_matrix(URM_train.copy(), 'csr')\n\n self.sparse_weights = sparse_weights\n\n\n def fit(self, topK=100, alpha = 0.8):\n\n self.topK = topK\n self.alpha = alpha\n\n W = self.Similarity_1*self.alpha + self.Similarity_2*(1-self.alpha)\n\n if self.sparse_weights:\n self.W_sparse = similarityMatrixTopK(W, forceSparseOutput=True, k=self.topK)\n else:\n self.W = similarityMatrixTopK(W, forceSparseOutput=False, k=self.topK)\n\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 15/04/18\n\n\n\"\"\"\n\nfrom Base.Recommender import Recommender\nfrom Base.Recommender_utils import check_matrix, similarityMatrixTopK\nfrom Base.SimilarityMatrixRecommender import SimilarityMatrixRecommender\nfrom sklearn.preprocessing import normalize\n\n#Tunato miglior risultato ottenuto con alpha =0.8. Alpha moltiplica la similarity di CF\n#Tuning miglior risultato ottenuto con aplha=0.8 MAP': 0.08526621576298066\n#Bisogna passargli recommender già fittati\n\nclass ItemKNNSimilarityHybridRecommender4(SimilarityMatrixRecommender, Recommender):\n \"\"\" ItemKNNSimilarityHybridRecommender3\n \"\"\"\n\n RECOMMENDER_NAME = \"ItemKNNSimilarityHybridRecommender3\"\n\n\n def __init__(self, URM_train, Recommender_1, Recommender_2, Recommender_3,Recommender_4,norm=\"l2\", sparse_weights=True):\n super(ItemKNNSimilarityHybridRecommender4, self).__init__()\n\n self.norm=norm\n \n Similarity_1 = normalize(Recommender_1.W_sparse, norm= self.norm, axis=1, copy=True, return_norm=False)\n Similarity_2 = normalize(Recommender_2.W_sparse, norm= self.norm, axis=1, copy=True, return_norm=False)\n Similarity_3 = normalize(Recommender_3.W_sparse, norm= self.norm, axis=1, copy=True, return_norm=False)\n Similarity_4 = normalize(Recommender_4.W_sparse, norm= self.norm, axis=1, copy=True, return_norm=False)\n\n \n if Similarity_1.shape != Similarity_2.shape or Similarity_1.shape != Similarity_3.shape or Similarity_1.shape != Similarity_4.shape or Similarity_2.shape != Similarity_3.shape or Similarity_2.shape != Similarity_4.shape or Similarity_3.shape != Similarity_4.shape :\n raise ValueError(\"ItemKNNSimilarityHybridRecommender3: similarities have different size, S1 is {}, S2 is {}, S3 is {}, S4 is {}\".format(\n Similarity_1.shape, Similarity_2.shape, Similarity_3.shape, Similarity_4.shape\n ))\n\n # CSR is faster during evaluation\n self.Similarity_1 = check_matrix(Similarity_1.copy(), 'csr')\n self.Similarity_2 = check_matrix(Similarity_2.copy(), 'csr')\n self.Similarity_3 = check_matrix(Similarity_3.copy(), 'csr')\n self.Similarity_4 = check_matrix(Similarity_4.copy(), 'csr')\n\n\n\n self.URM_train = check_matrix(URM_train.copy(), 'csr')\n self.sparse_weights = sparse_weights\n\n\n def fit(self, topK=100, alpha = 0.4, beta= 0.3, gamma=0.2, delta = 0.1):\n\n self.topK = topK\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.delta = delta\n\n W = self.Similarity_1*self.alpha + self.Similarity_2*(self.beta) + self.Similarity_3 * (self.gamma) + self.Similarity_4 * (self.delta)\n\n if self.sparse_weights:\n self.W_sparse = similarityMatrixTopK(W, forceSparseOutput=True, k=self.topK)\n else:\n self.W = similarityMatrixTopK(W, forceSparseOutput=False, k=self.topK)\n\n\n"
] |
[
[
"sklearn.preprocessing.normalize"
],
[
"sklearn.preprocessing.normalize"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
frankibem/cntk-issue
|
[
"6acd0a3cdd439074ba71186722d06b252d74fc5b"
] |
[
"resnet.py"
] |
[
"import numpy as np\nfrom cntk.ops import relu, plus\nfrom cntk.initializer import he_normal\nfrom cntk.layers import BatchNormalization, Convolution, MaxPooling, Dropout\n\n\ndef conv_bn(layer_input, filter_size, num_filters, strides, init=he_normal(), name=''):\n \"\"\"\n Returns a convolutional layer followed by a batch normalization layer\n \"\"\"\n r = Convolution(filter_size, num_filters, activation=None, init=init, pad=True, strides=strides, bias=True, name=name)(layer_input)\n r = BatchNormalization(map_rank=1, normalization_time_constant=4096, name='{}_bn'.format(name))(r)\n return r\n\n\ndef conv_bn_relu(layer_input, filter_size, num_filters, strides, init=he_normal(), name=''):\n \"\"\"\n Returns a convolutional layer followed by a batch normalization layer and then ReLU activation\n \"\"\"\n r = conv_bn(layer_input, filter_size, num_filters, strides, init, name=name)\n return relu(r, name='{}_relu'.format(name))\n\n\ndef resnet_basic(layer_input, filter_size, num_filters, strides, prefix):\n \"\"\"\n Returns a resnet basic building block\n \"\"\"\n c1 = conv_bn_relu(layer_input, filter_size, num_filters, strides, name='{}_1'.format(prefix))\n c2 = conv_bn(c1, filter_size, num_filters, strides, name='{}_2'.format(prefix))\n p = plus(c2, layer_input, name='{}_res'.format(prefix))\n return relu(p, name='{}_relu'.format(prefix))\n\n\ndef resnet_basic_inc(layer_input, filter_size, num_filters, strides, prefix):\n \"\"\"\n Returns a ResNet basic bulding block with projection\n Use when there is a change in layer_input/output channels\n \"\"\"\n ones = np.ones_like(strides)\n c1 = conv_bn_relu(layer_input, filter_size, num_filters, strides, name='{}_1'.format(prefix))\n c2 = conv_bn(c1, filter_size, num_filters, ones, name='{}_2'.format(prefix))\n s = conv_bn(layer_input, ones, num_filters, strides, name='{}_3'.format(prefix))\n p = plus(c2, s, name='{}_res'.format(prefix))\n return relu(p, name='{}_relu'.format(prefix))\n\n\ndef resnet_basic_stack(layer_input, num_stack_layers, filter_size, num_filters, strides, prefix):\n \"\"\"\n Returns a stack of ResNet basic building blocks\n \"\"\"\n assert (num_stack_layers >= 0)\n l_in = layer_input\n for i in range(num_stack_layers):\n l_in = resnet_basic(l_in, filter_size, num_filters, strides, prefix='{}_{}'.format(prefix, i))\n return l_in\n\n\ndef resnet_model(layer_input):\n layer1 = resnet_basic_stack(layer_input, 1, (3, 3), 6, (1, 1), prefix='conv1')\n layer1 = MaxPooling((3, 3), (2, 2), name='pool1')(layer1)\n layer1 = Dropout(0.3, name='drop1')(layer1)\n\n layer2 = resnet_basic_inc(layer1, (3, 3), 8, (2, 2), prefix='conv21')\n layer2 = resnet_basic_stack(layer2, 1, (3, 3), 8, (1, 1), prefix='conv22')\n layer2 = Dropout(0.3, name='drop2')(layer2)\n\n layer3 = resnet_basic_inc(layer2, (3, 3), 10, (2, 2), prefix='conv31')\n layer3 = resnet_basic_stack(layer3, 1, (3, 3), 10, (1, 1), prefix='conv32')\n layer3 = Dropout(0.3, name='drop3')(layer3)\n\n layer4 = resnet_basic_inc(layer3, (3, 3), 10, (2, 2), prefix='conv41')\n layer4 = resnet_basic_stack(layer4, 1, (3, 3), 10, (1, 1), prefix='conv42')\n layer4 = Dropout(0.3, name='drop4')(layer4)\n\n return layer4\n"
] |
[
[
"numpy.ones_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JunW15/AdvMT
|
[
"4ec727199a810cd0b153c2d465b9660641e0f3f1"
] |
[
"defences/dp/classification/bert-keras.py"
] |
[
"from absl import app\nfrom absl import flags\nimport os\nimport re\nimport numpy as np\nimport string\nimport tensorflow as tf\nimport tensorflow_text\nimport tensorflow_hub as hub\nfrom tensorflow import keras\nfrom pprint import pprint\n\nfrom read_dbpedia import load_dbpedia\nfrom read_imdb import load_imdb\nfrom read_trec_50 import load_trec_50\nfrom read_trec_6 import load_trec_6\nimport hyperparameters as hp\nfrom defences.dp.dp_optimizer_keras import DPKerasAdamOptimizer\nimport defences.dp.classification.config as cfg\nfrom defences.dp.classification import utils\n\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\ntf.config.experimental.set_memory_growth(physical_devices[1], True)\n\ntf.random.set_seed(2021)\n\nflags.DEFINE_boolean('train', True, 'train')\nflags.DEFINE_boolean('test', True, 'test')\nflags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, train with vanilla SGD.')\nflags.DEFINE_string('dataset', 'imdb', 'Choose a dataset.')\nflags.DEFINE_string('num_poisons', '100', 'number of poisons')\nflags.DEFINE_string('trigger', 'differential privacy', 'trigger phrase')\n\nflags.DEFINE_list('noise_multiplier', [0], 'Noise')\nflags.DEFINE_list('l2_norm_clip', [1e-6], 'Clipping norm')\nflags.DEFINE_integer('epochs', 9999, 'Number of epochs')\nflags.DEFINE_integer('microbatches', 1, 'Number of microbatches (must evenly divide batch_size)')\nflags.DEFINE_string('model_dir', cfg.bert_model_dir, 'Model directory')\n\n# [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]\n# [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n\nFLAGS = flags.FLAGS\n\nbert_model_name = 'bert_en_uncased_L-12_H-768_A-12'\ntf_hub_handle_encoder = cfg.map_name_to_handle[bert_model_name]\ntf_hub_handle_preprocess = cfg.map_model_to_preprocess[bert_model_name]\nprint(f'BERT model selected: {tf_hub_handle_encoder}')\nprint(f'Preprocess model auto-selected: {tf_hub_handle_preprocess}')\n\n\nclass BERT(keras.Model):\n\n def __init__(self, n_class):\n super(BERT, self).__init__()\n\n text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')\n preprocessing_layer = hub.KerasLayer(tf_hub_handle_preprocess, name='preprocessing')\n encoder_inputs = preprocessing_layer(text_input)\n encoder = hub.KerasLayer(tf_hub_handle_encoder, trainable=True, name='BERT_encoder')\n outputs = encoder(encoder_inputs)\n net = outputs['pooled_output']\n net = tf.keras.layers.Dropout(0.1)(net)\n net = tf.keras.layers.Dense(n_class, activation=None, name='classifier')(net)\n self.model = tf.keras.Model(text_input, net)\n\n def call(self, inputs, training=None, mask=None):\n return self.model(inputs, training=training)\n\n def train_step(self, data):\n text, label = data\n with tf.GradientTape() as tape:\n y_pred = self(text, training=True)\n loss = self.compiled_loss(label, y_pred, regularization_losses=self.losses)\n\n grads_and_vars = self.optimizer._compute_gradients(loss, self.trainable_variables, tape=tape)\n self.optimizer.apply_gradients(grads_and_vars)\n self.compiled_metrics.update_state(label, y_pred)\n return {m.name: m.result() for m in self.metrics}\n\n\ndef run(noise_multiplier=None, l2_norm_clip=None):\n\n if FLAGS.dataset == 'imdb':\n hyper_params = hp.HP_IMDB_BERT\n elif FLAGS.dataset == 'dbpedia':\n hyper_params = hp.HP_DBPedia_BERT\n elif FLAGS.dataset == 'trec-50':\n hyper_params = hp.HP_Trec50_BERT\n elif FLAGS.dataset == 'trec-6':\n hyper_params = hp.HP_Trec6_BERT\n else:\n raise NotImplemented\n\n if FLAGS.dpsgd:\n 
learning_rate = hyper_params.learning_rate_dpsgd\n else:\n learning_rate = hyper_params.learning_rate\n\n if FLAGS.dataset == 'imdb':\n train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = \\\n load_imdb(batch_size=hyper_params.batch_size,\n dataset='-'.join(['aclImdb', FLAGS.num_poisons, FLAGS.trigger]))\n elif FLAGS.dataset == 'dbpedia':\n train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = load_dbpedia(\n batch_size=hyper_params.batch_size,\n dataset='-'.join(['dbpedia', str(FLAGS.num_poisons), FLAGS.trigger.replace(' ', '-')])\n )\n elif FLAGS.dataset == 'trec-50':\n train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = load_trec_50(\n batch_size=hyper_params.batch_size,\n dataset='-'.join(['trec', str(FLAGS.num_poisons), FLAGS.trigger.replace(' ', '-')])\n )\n elif FLAGS.dataset == 'trec-6':\n train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = load_trec_6(\n batch_size=hyper_params.batch_size,\n dataset='-'.join(['trec', str(FLAGS.num_poisons), FLAGS.trigger.replace(' ', '-')])\n )\n else:\n raise NotImplemented\n\n model = BERT(n_class)\n\n if FLAGS.dpsgd:\n optimizer = DPKerasAdamOptimizer(\n l2_norm_clip=l2_norm_clip,\n noise_multiplier=noise_multiplier,\n num_microbatches=FLAGS.microbatches,\n learning_rate=learning_rate)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction=tf.losses.Reduction.NONE)\n\n print('noise_multiplier', noise_multiplier)\n print('l2_norm_clip', l2_norm_clip)\n else:\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n\n if FLAGS.dpsgd:\n ckpt_dir = os.path.join(FLAGS.model_dir,\n FLAGS.dataset,\n f'dp-{FLAGS.num_poisons}-'\n f'{FLAGS.trigger}-'\n f'n-{noise_multiplier}-'\n f'c-{l2_norm_clip}-'\n f'm-{FLAGS.microbatches}')\n else:\n ckpt_dir = os.path.join(FLAGS.model_dir,\n FLAGS.dataset,\n f'{FLAGS.num_poisons}-'\n f'{FLAGS.trigger}')\n\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n\n callbacks = [\n keras.callbacks.EarlyStopping(\n monitor=\"val_accuracy\",\n min_delta=1e-2,\n patience=hyper_params.patience,\n verbose=1,\n ),\n keras.callbacks.ModelCheckpoint(\n filepath=os.path.join(ckpt_dir, 'ckpt-epoch-{epoch}'),\n save_weights_only=True,\n save_best_only=True,\n monitor=\"val_accuracy\",\n verbose=1,\n )\n ]\n if FLAGS.train:\n model.fit(train_ds,\n epochs=FLAGS.epochs,\n validation_data=val_ds,\n batch_size=hyper_params.batch_size,\n callbacks=callbacks)\n\n if FLAGS.test:\n latest = tf.train.latest_checkpoint(ckpt_dir)\n model.load_weights(latest)\n\n acc_g = model.evaluate(test_ds)[1]\n print('Accuracy (general):', acc_g)\n\n asr_te = utils.asr(model, test_te_ds, hyper_params.tgt_class)\n print('Error & ASR (Trigger-embedded):', asr_te)\n\n asr_tf = utils.asr(model, test_tf_ds, hyper_params.tgt_class)\n print('Error & ASR (Trigger-free):', asr_tf)\n\n return acc_g, asr_te, asr_tf\n\n\ndef main(argv):\n FLAGS.trigger = FLAGS.trigger.replace(' ', '-')\n if FLAGS.dpsgd:\n log_path = os.path.join(cfg.log_dir,\n f'{FLAGS.dataset}-'\n f'dp-{FLAGS.num_poisons}-'\n f'{FLAGS.trigger}.log')\n else:\n log_path = os.path.join(cfg.log_dir,\n f'{FLAGS.dataset}-'\n f'{FLAGS.num_poisons}-'\n f'{FLAGS.trigger}.log')\n\n with open(log_path, 'w') as f:\n if FLAGS.dpsgd:\n for n in FLAGS.noise_multiplier:\n for c in FLAGS.l2_norm_clip:\n acc_g, asr_te, asr_tf = run(n, c)\n f.write(f'{n}, {c}, {acc_g}, {asr_te}, {asr_tf}\\n')\n 
f.flush()\n else:\n acc_g, asr_te, asr_tf = run()\n f.write(f'{acc_g}, {asr_te}, {asr_tf}\\n')\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] |
[
[
"tensorflow.train.latest_checkpoint",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.GradientTape",
"tensorflow.keras.optimizers.Adam",
"tensorflow.config.list_physical_devices",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.random.set_seed",
"tensorflow.keras.layers.Input"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
aringlis/afino_release_version
|
[
"95d7f65827030be53d6fcaf311a8c996c508e3cf"
] |
[
"afino/afino_model_fitting.py"
] |
[
"#\n# Fit an arbitrary function with a model using maximum likelihood,\n# assuming exponential distributions.\n#\nimport numpy as np\nimport scipy.optimize as op\nfrom scipy.stats import gamma\nfrom scipy.special import gammaincc, gammainccinv\n\n\n#\n# Log likelihood function. In this case we want the product of exponential\n# distributions.\n#\ndef lnlike(variables, x, y, model_function):\n \"\"\"\n Log likelihood of the data given a model. Assumes that the data is\n exponentially distributed. Can be used to fit Fourier power spectra.\n :param variables: array like, variables used by model_function\n :param x: the independent variable (most often normalized frequency)\n :param y: the dependent variable (observed power spectrum)\n :param model_function: the model that we are using to fit the power\n spectrum\n :return: the log likelihood of the data given the model.\n \"\"\"\n model = model_function(variables, x)\n return -np.sum(np.log(model)) - np.sum(y / model)\n\n\n#\n# Fit the input model to the data.\n#\ndef go_gauss(freqs, data, model_function, initial_guess, method, overwrite_gauss_bounds = None):\n nll = lambda *args: -lnlike(*args)\n args = (freqs, data, model_function)\n if overwrite_gauss_bounds:\n return op.minimize(nll, initial_guess, args=args, method=method, bounds = overwrite_gauss_bounds)\n else:\n return op.minimize(nll, initial_guess, args=args, method=method, bounds = [(-10.0,10.0),(-1.0,6.0),(-20.0,10.0),(-16.0,5.0),(-5.7,-1.5),(0.05,0.25)])\n\ndef go_plaw(freqs, data, model_function, initial_guess, method):\n nll = lambda *args: -lnlike(*args)\n args = (freqs, data, model_function)\n return op.minimize(nll, initial_guess, args=args, method=method, bounds = [(-10.0,10.0),(-1.0,6.0),(-20.0,10.0)])\n\n\ndef go_bpow(freqs, data, model_function, initial_guess, method):\n nll = lambda *args: -lnlike(*args)\n args = (freqs, data, model_function)\n return op.minimize(nll, initial_guess, args=args, method=method, bounds = [(None,None),(1.0,9.0),(0.0033,0.25),(1.0, 9.0),(None,None)])\n\ndef go_gauss_plus_extra_bump(freqs, data, model_function, initial_guess, method, overwrite_extra_gauss_bounds=None):\n nll = lambda *args: -lnlike(*args)\n args = (freqs, data, model_function)\n if overwrite_extra_gauss_bounds:\n return op.minimize(nll, initial_guess, args=args, method=method, bounds = overwrite_extra_gauss_bounds)\n else:\n return op.minimize(nll, initial_guess, args=args, method=method, bounds = [(-10.0,10.0),(-1.0,6.0),(-20.0,10.0),(-16.0,5.0),(-5.7,-1.5),(0.05,0.25), (-16.0,5.0),(-3.1,-2.9),(0.05,0.12)])\n#bounds = [(-10.0,10.0),(-1.0,9.0),(0.01,None),(-1.0, 9.0),(-20.0,10.0)])\n\n#\n# The code below refers to equations in Nita et al (2014), ApJ, 789, 152\n#\n#\n# Sample to Model Ratio (SMR) estimator\n#\ndef rhoj(Sj, shatj):\n \"\"\"\n Sample to Model Ratio (SMR) estimator (Eq. 5)\n\n Parameters\n ----------\n\n Sj\n random variables (i.e. data)\n shatj\n best estimate of the model. Should be same length as Sj\n\n Returns\n -------\n ndarray\n The Sample-to-Model ratio\n \"\"\"\n return Sj / shatj\n\n\n#\n# Goodness-of-fit estimator\n#\ndef rchi2(m, nu, rhoj):\n \"\"\"\n Goodness-of-fit estimator (Eq. 16)\n\n Parameters\n ----------\n\n m\n number of spectra considered\n nu\n degrees of freedom\n rhoj\n sample to model ratio estimator\n\n Returns\n -------\n float\n A chi-square like goodness of fit estimator\n \"\"\"\n return (m / (1.0 * nu)) * np.sum((1.0 - rhoj) ** 2)\n\n\n#\n# PDF of the goodness-of-fit estimator (Eq. 
17)\n#\ndef rchi2distrib(m, nu):\n \"\"\"\n The distribution of rchi2 may be approximated by the analytical expression\n below. Comparing Eq. (2) with the implementation in scipy stats we find\n the following equivalencies:\n k (Nita parameter) = a (scipy stats parameter)\n theta (Nita parameter) = 1 / lambda (scipy stats value)\n :param m: number of spectra considered\n :param nu: degrees of freedom\n :return: a frozen scipy stats function that represents the distribution\n of the data\n \"\"\"\n # Calculate the gamma function parameter values as expressed in Eq. 17\n k = (nu / 2.0) / (1.0 + 3.0 / m)\n scale = 1.0 / k\n #\n return gamma(k, scale=scale, loc=0.0)\n\n\n#\n# Probability of getting this value of reduced chi-squared or larger (Eq. 18)\n#\ndef prob_this_rchi2_or_larger(rchi2, m, nu):\n \"\"\"\n :param rchi2: reduced chi-squared value\n :param m: number of spectra considered\n :param nu: degrees of freedom\n :return:\n \"\"\"\n a = (nu / 2.0) * np.float64(m) / (3.0 + np.float64(m))\n return gammaincc(a, a * rchi2)\n\n\n#\n# What value of rchi2 gives rise to a given probability level?\n#\ndef rchi2_given_prob(p, m, nu):\n a = (nu / 2.0) * np.float64(m) / (3.0 + np.float64(m))\n return gammainccinv(a, p) / a\n\n\ndef AIC(k, variables, freqs, data, model_function):\n return 2 * k - 2 * lnlike(variables, freqs, data, model_function)\n\n\ndef BIC(k, variables, freqs, data, model_function, n):\n return -2 * lnlike(variables, freqs, data, model_function) + k * np.log(n)\n"
] |
[
[
"scipy.special.gammaincc",
"scipy.stats.gamma",
"numpy.log",
"scipy.optimize.minimize",
"numpy.float64",
"scipy.special.gammainccinv",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
grandintegrator/cs285-deeprlcourse-fa19-hw
|
[
"4abd57eb9da8978b576300b69865e52862e4eaab"
] |
[
"homework_fall2019/hw3/cs285/infrastructure/dqn_utils.py"
] |
[
"\"\"\"This file includes a collection of utility functions that are useful for\nimplementing DQN.\"\"\"\nimport random\nfrom collections import namedtuple\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\n\nfrom cs285.infrastructure.atari_wrappers import wrap_deepmind\n\nOptimizerSpec = namedtuple(\"OptimizerSpec\", [\"constructor\", \"kwargs\", \"lr_schedule\"])\n\ndef get_env_kwargs(env_name):\n if env_name == 'PongNoFrameskip-v4':\n kwargs = {\n 'learning_starts': 50000,\n 'target_update_freq': 10000,\n 'replay_buffer_size': int(1e6),\n 'num_timesteps': int(2e8),\n 'q_func': atari_model,\n 'learning_freq': 4,\n 'grad_norm_clipping': 10,\n 'input_shape': (84, 84, 4),\n 'env_wrappers': wrap_deepmind,\n 'frame_history_len': 4,\n 'gamma': 0.99,\n }\n kwargs['optimizer_spec'] = atari_optimizer(kwargs['num_timesteps'])\n kwargs['exploration_schedule'] = atari_exploration_schedule(kwargs['num_timesteps'])\n\n elif env_name == 'LunarLander-v2':\n def lunar_empty_wrapper(env):\n return env\n kwargs = {\n 'optimizer_spec': lander_optimizer(),\n 'q_func': lander_model,\n 'replay_buffer_size': 50000,\n 'batch_size': 32,\n 'gamma': 1.00,\n 'learning_starts': 1000,\n 'learning_freq': 1,\n 'frame_history_len': 1,\n 'target_update_freq': 3000,\n 'grad_norm_clipping': 10,\n 'lander': True,\n 'num_timesteps': 500000,\n 'env_wrappers': lunar_empty_wrapper\n }\n kwargs['exploration_schedule'] = lander_exploration_schedule(kwargs['num_timesteps'])\n\n else:\n raise NotImplementedError\n\n return kwargs\n\n\ndef lander_model(obs, num_actions, scope, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = obs\n with tf.variable_scope(\"action_value\"):\n out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)\n\n return out\n\n\ndef atari_model(img_input, num_actions, scope, reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n out = tf.cast(img_input, tf.float32) / 255.0\n with tf.variable_scope(\"convnet\"):\n # original architecture\n out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)\n out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)\n out = layers.flatten(out)\n with tf.variable_scope(\"action_value\"):\n out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)\n\n return out\n\ndef atari_exploration_schedule(num_timesteps):\n return PiecewiseSchedule(\n [\n (0, 1.0),\n (1e6, 0.1),\n (num_timesteps / 8, 0.01),\n ], outside_value=0.01\n )\n\n\ndef atari_ram_exploration_schedule(num_timesteps):\n return PiecewiseSchedule(\n [\n (0, 0.2),\n (1e6, 0.1),\n (num_timesteps / 8, 0.01),\n ], outside_value=0.01\n )\n\n\ndef atari_optimizer(num_timesteps):\n num_iterations = num_timesteps/4\n lr_multiplier = 1.0\n lr_schedule = PiecewiseSchedule([\n (0, 1e-4 * lr_multiplier),\n (num_iterations / 10, 1e-4 * lr_multiplier),\n (num_iterations / 2, 5e-5 * lr_multiplier),\n ],\n outside_value=5e-5 * lr_multiplier)\n\n return OptimizerSpec(\n constructor=tf.train.AdamOptimizer,\n kwargs=dict(epsilon=1e-4),\n lr_schedule=lr_schedule\n )\n\n\ndef lander_optimizer():\n return 
OptimizerSpec(\n constructor=tf.train.AdamOptimizer,\n lr_schedule=ConstantSchedule(1e-3),\n kwargs={}\n )\n\n\ndef lander_exploration_schedule(num_timesteps):\n return PiecewiseSchedule(\n [\n (0, 1),\n (num_timesteps * 0.1, 0.02),\n ], outside_value=0.02\n )\n\n\ndef huber_loss(x, delta=1.0):\n # https://en.wikipedia.org/wiki/Huber_loss\n return tf.where(\n tf.abs(x) < delta,\n tf.square(x) * 0.5,\n delta * (tf.abs(x) - 0.5 * delta)\n )\n\n\ndef sample_n_unique(sampling_f, n):\n \"\"\"Helper function. Given a function `sampling_f` that returns\n comparable objects, sample n such unique objects.\n \"\"\"\n res = []\n while len(res) < n:\n candidate = sampling_f()\n if candidate not in res:\n res.append(candidate)\n return res\n\n\nclass Schedule(object):\n def value(self, t):\n \"\"\"Value of the schedule at time t\"\"\"\n raise NotImplementedError()\n\n\nclass ConstantSchedule(object):\n def __init__(self, value):\n \"\"\"Value remains constant over time.\n Parameters\n ----------\n value: float\n Constant value of the schedule\n \"\"\"\n self._v = value\n\n def value(self, t):\n \"\"\"See Schedule.value\"\"\"\n return self._v\n\n\ndef linear_interpolation(l, r, alpha):\n return l + alpha * (r - l)\n\n\nclass PiecewiseSchedule(object):\n def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):\n \"\"\"Piecewise schedule.\n endpoints: [(int, int)]\n list of pairs `(time, value)` meanining that schedule should output\n `value` when `t==time`. All the values for time must be sorted in\n an increasing order. When t is between two times, e.g. `(time_a, value_a)`\n and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs\n `interpolation(value_a, value_b, alpha)` where alpha is a fraction of\n time passed between `time_a` and `time_b` for time `t`.\n interpolation: lambda float, float, float: float\n a function that takes value to the left and to the right of t according\n to the `endpoints`. Alpha is the fraction of distance from left endpoint to\n right endpoint that t has covered. See linear_interpolation for example.\n outside_value: float\n if the value is requested outside of all the intervals sepecified in\n `endpoints` this value is returned. If None then AssertionError is\n raised when outside value is requested.\n \"\"\"\n idxes = [e[0] for e in endpoints]\n assert idxes == sorted(idxes)\n self._interpolation = interpolation\n self._outside_value = outside_value\n self._endpoints = endpoints\n\n def value(self, t):\n \"\"\"See Schedule.value\"\"\"\n for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):\n if l_t <= t and t < r_t:\n alpha = float(t - l_t) / (r_t - l_t)\n return self._interpolation(l, r, alpha)\n\n # t does not belong to any of the pieces, so doom.\n assert self._outside_value is not None\n return self._outside_value\n\nclass LinearSchedule(object):\n def __init__(self, schedule_timesteps, final_p, initial_p=1.0):\n \"\"\"Linear interpolation between initial_p and final_p over\n schedule_timesteps. 
After this many timesteps pass final_p is\n returned.\n Parameters\n ----------\n schedule_timesteps: int\n Number of timesteps for which to linearly anneal initial_p\n to final_p\n initial_p: float\n initial output value\n final_p: float\n final output value\n \"\"\"\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p\n\n def value(self, t):\n \"\"\"See Schedule.value\"\"\"\n fraction = min(float(t) / self.schedule_timesteps, 1.0)\n return self.initial_p + fraction * (self.final_p - self.initial_p)\n\ndef compute_exponential_averages(variables, decay):\n \"\"\"Given a list of tensorflow scalar variables\n create ops corresponding to their exponential\n averages\n Parameters\n ----------\n variables: [tf.Tensor]\n List of scalar tensors.\n Returns\n -------\n averages: [tf.Tensor]\n List of scalar tensors corresponding to averages\n of al the `variables` (in order)\n apply_op: tf.runnable\n Op to be run to update the averages with current value\n of variables.\n \"\"\"\n averager = tf.train.ExponentialMovingAverage(decay=decay)\n apply_op = averager.apply(variables)\n return [averager.average(v) for v in variables], apply_op\n\ndef minimize_and_clip(optimizer, objective, var_list, clip_val=10):\n \"\"\"Minimized `objective` using `optimizer` w.r.t. variables in\n `var_list` while ensure the norm of the gradients for each\n variable is clipped to `clip_val`\n \"\"\"\n gradients = optimizer.compute_gradients(objective, var_list=var_list)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_norm(grad, clip_val), var)\n return optimizer.apply_gradients(gradients)\n\ndef initialize_interdependent_variables(session, vars_list, feed_dict):\n \"\"\"Initialize a list of variables one at a time, which is useful if\n initialization of some variables depends on initialization of the others.\n \"\"\"\n vars_left = vars_list\n while len(vars_left) > 0:\n new_vars_left = []\n for v in vars_left:\n try:\n session.run(tf.variables_initializer([v]), feed_dict)\n except tf.errors.FailedPreconditionError:\n new_vars_left.append(v)\n if len(new_vars_left) >= len(vars_left):\n # This can happen if the variables all depend on each other, or more likely if there's\n # another variable outside of the list, that still needs to be initialized. This could be\n # detected here, but life's finite.\n raise Exception(\"Cycle in variable dependencies, or extenrnal precondition unsatisfied.\")\n else:\n vars_left = new_vars_left\n\ndef get_wrapper_by_name(env, classname):\n currentenv = env\n while True:\n if classname in currentenv.__class__.__name__:\n return currentenv\n elif isinstance(env, gym.Wrapper):\n currentenv = currentenv.env\n else:\n raise ValueError(\"Couldn't find wrapper named %s\"%classname)\n\n\nclass MemoryOptimizedReplayBuffer(object):\n def __init__(self, size, frame_history_len, lander=False):\n \"\"\"This is a memory efficient implementation of the replay buffer.\n\n The sepecific memory optimizations use here are:\n - only store each frame once rather than k times\n even if every observation normally consists of k last frames\n - store frames as np.uint8 (actually it is most time-performance\n to cast them back to float32 on GPU to minimize memory transfer\n time)\n - store frame_t and frame_(t+1) in the same buffer.\n\n For the tipical use case in Atari Deep RL buffer with 1M frames the total\n memory footprint of this buffer is 10^6 * 84 * 84 bytes ~= 7 gigabytes\n\n Warning! 
Assumes that returning frame of zeros at the beginning\n of the episode, when there is less frames than `frame_history_len`,\n is acceptable.\n\n Parameters\n ----------\n size: int\n Max number of transitions to store in the buffer. When the buffer\n overflows the old memories are dropped.\n frame_history_len: int\n Number of memories to be retried for each observation.\n \"\"\"\n self.lander = lander\n\n self.size = size\n self.frame_history_len = frame_history_len\n\n self.next_idx = 0\n self.num_in_buffer = 0\n\n self.obs = None\n self.action = None\n self.reward = None\n self.done = None\n\n def can_sample(self, batch_size):\n \"\"\"Returns true if `batch_size` different transitions can be sampled from the buffer.\"\"\"\n return batch_size + 1 <= self.num_in_buffer\n\n def _encode_sample(self, idxes):\n obs_batch = np.concatenate([self._encode_observation(idx)[None] for idx in idxes], 0)\n act_batch = self.action[idxes]\n rew_batch = self.reward[idxes]\n next_obs_batch = np.concatenate([self._encode_observation(idx + 1)[None] for idx in idxes], 0)\n done_mask = np.array([1.0 if self.done[idx] else 0.0 for idx in idxes], dtype=np.float32)\n\n return obs_batch, act_batch, rew_batch, next_obs_batch, done_mask\n\n\n def sample(self, batch_size):\n \"\"\"Sample `batch_size` different transitions.\n\n i-th sample transition is the following:\n\n when observing `obs_batch[i]`, action `act_batch[i]` was taken,\n after which reward `rew_batch[i]` was received and subsequent\n observation next_obs_batch[i] was observed, unless the epsiode\n was done which is represented by `done_mask[i]` which is equal\n to 1 if episode has ended as a result of that action.\n\n Parameters\n ----------\n batch_size: int\n How many transitions to sample.\n\n Returns\n -------\n obs_batch: np.array\n Array of shape\n (batch_size, img_h, img_w, img_c * frame_history_len)\n and dtype np.uint8\n act_batch: np.array\n Array of shape (batch_size,) and dtype np.int32\n rew_batch: np.array\n Array of shape (batch_size,) and dtype np.float32\n next_obs_batch: np.array\n Array of shape\n (batch_size, img_h, img_w, img_c * frame_history_len)\n and dtype np.uint8\n done_mask: np.array\n Array of shape (batch_size,) and dtype np.float32\n \"\"\"\n assert self.can_sample(batch_size)\n idxes = sample_n_unique(lambda: random.randint(0, self.num_in_buffer - 2), batch_size)\n return self._encode_sample(idxes)\n\n def encode_recent_observation(self):\n \"\"\"Return the most recent `frame_history_len` frames.\n\n Returns\n -------\n observation: np.array\n Array of shape (img_h, img_w, img_c * frame_history_len)\n and dtype np.uint8, where observation[:, :, i*img_c:(i+1)*img_c]\n encodes frame at time `t - frame_history_len + i`\n \"\"\"\n assert self.num_in_buffer > 0\n return self._encode_observation((self.next_idx - 1) % self.size)\n\n def _encode_observation(self, idx):\n end_idx = idx + 1 # make noninclusive\n start_idx = end_idx - self.frame_history_len\n # this checks if we are using low-dimensional observations, such as RAM\n # state, in which case we just directly return the latest RAM.\n if len(self.obs.shape) == 2:\n return self.obs[end_idx-1]\n # if there weren't enough frames ever in the buffer for context\n if start_idx < 0 and self.num_in_buffer != self.size:\n start_idx = 0\n for idx in range(start_idx, end_idx - 1):\n if self.done[idx % self.size]:\n start_idx = idx + 1\n missing_context = self.frame_history_len - (end_idx - start_idx)\n # if zero padding is needed for missing context\n # or we are on the boundry of 
the buffer\n if start_idx < 0 or missing_context > 0:\n frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]\n for idx in range(start_idx, end_idx):\n frames.append(self.obs[idx % self.size])\n return np.concatenate(frames, 2)\n else:\n # this optimization has potential to saves about 30% compute time \\o/\n img_h, img_w = self.obs.shape[1], self.obs.shape[2]\n return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)\n\n def store_frame(self, frame):\n \"\"\"Store a single frame in the buffer at the next available index, overwriting\n old frames if necessary.\n\n Parameters\n ----------\n frame: np.array\n Array of shape (img_h, img_w, img_c) and dtype np.uint8\n the frame to be stored\n\n Returns\n -------\n idx: int\n Index at which the frame is stored. To be used for `store_effect` later.\n \"\"\"\n if self.obs is None:\n self.obs = np.empty([self.size] + list(frame.shape), dtype=np.float32 if self.lander else np.uint8)\n self.action = np.empty([self.size], dtype=np.int32)\n self.reward = np.empty([self.size], dtype=np.float32)\n self.done = np.empty([self.size], dtype=np.bool)\n self.obs[self.next_idx] = frame\n\n ret = self.next_idx\n self.next_idx = (self.next_idx + 1) % self.size\n self.num_in_buffer = min(self.size, self.num_in_buffer + 1)\n\n return ret\n\n def store_effect(self, idx, action, reward, done):\n \"\"\"Store effects of action taken after obeserving frame stored\n at index idx. The reason `store_frame` and `store_effect` is broken\n up into two functions is so that once can call `encode_recent_observation`\n in between.\n\n Paramters\n ---------\n idx: int\n Index in buffer of recently observed frame (returned by `store_frame`).\n action: int\n Action that was performed upon observing this frame.\n reward: float\n Reward that was received when the actions was performed.\n done: bool\n True if episode was finished after performing that action.\n \"\"\"\n self.action[idx] = action\n self.reward[idx] = reward\n self.done[idx] = done\n\n"
] |
[
[
"tensorflow.contrib.layers.convolution2d",
"tensorflow.cast",
"tensorflow.variables_initializer",
"numpy.empty",
"tensorflow.contrib.layers.fully_connected",
"numpy.concatenate",
"tensorflow.clip_by_norm",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.contrib.layers.flatten",
"numpy.zeros_like",
"tensorflow.square",
"tensorflow.variable_scope",
"numpy.array",
"tensorflow.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
zsweet/bert-multitask-learning
|
[
"7e6bd301904285614549871cff13a67e9c794532"
] |
[
"bert_multitask_learning/transformer_decoder.py"
] |
[
"import tensorflow as tf\nimport math\nfrom tensor2tensor.utils import beam_search\n\nfrom .bert import modeling\n\n\nclass TransformerDecoder(object):\n def __init__(self, params):\n self.params = params\n\n def get_decoder_self_attention_mask(self, length):\n \"\"\"Calculate bias for decoder that maintains model's autoregressive property.\n Creates a tensor that masks out locations that correspond to illegal\n connections, so prediction at position i cannot draw information from future\n positions.\n Args:\n length: int length of sequences in batch.\n Returns:\n float tensor of shape [1, 1, length, length]\n \"\"\"\n with tf.name_scope(\"decoder_self_attention_mask\"):\n valid_locs = tf.matrix_band_part(tf.ones([length, length]), -1, 0)\n valid_locs = tf.reshape(valid_locs, [1, length, length])\n return valid_locs\n\n def decode(\n self,\n decoder_inputs,\n encoder_output,\n input_mask,\n decoder_self_attention_mask,\n cache,\n num_classes,\n do_return_all_layers,\n enc_dec_attention_mask=None,\n add_self_attention=True,\n add_enc_dec_attention=True):\n input_tensor = decoder_inputs\n num_hidden_layers = self.params.decoder_num_hidden_layers\n hidden_size = self.params.bert_config.hidden_size\n num_attention_heads = self.params.bert_config.num_attention_heads\n initializer_range = self.params.bert_config.initializer_range\n attention_probs_dropout_prob = self.params.bert_config.attention_probs_dropout_prob\n\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size / num_attention_heads)\n encode_shape = modeling.get_shape_list(\n encoder_output, expected_rank=3)\n batch_size = encode_shape[0]\n encode_seq_length = encode_shape[1]\n input_width = encode_shape[2]\n\n input_shape = modeling.get_shape_list(input_tensor, expected_rank=3)\n decode_seq_length = input_shape[1]\n\n # create encoder-decoder attention mask\n attention_mask_shape = modeling.get_shape_list(\n input_mask, expected_rank=2)[1]\n\n # batch_size*beam_size\n if enc_dec_attention_mask is None:\n input_batch_size = modeling.get_shape_list(\n decoder_inputs, expected_rank=3)[0]\n input_mask = tf.broadcast_to(\n input_mask, [input_batch_size, attention_mask_shape])\n attention_mask = modeling.create_attention_mask_from_input_mask(\n decoder_inputs, input_mask\n )\n else:\n attention_mask = enc_dec_attention_mask\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n prev_output = modeling.reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n if cache is not None:\n layer_cache = cache[str(layer_idx)]\n if layer_idx == 0:\n layer_input = tf.expand_dims(\n layer_input, axis=1)\n # update batch_size to batch_size*beam_size\n batch_size = modeling.get_shape_list(\n layer_input, expected_rank=3)[0]\n else:\n layer_cache = None\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n if add_self_attention:\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer_with_cache(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=decoder_self_attention_mask,\n 
num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=False,\n batch_size=batch_size,\n from_seq_length=decode_seq_length,\n to_seq_length=decode_seq_length,\n cache=layer_cache)\n attention_heads.append(attention_head)\n\n self_attention_output = None\n if len(attention_heads) == 1:\n self_attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n self_attention_output = tf.concat(\n attention_heads, axis=-1)\n if cache is not None:\n self_attention_output = tf.reshape(\n self_attention_output, [batch_size, -1, hidden_size])\n else:\n self_attention_output = tf.reshape(\n layer_input, [batch_size, -1, hidden_size])\n\n if add_enc_dec_attention:\n with tf.variable_scope('enc_dec_attention'):\n attention_heads = []\n attention_head = attention_layer_with_cache(\n from_tensor=self_attention_output,\n to_tensor=encoder_output,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=decode_seq_length,\n to_seq_length=encode_seq_length,\n cache=None)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(\n attention_heads, axis=-1)\n if cache is not None:\n attention_output = tf.reshape(\n attention_output, [batch_size, -1, hidden_size])\n else:\n attention_output = tf.reshape(\n self_attention_output, [-1, hidden_size])\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=modeling.create_initializer(\n initializer_range))\n attention_output = modeling.dropout(\n attention_output,\n self.params.bert_config.hidden_dropout_prob)\n attention_output = modeling.layer_norm(\n attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n self.params.bert_config.intermediate_size,\n activation=modeling.gelu,\n kernel_initializer=modeling.create_initializer(\n initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=modeling.create_initializer(\n initializer_range))\n layer_output = modeling.dropout(\n layer_output,\n self.params.bert_config.hidden_dropout_prob)\n layer_output = modeling.layer_norm(\n layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = modeling.reshape_from_matrix(\n layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n if cache is None:\n final_output = modeling.reshape_from_matrix(\n 
prev_output, input_shape)\n else:\n final_output = prev_output\n\n if num_classes:\n dense_layer = tf.layers.Dense(\n num_classes,\n activation=None,\n kernel_initializer=tf.orthogonal_initializer()\n )\n logits = dense_layer(final_output)\n else:\n logits = final_output\n return logits\n\n def train_eval(self, features, hidden_feature, mode, problem_name):\n\n # prepare inputs to attention\n key = 'ori_seq' if self.params.label_transfer else 'seq'\n encoder_output = hidden_feature[key]\n\n label_ids = features['%s_label_ids' % problem_name]\n input_mask = features['input_mask']\n num_classes = self.params.num_classes[problem_name]\n\n if self.params.problem_type[problem_name] == 'seq2seq_text':\n embed_table = hidden_feature['embed_table']\n else:\n embed_table = tf.get_variable(\n 'tag_embed_table', shape=[\n num_classes, self.params.mask_lm_hidden_size],\n initializer=tf.orthogonal_initializer())\n decoder_inputs = tf.nn.embedding_lookup(\n embed_table, label_ids)\n\n # with tf.name_scope(\"shift_targets\"):\n # # Shift targets to the right, and remove the last element\n # decoder_inputs = tf.pad(\n # decoder_inputs, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n\n decoder_inputs = modeling.embedding_postprocessor(\n input_tensor=decoder_inputs,\n use_token_type=False,\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=self.params.bert_config.initializer_range,\n max_position_embeddings=self.params.bert_config.max_position_embeddings,\n dropout_prob=self.params.bert_config.hidden_dropout_prob)\n\n # attention_mask = modeling.create_attention_mask_from_input_mask(\n # label_ids, input_mask)\n label_mask = tf.expand_dims(\n tf.cast(features['%s_mask' % problem_name], tf.float32), axis=1)\n decoder_self_attention_mask = label_mask * self.get_decoder_self_attention_mask(\n self.params.decode_max_seq_len)\n\n decode_output = self.decode(\n decoder_inputs=decoder_inputs,\n encoder_output=encoder_output,\n input_mask=input_mask,\n decoder_self_attention_mask=decoder_self_attention_mask,\n cache=None,\n num_classes=num_classes,\n do_return_all_layers=False\n )\n return decode_output\n\n\ndef attention_layer_with_cache(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None,\n decoder_self_attention_mask=None,\n cache=None):\n \"\"\"\n This is a modification of attention layer from bert to support\n fast decode.\n\n Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. 
The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. 
(If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = modeling.get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = modeling.get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = modeling.reshape_to_matrix(from_tensor)\n to_tensor_2d = modeling.reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=modeling.create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=modeling.create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=modeling.create_initializer(initializer_range))\n\n if cache is not None:\n n_time_h = key_layer.get_shape()[1]\n\n key_layer_to_cache = tf.reshape(\n key_layer, [batch_size, -1, n_time_h])\n value_layer_to_cache = tf.reshape(\n value_layer, [batch_size, -1, n_time_h])\n # Combine cached keys and values with new keys and values.\n key_layer_from_cache = tf.concat(\n [cache[\"key_layer\"], key_layer_to_cache], axis=1)\n value_layer_from_cache = tf.concat(\n [cache[\"value_layer\"], value_layer_to_cache], axis=1)\n\n # update seq length\n # from_seq_length = key_layer_from_cache.get_shape()[1]\n from_seq_length = modeling.get_shape_list(\n key_layer_from_cache, expected_rank=[3])[1]\n to_seq_length = modeling.get_shape_list(\n value_layer_from_cache, expected_rank=[3])[1]\n\n # Update cache\n cache[\"key_layer\"] = key_layer_from_cache\n cache[\"value_layer\"] = value_layer_from_cache\n\n key_layer = tf.reshape(key_layer_from_cache, [-1, n_time_h])\n value_layer = tf.reshape(value_layer_from_cache, [-1, n_time_h])\n\n # `query_layer` = [B, N, F, H]\n # In self attention of decoder, the seq_length of q always be 1\n if cache is not None:\n query_layer = transpose_for_scores(\n query_layer, batch_size,\n num_attention_heads, 1,\n size_per_head)\n else:\n query_layer = transpose_for_scores(\n query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # 
`key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(\n key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = modeling.dropout(\n attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*V]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*V]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.broadcast_to",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.orthogonal_initializer",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"tensorflow.nn.embedding_lookup"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
DongjaeJang/Deep-Knowledge-Tracing
|
[
"aab72939a6cbdfc8b7f11bf074040b48771cbf3f",
"aab72939a6cbdfc8b7f11bf074040b48771cbf3f"
] |
[
"keonwoo/scheduler.py",
"myeongsoo/pseudo/dkt/scheduler.py"
] |
[
"from torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom transformers import get_linear_schedule_with_warmup\n\n\ndef get_scheduler(optimizer, args):\n if args.scheduler == \"plateau\":\n scheduler = ReduceLROnPlateau(\n optimizer, patience=0, factor=0.9, mode=\"max\", verbose=True\n )\n elif args.scheduler == \"linear_warmup\":\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=args.total_steps,\n )\n return scheduler\n",
"\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom transformers import get_linear_schedule_with_warmup\n\n\ndef get_scheduler(optimizer, args):\n if args.scheduler == 'plateau':\n scheduler = ReduceLROnPlateau(optimizer, patience=10, factor=0.5, mode='max', verbose=True)\n elif args.scheduler == 'linear_warmup':\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=args.total_steps)\n return scheduler"
] |
[
[
"torch.optim.lr_scheduler.ReduceLROnPlateau"
],
[
"torch.optim.lr_scheduler.ReduceLROnPlateau"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sunlin7/GIMP-ML
|
[
"c8845ba485123d20e62b033fbec2c759bc65f9c4"
] |
[
"gimp-plugins/pytorch-deep-image-matting/tools/loss_draw.py"
] |
[
"import re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import *\n\n# args: log_name, match_rule, self_log_interval, smooth_log_interation\nloss_file_name = \"simple_loss\"\ntitle = \"{}_Loss\".format(loss_file_name)\nf = open(\"../log/{}.log\".format(loss_file_name))\npattern = re.compile(r'Loss:[ ]*\\d+\\.\\d+')\nself_inter = 10\nsmooth = 20\n\n# read log file\nlines = f.readlines()\nprint(\"Line: {}\".format(len(lines)))\nys = []\nk = 0\ncnt = 0\nsum_y = 0.\n\n# read one by one\nfor line in lines:\n obj = re.search(pattern, line)\n if obj:\n val = float(obj.group().split(':')[-1])\n sum_y += val\n k += 1\n if k >= smooth:\n ys.append(sum_y / k)\n sum_y = 0.\n k = 0\n cnt += 1\n if cnt % 10 == 0:\n print(\"ys cnt: {}\".format(cnt))\nif k > 0:\n ys.append(sum_y / k)\n\nys = np.array(ys)\nxs = np.arange(len(ys)) * self_inter * smooth\n\nprint(xs)\nprint(ys)\n\nplt.plot(xs, ys)\nplt.title(title)\nplt.xlabel(\"Iter\")\nplt.ylabel(\"Loss\")\nplt.savefig(\"../log/{}.png\".format(title))\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shristov1/DistasterResponse_training
|
[
"f1dbe1a501072adff58ec667bbe0f5920c48898e"
] |
[
"models/train_classifier.py"
] |
[
"import sys\nfrom sqlalchemy import create_engine\nimport nltk\nnltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])\n\nimport pandas as pd\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nimport joblib\n\ndef load_data(database_filepath):\n engine = create_engine(f'sqlite:///{database_filepath}')\n df = pd.read_sql_table('DisasterResponse', engine)\n X = df['message'].values\n Y = df.iloc[:, 5:].values\n category_names = df.columns[5:]\n return X, Y, category_names\n\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n\ndef build_model() -> Pipeline:\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n return pipeline\n\n\ndef evaluate_model(model, X_test, Y_test, category_names) -> None:\n report = (classification_report(Y_test, model.predict(X_test), output_dict=True))\n report_df = pd.DataFrame(report).transpose()\n report_df = report_df.iloc[:-4, :]\n report_df['categories'] = category_names\n print(report_df)\n\n\ndef save_model(model, model_filepath) -> None:\n joblib.dump(model, model_filepath)\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()"
] |
[
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.read_sql_table",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.feature_extraction.text.TfidfTransformer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
adrzystek/TicTacToeAI
|
[
"dbd76454a3b2ca07dd50b1080fb50e9d9ba3cd51"
] |
[
"tests/test_utils.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom scripts.utils import check_if_board_is_full, get_winner, negamax, negamax_alpha_beta_pruned\n\nboard0 = np.zeros(shape=(3, 3))\nboard1 = np.array([[-1, 0, 1], [1, 0, 0], [1, -1, -1]])\nboard2 = np.array([[1, 0, 1], [0, 0, 0], [0, -1, -1]])\nboard3 = np.array([[1, -1, -1], [-1, 1, 1], [1, -1, -1]])\nboard4 = np.array([[1, 0, 0], [0, 0, -1], [0, 0, 0]])\nboard5 = np.array([[1, 1, -1], [0, 0, -1], [0, 0, 0]])\nboard6 = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n\"\"\"\nboard0:\narray([[0., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]])\n\nboard1:\narray([[-1, 0, 1],\n [ 1, 0, 0],\n [ 1, -1, -1]])\n\nboard2:\narray([[ 1, 0, 1],\n [ 0, 0, 0],\n [ 0, -1, -1]])\n\nboard3:\narray([[ 1, -1, -1],\n [-1, 1, 1],\n [ 1, -1, -1]])\n\nboard4:\narray([[ 1, 0, 0],\n [ 0, 0, -1],\n [ 0, 0, 0]])\n\nboard5:\narray([[ 1, 1, -1],\n [ 0, 0, -1],\n [ 0, 0, 0]])\n\nboard6:\narray([[ 0, 0, 0],\n [ 0, 1, 0],\n [ 0, 0, 0]])\n\"\"\"\n\n\[email protected](\"board, expected\", [\n (board0, False),\n (board1, False),\n (board2, False),\n (board3, True),\n])\ndef test_check_if_board_is_full(board, expected):\n assert check_if_board_is_full(board, 3) == expected\n\n\[email protected](\"board, expected\", [\n (np.array([[-1, 0, 1], [1, -1, 0], [1, -1, -1]]), -1),\n (np.array([[-1, 0, 1], [1, 1, 0], [1, -1, -1]]), 1),\n])\ndef test_get_winner_when_game_is_decided(board, expected):\n assert get_winner(board) == expected\n\n\[email protected](\"board, expected\", [\n (board1, None),\n (board3, None),\n])\ndef test_get_winner_when_game_is_not_decided(board, expected):\n assert get_winner(board) is expected\n\n\[email protected](\"board, player, expected\", [\n (board0, 1, 0),\n (board0, -1, 0),\n (board6, -1, 0),\n (board1, 1, 1),\n (board1, -1, 1),\n (board2, 1, 1),\n (board2, -1, 1),\n (board4, 1, 1),\n (board5, 1, -1),\n (board6, 1, 1),\n])\ndef test_negamax_whether_predicts_result(board, player, expected):\n # watch out! the negamax function returns the results from the perspective\n # of the player to play, so that a line `(board1, -1, 1)` expects player \"-1\" to win\n assert negamax(board, player)['score'] == expected\n\n\[email protected](\"board, player, expected\", [\n (board0, 1, 0),\n (board0, -1, 0),\n (board6, -1, 0),\n (board1, 1, 1),\n (board1, -1, 1),\n (board2, 1, 1),\n (board2, -1, 1),\n (board4, 1, 1),\n (board5, 1, -1),\n])\ndef test_negamax_alpha_beta_pruned_whether_predicts_result(board, player, expected):\n # watch out! the negamax_alpha_beta_pruned function returns the results from the perspective\n # of the player to play, so that a line `(board1, -1, 1)` expects player \"-1\" to win\n assert negamax_alpha_beta_pruned(board, player, -np.inf, np.inf)['score'] == expected\n\n\[email protected](\"board, player, expected\", [\n (board1, 1, [(1, 1)]),\n (board2, 1, [(0, 1)]),\n (board2, -1, [(2, 0), (0, 1)]),\n])\ndef test_negamax_plays_proper_move(board, player, expected):\n assert negamax(board, player)['move'] in expected\n\n\[email protected](\"board, player, expected\", [\n (board1, 1, [(1, 1)]),\n (board2, 1, [(0, 1)]),\n (board2, -1, [(2, 0), (0, 1)]),\n])\ndef test_negamax_alpha_beta_pruned_plays_proper_move(board, player, expected):\n assert negamax_alpha_beta_pruned(board, player, -np.inf, np.inf)['move'] in expected\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SachinKonan/FRC2018Simulator
|
[
"74ee69f23721ff945fd0c6399b222d47ebd2d516",
"74ee69f23721ff945fd0c6399b222d47ebd2d516"
] |
[
"simulator_game.py",
"astar.py"
] |
[
"import numpy as np\nimport pygame\nimport sys\nimport os\nfrom collisionutils import *\nfrom colors import *\nfrom utilutils import *\nfrom chassis import AutoPathFollower, RobotDrive\nimport initObstacles\n\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\nsize = (1340, 684)\n\ndef allDisplay(obstacles, screen):\n displayObstacles(obstacles, screen, color_matter = True)\n pygame.draw.rect(screen, obstacles['Switch']['RedSwitchTop'][0], obstacles['Switch']['RedSwitchTop'][1:])\n pygame.draw.rect(screen, obstacles['Switch']['RedSwitchBottom'][0], obstacles['Switch']['RedSwitchBottom'][1:])\n pygame.draw.rect(screen, obstacles['Switch']['BlueSwitchTop'][0], obstacles['Switch']['BlueSwitchTop'][1:])\n pygame.draw.rect(screen, obstacles['Switch']['BlueSwitchBottom'][0], obstacles['Switch']['BlueSwitchBottom'][1:])\n\ndef displayObstacles(obstacles, screen, color_matter = True):\n for i in obstacles.keys():\n if(isinstance(obstacles[i], dict) and i is 'Cube'):\n displayObstacles(obstacles[i], screen, False)\n elif(isinstance(obstacles[i], dict)):\n displayObstacles(obstacles[i], screen, True)\n else:\n if(not('Top' in i) and not('Bottom' in i) ):\n if(color_matter):\n pygame.draw.rect(screen, obstacles[i][0], obstacles[i][1:])\n else:\n pygame.draw.polygon(screen, YELLOW, obstacles[i][0])\n\ndef distance_calculator(robot_mid, target_mid):\n return np.sqrt((target_mid[0]- robot_mid[0])**2 + (target_mid[1]- robot_mid[1])**2)\n\ndef instant_distancetoObstacles(robot_mid, obstacles, threshold):\n distances = list(map(lambda x: [x, distance_calculator(robot_mid, getMidpoint(obstacles['Switch'][x][1:]))] ,obstacles['Switch'].keys())) + list(map(lambda x: [x, distance_calculator(robot_mid, getMidpoint(obstacles['Scale'][x][1:]))] ,obstacles['Scale'].keys()))\n s = sorted(distances, key = lambda x: x[1])\n id = s[0][0]\n return obstacles['Switch' if 'Switch' in id else 'Scale'][id]\n\ndef distancetoScaleInput(gripper_mid, threshold):\n return min(distance_calculator(gripper_mid, getMidpoint(obstacles['Scale']['Scale_Input1'][1:] )), distance_calculator(gripper_mid, getMidpoint(obstacles['Scale']['Scale_Input2'][1:])) ) < threshold\n\n\ndef instant_distancetoCubes(robot_mid, obstacles, threshold):\n distances = list(map(lambda x: [x, distance_calculator(robot_mid, obstacles['Cube'][x][1])] ,obstacles['Cube'].keys()))\n s = sorted(distances, key = lambda x: x[1])\n return obstacles['Cube'][s[0][0]], s[0][0]\n\ndef drawRobottoNearestCube(screen, robot_gripper_mid, min_cube):\n pygame.draw.line(screen, BLACK, robot_gripper_mid, min_cube)\n\ndef checkObstacles(coords, obstacles):\n k = ['Switch', 'Scale']\n for i in k:\n for j in obstacles[i].keys():\n collided = collide(coords, getPolygon(obstacles[i][j][1:]) )\n if(collided):\n return False, j\n return True, None\n\ndef showPath(screen, coords):\n if(len(coords) == 1):\n return\n pygame.draw.line(screen, BLUE, coords[0], coords[1])\n coords.pop(0)\n return showPath(screen, coords)\n\npygame.init()\nscreen = pygame.display.set_mode(size)\nscreen.fill(WHITE)\nclock = pygame.time.Clock()\ninc = 10\nheight_inc = 10\n\nstart = initObstacles.start\ntasks = ['T90.0', ]\npath = [[40, 95], [346, 95], [346, 138], [346, 138]]\nreal_start = [path[0][0], path[0][1], start[2], start[3]]\nchassis = RobotDrive(x_mid = real_start[0], y_mid=real_start[1], w= real_start[2], h = real_start[3], Gripper = True, startCube = True)\n#chassis.rotate(90)\nauto = AutoPathFollower(chassis, screen, tasks)\ndone = False\nprint(initObstacles.obstacles)\nwhile(not done):\n for 
event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n obstacles = initObstacles.obstacles\n screen.fill(WHITE)\n bool = auto.autoPeriodic()\n keys=pygame.key.get_pressed()\n\n\n if(bool):\n angle = chassis.angle\n if keys[pygame.K_RIGHT]:\n angle += inc\n if keys[pygame.K_LEFT]:\n angle += -1*inc\n if keys[pygame.K_UP]:\n chassis.translate(height_inc)\n coords = chassis.getTotalRobotCoordinates()\n collided, id = checkObstacles(coords, obstacles)\n if(collided and not checkOutofBounds(coords, size) ):\n angle += 1.5*(2*np.random.rand() - 1)\n else:\n if(id in ['RedSwitchTop', 'RedSwitchBottom', 'BlueSwitchTop', 'BlueSwitchBottom','Scale_Input1', 'Scale_Input2'] and chassis.target_attached):\n print('You scored!!!!!')\n id = chassis.target\n chassis.detachTarget()\n obstacles['Cube'].pop(id, None)\n else:\n print(False)\n chassis.translate(-height_inc*2)\n\n if keys[pygame.K_DOWN]:\n chassis.translate(-1*height_inc)\n coords = chassis.getTotalRobotCoordinates()\n collided, id = checkObstacles(coords, obstacles)\n if(collided and not checkOutofBounds(coords, size) ):\n angle += 1.5*(2*np.random.rand() - 1)\n else:\n if(id in ['RedSwitchTop', 'RedSwitchBottom', 'BlueSwitchTop', 'BlueSwitchBottom','Scale_Input1', 'Scale_Input2'] and chassis.target_attached):\n print('You scored!!!!!')\n id = chassis.target\n chassis.detachTarget()\n obstacles['Cube'].pop(id, None)\n else:\n print(False)\n chassis.translate(height_inc)\n #print(list(map(lambda x: x[0], chassis.moveOptions())))\n print(chassis.moveOptions())\n chassis.moveOptions()\n chassis.rotate(angle)\n else:\n showPath(screen, path.copy())\n\n if keys[pygame.K_d] and chassis.target_attached:\n chassis.detachTarget()\n\n min_cube, id = instant_distancetoCubes(chassis.robotSprite, obstacles, chassis.getTotalLen())\n #print(id)\n if(distance_calculator(chassis.robotSprite, min_cube[1]) < 50 and chassis.target_attached == False):\n chassis.setTarget(id)\n\n #print(id)\n chassis.drawRobot(screen)\n allDisplay(obstacles, screen)\n drawRobottoNearestCube(screen, chassis.gripper_center, min_cube[1])\n pygame.display.flip()\n clock.tick(10)\n\npygame.quit()\n",
"import numpy as np\nimport matplotlib.pyplot as plt\n\ngrid = np.array([\n[0,0,1,0,1],\n[1,0,1,0,0],\n[0,0,0,0,1],\n[0,0,1,0,1]\n])\nshape = grid.shape\nstart = [0,0]\nend = [1,4]\n\"\"\"plt.imshow(grid, cmap='Greys', interpolation='nearest')\nplt.grid()\nplt.show()\"\"\"\n\ndef getOptions(coords):\n neighbor_coords = []\n global grid\n global shape\n if(grid[coords[0]][coords[1]] != 1):\n if(coords[0] < shape[0] -1 and grid[coords[0] + 1, coords[1]] != 1):\n neighbor_coords.append([coords[0] + 1, coords[1]])\n if(coords[0] > 0 and grid[coords[0] - 1, coords[1]] != 1):\n neighbor_coords.append([coords[0] - 1, coords[1]])\n if(coords[1] < shape[1] - 1 and grid[coords[0], coords[1] + 1] != 1):\n neighbor_coords.append([coords[0], coords[1] + 1])\n if(coords[1] > 0 and grid[coords[0], coords[1] - 1] != 1):\n neighbor_coords.append([coords[0], coords[1] - 1])\n\n return neighbor_coords\n\ndef minPicker(mat,mask):\n min = 999\n idx = []\n global grid\n global start\n for i in range(0, len(mat)):\n for j in range(0, len(mat[0])):\n if(grid[i][j] != 1 and mat[i][j] < min and mask[i][j] == 0):\n min = mat[i][j]\n idx = [i,j]\n return idx\n\ndef euclidist(p1, p2):\n return np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n\ndef backwards(distance_mat, pos, path = []):\n if(distance_mat[pos[0],pos[1]] == 0):\n return path\n opts = getOptions(pos)\n cell = min(opts, key = lambda x: distance_mat[x[0], x[1]])\n diff = [cell[0] - pos[0], cell[1] - pos[1]]\n if(diff[0] != 0 and diff[1] == 0):\n if(diff[0] > 0):\n path.insert(0,'N1')\n else:\n path.insert(0, 'S1')\n elif(diff[0] == 0 and diff[1] != 0):\n if(diff[1] > 0):\n path.insert(0,'W1')\n else:\n path.insert(0, 'E1')\n else:\n path.insert(0, 'invalid')\n return backwards(distance_mat, cell, path)\n\nopts = getOptions(start)\nvisited = np.zeros(shape = shape)\ndistances = np.ones(shape = shape)*200\ndistances[start[0], start[1]] = 0\n\n\"\"\"for i in range(0, len(grid)):\n for j in range(0, len(grid[0])):\n print('At row %s and col %s' % (i,j), end= '')\n print(getOptions([i,j], shape))\"\"\"\n\n\nwhile(not any(map(lambda x: x[0] == end[0] and x[1] == end[1], opts))):\n cell = minPicker(distances, visited)\n visited[cell[0], cell[1]] = 1\n opts = getOptions(cell)\n for i,j in opts:\n if(visited[i,j] != 1):\n distances[i,j] = min(distances[i,j], distances[cell[0], cell[1]] + 1 + euclidist([i,j], end) )\n print(\"at cell: %s r and %s c\" % (cell[0], cell[1]))\n print(opts)\n print(distances)\n print(visited)\n\ndirections = backwards(distances, end)\nprint(directions)\n"
] |
[
[
"numpy.random.rand",
"numpy.sqrt"
],
[
"numpy.array",
"numpy.zeros",
"numpy.sqrt",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xiaoye-hua/recommendation_system
|
[
"860fcbe221f69030a26e4eee291270922e48e9ee"
] |
[
"src/DataPreprocess/XGBoostLRDataProcess.py"
] |
[
"# -*- coding: utf-8 -*-\n# @File : XGBoostLRDataProcess.py\n# @Author : Hua Guo\n# @Disc :\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom xgboost.sklearn import XGBModel\nfrom copy import deepcopy\nfrom xgboost.sklearn import XGBClassifier\nimport logging\nlogging.getLogger(__name__)\n\n\nclass XGBoostLRDataProcess(TransformerMixin, BaseEstimator):\n def __init__(self) -> None:\n self.xgb = XGBClassifier()\n self.one_hot = OneHotEncoder()\n\n def fit(self, X, y):\n X = deepcopy(X)\n self.xgb.fit(\n X=X, y=y\n , verbose=True\n , eval_metric='logloss'\n\n # , verbose=self.xgb_train_params['verbose']\n # , eval_metric=self.xgb_train_params['eval_metric']\n ,eval_set=[[X, y]]\n )\n\n X = self.xgb.apply(X)\n self.one_hot.fit(X)\n return self\n\n def transform(self, X, y=None):\n X = self.xgb.apply(X) # [:, 70:]\n X = self.one_hot.transform(X)\n return X\n"
] |
[
[
"sklearn.preprocessing.OneHotEncoder"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
orhunguley/unsupervised_object_learning
|
[
"bae764a7ff3fb77f0050617f19c37fa2d44ed3e2"
] |
[
"visualize_results.py"
] |
[
"import argparse\nimport os\nimport time\nimport torch\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport torch.optim\nfrom torch.nn.utils import clip_grad_norm_\nfrom data import TrainStation\nfrom motsynth import MOTSynth, MOTSynthBlackBG\nfrom log_utils import log_summary\nfrom utils import save_ckpt, load_ckpt, print_scalor\nfrom utils import spatial_transform, visualize\nfrom common import *\nimport parse\nimport pickle\nimport json\nimport skimage.transform as st\nfrom pycocotools import mask as coco_mask\nfrom tensorboardX import SummaryWriter\nimport argparse\nimport os\nimport time\nimport torch\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport torch.optim\nfrom torch.nn.utils import clip_grad_norm_\nfrom data import TrainStation\nfrom motsynth import MOTSynth, MOTSynthBlackBG\nfrom log_utils import log_summary\nfrom utils import save_ckpt, load_ckpt, print_scalor\nfrom common import *\nimport parse\nfrom utils import spatial_transform, visualize\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchvision.utils import make_grid\nimport torchvision.transforms.functional as F\nfrom tensorboardX import SummaryWriter\nfrom scalor import SCALOR\nimport pickle\nimport torch\n\n\n\n\ndef get_log_disc_dict(log_disc_list, j = 0, cs = 8, prefix=\"train\", bs = 2):\n \n log_disc = {\n 'z_what': log_disc_list[j]['z_what'].view(-1, cs * cs, z_what_dim),\n 'z_where_scale':\n log_disc_list[j]['z_where'].view(-1, cs * cs, z_where_scale_dim + z_where_shift_dim)[:, :,\n :z_where_scale_dim],\n 'z_where_shift':\n log_disc_list[j]['z_where'].view(-1, cs * cs, z_where_scale_dim + z_where_shift_dim)[:, :,\n z_where_scale_dim:],\n 'z_pres': log_disc_list[j]['z_pres'].permute(0, 2, 3, 1),\n 'z_pres_probs': torch.sigmoid(log_disc_list[j]['z_pres_logits']).permute(0, 2, 3, 1),\n 'z_what_std': log_disc_list[j]['z_what_std'].view(-1, cs * cs, z_what_dim),\n 'z_what_mean': log_disc_list[j]['z_what_mean'].view(-1, cs * cs, z_what_dim),\n 'z_where_scale_std':\n log_disc_list[j]['z_where_std'].permute(0, 2, 3, 1)[:, :, :z_where_scale_dim],\n 'z_where_scale_mean':\n log_disc_list[j]['z_where_mean'].permute(0, 2, 3, 1)[:, :, :z_where_scale_dim],\n 'z_where_shift_std':\n log_disc_list[j]['z_where_std'].permute(0, 2, 3, 1)[:, :, z_where_scale_dim:],\n 'z_where_shift_mean':\n log_disc_list[j]['z_where_mean'].permute(0, 2, 3, 1)[:, :, z_where_scale_dim:],\n 'glimpse': log_disc_list[j]['x_att'].view(-1, cs * cs, 3, glimpse_size, glimpse_size) \\\n if prefix != 'generate' else None,\n 'glimpse_recon': log_disc_list[j]['y_att'].view(-1, cs * cs, 3, glimpse_size, glimpse_size),\n 'prior_z_pres_prob': log_disc_list[j]['prior_z_pres_prob'].unsqueeze(0),\n 'o_each_cell': spatial_transform(log_disc_list[j]['o_att'], log_disc_list[j]['z_where'],\n (cs * cs * bs, 3, img_h, img_w),\n inverse=True).view(-1, cs * cs, 3, img_h, img_w),\n 'alpha_hat_each_cell': spatial_transform(log_disc_list[j]['alpha_att_hat'],\n log_disc_list[j]['z_where'],\n (cs * cs * bs, 1, img_h, img_w),\n inverse=True).view(-1, cs * cs, 1, img_h, img_w),\n 'alpha_each_cell': spatial_transform(log_disc_list[j]['alpha_att'], log_disc_list[j]['z_where'],\n (cs * cs * bs, 1, img_h, img_w),\n inverse=True).view(-1, cs * cs, 1, img_h, img_w),\n 'y_each_cell': (log_disc_list[j]['y_each_cell'] * log_disc_list[j]['z_pres'].\n view(-1, 1, 1, 1)).view(-1, cs * cs, 3, img_h, img_w),\n 'z_depth': log_disc_list[j]['z_depth'].view(-1, cs * cs, z_depth_dim),\n 'z_depth_std': 
log_disc_list[j]['z_depth_std'].view(-1, cs * cs, z_depth_dim),\n 'z_depth_mean': log_disc_list[j]['z_depth_mean'].view(-1, cs * cs, z_depth_dim),\n 'z_pres_logits': log_disc_list[j]['z_pres_logits'].permute(0, 2, 3, 1),\n 'z_pres_y': log_disc_list[j]['z_pres_y'].permute(0, 2, 3, 1)\n }\n return log_disc\n\ndef get_log_prop_dict(log_prop_list, j = 0, cs = 8, prefix=\"train\", bs = 2):\n if log_prop_list[j]:\n log_prop = {\n 'z_what': log_prop_list[j]['z_what'].view(bs, -1, z_what_dim),\n 'z_where_scale':\n log_prop_list[j]['z_where'].view(bs, -1, z_where_scale_dim + z_where_shift_dim)[:, :,\n :z_where_scale_dim],\n 'z_where_shift':\n log_prop_list[j]['z_where'].view(bs, -1, z_where_scale_dim + z_where_shift_dim)[:, :,\n z_where_scale_dim:],\n 'z_pres': log_prop_list[j]['z_pres'],\n 'z_what_std': log_prop_list[j]['z_what_std'].view(bs, -1, z_what_dim),\n 'z_what_mean': log_prop_list[j]['z_what_mean'].view(bs, -1, z_what_dim),\n 'z_where_bias_scale_std':\n log_prop_list[j]['z_where_bias_std'][:, :, :z_where_scale_dim],\n 'z_where_bias_scale_mean':\n log_prop_list[j]['z_where_bias_mean'][:, :, :z_where_scale_dim],\n 'z_where_bias_shift_std':\n log_prop_list[j]['z_where_bias_std'][:, :, z_where_scale_dim:],\n 'z_where_bias_shift_mean':\n log_prop_list[j]['z_where_bias_mean'][:, :, z_where_scale_dim:],\n 'z_pres_probs': torch.sigmoid(log_prop_list[j]['z_pres_logits']),\n 'glimpse': log_prop_list[j]['glimpse'],\n 'glimpse_recon': log_prop_list[j]['glimpse_recon'],\n 'prior_z_pres_prob': log_prop_list[j]['prior_z_pres_prob'],\n 'prior_where_bias_scale_std':\n log_prop_list[j]['prior_where_bias_std'][:, :, :z_where_scale_dim],\n 'prior_where_bias_scale_mean':\n log_prop_list[j]['prior_where_bias_mean'][:, :, :z_where_scale_dim],\n 'prior_where_bias_shift_std':\n log_prop_list[j]['prior_where_bias_std'][:, :, z_where_scale_dim:],\n 'prior_where_bias_shift_mean':\n log_prop_list[j]['prior_where_bias_mean'][:, :, z_where_scale_dim:],\n\n 'lengths': log_prop_list[j]['lengths'],\n 'z_depth': log_prop_list[j]['z_depth'],\n 'z_depth_std': log_prop_list[j]['z_depth_std'],\n 'z_depth_mean': log_prop_list[j]['z_depth_mean'],\n\n 'y_each_obj': log_prop_list[j]['y_each_obj'],\n 'alpha_hat_each_obj': log_prop_list[j]['alpha_map'],\n\n 'z_pres_logits': log_prop_list[j]['z_pres_logits'],\n 'z_pres_y': log_prop_list[j]['z_pres_y'],\n 'o_each_obj':\n spatial_transform(log_prop_list[j]['o_att'].view(-1, 3, glimpse_size, glimpse_size),\n log_prop_list[j]['z_where'].view(-1, (z_where_scale_dim +\n z_where_shift_dim)),\n (log_prop_list[j]['o_att'].size(1) * bs, 3, img_h, img_w),\n inverse=True).view(bs, -1, 3, img_h, img_w),\n 'z_where_bias_scale':\n log_prop_list[j]['z_where_bias'].view(bs, -1, z_where_scale_dim + z_where_shift_dim)\n [:, :, :z_where_scale_dim],\n 'z_where_bias_shift':\n log_prop_list[j]['z_where_bias'].view(bs, -1, z_where_scale_dim + z_where_shift_dim)\n [:, :, z_where_scale_dim:],\n }\n return log_prop\n\n\ndef save(imgs, dest):\n if not isinstance(imgs, list):\n imgs = [imgs]\n fix, axs = plt.subplots(ncols=len(imgs), squeeze=False, figsize=(60,60))\n for i, img in enumerate(imgs):\n img = img.detach()\n img = F.to_pil_image(img)\n axs[0, i].imshow(np.asarray(img))\n axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])\n fix.savefig(dest)\n\n \nimport pathlib\n\nmodel_name_prefix = \"model_perceptual_gan_v2_7x7\"\nfor i in range(5):\n \n file_name = f\"{model_name_prefix}_ex_{i+1}\"\n directory = f\"visuals/{file_name}\"\n pathlib.Path(directory).mkdir(parents=True, 
exist_ok=True)\n \n with open(f'example_viz/{file_name}.pickle', 'rb') as handle:\n preds = pickle.load(handle)\n\n imgs, y_seq, log_like, kl_z_what, kl_z_where, kl_z_depth, \\\n kl_z_pres, kl_z_bg, log_imp, counting, \\\n log_disc_list, log_prop_list, scalor_log_list = preds\n j = 0\n cs = 7\n bs = 1\n log_disc = get_log_disc_dict(log_disc_list, j = 0, cs = cs, prefix=\"train\", bs = 1)\n\n bbox = visualize(imgs[0, j].cpu().unsqueeze(0),\n log_disc['z_pres'][0].unsqueeze(0).cpu().detach(),\n log_disc['z_where_scale'][0].unsqueeze(0).cpu().detach(),\n log_disc['z_where_shift'][0].unsqueeze(0).cpu().detach())\n\n grid = make_grid(bbox, 8, normalize=True, pad_value=1)\n save(grid, dest=os.path.join(directory, \"bbox.png\"))\n \n grid = make_grid(imgs[0, 0, :, :, :].cpu(), 1)\n save(grid, dest=os.path.join(directory, \"original_img.png\"))\n \n grid = make_grid(y_seq[0, 0, :, :, :].cpu(), 1)\n save(grid, dest=os.path.join(directory, \"recons_img.png\"))\n \n grid = make_grid(log_disc[\"alpha_each_cell\"][0].cpu(), cs, normalize=True, pad_value=1)\n save(grid, dest=os.path.join(directory, \"alpha_map.png\"))\n \n grid = make_grid(log_disc[\"glimpse\"][0].cpu(), cs, normalize=True, pad_value=1)\n save(grid, dest=os.path.join(directory, \"glimpses.png\"))"
] |
[
[
"numpy.asarray",
"torch.sigmoid"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iwbn/age-joint-loss-tensorflow
|
[
"0968acd48ae8e87577e7b64e56a25b960589a759"
] |
[
"test_joint_morph_id.py"
] |
[
"from model.joint import AgeModelMorph\nimport tensorflow as tf\nimport sys, os\nimport numpy as np\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('log_dir', \"path-to-log\", 'Log is saved to this directory.')\nflags.DEFINE_integer('gpu', 4, 'GPU to use.')\n\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"%d\" % FLAGS.gpu\n\n\ndef test_():\n model = AgeModelMorph()\n\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n sess = tf.Session(config=sess_config)\n\n sess.run(tf.global_variables_initializer())\n\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)\n if ckpt:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print (\"Cannot find any checkpoints in log directory.\")\n return\n acc, o_acc, res, dex_res = model.get_test_accuracy()\n\n model.prepare_data(sess)\n test_iter = model.make_test_iterators()\n sess.run([test_iter.initializer])\n test_next = test_iter.get_next()\n\n num_correct = 0\n num_one_off = 0\n sae = 0.\n sae_dex = 0.\n num_test = 0\n while True:\n try:\n x, y = sess.run(test_next)\n except tf.errors.OutOfRangeError:\n break\n [accuracy, one_off_accuracy, c, d] = sess.run([acc, o_acc, res, dex_res],\n feed_dict={model.imgs: x, model.ages: y, model.phase: \"test\", model.is_training: False})\n c += 16\n num_correct += accuracy\n sae += float(abs(c-y[0]))\n sae_dex += abs(d-float(y[0]))\n num_one_off += one_off_accuracy\n num_test += 1\n print (\"mae: %.4f, mae(dex): %.4f\" % (sae/num_test, sae_dex/num_test))\n with open(os.path.join(FLAGS.log_dir,'test-%d.txt'%sess.run(model.global_step)), 'w') as f:\n f.write(\"mae: %.4f mae(dex): %.4f\" % (sae / num_test, sae_dex / num_test))\n print (float(num_correct)/num_test, float(num_one_off)/num_test)\n sess.close()\n\ntest_()\n"
] |
[
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.Saver"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
akhalymon-cv/spark
|
[
"76191b9151b6a7804f8894e53eef74106f98b787",
"76191b9151b6a7804f8894e53eef74106f98b787"
] |
[
"python/pyspark/pandas/tests/data_type_ops/test_string_ops.py",
"python/pyspark/mllib/tests/test_feature.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype\n\nfrom pyspark import pandas as ps\nfrom pyspark.pandas.config import option_context\nfrom pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils\nfrom pyspark.pandas.typedef.typehints import extension_object_dtypes_available\nfrom pyspark.testing.pandasutils import PandasOnSparkTestCase\n\nif extension_object_dtypes_available:\n from pandas import StringDtype\n\n\nclass StringOpsTest(PandasOnSparkTestCase, TestCasesUtils):\n @property\n def bool_pdf(self):\n return pd.DataFrame({\"this\": [\"x\", \"y\", \"z\"], \"that\": [\"z\", \"y\", \"x\"]})\n\n @property\n def bool_psdf(self):\n return ps.from_pandas(self.bool_pdf)\n\n @property\n def bool_non_numeric_pdf(self):\n return pd.concat([self.bool_pdf, self.non_numeric_pdf], axis=1)\n\n @property\n def bool_non_numeric_psdf(self):\n return ps.from_pandas(self.bool_non_numeric_pdf)\n\n def test_add(self):\n pdf, psdf = self.bool_non_numeric_pdf, self.bool_non_numeric_psdf\n pser, psser = pdf[\"this\"], psdf[\"this\"]\n other_pser, other_psser = pdf[\"that\"], psdf[\"that\"]\n self.assert_eq(pser + \"x\", psser + \"x\")\n self.assertRaises(TypeError, lambda: psser + 1)\n\n self.assert_eq(pser + other_pser, psser + other_psser)\n\n for col in self.non_numeric_df_cols:\n if col != \"string\":\n self.assertRaises(TypeError, lambda: psser + psdf[col])\n\n def test_sub(self):\n psdf = self.psdf\n self.assertRaises(TypeError, lambda: psdf[\"string\"] - \"x\")\n self.assertRaises(TypeError, lambda: psdf[\"string\"] - 1)\n\n for col in self.df_cols:\n self.assertRaises(TypeError, lambda: psdf[\"string\"] - psdf[col])\n\n def test_mul(self):\n pdf, psdf = self.pdf, self.psdf\n self.assertRaises(TypeError, lambda: psdf[\"string\"] * \"x\")\n self.assert_eq(pdf[\"string\"] * 1, psdf[\"string\"] * 1)\n\n for col in self.df_cols:\n if col in [\"int\", \"int32\"]:\n self.assert_eq(pdf[\"string\"] * pdf[col], psdf[\"string\"] * psdf[col])\n else:\n self.assertRaises(TypeError, lambda: psdf[\"string\"] * psdf[col])\n\n def test_truediv(self):\n psdf = self.psdf\n self.assertRaises(TypeError, lambda: psdf[\"string\"] / \"x\")\n self.assertRaises(TypeError, lambda: psdf[\"string\"] / 1)\n\n for col in self.df_cols:\n self.assertRaises(TypeError, lambda: psdf[\"string\"] / psdf[col])\n\n def test_floordiv(self):\n psdf = self.psdf\n self.assertRaises(TypeError, lambda: psdf[\"string\"] // \"x\")\n self.assertRaises(TypeError, lambda: psdf[\"string\"] // 1)\n\n for col in self.df_cols:\n self.assertRaises(TypeError, lambda: psdf[\"string\"] // psdf[col])\n\n def test_mod(self):\n psdf = self.psdf\n self.assertRaises(TypeError, lambda: psdf[\"string\"] % 
\"x\")\n self.assertRaises(TypeError, lambda: psdf[\"string\"] % 1)\n\n for col in self.df_cols:\n self.assertRaises(TypeError, lambda: psdf[\"string\"] % psdf[col])\n\n def test_pow(self):\n psdf = self.psdf\n self.assertRaises(TypeError, lambda: psdf[\"string\"] ** \"x\")\n self.assertRaises(TypeError, lambda: psdf[\"string\"] ** 1)\n\n for col in self.df_cols:\n self.assertRaises(TypeError, lambda: psdf[\"string\"] ** psdf[col])\n\n def test_radd(self):\n self.assert_eq(\"x\" + self.pdf[\"string\"], \"x\" + self.psdf[\"string\"])\n self.assertRaises(TypeError, lambda: 1 + self.psdf[\"string\"])\n\n def test_rsub(self):\n self.assertRaises(TypeError, lambda: \"x\" - self.psdf[\"string\"])\n self.assertRaises(TypeError, lambda: 1 - self.psdf[\"string\"])\n\n def test_rmul(self):\n self.assertRaises(TypeError, lambda: \"x\" * self.psdf[\"string\"])\n self.assert_eq(1 * self.pdf[\"string\"], 1 * self.psdf[\"string\"])\n\n def test_rtruediv(self):\n self.assertRaises(TypeError, lambda: \"x\" / self.psdf[\"string\"])\n self.assertRaises(TypeError, lambda: 1 / self.psdf[\"string\"])\n\n def test_rfloordiv(self):\n self.assertRaises(TypeError, lambda: \"x\" // self.psdf[\"string\"])\n self.assertRaises(TypeError, lambda: 1 // self.psdf[\"string\"])\n\n def test_rmod(self):\n self.assertRaises(TypeError, lambda: 1 % self.psdf[\"string\"])\n\n def test_rpow(self):\n self.assertRaises(TypeError, lambda: \"x\" ** self.psdf[\"string\"])\n self.assertRaises(TypeError, lambda: 1 ** self.psdf[\"string\"])\n\n def test_and(self):\n self.assertRaises(TypeError, lambda: self.psdf[\"string\"] & True)\n self.assertRaises(TypeError, lambda: self.psdf[\"string\"] & False)\n self.assertRaises(TypeError, lambda: self.psdf[\"string\"] & self.psdf[\"string\"])\n\n def test_rand(self):\n self.assertRaises(TypeError, lambda: True & self.psdf[\"string\"])\n self.assertRaises(TypeError, lambda: False & self.psdf[\"string\"])\n\n def test_or(self):\n self.assertRaises(TypeError, lambda: self.psdf[\"string\"] | True)\n self.assertRaises(TypeError, lambda: self.psdf[\"string\"] | False)\n self.assertRaises(TypeError, lambda: self.psdf[\"string\"] | self.psdf[\"string\"])\n\n def test_ror(self):\n self.assertRaises(TypeError, lambda: True | self.psdf[\"string\"])\n self.assertRaises(TypeError, lambda: False | self.psdf[\"string\"])\n\n def test_from_to_pandas(self):\n data = [\"x\", \"y\", \"z\"]\n pser = pd.Series(data)\n psser = ps.Series(data)\n self.assert_eq(pser, psser.to_pandas())\n self.assert_eq(ps.from_pandas(pser), psser)\n\n def test_isnull(self):\n self.assert_eq(self.pdf[\"string\"].isnull(), self.psdf[\"string\"].isnull())\n\n def test_astype(self):\n pser = pd.Series([\"1\", \"2\", \"3\"])\n psser = ps.from_pandas(pser)\n self.assert_eq(pser.astype(int), psser.astype(int))\n self.assert_eq(pser.astype(float), psser.astype(float))\n self.assert_eq(pser.astype(np.float32), psser.astype(np.float32))\n self.assert_eq(pser.astype(np.int32), psser.astype(np.int32))\n self.assert_eq(pser.astype(np.int16), psser.astype(np.int16))\n self.assert_eq(pser.astype(np.int8), psser.astype(np.int8))\n self.assert_eq(pser.astype(str), psser.astype(str))\n self.assert_eq(pser.astype(bool), psser.astype(bool))\n self.assert_eq(pser.astype(\"category\"), psser.astype(\"category\"))\n cat_type = CategoricalDtype(categories=[\"3\", \"1\", \"2\"])\n self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))\n\n def test_neg(self):\n self.assertRaises(TypeError, lambda: -self.psdf[\"string\"])\n\n def test_abs(self):\n 
self.assertRaises(TypeError, lambda: abs(self.psdf[\"string\"]))\n\n def test_invert(self):\n self.assertRaises(TypeError, lambda: ~self.psdf[\"string\"])\n\n def test_eq(self):\n pdf, psdf = self.bool_non_numeric_pdf, self.bool_non_numeric_psdf\n pser, psser = pdf[\"this\"], psdf[\"this\"]\n other_pser, other_psser = pdf[\"that\"], psdf[\"that\"]\n self.assert_eq(pser == other_pser, psser == other_psser)\n self.assert_eq(pser == pser, psser == psser)\n\n def test_ne(self):\n pdf, psdf = self.bool_non_numeric_pdf, self.bool_non_numeric_psdf\n pser, psser = pdf[\"this\"], psdf[\"this\"]\n other_pser, other_psser = pdf[\"that\"], psdf[\"that\"]\n self.assert_eq(pser != other_pser, psser != other_psser)\n self.assert_eq(pser != pser, psser != psser)\n\n def test_lt(self):\n pdf, psdf = self.bool_non_numeric_pdf, self.bool_non_numeric_psdf\n pser, psser = pdf[\"this\"], psdf[\"this\"]\n other_pser, other_psser = pdf[\"that\"], psdf[\"that\"]\n self.assert_eq(pser < other_pser, psser < other_psser)\n self.assert_eq(pser < pser, psser < psser)\n\n def test_le(self):\n pdf, psdf = self.bool_non_numeric_pdf, self.bool_non_numeric_psdf\n pser, psser = pdf[\"this\"], psdf[\"this\"]\n other_pser, other_psser = pdf[\"that\"], psdf[\"that\"]\n self.assert_eq(pser <= other_pser, psser <= other_psser)\n self.assert_eq(pser <= pser, psser <= psser)\n\n def test_gt(self):\n pdf, psdf = self.bool_non_numeric_pdf, self.bool_non_numeric_psdf\n pser, psser = pdf[\"this\"], psdf[\"this\"]\n other_pser, other_psser = pdf[\"that\"], psdf[\"that\"]\n self.assert_eq(pser > other_pser, psser > other_psser)\n self.assert_eq(pser > pser, psser > psser)\n\n def test_ge(self):\n pdf, psdf = self.bool_non_numeric_pdf, self.bool_non_numeric_psdf\n pser, psser = pdf[\"this\"], psdf[\"this\"]\n other_pser, other_psser = pdf[\"that\"], psdf[\"that\"]\n self.assert_eq(pser >= other_pser, psser >= other_psser)\n self.assert_eq(pser >= pser, psser >= psser)\n\n\[email protected](\n not extension_object_dtypes_available, \"pandas extension object dtypes are not available\"\n)\nclass StringExtensionOpsTest(StringOpsTest, PandasOnSparkTestCase, TestCasesUtils):\n @property\n def pser(self):\n return pd.Series([\"x\", \"y\", \"z\", None], dtype=\"string\")\n\n @property\n def psser(self):\n return ps.from_pandas(self.pser)\n\n @property\n def other_pser(self):\n return pd.Series([None, \"z\", \"y\", \"x\"], dtype=\"string\")\n\n @property\n def other_psser(self):\n return ps.from_pandas(self.other_pser)\n\n def test_radd(self):\n self.assert_eq(\"x\" + self.pser, (\"x\" + self.psser).astype(\"string\"))\n self.assertRaises(TypeError, lambda: 1 + self.psser)\n\n def test_mul(self):\n self.assertRaises(TypeError, lambda: self.psser * \"x\")\n self.assert_eq(self.pser * 1, self.psser * 1)\n\n with option_context(\"compute.ops_on_diff_frames\", True):\n for pser, psser in self.pser_psser_pairs:\n if psser.dtype in [np.int32, np.int64]:\n self.assert_eq(\n ps.Series([\"x\", \"yy\", \"zzz\", None]).astype(\"string\"),\n (self.psser * psser).sort_index(),\n )\n else:\n self.assertRaises(TypeError, lambda: self.psser * psser)\n\n def test_from_to_pandas(self):\n data = [\"x\", \"y\", \"z\", None]\n pser = pd.Series(data, dtype=\"string\")\n psser = ps.Series(data, dtype=\"string\")\n self.assert_eq(pser, psser.to_pandas())\n self.assert_eq(ps.from_pandas(pser), psser)\n\n def test_isnull(self):\n self.assert_eq(self.pser.isnull(), self.psser.isnull())\n\n def test_astype(self):\n pser = self.pser\n psser = self.psser\n\n 
self.assert_eq(pser.astype(str).tolist(), psser.astype(str).tolist())\n\n self.assert_eq(pser.astype(\"category\"), psser.astype(\"category\"))\n cat_type = CategoricalDtype(categories=[\"x\", \"y\"])\n self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))\n for dtype in self.object_extension_dtypes:\n if dtype in [\"string\", StringDtype()]:\n self.check_extension(pser.astype(dtype), psser.astype(dtype))\n\n def test_eq(self):\n with option_context(\"compute.ops_on_diff_frames\", True):\n self.check_extension(\n self.pser == self.other_pser, (self.psser == self.other_psser).sort_index()\n )\n self.check_extension(self.pser == self.pser, (self.psser == self.psser).sort_index())\n\n def test_ne(self):\n with option_context(\"compute.ops_on_diff_frames\", True):\n self.check_extension(\n self.pser != self.other_pser, (self.psser != self.other_psser).sort_index()\n )\n self.check_extension(self.pser != self.pser, (self.psser != self.psser).sort_index())\n\n def test_lt(self):\n with option_context(\"compute.ops_on_diff_frames\", True):\n self.check_extension(\n self.pser < self.other_pser, (self.psser < self.other_psser).sort_index()\n )\n self.check_extension(self.pser < self.pser, (self.psser < self.psser).sort_index())\n\n def test_le(self):\n with option_context(\"compute.ops_on_diff_frames\", True):\n self.check_extension(\n self.pser <= self.other_pser, (self.psser <= self.other_psser).sort_index()\n )\n self.check_extension(self.pser <= self.pser, (self.psser <= self.psser).sort_index())\n\n def test_gt(self):\n with option_context(\"compute.ops_on_diff_frames\", True):\n self.check_extension(\n self.pser > self.other_pser, (self.psser > self.other_psser).sort_index()\n )\n self.check_extension(self.pser > self.pser, (self.psser > self.psser).sort_index())\n\n def test_ge(self):\n with option_context(\"compute.ops_on_diff_frames\", True):\n self.check_extension(\n self.pser >= self.other_pser, (self.psser >= self.other_psser).sort_index()\n )\n self.check_extension(self.pser >= self.pser, (self.psser >= self.psser).sort_index())\n\n\nif __name__ == \"__main__\":\n\n from pyspark.pandas.tests.data_type_ops.test_string_ops import * # noqa: F401\n\n try:\n import xmlrunner # type: ignore[import]\n\n testRunner = xmlrunner.XMLTestRunner(output=\"target/test-reports\", verbosity=2)\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n",
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom math import sqrt\nimport unittest\n\nfrom numpy import array, abs, tile\n\nfrom pyspark.mllib.linalg import SparseVector, DenseVector, Vectors\nfrom pyspark.mllib.linalg.distributed import RowMatrix\nfrom pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec\nfrom pyspark.testing.mllibutils import MLlibTestCase\n\n\nclass FeatureTest(MLlibTestCase):\n def test_idf_model(self):\n data = [\n Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),\n Vectors.dense([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]),\n Vectors.dense([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]),\n Vectors.dense([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9]),\n ]\n model = IDF().fit(self.sc.parallelize(data, 2))\n idf = model.idf()\n self.assertEqual(len(idf), 11)\n\n\nclass Word2VecTests(MLlibTestCase):\n def test_word2vec_setters(self):\n model = (\n Word2Vec()\n .setVectorSize(2)\n .setLearningRate(0.01)\n .setNumPartitions(2)\n .setNumIterations(10)\n .setSeed(1024)\n .setMinCount(3)\n .setWindowSize(6)\n )\n self.assertEqual(model.vectorSize, 2)\n self.assertTrue(model.learningRate < 0.02)\n self.assertEqual(model.numPartitions, 2)\n self.assertEqual(model.numIterations, 10)\n self.assertEqual(model.seed, 1024)\n self.assertEqual(model.minCount, 3)\n self.assertEqual(model.windowSize, 6)\n\n def test_word2vec_get_vectors(self):\n data = [\n [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"],\n [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"],\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n [\"a\", \"b\", \"c\", \"d\"],\n [\"a\", \"b\", \"c\"],\n [\"a\", \"b\"],\n [\"a\"],\n ]\n model = Word2Vec().fit(self.sc.parallelize(data))\n self.assertEqual(len(model.getVectors()), 3)\n\n\nclass StandardScalerTests(MLlibTestCase):\n def test_model_setters(self):\n data = [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]\n model = StandardScaler().fit(self.sc.parallelize(data))\n self.assertIsNotNone(model.setWithMean(True))\n self.assertIsNotNone(model.setWithStd(True))\n self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([-1.0, -1.0, -1.0]))\n\n def test_model_transform(self):\n data = [[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]\n model = StandardScaler().fit(self.sc.parallelize(data))\n self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([1.0, 2.0, 3.0]))\n\n\nclass ElementwiseProductTests(MLlibTestCase):\n def test_model_transform(self):\n weight = Vectors.dense([3, 2, 1])\n\n densevec = Vectors.dense([4, 5, 6])\n sparsevec = Vectors.sparse(3, [0], [1])\n eprod = ElementwiseProduct(weight)\n self.assertEqual(eprod.transform(densevec), DenseVector([12, 10, 6]))\n self.assertEqual(eprod.transform(sparsevec), SparseVector(3, [0], [3]))\n\n\nclass HashingTFTest(MLlibTestCase):\n def test_binary_term_freqs(self):\n 
hashingTF = HashingTF(100).setBinary(True)\n doc = \"a a b c c c\".split(\" \")\n n = hashingTF.numFeatures\n output = hashingTF.transform(doc).toArray()\n expected = Vectors.sparse(\n n,\n {hashingTF.indexOf(\"a\"): 1.0, hashingTF.indexOf(\"b\"): 1.0, hashingTF.indexOf(\"c\"): 1.0},\n ).toArray()\n for i in range(0, n):\n self.assertAlmostEqual(\n output[i],\n expected[i],\n 14,\n \"Error at \" + str(i) + \": expected \" + str(expected[i]) + \", got \" + str(output[i]),\n )\n\n\nclass DimensionalityReductionTests(MLlibTestCase):\n\n denseData = [\n Vectors.dense([0.0, 1.0, 2.0]),\n Vectors.dense([3.0, 4.0, 5.0]),\n Vectors.dense([6.0, 7.0, 8.0]),\n Vectors.dense([9.0, 0.0, 1.0]),\n ]\n sparseData = [\n Vectors.sparse(3, [(1, 1.0), (2, 2.0)]),\n Vectors.sparse(3, [(0, 3.0), (1, 4.0), (2, 5.0)]),\n Vectors.sparse(3, [(0, 6.0), (1, 7.0), (2, 8.0)]),\n Vectors.sparse(3, [(0, 9.0), (2, 1.0)]),\n ]\n\n def assertEqualUpToSign(self, vecA, vecB):\n eq1 = vecA - vecB\n eq2 = vecA + vecB\n self.assertTrue(sum(abs(eq1)) < 1e-6 or sum(abs(eq2)) < 1e-6)\n\n def test_svd(self):\n denseMat = RowMatrix(self.sc.parallelize(self.denseData))\n sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))\n m = 4\n n = 3\n for mat in [denseMat, sparseMat]:\n for k in range(1, 4):\n rm = mat.computeSVD(k, computeU=True)\n self.assertEqual(rm.s.size, k)\n self.assertEqual(rm.U.numRows(), m)\n self.assertEqual(rm.U.numCols(), k)\n self.assertEqual(rm.V.numRows, n)\n self.assertEqual(rm.V.numCols, k)\n\n # Test that U returned is None if computeU is set to False.\n self.assertEqual(mat.computeSVD(1).U, None)\n\n # Test that low rank matrices cannot have number of singular values\n # greater than a limit.\n rm = RowMatrix(self.sc.parallelize(tile([1, 2, 3], (3, 1))))\n self.assertEqual(rm.computeSVD(3, False, 1e-6).s.size, 1)\n\n def test_pca(self):\n expected_pcs = array(\n [\n [0.0, 1.0, 0.0],\n [sqrt(2.0) / 2.0, 0.0, sqrt(2.0) / 2.0],\n [sqrt(2.0) / 2.0, 0.0, -sqrt(2.0) / 2.0],\n ]\n )\n n = 3\n denseMat = RowMatrix(self.sc.parallelize(self.denseData))\n sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))\n for mat in [denseMat, sparseMat]:\n for k in range(1, 4):\n pcs = mat.computePrincipalComponents(k)\n self.assertEqual(pcs.numRows, n)\n self.assertEqual(pcs.numCols, k)\n\n # We can just test the updated principal component for equality.\n self.assertEqualUpToSign(pcs.toArray()[:, k - 1], expected_pcs[:, k - 1])\n\n\nif __name__ == \"__main__\":\n from pyspark.mllib.tests.test_feature import * # noqa: F401\n\n try:\n import xmlrunner # type: ignore[import]\n\n testRunner = xmlrunner.XMLTestRunner(output=\"target/test-reports\", verbosity=2)\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n"
] |
[
[
"pandas.concat",
"pandas.api.types.CategoricalDtype",
"pandas.Series",
"pandas.StringDtype",
"pandas.DataFrame"
],
[
"numpy.abs",
"numpy.tile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.0",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
davegutz/myStateOfCharge
|
[
"d03dc5e92a9561d4b28be271d4eabe40b48b32ce"
] |
[
"SOC_Photon/Battery State/EKF/sandbox/example2.py"
] |
[
"# Play with Kalman filters from Kalman-and-Bayesian-Filters-in-Python\n# Press the green button in the gutter to run the script.\n# install packages using 'python -m pip install numpy, matplotlib, scipy, pyfilter\n# References:\n# [2] Roger Labbe. \"Kalman and Bayesian Filters in Python\" -->kf_book\n# https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Pythonimport numpy as np\n# Dependencies: python3.10, numpy, matplotlib, math, filterpy, ./kf_book\nfrom numpy.random import randn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom kf_book.nonlinear_plots import plot_gaussians\nfrom filterpy.kalman import KalmanFilter\nfrom filterpy.common import Q_discrete_white_noise\nfrom math import sqrt\n\n\ndef univariate_filter(x0, P, R, Q):\n kf1 = KalmanFilter(dim_x=1, dim_z=1, dim_u=1)\n kf1.x = np.array([[x0]])\n kf1.P *= P\n kf1.H = np.array([[1.]])\n kf1.F = np.array([[1.]])\n kf1.B = np.array([[1.]])\n kf1.Q *= Q\n kf1.R *= R\n return kf1\n\n\ndef pos_vel_filter(x, P, R, Q=0., dt=1.0):\n \"\"\" Returns a KalmanFilter which implements a\n constant velocity model for a state [x dx].T\n \"\"\"\n \n kf = KalmanFilter(dim_x=2, dim_z=1)\n kf.x = np.array([x[0], x[1]]) # location and velocity\n kf.F = np.array([[1., dt],\n [0., 1.]]) # state transition matrix\n kf.H = np.array([[1., 0]]) # Measurement function\n kf.R *= R # measurement uncertainty\n if np.isscalar(P):\n kf.P *= P # covariance matrix \n else:\n kf.P[:] = P # [:] makes deep copy\n if np.isscalar(Q):\n kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=Q)\n else:\n kf.Q[:] = Q\n return kf\n\n\ndef plot_1d_2d(xs, xs1d, xs2d):\n plt.plot(xs1d, label='1D Filter')\n plt.scatter(range(len(xs2d)), xs2d, c='r', alpha=0.7, label='2D Filter')\n plt.plot(xs, ls='--', color='k', lw=1, label='track')\n plt.title('State')\n plt.legend(loc=4)\n plt.show()\n\n \ndef compare_1D_2D(x0, P, R, Q, vel, u=None):\n # storage for filter output\n xs, xs1, xs2 = [], [], []\n\n # 1d KalmanFilter\n f1D = univariate_filter(x0, P, R, Q)\n\n #2D Kalman filter\n f2D = pos_vel_filter(x=(x0, vel), P=P, R=R, Q=0)\n if np.isscalar(u):\n u = [u]\n pos = 0 # true position\n for i in range(100):\n pos += vel\n xs.append(pos)\n\n # control input u - discussed below\n f1D.predict(u=u)\n f2D.predict()\n \n z = pos + randn()*sqrt(R) # measurement\n f1D.update(z)\n f2D.update(z)\n \n xs1.append(f1D.x[0])\n xs2.append(f2D.x[0])\n plt.figure()\n plot_1d_2d(xs, xs1, xs2)\n print('example2: done')\n\n\nif __name__ == '__main__':\n compare_1D_2D(x0=0, P=50., R=5., Q=.02, vel=1.)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.isscalar",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eric-czech/dask-ml
|
[
"b94c587abae3f5667eff131b0616ad8f91966e7f"
] |
[
"dask_ml/_compat.py"
] |
[
"import contextlib\nimport os\nfrom collections.abc import Mapping # noqa\nfrom typing import Any, List, Optional, Union\n\nimport dask\nimport dask.array as da\nimport distributed\nimport packaging.version\nimport pandas\nimport sklearn\nimport sklearn.utils.validation\n\nSK_VERSION = packaging.version.parse(sklearn.__version__)\nDASK_VERSION = packaging.version.parse(dask.__version__)\nPANDAS_VERSION = packaging.version.parse(pandas.__version__)\nDISTRIBUTED_VERSION = packaging.version.parse(distributed.__version__)\n\nSK_0_23_2 = SK_VERSION >= packaging.version.parse(\"0.23.2\")\nSK_024 = SK_VERSION >= packaging.version.parse(\"0.24.0.dev0\")\nDASK_240 = DASK_VERSION >= packaging.version.parse(\"2.4.0\")\nDASK_2130 = DASK_VERSION >= packaging.version.parse(\"2.13.0\")\nDASK_2_20_0 = DASK_VERSION >= packaging.version.parse(\"2.20.0\")\nDISTRIBUTED_2_5_0 = DISTRIBUTED_VERSION > packaging.version.parse(\"2.5.0\")\nDISTRIBUTED_2_11_0 = DISTRIBUTED_VERSION > packaging.version.parse(\"2.10.0\") # dev\nWINDOWS = os.name == \"nt\"\n\n\[email protected]\ndef dummy_context(*args: Any, **kwargs: Any):\n # Not needed if Python >= 3.7 is required\n # https://docs.python.org/3/library/contextlib.html#contextlib.nullcontext\n yield\n\n\nblockwise = da.blockwise\n\n\ndef check_is_fitted(est, attributes: Optional[Union[str, List[str]]] = None):\n args: Any = ()\n\n return sklearn.utils.validation.check_is_fitted(est, *args)\n\n\ndef _check_multimetric_scoring(estimator, scoring=None):\n # TODO: See if scikit-learn 0.24 solves the need for using\n # a private method\n from sklearn.metrics._scorer import _check_multimetric_scoring\n from sklearn.metrics import check_scoring\n\n if SK_024:\n if callable(scoring) or isinstance(scoring, (type(None), str)):\n scorers = {\"score\": check_scoring(estimator, scoring=scoring)}\n return scorers, False\n return _check_multimetric_scoring(estimator, scoring), True\n return _check_multimetric_scoring(estimator, scoring)\n"
] |
[
[
"sklearn.metrics.check_scoring",
"sklearn.metrics._scorer._check_multimetric_scoring",
"sklearn.utils.validation.check_is_fitted"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
osamhack2021/WEB_AI_POOL_YD
|
[
"76022ccd90a5e16f45dd315579a1836c9f2e5f16"
] |
[
"ai-backend/cv_summarizer.py"
] |
[
"# 텍스트 요약 API\n# KoBART 기반\nimport torch\nfrom kobart import get_kobart_tokenizer\nfrom transformers.models.bart import BartForConditionalGeneration\n\n\ndef load_model():\n model = BartForConditionalGeneration.from_pretrained(\"./kobart_summary\")\n return model\n\n\n# 텍스트 요약\ndef summarize(text_origin):\n model = load_model()\n tokenizer = get_kobart_tokenizer()\n\n text = text_origin.replace(\"\\n\", \"\")\n input_ids = tokenizer.encode(text)\n input_ids = torch.tensor(input_ids)\n input_ids = input_ids.unsqueeze(0)\n output = model.generate(\n input_ids,\n eos_token_id=1,\n max_length=1024,\n num_beams=10,\n no_repeat_ngram_size=5,\n )\n output = tokenizer.decode(output[0], skip_special_tokens=True)\n return output\n\n\n# 텍스트를 문단별로 분리\ndef split_paragraph(text_origin):\n paragraphs = text_origin.split(\"\\n\\n\")\n return paragraphs\n\n\n# 텍스트를 문단별로 분리하여 요약\ndef summarize_after_split(text_origin):\n splited_paragraphs = split_paragraph(text_origin)\n summarized_text = \"\"\n for paragraph in splited_paragraphs:\n summarized_paragraph = summarize(paragraph)\n summarized_text = summarized_text + \" \" + summarized_paragraph\n return summarized_text\n\n\n# TEST CODE\nif __name__ == \"__main__\":\n import pandas as pd\n\n cv_data = pd.read_csv(\"data/cover-letter.csv\")\n single_data = cv_data.loc[0]\n text_origin = single_data[\"answers\"].replace(\"<\", \"\").split(\">\")[2]\n\n output = summarize(text_origin)\n\n print(\"원문\")\n print(text_origin)\n\n print(\"\\n요약\")\n print(output)\n"
] |
[
[
"pandas.read_csv",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
omidrk/computervisionPanopticToSMPLAuto
|
[
"b84b60f0ec4ffdb4ae61348919a95f7bb2eab926",
"b84b60f0ec4ffdb4ae61348919a95f7bb2eab926"
] |
[
"step3.py",
"lib/smplify/prior.py"
] |
[
"from collections import defaultdict\nimport pandas as pd\nimport os\nimport argparse\nimport numpy as np\nimport joblib\n\n\n\n\ndef clean_data(pkl_folder_path, output_path, list_keys = ['pose', 'betas'], pikl_protocol = 4):\n \n df = pd.DataFrame()\n pkl_files = os.listdir(pkl_folder_path)\n data = []\n for pkls in pkl_files:\n data.append(joblib.load(pkl_folder_path+'/'+pkls))\n\n keys = []\n for d in data:\n for k in d.keys():\n keys.append(k)\n\n section = set(data[0][keys[0]]['frame_ids'])\n \n for idx, i in enumerate(data):\n section = section.intersection(set(i[keys[idx]]['frame_ids']))\n \n \n for frame in section: \n k = defaultdict(list)\n for ind, d in enumerate(data):\n index = np.where(d[keys[ind]]['frame_ids'] == frame)[0][0]\n for key in list_keys:\n k[key].append(np.array(d[keys[ind]][key][index]))\n df = df.append(k, ignore_index=True)\n \n df.to_pickle(output_path + 'pikel_ASLI.pkl', protocol=pikl_protocol)\n print(f\"file save in {output_path}pikel_ASLI.pkl\")\n \n \nif __name__ == '__main__':\n\n try:\n # os.mkdir('outpkl',exist=True)\n os.mkdir('ashpickle',exist=True)\n except:\n print('cant make a output folder. Please make it before run. :(')\n \n parser = argparse.ArgumentParser()\n\n parser.add_argument('--pkl_folder_path', type=str,default='outpkl',\n help='input pkls floder path')\n \n parser.add_argument('--output_path', type=str,default='ashpickle',\n help='output path')\n \n parser.add_argument('--list_keys', type=list,default=['pose', 'betas'],\n help='list of the key that we want to save')\n \n parser.add_argument('--pkl_protocol', type=int,default=4,\n help='pikl protocol for saving')\n\n\n \n \n \n args = parser.parse_args()\n\n clean_data(args.pkl_folder_path, args.output_path, args.list_keys, args.pkl_protocol)",
"# -*- coding: utf-8 -*-\n\n# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is\n# holder of all proprietary rights on this computer program.\n# You can only use this computer program if you have closed\n# a license agreement with MPG or you get the right to use the computer\n# program from someone who is authorized to grant you that right.\n# Any use of the computer program without a valid license is prohibited and\n# liable to prosecution.\n#\n# Copyright©2019 Max-Planck-Gesellschaft zur Förderung\n# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute\n# for Intelligent Systems. All rights reserved.\n#\n# Contact: [email protected]\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys\nimport os\n\nimport time\nimport pickle\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nDEFAULT_DTYPE = torch.float32\n\n\ndef create_prior(prior_type, **kwargs):\n if prior_type == 'gmm':\n prior = MaxMixturePrior(**kwargs)\n elif prior_type == 'l2':\n return L2Prior(**kwargs)\n elif prior_type == 'angle':\n return SMPLifyAnglePrior(**kwargs)\n elif prior_type == 'none' or prior_type is None:\n # Don't use any pose prior\n def no_prior(*args, **kwargs):\n return 0.0\n prior = no_prior\n else:\n raise ValueError('Prior {}'.format(prior_type) + ' is not implemented')\n return prior\n\n\nclass SMPLifyAnglePrior(nn.Module):\n def __init__(self, dtype=torch.float32, **kwargs):\n super(SMPLifyAnglePrior, self).__init__()\n\n # Indices for the roration angle of\n # 55: left elbow, 90deg bend at -np.pi/2\n # 58: right elbow, 90deg bend at np.pi/2\n # 12: left knee, 90deg bend at np.pi/2\n # 15: right knee, 90deg bend at np.pi/2\n angle_prior_idxs = np.array([55, 58, 12, 15], dtype=np.int64)\n angle_prior_idxs = torch.tensor(angle_prior_idxs, dtype=torch.long)\n self.register_buffer('angle_prior_idxs', angle_prior_idxs)\n\n angle_prior_signs = np.array([1, -1, -1, -1],\n dtype=np.float32 if dtype == torch.float32\n else np.float64)\n angle_prior_signs = torch.tensor(angle_prior_signs,\n dtype=dtype)\n self.register_buffer('angle_prior_signs', angle_prior_signs)\n\n def forward(self, pose, with_global_pose=False):\n ''' Returns the angle prior loss for the given pose\n\n Args:\n pose: (Bx[23 + 1] * 3) torch tensor with the axis-angle\n representation of the rotations of the joints of the SMPL model.\n Kwargs:\n with_global_pose: Whether the pose vector also contains the global\n orientation of the SMPL model. 
If not then the indices must be\n corrected.\n Returns:\n A sze (B) tensor containing the angle prior loss for each element\n in the batch.\n '''\n angle_prior_idxs = self.angle_prior_idxs - (not with_global_pose) * 3\n return torch.exp(pose[:, angle_prior_idxs] *\n self.angle_prior_signs).pow(2)\n\n\nclass L2Prior(nn.Module):\n def __init__(self, dtype=DEFAULT_DTYPE, reduction='sum', **kwargs):\n super(L2Prior, self).__init__()\n\n def forward(self, module_input, *args):\n return torch.sum(module_input.pow(2))\n\n\nclass MaxMixturePrior(nn.Module):\n\n def __init__(self, prior_folder='prior',\n num_gaussians=6, dtype=DEFAULT_DTYPE, epsilon=1e-16,\n use_merged=True,\n **kwargs):\n super(MaxMixturePrior, self).__init__()\n\n if dtype == DEFAULT_DTYPE:\n np_dtype = np.float32\n elif dtype == torch.float64:\n np_dtype = np.float64\n else:\n print('Unknown float type {}, exiting!'.format(dtype))\n sys.exit(-1)\n\n self.num_gaussians = num_gaussians\n self.epsilon = epsilon\n self.use_merged = use_merged\n gmm_fn = 'gmm_{:02d}.pkl'.format(num_gaussians)\n\n full_gmm_fn = os.path.join(prior_folder, gmm_fn)\n if not os.path.exists(full_gmm_fn):\n print('The path to the mixture prior \"{}\"'.format(full_gmm_fn) +\n ' does not exist, exiting!')\n sys.exit(-1)\n\n with open(full_gmm_fn, 'rb') as f:\n gmm = pickle.load(f, encoding='latin1')\n\n if type(gmm) == dict:\n means = gmm['means'].astype(np_dtype)\n covs = gmm['covars'].astype(np_dtype)\n weights = gmm['weights'].astype(np_dtype)\n elif 'sklearn.mixture.gmm.GMM' in str(type(gmm)):\n means = gmm.means_.astype(np_dtype)\n covs = gmm.covars_.astype(np_dtype)\n weights = gmm.weights_.astype(np_dtype)\n else:\n print('Unknown type for the prior: {}, exiting!'.format(type(gmm)))\n sys.exit(-1)\n\n self.register_buffer('means', torch.tensor(means, dtype=dtype))\n\n self.register_buffer('covs', torch.tensor(covs, dtype=dtype))\n\n precisions = [np.linalg.inv(cov) for cov in covs]\n precisions = np.stack(precisions).astype(np_dtype)\n\n self.register_buffer('precisions',\n torch.tensor(precisions, dtype=dtype))\n\n # The constant term:\n sqrdets = np.array([(np.sqrt(np.linalg.det(c)))\n for c in gmm['covars']])\n const = (2 * np.pi)**(69 / 2.)\n\n nll_weights = np.asarray(gmm['weights'] / (const *\n (sqrdets / sqrdets.min())))\n nll_weights = torch.tensor(nll_weights, dtype=dtype).unsqueeze(dim=0)\n self.register_buffer('nll_weights', nll_weights)\n\n weights = torch.tensor(gmm['weights'], dtype=dtype).unsqueeze(dim=0)\n self.register_buffer('weights', weights)\n\n self.register_buffer('pi_term',\n torch.log(torch.tensor(2 * np.pi, dtype=dtype)))\n\n cov_dets = [np.log(np.linalg.det(cov.astype(np_dtype)) + epsilon)\n for cov in covs]\n self.register_buffer('cov_dets',\n torch.tensor(cov_dets, dtype=dtype))\n\n # The dimensionality of the random variable\n self.random_var_dim = self.means.shape[1]\n\n def get_mean(self):\n ''' Returns the mean of the mixture '''\n mean_pose = torch.matmul(self.weights, self.means)\n return mean_pose\n\n def merged_log_likelihood(self, pose, betas):\n diff_from_mean = pose.unsqueeze(dim=1) - self.means\n\n prec_diff_prod = torch.einsum('mij,bmj->bmi',\n [self.precisions, diff_from_mean])\n diff_prec_quadratic = (prec_diff_prod * diff_from_mean).sum(dim=-1)\n\n curr_loglikelihood = 0.5 * diff_prec_quadratic - \\\n torch.log(self.nll_weights)\n # curr_loglikelihood = 0.5 * (self.cov_dets.unsqueeze(dim=0) +\n # self.random_var_dim * self.pi_term +\n # diff_prec_quadratic\n # ) - torch.log(self.weights)\n\n min_likelihood, 
_ = torch.min(curr_loglikelihood, dim=1)\n return min_likelihood\n\n def log_likelihood(self, pose, betas, *args, **kwargs):\n ''' Create graph operation for negative log-likelihood calculation\n '''\n likelihoods = []\n\n for idx in range(self.num_gaussians):\n mean = self.means[idx]\n prec = self.precisions[idx]\n cov = self.covs[idx]\n diff_from_mean = pose - mean\n\n curr_loglikelihood = torch.einsum('bj,ji->bi',\n [diff_from_mean, prec])\n curr_loglikelihood = torch.einsum('bi,bi->b',\n [curr_loglikelihood,\n diff_from_mean])\n cov_term = torch.log(torch.det(cov) + self.epsilon)\n curr_loglikelihood += 0.5 * (cov_term +\n self.random_var_dim *\n self.pi_term)\n likelihoods.append(curr_loglikelihood)\n\n log_likelihoods = torch.stack(likelihoods, dim=1)\n min_idx = torch.argmin(log_likelihoods, dim=1)\n weight_component = self.nll_weights[:, min_idx]\n weight_component = -torch.log(weight_component)\n\n return weight_component + log_likelihoods[:, min_idx]\n\n def forward(self, pose, betas):\n if self.use_merged:\n return self.merged_log_likelihood(pose, betas)\n else:\n return self.log_likelihood(pose, betas)\n"
] |
[
[
"numpy.array",
"numpy.where",
"pandas.DataFrame"
],
[
"numpy.linalg.inv",
"torch.einsum",
"torch.min",
"torch.det",
"torch.argmin",
"numpy.stack",
"torch.tensor",
"torch.exp",
"torch.matmul",
"numpy.linalg.det",
"torch.log",
"torch.stack",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mgkulik/antibody-pairing
|
[
"f6f3c73b5a77b8b67d6f82799b6d5843eb214d66"
] |
[
"benchmark.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport argparse\nfrom sklearn.metrics import accuracy_score, roc_auc_score\n\n\ndef benchmark(predictions_csv, targets_csv):\n \n predictions = pd.read_csv(predictions_csv)['prediction']\n targets = pd.read_csv(targets_csv)['target']\n \n \n acc = accuracy_score(targets, np.where(predictions>.5, 1, 0))\n auc = roc_auc_score(targets, predictions)\n\n return {\n 'accuracy': acc,\n 'AUC': auc\n }\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--predictions', default='predictions.csv')\n parser.add_argument('--targets', default='targets.csv')\n args = parser.parse_args()\n print('Benchmarks: ', benchmark(args.predictions, args.targets))"
] |
[
[
"sklearn.metrics.roc_auc_score",
"pandas.read_csv",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
CareBENT/MinkowskiEngine
|
[
"25736cce2509d2ede5b47ab4b758cb0e846b828d"
] |
[
"examples/modelnet40.py"
] |
[
"# Copyright (c) Chris Choy ([email protected]).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part\n# of the code.\nimport os\nimport sys\nimport subprocess\nimport argparse\nimport logging\nfrom time import time\n# Must be imported before\ntry:\n import open3d as o3d\nexcept ImportError:\n raise ImportError('Please install open3d with `pip install open3d`.')\n\nimport torch\nimport torch.utils.data\nfrom torch.utils.data.sampler import Sampler\nimport torch.optim as optim\nfrom torchvision.transforms import Compose as VisionCompose\n\nimport numpy as np\nfrom scipy.linalg import expm, norm\n\nimport MinkowskiEngine as ME\nfrom examples.resnet import ResNet50\n\nassert int(\n o3d.__version__.split('.')[1]\n) >= 8, f'Requires open3d version >= 0.8, the current version is {o3d.__version__}'\n\nch = logging.StreamHandler(sys.stdout)\nlogging.getLogger().setLevel(logging.INFO)\nlogging.basicConfig(\n format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',\n datefmt='%m/%d %H:%M:%S',\n handlers=[ch])\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--voxel_size', type=float, default=0.05)\nparser.add_argument('--max_iter', type=int, default=120000)\nparser.add_argument('--val_freq', type=int, default=1000)\nparser.add_argument('--batch_size', default=256, type=int)\nparser.add_argument('--lr', default=1e-1, type=float)\nparser.add_argument('--momentum', type=float, default=0.9)\nparser.add_argument('--weight_decay', type=float, default=1e-4)\nparser.add_argument('--num_workers', type=int, default=1)\nparser.add_argument('--stat_freq', type=int, default=50)\nparser.add_argument('--checkpoint_weights', type=str, default='modelnet.pth')\nparser.add_argument('--weights', type=str, default='modelnet.pth')\nparser.add_argument('--load_optimizer', type=str, default='true')\n\nif not os.path.exists('ModelNet40'):\n logging.info('Downloading the fixed ModelNet40 dataset...')\n subprocess.run([\"sh\", \"./examples/download_modelnet40.sh\"])\n\n\nclass InfSampler(Sampler):\n \"\"\"Samples elements randomly, without replacement.\n\n Arguments:\n data_source (Dataset): dataset to sample from\n \"\"\"\n\n def __init__(self, data_source, shuffle=False):\n self.data_source = data_source\n self.shuffle = shuffle\n self.reset_permutation()\n\n def reset_permutation(self):\n perm = len(self.data_source)\n if self.shuffle:\n perm 
= torch.randperm(perm)\n self._perm = perm.tolist()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if len(self._perm) == 0:\n self.reset_permutation()\n return self._perm.pop()\n\n def __len__(self):\n return len(self.data_source)\n\n\ndef resample_mesh(mesh_cad, density=1):\n '''\n https://chrischoy.github.io/research/barycentric-coordinate-for-mesh-sampling/\n Samples point cloud on the surface of the model defined as vectices and\n faces. This function uses vectorized operations so fast at the cost of some\n memory.\n\n param mesh_cad: low-polygon triangle mesh in o3d.geometry.TriangleMesh\n param density: density of the point cloud per unit area\n param return_numpy: return numpy format or open3d pointcloud format\n return resampled point cloud\n\n Reference :\n [1] Barycentric coordinate system\n \\begin{align}\n P = (1 - \\sqrt{r_1})A + \\sqrt{r_1} (1 - r_2) B + \\sqrt{r_1} r_2 C\n \\end{align}\n '''\n faces = np.array(mesh_cad.triangles).astype(int)\n vertices = np.array(mesh_cad.vertices)\n\n vec_cross = np.cross(vertices[faces[:, 0], :] - vertices[faces[:, 2], :],\n vertices[faces[:, 1], :] - vertices[faces[:, 2], :])\n face_areas = np.sqrt(np.sum(vec_cross**2, 1))\n\n n_samples = (np.sum(face_areas) * density).astype(int)\n # face_areas = face_areas / np.sum(face_areas)\n\n # Sample exactly n_samples. First, oversample points and remove redundant\n # Bug fix by Yangyan ([email protected])\n n_samples_per_face = np.ceil(density * face_areas).astype(int)\n floor_num = np.sum(n_samples_per_face) - n_samples\n if floor_num > 0:\n indices = np.where(n_samples_per_face > 0)[0]\n floor_indices = np.random.choice(indices, floor_num, replace=True)\n n_samples_per_face[floor_indices] -= 1\n\n n_samples = np.sum(n_samples_per_face)\n\n # Create a vector that contains the face indices\n sample_face_idx = np.zeros((n_samples,), dtype=int)\n acc = 0\n for face_idx, _n_sample in enumerate(n_samples_per_face):\n sample_face_idx[acc:acc + _n_sample] = face_idx\n acc += _n_sample\n\n r = np.random.rand(n_samples, 2)\n A = vertices[faces[sample_face_idx, 0], :]\n B = vertices[faces[sample_face_idx, 1], :]\n C = vertices[faces[sample_face_idx, 2], :]\n\n P = (1 - np.sqrt(r[:, 0:1])) * A + \\\n np.sqrt(r[:, 0:1]) * (1 - r[:, 1:]) * B + \\\n np.sqrt(r[:, 0:1]) * r[:, 1:] * C\n\n return P\n\n\ndef collate_pointcloud_fn(list_data):\n new_list_data = []\n num_removed = 0\n for data in list_data:\n if data is not None:\n new_list_data.append(data)\n else:\n num_removed += 1\n\n list_data = new_list_data\n\n if len(list_data) == 0:\n raise ValueError('No data in the batch')\n\n coords, feats, labels = list(zip(*list_data))\n\n eff_num_batch = len(coords)\n assert len(labels) == eff_num_batch\n\n coords_batch = ME.utils.batched_coordinates(coords)\n feats_batch = torch.from_numpy(np.vstack(feats)).float()\n\n # Concatenate all lists\n return {\n 'coords': coords_batch,\n 'feats': feats_batch,\n 'labels': torch.LongTensor(labels),\n }\n\n\nclass Compose(VisionCompose):\n\n def __call__(self, *args):\n for t in self.transforms:\n args = t(*args)\n return args\n\n\nclass RandomRotation:\n\n def __init__(self, axis=None, max_theta=180):\n self.axis = axis\n self.max_theta = max_theta\n\n def _M(self, axis, theta):\n return expm(np.cross(np.eye(3), axis / norm(axis) * theta))\n\n def __call__(self, coords, feats):\n if self.axis is not None:\n axis = self.axis\n else:\n axis = np.random.rand(3) - 0.5\n R = self._M(axis, (np.pi * self.max_theta / 180) * 2 *\n (np.random.rand(1) - 0.5))\n R_n = 
self._M(\n np.random.rand(3) - 0.5,\n (np.pi * 15 / 180) * 2 * (np.random.rand(1) - 0.5))\n return coords @ R @ R_n, feats\n\n\nclass RandomScale:\n\n def __init__(self, min, max):\n self.scale = max - min\n self.bias = min\n\n def __call__(self, coords, feats):\n s = self.scale * np.random.rand(1) + self.bias\n return coords * s, feats\n\n\nclass RandomShear:\n\n def __call__(self, coords, feats):\n T = np.eye(3) + 0.1 * np.random.randn(3, 3)\n return coords @ T, feats\n\n\nclass RandomTranslation:\n\n def __call__(self, coords, feats):\n trans = 0.05 * np.random.randn(1, 3)\n return coords + trans, feats\n\n\nclass ModelNet40Dataset(torch.utils.data.Dataset):\n AUGMENT = None\n DATA_FILES = {\n 'train': 'train_modelnet40.txt',\n 'val': 'val_modelnet40.txt',\n 'test': 'test_modelnet40.txt'\n }\n\n CATEGORIES = [\n 'airplane', 'bathtub', 'bed', 'bench', 'bookshelf', 'bottle', 'bowl',\n 'car', 'chair', 'cone', 'cup', 'curtain', 'desk', 'door', 'dresser',\n 'flower_pot', 'glass_box', 'guitar', 'keyboard', 'lamp', 'laptop',\n 'mantel', 'monitor', 'night_stand', 'person', 'piano', 'plant', 'radio',\n 'range_hood', 'sink', 'sofa', 'stairs', 'stool', 'table', 'tent',\n 'toilet', 'tv_stand', 'vase', 'wardrobe', 'xbox'\n ]\n\n def __init__(self, phase, transform=None, config=None):\n self.phase = phase\n self.files = []\n self.cache = {}\n self.data_objects = []\n self.transform = transform\n self.voxel_size = config.voxel_size\n self.last_cache_percent = 0\n\n self.root = './ModelNet40'\n self.files = open(os.path.join(self.root,\n self.DATA_FILES[phase])).read().split()\n logging.info(\n f\"Loading the subset {phase} from {self.root} with {len(self.files)} files\"\n )\n self.density = 4000\n\n # Ignore warnings in obj loader\n o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, idx):\n mesh_file = os.path.join(self.root, self.files[idx])\n category = self.files[idx].split('/')[0]\n label = self.CATEGORIES.index(category)\n if idx in self.cache:\n xyz = self.cache[idx]\n else:\n # Load a mesh, over sample, copy, rotate, voxelization\n assert os.path.exists(mesh_file)\n pcd = o3d.io.read_triangle_mesh(mesh_file)\n # Normalize to fit the mesh inside a unit cube while preserving aspect ratio\n vertices = np.asarray(pcd.vertices)\n vmax = vertices.max(0, keepdims=True)\n vmin = vertices.min(0, keepdims=True)\n pcd.vertices = o3d.utility.Vector3dVector((vertices - vmin) /\n (vmax - vmin).max() + 0.5)\n\n # Oversample points and copy\n xyz = resample_mesh(pcd, density=self.density)\n self.cache[idx] = xyz\n cache_percent = int((len(self.cache) / len(self)) * 100)\n if cache_percent > 0 and cache_percent % 10 == 0 and cache_percent != self.last_cache_percent:\n logging.info(\n f\"Cached {self.phase}: {len(self.cache)} / {len(self)}: {cache_percent}%\"\n )\n self.last_cache_percent = cache_percent\n\n # Use color or other features if available\n feats = np.ones((len(xyz), 1))\n\n if len(xyz) < 1000:\n logging.info(\n f\"Skipping {mesh_file}: does not have sufficient CAD sampling density after resampling: {len(xyz)}.\"\n )\n return None\n\n if self.transform:\n xyz, feats = self.transform(xyz, feats)\n\n # Get coords\n coords = np.floor(xyz / self.voxel_size)\n\n return (coords, xyz, label)\n\n\ndef make_data_loader(phase, augment_data, batch_size, shuffle, num_workers,\n repeat, config):\n transformations = []\n if augment_data:\n transformations.append(RandomRotation(axis=np.array([0, 0, 1])))\n 
transformations.append(RandomTranslation())\n transformations.append(RandomScale(0.8, 1.2))\n transformations.append(RandomShear())\n\n dset = ModelNet40Dataset(\n phase, transform=Compose(transformations), config=config)\n\n args = {\n 'batch_size': batch_size,\n 'num_workers': num_workers,\n 'collate_fn': collate_pointcloud_fn,\n 'pin_memory': False,\n 'drop_last': False\n }\n\n if repeat:\n args['sampler'] = InfSampler(dset, shuffle)\n else:\n args['shuffle'] = shuffle\n\n loader = torch.utils.data.DataLoader(dset, **args)\n\n return loader\n\n\ndef test(net, test_iter, config, phase='val'):\n net.eval()\n num_correct, tot_num = 0, 0\n for i in range(len(test_iter)):\n data_dict = test_iter.next()\n sin = ME.SparseTensor(\n data_dict['coords'][:, :3] * config.voxel_size,\n data_dict['coords'].int(),\n allow_duplicate_coords=True, # for classification, it doesn't matter\n ).to(device)\n sout = net(sin)\n is_correct = data_dict['labels'] == torch.argmax(sout.F, 1).cpu()\n num_correct += is_correct.sum().item()\n tot_num += len(sout)\n\n if i % config.stat_freq == 0:\n logging.info(\n f'{phase} set iter: {i} / {len(test_iter)}, Accuracy : {num_correct / tot_num:.3e}'\n )\n logging.info(f'{phase} set accuracy : {num_correct / tot_num:.3e}')\n\n\ndef train(net, device, config):\n optimizer = optim.SGD(\n net.parameters(),\n lr=config.lr,\n momentum=config.momentum,\n weight_decay=config.weight_decay)\n scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.95)\n\n crit = torch.nn.CrossEntropyLoss()\n\n train_dataloader = make_data_loader(\n 'train',\n augment_data=True,\n batch_size=config.batch_size,\n shuffle=True,\n num_workers=config.num_workers,\n repeat=True,\n config=config)\n val_dataloader = make_data_loader(\n 'val',\n augment_data=False,\n batch_size=config.batch_size,\n shuffle=True,\n num_workers=config.num_workers,\n repeat=True,\n config=config)\n\n curr_iter = 0\n if os.path.exists(config.checkpoint_weights):\n checkpoint = torch.load(config.checkpoint_weights)\n net.load_state_dict(checkpoint['state_dict'])\n if config.load_optimizer.lower() == 'true':\n curr_iter = checkpoint['curr_iter'] + 1\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n\n net.train()\n train_iter = iter(train_dataloader)\n val_iter = iter(val_dataloader)\n logging.info(f'LR: {scheduler.get_lr()}')\n for i in range(curr_iter, config.max_iter):\n\n s = time()\n data_dict = train_iter.next()\n d = time() - s\n\n optimizer.zero_grad()\n sin = ME.SparseTensor(\n data_dict['coords'][:, :3] * config.voxel_size,\n data_dict['coords'].int(),\n allow_duplicate_coords=True, # for classification, it doesn't matter\n ).to(device)\n sout = net(sin)\n loss = crit(sout.F, data_dict['labels'].to(device))\n loss.backward()\n optimizer.step()\n t = time() - s\n\n if i % config.stat_freq == 0:\n logging.info(\n f'Iter: {i}, Loss: {loss.item():.3e}, Data Loading Time: {d:.3e}, Tot Time: {t:.3e}'\n )\n\n if i % config.val_freq == 0 and i > 0:\n torch.save(\n {\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n 'curr_iter': i,\n }, config.weights)\n\n # Validation\n logging.info('Validation')\n test(net, val_iter, config, 'val')\n\n scheduler.step()\n logging.info(f'LR: {scheduler.get_lr()}')\n\n net.train()\n\n\nif __name__ == '__main__':\n config = parser.parse_args()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n net = ResNet50(3, 40, D=3)\n net.to(device)\n\n train(net, 
device, config)\n\n test_dataloader = make_data_loader(\n 'test',\n augment_data=False,\n batch_size=config.batch_size,\n shuffle=True,\n num_workers=config.num_workers,\n repeat=False,\n config=config)\n\n logging.info('Test')\n test(net, iter(test_dataloader), config, 'test')\n"
] |
[
[
"numpy.sqrt",
"torch.load",
"torch.randperm",
"numpy.asarray",
"torch.utils.data.DataLoader",
"numpy.random.randn",
"torch.cuda.is_available",
"numpy.cross",
"numpy.where",
"torch.nn.CrossEntropyLoss",
"numpy.eye",
"numpy.ceil",
"scipy.linalg.norm",
"numpy.zeros",
"torch.LongTensor",
"numpy.random.choice",
"numpy.random.rand",
"numpy.floor",
"numpy.array",
"numpy.sum",
"torch.optim.lr_scheduler.ExponentialLR",
"numpy.vstack",
"torch.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
yamad07/pytorch.sngan_projection
|
[
"de8da69de19e3ac85b06b675416b06ee2416eeff"
] |
[
"links/conditional_batchnorm.py"
] |
[
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\n\nclass ConditionalBatchNorm2d(nn.BatchNorm2d):\n\n \"\"\"Conditional Batch Normalization\"\"\"\n\n def __init__(self, num_features, eps=1e-05, momentum=0.1,\n affine=False, track_running_stats=True):\n super(ConditionalBatchNorm2d, self).__init__(\n num_features, eps, momentum, affine, track_running_stats\n )\n\n def forward(self, input, weight, bias, **kwargs):\n self._check_input_dim(input)\n\n exponential_average_factor = 0.0\n\n if self.training and self.track_running_stats:\n self.num_batches_tracked += 1\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / self.num_batches_tracked.item()\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n\n output = F.batch_norm(input, self.running_mean, self.running_var,\n self.weight, self.bias,\n self.training or not self.track_running_stats,\n exponential_average_factor, self.eps)\n if weight.dim() == 1:\n weight = weight.unsqueeze(0)\n if bias.dim() == 1:\n bias = bias.unsqueeze(0)\n size = output.size()\n weight = weight.unsqueeze(-1).unsqueeze(-1).expand(size)\n bias = bias.unsqueeze(-1).unsqueeze(-1).expand(size)\n return weight * output + bias\n\n\nclass CategoricalConditionalBatchNorm2d(ConditionalBatchNorm2d):\n\n def __init__(self, num_classes, num_features, eps=1e-5, momentum=0.1,\n affine=False, track_running_stats=True):\n super(CategoricalConditionalBatchNorm2d, self).__init__(\n num_features, eps, momentum, affine, track_running_stats\n )\n self.weights = nn.Embedding(num_classes, num_features)\n self.biases = nn.Embedding(num_classes, num_features)\n\n self._initialize()\n\n def _initialize(self):\n init.ones_(self.weights.weight.data)\n init.zeros_(self.biases.weight.data)\n\n def forward(self, input, c, **kwargs):\n weight = self.weights(c)\n bias = self.biases(c)\n\n return super(CategoricalConditionalBatchNorm2d, self).forward(input, weight, bias)\n\n\nif __name__ == '__main__':\n \"\"\"Forward computation check.\"\"\"\n import torch\n size = (3, 3, 12, 12)\n batch_size, num_features = size[:2]\n print('# Affirm embedding output')\n naive_bn = nn.BatchNorm2d(3)\n idx_input = torch.tensor([1, 2, 0], dtype=torch.long)\n embedding = nn.Embedding(3, 3)\n weights = embedding(idx_input)\n print('# weights size', weights.size())\n empty = torch.tensor((), dtype=torch.float)\n running_mean = empty.new_zeros((3,))\n running_var = empty.new_ones((3,))\n\n naive_bn_W = naive_bn.weight\n # print('# weights from embedding | type {}\\n'.format(type(weights)), weights)\n # print('# naive_bn_W | type {}\\n'.format(type(naive_bn_W)), naive_bn_W)\n input = torch.rand(*size, dtype=torch.float32)\n print('input size', input.size())\n print('input ndim ', input.dim())\n\n _ = naive_bn(input)\n\n print('# batch_norm with given weights')\n\n try:\n with torch.no_grad():\n output = F.batch_norm(input, running_mean, running_var,\n weights, naive_bn.bias, False, 0.0, 1e-05)\n except Exception as e:\n print(\"\\tFailed to use given weights\")\n print('# Error msg:', e)\n print()\n else:\n print(\"Succeeded to use given weights\")\n\n print('\\n# Batch norm before use given weights')\n with torch.no_grad():\n tmp_out = F.batch_norm(input, running_mean, running_var,\n naive_bn_W, naive_bn.bias, False, .0, 1e-05)\n weights_cast = weights.unsqueeze(-1).unsqueeze(-1)\n weights_cast = weights_cast.expand(tmp_out.size())\n try:\n out = weights_cast * tmp_out\n except Exception:\n 
print(\"Failed\")\n else:\n print(\"Succeeded!\")\n print('\\t {}'.format(out.size()))\n print(type(tuple(out.size())))\n\n print('--- condBN and catCondBN ---')\n\n catCondBN = CategoricalConditionalBatchNorm2d(3, 3)\n output = catCondBN(input, idx_input)\n\n assert tuple(output.size()) == size\n\n condBN = ConditionalBatchNorm2d(3)\n\n idx = torch.tensor([1], dtype=torch.long)\n out = catCondBN(input, idx)\n\n print('cat cond BN weights\\n', catCondBN.weights.weight.data)\n print('cat cond BN biases\\n', catCondBN.biases.weight.data)\n"
] |
[
[
"torch.nn.functional.batch_norm",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.init.ones_",
"torch.no_grad",
"torch.rand",
"torch.nn.BatchNorm2d",
"torch.nn.init.zeros_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
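The `links/conditional_batchnorm.py` entry above implements class-conditional batch normalization: per-class scale and shift vectors are looked up from `nn.Embedding` tables and applied on top of a non-affine batch norm. A minimal standalone sketch of that idea (toy sizes, assuming a recent PyTorch install; not taken from the file itself):

```python
import torch
import torch.nn as nn

num_classes, num_features = 10, 16                 # hypothetical sizes
bn = nn.BatchNorm2d(num_features, affine=False)    # normalization without learned affine params
gamma = nn.Embedding(num_classes, num_features)    # per-class scale, initialised to 1
beta = nn.Embedding(num_classes, num_features)     # per-class shift, initialised to 0
nn.init.ones_(gamma.weight)
nn.init.zeros_(beta.weight)

x = torch.randn(4, num_features, 8, 8)             # (N, C, H, W) activations
c = torch.randint(0, num_classes, (4,))            # one class label per sample

h = bn(x)                                          # plain batch norm
g = gamma(c).unsqueeze(-1).unsqueeze(-1)           # (N, C, 1, 1), broadcast over H and W
b = beta(c).unsqueeze(-1).unsqueeze(-1)
out = g * h + b                                    # conditional affine transform
print(out.shape)                                   # torch.Size([4, 16, 8, 8])
```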
JetBrains-Research/codetracker-data
|
[
"6fb3900bd3cfe44d900d7fa8e89c7c35818424ed"
] |
[
"src/main/plots/util/plots_helper.py"
] |
[
"# Copyright (c) 2020 Anastasiia Birillo, Elena Lyulina\n\nimport pandas as pd\n\nfrom src.main.util.consts import ISO_ENCODING\nfrom src.main.util.file_util import tt_file_condition, get_all_file_system_items\n\n\ndef print_all_unique_values_in_file(file: str, column: str) -> None:\n data = pd.read_csv(file, encoding=ISO_ENCODING)\n ati_events = data[column].dropna().unique().tolist()\n if ati_events:\n print(file)\n print(ati_events)\n\n\ndef print_all_unique_values_in_files(path: str, column: str) -> None:\n files = get_all_file_system_items(path, tt_file_condition)\n for file in files:\n print_all_unique_values_in_file(file, column)\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MorrowLiam/stock_analysis
|
[
"47436136efa34944e8906f4c45539fbf630f73cc"
] |
[
"Python_Core/risk_models.py"
] |
[
"# %% markdown\n# Portfolio Optimization - Risk\n# %% add path\nif __name__ == '__main__' and __package__ is None:\n import sys, os.path\n sys.path\n # append parent of the directory the current file is in\n inputfilename1 = r\"C:\\Users\\Liam Morrow\\Documents\\Onedrive\\Python scripts\\_01 Liam Stock Analysis Project\\stock_analysis\\Python_Core\"\n inputfilename2 = r\"C:\\Users\\l.morrow\\OneDrive\\Python scripts\\_01 Liam Stock Analysis Project\\stock_analysis\\Python_Core\"\n sys.path.append(inputfilename1)\n sys.path.append(inputfilename2)\n\n# %% imports\nimport numpy as np\nimport math\nimport pandas as pd\nfrom pandas_datareader import data as wb\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nfrom scipy.optimize import minimize\nimport stock_utilities as su\nimport scipy.optimize as sco\nsns.set(style=\"darkgrid\")\n\n# %% fetch stock data\n# tickers=\"AFDIX,FXAIX,JLGRX,MEIKX,PGOYX,HFMVX,FCVIX,FSSNX,WSCGX,CVMIX,DOMOX,FSPSX,ODVYX,MINJX,FGDIX,CMJIX,FFIVX,FCIFX,FFVIX,FDIFX,FIAFX,BPRIX,CBDIX,OIBYX,PDBZX\"\n# tickers=\"AFDIX,FXAIX,JLGRX,MEIKX\"\ntickers = \"PRHSX,IAU,VWIGX,GBF,TRBCF,PRSCX\"\nstart_date = pd.to_datetime('1/1/2016', utc=True)\nend_date = pd.to_datetime('1/6/2020', utc=True)\nstock_df = su.yahoo_stock_fetch(tickers, start_date, end_date)\n\n\n# %% make df\n\nanalysis_df = {}\nfor t in stock_df.keys():\n analysis_df[t] = pd.DataFrame()\n analysis_df[t]['Adj Close'] = (stock_df[t]['Adj Close'])\n analysis_df[t]['Simple Returns'] = (stock_df[t]['Adj Close'].pct_change(1))\n analysis_df[t]['Total ROI %'] = ((stock_df[t]['Adj Close']-stock_df[t]['Adj Close'].iloc[0])/stock_df[t]['Adj Close'].iloc[0])*100\n analysis_df[t]['Log Returns'] = np.log(stock_df[t]['Adj Close']/stock_df[t]['Adj Close'].shift(1))\n\nadj_close_df = pd.DataFrame()\nfor t in stock_df.keys():\n adj_close_df[t] = analysis_df[t]['Adj Close']\nadj_close_df\n\n\n#%% covariance matrix class\n\ndef cov_to_corr(cov_matrix):\n \"\"\"\n Convert a covariance matrix to a correlation matrix.\n :param cov_matrix: covariance matrix\n :type cov_matrix: pd.DataFrame\n :return: correlation matrix\n :rtype: pd.DataFrame\n \"\"\"\n if not isinstance(cov_matrix, pd.DataFrame):\n cov_matrix = pd.DataFrame(cov_matrix)\n\n Dinv = np.diag(1 / np.sqrt(np.diag(cov_matrix)))\n corr = np.dot(Dinv, np.dot(cov_matrix, Dinv))\n return pd.DataFrame(corr, index=cov_matrix.index, columns=cov_matrix.index)\n\ndef variation_over_time(prices,frequency=252,periods=8,covariance='ledoit_wolf',correlation=True,returns_data=False):\n date_delta = end_date - start_date\n divided_days = date_delta/periods\n times = pd.date_range(start_date, periods=periods, freq=divided_days,normalize=True)\n subset_df = {}\n counter=0\n for i in times:\n counter+=1\n #reset index and set timezone.\n sub_df = prices.tz_localize('UTC', level=0).reset_index()\n #spec start date\n subset_start_date = pd.to_datetime(i, utc= True)\n #specficy a subset\n subset_df[counter] = sub_df.loc[(sub_df['Date'] > subset_start_date) & (sub_df['Date'] < end_date)]\n for i in subset_df.keys():\n subset_df[i].set_index('Date', inplace=True)\n\n if covariance == 'ledoit_wolf':\n matrix={}\n for i in subset_df.keys():\n matrix[i] = covariance_models(subset_df[i],returns_data=returns_data, frequency=frequency).ledoit_wolf()\n else:\n #sample covariance\n matrix={}\n for i in subset_df.keys():\n matrix[i] = subset_df[i].pct_change().dropna(how=\"all\").cov()*frequency\n\n if correlation == 
True :\n for i in subset_df.keys():\n matrix[i] = cov_to_corr(matrix[i])\n else:\n pass\n return matrix,times\n\ndef heatmap(x, y, size, scale, times):\n # Mapping from column names to integer coordinates\n x_labels = [v for v in sorted(x.unique())]\n y_labels = [v for v in sorted(y.unique())]\n x_to_num = {p[1]:p[0] for p in enumerate(x_labels)}\n y_to_num = {p[1]:p[0] for p in enumerate(y_labels)}\n\n size_scale = scale\n ax.scatter(\n x=x.map(x_to_num), # Use mapping for x\n y=y.map(y_to_num), # Use mapping for y\n s=size * size_scale, # Vector of square sizes, proportional to size parameter\n marker='s' # Use square as scatterplot marker\n )\n # Show column labels on the axes\n ax.set(title='Variance Matrix \\n'+ str(times))\n ax.set_xticks([x_to_num[v] for v in x_labels])\n ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right')\n ax.set_yticks([y_to_num[v] for v in y_labels])\n ax.set_yticklabels(y_labels)\n ax.grid(False, 'major')\n ax.grid(True, 'minor')\n ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)\n ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)\n ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])\n ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])\n\nclass covariance_models:\n\n def __init__(self, prices, returns_data=False, frequency=252):\n \"\"\"\n :param prices: adjusted closing prices of the asset, each row is a date and each column is a ticker/id.\n :type prices: pd.DataFrame\n :param returns_data: if true, the first argument is returns instead of prices.\n :type returns_data: bool, defaults to False.\n :param frequency: number of time periods in a year, defaults to 252 (the number of trading days in a year)\n :type frequency: int, optional\n \"\"\"\n # Optional import\n try:\n from sklearn import covariance\n\n self.covariance = covariance\n except (ModuleNotFoundError, ImportError):\n raise ImportError(\"Please install scikit-learn via pip\")\n\n if not isinstance(prices, pd.DataFrame):\n prices = pd.DataFrame(prices)\n\n self.frequency = frequency\n\n\n if returns_data:\n self.X = prices.dropna(how=\"all\")\n else:\n self.X = prices.pct_change().dropna(how=\"all\")\n\n self.S = self.X.cov().values\n\n self.delta = None # shrinkage constant\n\n def sample_covariance(self):\n cov_mat = self.X.cov() * self.frequency\n return cov_mat\n\n def shrunk_covariance(self, delta=0.2):\n \"\"\"\n Shrink a sample covariance matrix to the identity matrix (scaled by the average\n sample variance). 
This method does not estimate an optimal shrinkage parameter,\n it requires manual input.\n :param delta: shrinkage parameter, defaults to 0.2.\n :type delta: float, optional\n :return: shrunk sample covariance matrix\n :rtype: np.ndarray\n \"\"\"\n self.delta = delta\n N = self.S.shape[1]\n # Shrinkage target\n mu = np.trace(self.S) / N\n F = np.identity(N) * mu\n # Shrinkage\n shrunk_cov = delta * F + (1 - delta) * self.S\n return self._format_and_annualize(shrunk_cov)\n\n def _format_and_annualize(self, raw_cov_array):\n \"\"\"\n Helper method which annualises the output of shrinkage calculations,\n and formats the result into a dataframe\n :param raw_cov_array: raw covariance matrix of daily returns\n :type raw_cov_array: np.ndarray\n :return: annualised covariance matrix\n :rtype: pd.DataFrame\n \"\"\"\n assets = self.X.columns\n cov = pd.DataFrame(raw_cov_array, index=assets, columns=assets) * self.frequency\n #pyportfolioopt added a fix_nonpositive_semidefinite function here\n return cov\n\n def ledoit_wolf(self, shrinkage_target=\"constant_variance\"):\n \"\"\"\n Calculate the Ledoit-Wolf shrinkage estimate for a particular\n shrinkage target.\n :param shrinkage_target: choice of shrinkage target, either ``constant_variance``,\n ``single_factor`` or ``constant_correlation``. Defaults to\n ``constant_variance``.\n :type shrinkage_target: str, optional\n :raises NotImplementedError: if the shrinkage_target is unrecognised\n :return: shrunk sample covariance matrix\n :rtype: np.ndarray\n \"\"\"\n if shrinkage_target == \"constant_variance\":\n X = np.nan_to_num(self.X.values)\n shrunk_cov, self.delta = self.covariance.ledoit_wolf(X)\n elif shrinkage_target == \"single_factor\":\n shrunk_cov, self.delta = self._ledoit_wolf_single_factor()\n elif shrinkage_target == \"constant_correlation\":\n shrunk_cov, self.delta = self._ledoit_wolf_constant_correlation()\n else:\n raise NotImplementedError(\n \"Shrinkage target {} not recognised\".format(shrinkage_target)\n )\n\n return self._format_and_annualize(shrunk_cov)\n\n def _ledoit_wolf_single_factor(self):\n \"\"\"\n Helper method to calculate the Ledoit-Wolf shrinkage estimate\n with the Sharpe single-factor matrix as the shrinkage target.\n See Ledoit and Wolf (2001).\n :return: shrunk sample covariance matrix, shrinkage constant\n :rtype: np.ndarray, float\n \"\"\"\n X = np.nan_to_num(self.X.values)\n\n # De-mean returns\n t, n = np.shape(X)\n Xm = X - X.mean(axis=0)\n xmkt = X.mean(axis=1).reshape(t, 1)\n\n # compute sample covariance matrix\n sample = np.cov(np.append(Xm, xmkt, axis=1), rowvar=False) * (t - 1) / t\n betas = sample[0:n, n].reshape(n, 1)\n varmkt = sample[n, n]\n sample = sample[:n, :n]\n F = np.dot(betas, betas.T) / varmkt\n F[np.eye(n) == 1] = np.diag(sample)\n\n # compute shrinkage parameters\n c = np.linalg.norm(sample - F, \"fro\") ** 2\n y = Xm ** 2\n p = 1 / t * np.sum(np.dot(y.T, y)) - np.sum(sample ** 2)\n\n # r is divided into diagonal\n # and off-diagonal terms, and the off-diagonal term\n # is itself divided into smaller terms\n rdiag = 1 / t * np.sum(y ** 2) - sum(np.diag(sample) ** 2)\n z = Xm * np.tile(xmkt, (n,))\n v1 = 1 / t * np.dot(y.T, z) - np.tile(betas, (n,)) * sample\n roff1 = (\n np.sum(v1 * np.tile(betas, (n,)).T) / varmkt\n - np.sum(np.diag(v1) * betas.T) / varmkt\n )\n v3 = 1 / t * np.dot(z.T, z) - varmkt * sample\n roff3 = (\n np.sum(v3 * np.dot(betas, betas.T)) / varmkt ** 2\n - np.sum(np.diag(v3).reshape(-1, 1) * betas ** 2) / varmkt ** 2\n )\n roff = 2 * roff1 - roff3\n r = rdiag + 
roff\n\n # compute shrinkage constant\n k = (p - r) / c\n delta = max(0, min(1, k / t))\n\n # compute the estimator\n shrunk_cov = delta * F + (1 - delta) * sample\n return shrunk_cov, delta\n\n def _ledoit_wolf_constant_correlation(self):\n \"\"\"\n Helper method to calculate the Ledoit-Wolf shrinkage estimate\n with the constant correlation matrix as the shrinkage target.\n See Ledoit and Wolf (2003)\n :return: shrunk sample covariance matrix, shrinkage constant\n :rtype: np.ndarray, float\n \"\"\"\n X = np.nan_to_num(self.X.values)\n t, n = np.shape(X)\n\n S = self.S # sample cov matrix\n\n # Constant correlation target\n var = np.diag(S).reshape(-1, 1)\n std = np.sqrt(var)\n _var = np.tile(var, (n,))\n _std = np.tile(std, (n,))\n r_bar = (np.sum(S / (_std * _std.T)) - n) / (n * (n - 1))\n F = r_bar * (_std * _std.T)\n F[np.eye(n) == 1] = var.reshape(-1)\n\n # Estimate pi\n Xm = X - X.mean(axis=0)\n y = Xm ** 2\n pi_mat = np.dot(y.T, y) / t - 2 * np.dot(Xm.T, Xm) * S / t + S ** 2\n pi_hat = np.sum(pi_mat)\n\n # Theta matrix, expanded term by term\n term1 = np.dot((X ** 3).T, X) / t\n help_ = np.dot(X.T, X) / t\n help_diag = np.diag(help_)\n term2 = np.tile(help_diag, (n, 1)).T * S\n term3 = help_ * _var\n term4 = _var * S\n theta_mat = term1 - term2 - term3 + term4\n theta_mat[np.eye(n) == 1] = np.zeros(n)\n rho_hat = sum(np.diag(pi_mat)) + r_bar * np.sum(\n np.dot((1 / std), std.T) * theta_mat\n )\n\n # Estimate gamma\n gamma_hat = np.linalg.norm(S - F, \"fro\") ** 2\n\n # Compute shrinkage constant\n kappa_hat = (pi_hat - rho_hat) / gamma_hat\n delta = max(0.0, min(1.0, kappa_hat / t))\n\n # Compute shrunk covariance matrix\n shrunk_cov = delta * F + (1 - delta) * S\n return shrunk_cov, delta\n\n\n\n#%% plot covariance\n# matrix =cov_to_corr(covariance_models(adj_close_df).sample_covariance())\n# fig, ax = plt.subplots(figsize=(8,8))\n#\n# cax = ax.imshow(matrix)\n# fig.colorbar(cax)\n#\n# ax.set(title='Covariance Matrix')\n# ax.set_xticks(np.arange(0, matrix.shape[0], 1))\n# ax.set_xticklabels(matrix.index)\n# ax.set_yticks(np.arange(0, matrix.shape[0], 1))\n# ax.set_yticklabels(matrix.index)\n# plt.xticks(rotation=90)\n# plt.tight_layout()\n# plt.show()\n#\n# #%% plot covariance\n# matrix = cov_to_corr(covariance_models(adj_close_df).ledoit_wolf())\n# fig, ax = plt.subplots(figsize=(8,8))\n#\n# cax = ax.imshow(matrix)\n# fig.colorbar(cax)\n#\n# ax.set(title='Covariance Matrix')\n# ax.set_xticks(np.arange(0, matrix.shape[0], 1))\n# ax.set_xticklabels(matrix.index)\n# ax.set_yticks(np.arange(0, matrix.shape[0], 1))\n# ax.set_yticklabels(matrix.index)\n# plt.xticks(rotation=90)\n# plt.tight_layout()\n# plt.show()\n#\n#\n# #%% plot covariance\n# matrix = cov_to_corr(covariance_models(adj_close_df).shrunk_covariance())\n# fig, ax = plt.subplots(figsize=(8,8))\n#\n# cax = ax.imshow(matrix)\n# fig.colorbar(cax)\n#\n# ax.set(title='Covariance Matrix')\n# ax.set_xticks(np.arange(0, matrix.shape[0], 1))\n# ax.set_xticklabels(matrix.index)\n# ax.set_yticks(np.arange(0, matrix.shape[0], 1))\n# ax.set_yticklabels(matrix.index)\n# plt.xticks(rotation=90)\n# plt.tight_layout()\n# plt.show()\n\n#%% plot covariance over time\n# matrix = variation_over_time(prices = adj_close_df,frequency=252,periods=10,covariance='ledoit_wolf',correlation=True ,returns_data=False)\n# covariance_matrix = matrix[0]\n# times = matrix[1]\n#\n# covariance_matrix\n#\n# fig = plt.figure(figsize=(10,10))\n# fig.subplots_adjust(hspace=.8, wspace=.8)\n# for i in range(1, len(covariance_matrix)):\n# ax = 
fig.add_subplot(round(math.sqrt(len(covariance_matrix))), round(math.sqrt(len(covariance_matrix))), i)\n#\n# cax = ax.imshow(covariance_matrix[i])\n# fig.colorbar(cax)\n#\n# ax.set(title='Covariance Matrix \\n'+ str(times[i]))\n# ax.set_xticks(np.arange(0, covariance_matrix[i].shape[0], 1))\n# ax.set_xticklabels(covariance_matrix[i].index)\n# ax.set_yticks(np.arange(0, covariance_matrix[i].shape[0], 1))\n# ax.set_yticklabels(covariance_matrix[i].index)\n# plt.xticks(rotation=90)\n#\n# plt.tight_layout()\n# plt.show()\n\n#%% plot covariance over time\n#select covariance matrix, and periods to show\nmatrix = variation_over_time(prices = adj_close_df,frequency=252,periods=2,covariance='ledoit_wolf',correlation=True ,returns_data=False)\ntimes = matrix[1]\ncorr = pd.melt(matrix[0][1].reset_index(), id_vars='index') # Unpivot the dataframe, so we can get pair of arrays for x and y\ncorr.columns = ['x', 'y', 'value']\nnormalized = (corr['value']-min(corr['value']))/(max(corr['value'])-min(corr['value']))\n\nmatrix\n\nfig = plt.figure(figsize=(10,10))\nfig.subplots_adjust(hspace=.8, wspace=.8)\nfor i in range(1, len(matrix[0])):\n ax = fig.add_subplot(round(math.sqrt(len(matrix[0]))), round(math.sqrt(len(matrix[0]))), i)\n ax = heatmap(x=corr['x'],y=corr['y'],size=normalized,scale = 400, times=times[i])\nplt.tight_layout()\nplt.show()\n\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"pandas.to_datetime",
"numpy.sqrt",
"numpy.nan_to_num",
"pandas.DataFrame",
"numpy.trace",
"matplotlib.pyplot.tight_layout",
"numpy.eye",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.append",
"numpy.identity",
"pandas.date_range",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.tile",
"numpy.linalg.norm",
"numpy.shape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
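The `Python_Core/risk_models.py` entry above builds shrinkage covariance estimates (Ledoit-Wolf and manual shrinkage targets) and converts them to correlation matrices for plotting. A compressed sketch of the constant-variance Ledoit-Wolf path, assuming `scikit-learn` is installed and using made-up daily returns in place of fetched prices:

```python
import numpy as np
import pandas as pd
from sklearn.covariance import ledoit_wolf

rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(0.0005, 0.01, size=(500, 3)),
                       columns=["AAA", "BBB", "CCC"])        # fake daily returns

shrunk, delta = ledoit_wolf(returns.values)                  # shrunk daily covariance + shrinkage constant
annual_cov = pd.DataFrame(shrunk * 252,                      # annualise with 252 trading days
                          index=returns.columns, columns=returns.columns)

d_inv = np.diag(1.0 / np.sqrt(np.diag(annual_cov.values)))   # inverse standard deviations
corr = pd.DataFrame(d_inv @ annual_cov.values @ d_inv,       # cov -> corr, as in cov_to_corr
                    index=annual_cov.index, columns=annual_cov.columns)
print(corr.round(3))
```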
MarisaAlves/class6-notebook
|
[
"8880dd98b5a973febdeebc6a12fa7ab32f4f6e7b"
] |
[
"class6file.py"
] |
[
"import os\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndiabetes = pd.read_csv(filepath_or_buffer='diabetes.data', sep ='\\t', header=0)\n\nos.makedirs('plots', exist_ok=True)\n\nfig, axes = plt.subplots(2, 1, figsize=(5,5))\n\naxes[0].plot(diabetes['BP'])\naxes[1].plot(diabetes['BP'], diabetes['AGE'])\n\naxes[0].set_xlabel('Index')\naxes[0].set_ylabel('BP')\naxes[0].set_title('Blood Pressure')\n\naxes[1].set_xlabel('BP')\naxes[1].set_ylabel('Age')\naxes[1].set_title('BP to Age')\n\n\nplt.tight_layout()\n\nplt.savefig(f'plots/figures.png', format='png', dpi=300)\nplt.clf()\n\n\nplt.scatter(diabetes['BP'], diabetes['AGE'], color='b', marker=\"p\", alpha=0.3)\nplt.title('BP to Age')\nplt.xlabel('BP')\nplt.ylabel('Age')\nplt.savefig(f'plots/bp_to_age.png', format='png')\nplt.show()\nplt.clf()\n\nplt.close()\n\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
chenwuperth/smworkshop
|
[
"13738a04d6cdcaf75a4c97b3ca3ed31349a9385f"
] |
[
"examples/02/train/train_job.py"
] |
[
"\"\"\"\nTrain job interacts with SageMaker using XGB\n\n\"\"\"\n\nimport os\nimport datetime\n\nimport boto3\nimport pandas as pd\nimport numpy as np\nimport sagemaker\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_boston\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\n\nfrom ...sm_utils import get_sm_execution_role\n\n\ndef reformat_csv(csv_fn):\n \"\"\"\n Amazon SageMaker XGBoost can train on data in either a CSV or LibSVM format. \n For CSV format, It should:\n\n Have the predictor variable in the first column\n Not have a header row\n \"\"\"\n new_fn = csv_fn.replace('.csv', '_xgb.csv')\n # 1. skip the header\n # 2. replace the first col with the last col\n # 3. drop the last col\n with open(csv_fn, 'r') as fin:\n lines = fin.readlines()\n new_lines = []\n for line in lines[1:]:\n line = line.strip()\n fds = line.split(',')\n fds[0] = fds[-1]\n fds = fds[0:-1]\n new_line = ','.join(fds)\n new_lines.append(new_line)\n with open(new_fn, 'w') as fout:\n fout.write(os.linesep.join(new_lines))\n return new_fn\n\nON_SAGEMAKER_NOTEBOOK = False\n\n# preparation\nsm_boto3 = boto3.client('sagemaker')\nsess = sagemaker.Session()\nregion = sess.boto_session.region_name\nbucket = sess.default_bucket() # this could also be a hard-coded bucket name\nprint('Using bucket ' + bucket)\nsm_role = get_sm_execution_role(ON_SAGEMAKER_NOTEBOOK, region)\n\n# Prepare data\ndata = load_boston()\n\nX_train, X_test, y_train, y_test = train_test_split(\n data.data, data.target, test_size=0.25, random_state=42)\n\ntrainX = pd.DataFrame(X_train, columns=data.feature_names)\ntrainX['target'] = y_train\n\ntestX = pd.DataFrame(X_test, columns=data.feature_names)\ntestX['target'] = y_test\n\ntrainX.head()\n\n# convert to CSV so SM can consume\ntrainX.to_csv('boston_train.csv')\ntestX.to_csv('boston_test.csv')\nntrain_csv = reformat_csv('boston_train.csv')\nntest_csv = reformat_csv('boston_test.csv')\n\n# send data to S3. SageMaker will take training data from s3\ntrainpath = sess.upload_data(\n path=ntrain_csv, bucket=bucket,\n key_prefix='sagemaker/sklearncontainer')\n\ns3_input_train = sagemaker.s3_input(s3_data=trainpath, content_type='csv')\n\ntestpath = sess.upload_data(\n path=ntest_csv, bucket=bucket,\n key_prefix='sagemaker/sklearncontainer')\n\ns3_input_validation = sagemaker.s3_input(s3_data=testpath, content_type='csv')\n\ncontainer = get_image_uri(region, 'xgboost', '0.90-1')\n\"\"\"\n\nmax_depth controls how deep each tree within the algorithm can be built. \n Deeper trees can lead to better fit, but are more computationally expensive and can lead to overfitting. There is typically some trade-off in model performance that needs to be explored between a large number of shallow trees and a smaller number of deeper trees.\nsubsample controls sampling of the training data. \n This technique can help reduce overfitting, but setting it too low can also starve the model of data.\nnum_round controls the number of boosting rounds. \n This is essentially the subsequent models that are trained using the residuals of previous iterations. Again, more rounds should produce a better fit on the training data, but can be computationally expensive or lead to overfitting.\neta controls how aggressive each round of boosting is. \n Larger values lead to more conservative boosting.\ngamma controls how aggressively trees are grown. 
Larger values lead to more conservative models.\n\"\"\"\nxgb = sagemaker.estimator.Estimator(\n container,\n role=sm_role, \n train_instance_count=1, \n train_instance_type='ml.m4.xlarge',\n hyperparameters={\n \"max_depth\":\"5\",\n \"eta\":\"0.2\",\n \"gamma\":\"4\",\n \"min_child_weight\":\"6\",\n \"subsample\":\"0.7\",\n \"silent\":\"0\",\n \"objective\":\"reg:linear\",\n \"num_round\":\"50\"\n })\nxgb.fit({'train':s3_input_train, 'validation': s3_input_validation}, wait=False)\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.datasets.load_boston"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
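The docstring in `examples/02/train/train_job.py` above spells out the CSV layout that SageMaker's built-in XGBoost container expects (target in the first column, no header row), and `reformat_csv` rewrites the file by hand. The same reshaping can be sketched with pandas; the column and file names below are placeholders:

```python
import pandas as pd

df = pd.DataFrame({"CRIM": [0.1, 0.2], "RM": [6.5, 7.1], "target": [24.0, 21.6]})  # toy feature frame

# Put the target first, keep the remaining columns in their original order,
# then write without a header row or index, as the container requires.
xgb_ready = df[["target"] + [c for c in df.columns if c != "target"]]
xgb_ready.to_csv("boston_train_xgb.csv", header=False, index=False)
```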
francescomalandrino/TorchLRP
|
[
"35b3fa1c98dde0601e72dea3370a9fe191edbd00"
] |
[
"examples/utils.py"
] |
[
"import sys\nimport pathlib\n# Append parent directory of this file to sys.path, \n# no matter where it is run from\nbase_path = pathlib.Path(__file__).parent.parent.absolute()\nsys.path.insert(0, base_path.as_posix())\n\nimport pickle\nimport os\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torchvision\n\nfrom lrp import Sequential, Linear, Conv2d, MaxPool2d\n\n_standard_transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n])\n\n# # # # # # # # # # # # # # # # # # # # # \n# MNIST\n# # # # # # # # # # # # # # # # # # # # # \ndef get_mnist_model():\n model = Sequential(\n Conv2d(1, 32, 3, 1, 1),\n nn.ReLU(),\n Conv2d(32, 64, 3, 1, 1),\n nn.ReLU(),\n MaxPool2d(2,2),\n nn.Flatten(),\n Linear(14*14*64, 512),\n nn.ReLU(),\n Linear(512, 10)\n )\n return model\n\ndef get_mnist_data(transform, batch_size=32):\n train = torchvision.datasets.MNIST((base_path / 'data').as_posix(), train=True, download=True, transform=transform)\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=False)\n\n test = torchvision.datasets.MNIST((base_path / 'data').as_posix(), train=False, download=True, transform=transform)\n test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)\n return train_loader, test_loader\n\ndef prepare_mnist_model(model, model_path=(base_path / 'examples' / 'models' / 'mnist_model.pth').as_posix(), epochs=1, lr=1e-3, train_new=False, transform=_standard_transform):\n train_loader, test_loader = get_mnist_data(transform)\n\n if os.path.exists(model_path) and not train_new: \n state_dict = torch.load(model_path)\n model.load_state_dict(state_dict)\n else: \n device = 'cuda'\n model = model.to(device)\n loss_fn = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n model.train()\n for e in range(epochs):\n for i, (x, y) in enumerate(train_loader):\n x = x.to(device)\n y = y.to(device)\n y_hat = model(x)\n loss = loss_fn(y_hat, y)\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n acc = (y == y_hat.max(1)[1]).float().sum() / x.size(0)\n if i%10 == 0: \n print(\"\\r[%i/%i, %i/%i] loss: %.4f acc: %.4f\" % (e, epochs, i, len(train_loader), loss.item(), acc.item()), end=\"\", flush=True)\n torch.save(model.state_dict(), model_path)\n\n# # # # # # # # # # # # # # # # # # # # # \n# Patterns\n# # # # # # # # # # # # # # # # # # # # # \ndef store_patterns(file_name, patterns):\n with open(file_name, 'wb') as f:\n pickle.dump([p.detach().cpu().numpy() for p in patterns], f)\n\ndef load_patterns(file_name): \n with open(file_name, 'rb') as f: p = pickle.load(f)\n return p\n\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.utils.data.DataLoader",
"torch.nn.Flatten",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
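`examples/utils.py` above wraps a load-or-train checkpoint pattern inside `prepare_mnist_model`: reuse saved weights when a checkpoint exists, otherwise train and persist the state dict. A stripped-down sketch of just that pattern (the model, path, and training loop here are placeholders):

```python
import os
import torch

def load_or_train(model, model_path, train_fn, device="cpu", train_new=False):
    """Load weights from model_path if present, otherwise train and save them."""
    if os.path.exists(model_path) and not train_new:
        model.load_state_dict(torch.load(model_path, map_location=device))
    else:
        model = model.to(device)
        train_fn(model)                                # caller-supplied training loop
        torch.save(model.state_dict(), model_path)
    return model
```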
Deech08/dk_manga_tools
|
[
"98fe33346b4e135f4a0b765737e1e8ddba61d601"
] |
[
"dk_manga_tools/DKMaps.py"
] |
[
"import logging\nimport warnings\n\nfrom marvin.tools.maps import Maps\n\nimport astropy.units as u\nimport astropy.wcs as wcs\nimport astropy.constants as constants\nfrom astropy.cosmology import WMAP9\n\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.colors import ListedColormap\n\nfrom astropy.coordinates import CartesianRepresentation, CylindricalRepresentation, Angle\n\npal = sns.color_palette('colorblind')\n\nfrom extinction import fm07 as extinction_law\nimport matplotlib\nimport os\nimport glob\n\nfrom .pca import PCA_stellar_mass\nfrom .pca import PCA_MLi\nfrom .pca import PCA_zpres_info\nfrom .pca import PCA_mag\nfrom .gz3d_fits import gz3d_fits\n\nfrom .timeslice_utils import timecube, agemap, Total, metmap, SFH\n\n\n# Dictionary of chanell labels:\nlabels_dict = {\n 'ha_6564' : r\"$H\\alpha$ $\\lambda 6564 \\AA$ ({})\",\n 'hb_4862' : r\"$H\\beta$ $\\lambda 4862 \\AA$ ({})\",\n 'oii_3727' : r\"[OII] $\\lambda 3727 \\AA$ ({})\",\n 'oii_3729' : r\"[OII] $\\lambda 3729 \\AA$ ({})\",\n 'oiii_4960' : r\"[OIII] $\\lambda 4960 \\AA$ ({})\",\n 'oiii_5008' : r\"[OIII] $\\lambda 5008 \\AA$ ({})\",\n 'hei_5877' : r\"He I $\\lambda 5877 \\AA$ ({})\",\n 'oi_6302' : r\"[OI] $\\lambda 6302 \\AA$ ({})\",\n 'oi_6365' : r\"[OI] $\\lambda 6365 \\AA$ ({})\",\n 'nii_6549' : r\"[NII] $\\lambda 6549 \\AA$ ({})\",\n 'nii_6585' : r\"[NII] $\\lambda 6585 \\AA$ ({})\",\n 'nii_6718' : r\"[NII] $\\lambda 6718 \\AA$ ({})\",\n 'nii_6732' : r\"[NII] $\\lambda 6732 \\AA$ ({})\",\n 'r_re' : r\"$R / R_e$\",\n 'elliptical_radius' : r\"Elliptical Radius ({})\" \n}\n\nfrom scipy.spatial import ConvexHull\n\ndef minimum_bounding_rectangle(points):\n \"\"\"\n Find the smallest bounding rectangle for a set of points.\n Returns a set of points representing the corners of the bounding box.\n\n :param points: an nx2 matrix of coordinates\n :rval: an nx2 matrix of coordinates\n \"\"\"\n from scipy.ndimage.interpolation import rotate\n pi2 = np.pi/2.\n\n # get the convex hull for the points\n hull_points = points[ConvexHull(points).vertices]\n\n # calculate edge angles\n edges = np.zeros((len(hull_points)-1, 2))\n edges = hull_points[1:] - hull_points[:-1]\n\n angles = np.zeros((len(edges)))\n angles = np.arctan2(edges[:, 1], edges[:, 0])\n\n angles = np.abs(np.mod(angles, pi2))\n angles = np.unique(angles)\n\n # find rotation matrices\n # XXX both work\n rotations = np.vstack([\n np.cos(angles),\n np.cos(angles-pi2),\n np.cos(angles+pi2),\n np.cos(angles)]).T\n# rotations = np.vstack([\n# np.cos(angles),\n# -np.sin(angles),\n# np.sin(angles),\n# np.cos(angles)]).T\n rotations = rotations.reshape((-1, 2, 2))\n\n # apply rotations to the hull\n rot_points = np.dot(rotations, hull_points.T)\n\n # find the bounding points\n min_x = np.nanmin(rot_points[:, 0], axis=1)\n max_x = np.nanmax(rot_points[:, 0], axis=1)\n min_y = np.nanmin(rot_points[:, 1], axis=1)\n max_y = np.nanmax(rot_points[:, 1], axis=1)\n\n # find the box with the best area\n areas = (max_x - min_x) * (max_y - min_y)\n best_idx = np.argmin(areas)\n\n # return the best box\n x1 = max_x[best_idx]\n x2 = min_x[best_idx]\n y1 = max_y[best_idx]\n y2 = min_y[best_idx]\n r = rotations[best_idx]\n\n rval = np.zeros((4, 2))\n rval[0] = np.dot([x1, y2], r)\n rval[1] = np.dot([x2, y2], r)\n rval[2] = np.dot([x2, y1], r)\n rval[3] = np.dot([x1, y1], r)\n\n return rval\n\n\nclass DKMapsMixin(object):\n \"\"\"\n Mixin Functionality\n \"\"\"\n def balmer_Av(self, expected_ratio = 2.92, snr_min = None):\n \"\"\"\n Estimate Av 
from balmer Decrement\n\n Parameters\n ----------\n expected_ratio: 'number', optional, must be keyword\n Expected ha/hb ratio in Energy Units\n snr_min: 'number', optional, must be keyword\n Minimum SNR Threshold to use\n\n\n \"\"\"\n\n ha = self['emline gflux ha']\n hb = self['emline gflux hb']\n\n ha_masked = ha.masked\n hb_masked = hb.masked\n\n if snr_min is not None:\n ha_masked.mask |= ha.snr < snr_min\n hb_masked.mask |= hb.snr < snr_min\n\n hahb = ha_masked / hb_masked\n AV = 2.68 * np.log(hahb / expected_ratio)\n AV.mask |= AV < 0.\n\n\n return AV\n\n def deredden(self, name, Av = None, **kwargs):\n \"\"\"\n De-redden a Map\n\n Parameters\n ----------\n name: 'str'\n name of Maps key to Map that will be dereddened\n Av: 'list' or 'np.ma.masked_array' or 'np.array', optional, must be keyword\n Av values to use\n if not provided, it is estimated using the balmer decrememnt\n kwargs: 'dict', optional, must be keywords\n keywords passed to balmer_Av\n \"\"\"\n if not hasattr(self, name):\n raise ValueError(\"cannot find a good match for '{}'. Your input value is too ambiguous.\".format(name))\n\n og_map = self[name].masked\n\n if Av is None:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n Av = self.balmer_Av(**kwargs)\n\n wave = np.array([self[name].datamodel.channel.name[-4:]], dtype = np.float)\n A_v_to_A_lambda = extinction_law(wave, 1.)\n A_lambda = Av * A_v_to_A_lambda\n\n return og_map * 10**(0.4 * A_lambda)\n\n def luminosity(self, name, lum_distz, deredden = False, **kwargs):\n \"\"\"\n Calculate Emission Line Luminosity\n\n Parameters\n ----------\n name: 'str'\n name of Maps key to Map that will be dereddened\n lum_distz: 'number', 'u.Quantity'\n luminosity distance in units of Mpc / littleh\n deredden: 'bool', optional, must be keyword\n if True, dereddens flux first\n kwargs: 'dict', optional, must be keyword\n keywords passed to deredden\n\n \"\"\"\n if not isinstance(lum_distz, u.Quantity):\n logging.warning(\"No units provided for Luminosity Distance, assuming u.Mpc / u.littleh\")\n lum_distz *= u.Mpc / u.littleh\n \n if deredden:\n flux = self.deredden(name, **kwargs)\n else:\n flux = self[name].masked\n\n flux_unit = self[name].unit\n lum = 4. 
* np.pi * flux.data * flux_unit * lum_distz**2\n lum_out = lum.to(u.erg / u.s / u.pix, u.with_H0(WMAP9.H0))\n return np.ma.masked_array(data = lum_out, mask = flux.mask)\n\n def plot_bpt_nii(self, **kwargs):\n \"\"\"\n plots NII/HA BPT Diagram in style similar to Krishnarao+19, with option to overlay Milky Way Data\n\n Parameters\n ----------\n kwargs: 'dict', optional, must be keyword\n passed to 'dk_manga_tools.bpt.bpt_nii'\n \"\"\"\n from .bpt import bpt_nii\n return bpt_nii(self, **kwargs)\n\n def plot_radial_emline(self, name, ax = None, deredden = False, \n Re = True, snr_min = None, log10 = False, deredden_kwargs = {}, **kwargs):\n \"\"\"\n Plots emline as a funtion of R/Re\n\n Parameters\n ----------\n name: 'str', 'list'\n name of Maps key to Map that will be dereddened\n if list, uses ratio of two emlines\n ax: 'matplotlib.pyplot.figure.axes': optional, must be keyword\n Matplotlib Axes instance to plot on\n deredden: 'bool', optional, must be keyword\n if True, dereddens flux first\n Re: 'bool', optional, must be keyword\n if True, plots x axis as R/Re\n if False, plots x axis as R\n snr_min: 'number', optional, must be keyword\n Minimum SNR to mask out\n log10: 'bool', optional, must be keyword\n if True, plots log_10 of the value\n only if ratios are plotted\n deredden_kwargs: 'dict', optional, must be keywords:\n Keywords passed to dereddedn\n kwargs: 'optional', must be keyword\n keywords passed to scatter plot\n \"\"\"\n if (name.__class__ is list) | (name.__class__ is tuple):\n assert len(name) == 2, \"too many emline entries provided\"\n name, denom_name = name\n ratios = True\n else:\n ratios = False\n\n if deredden:\n flux = self.deredden(name, **deredden_kwargs)\n if ratios:\n flux_denom = self.deredden(denom_name, **deredden_kwargs)\n else:\n flux = self[name]\n if ratios:\n flux_denom = self[denom_name]\n\n if Re:\n radius = self['spx ellcoo r_re']\n else:\n radius = self['spx ellcoo radius']\n\n # Default SNR_MIN\n if snr_min is None:\n snr_min = 2.\n\n flux_masked = flux.masked\n flux_masked.mask |= flux.snr <= snr_min\n if ratios:\n flux_denom_masked = flux_denom.masked\n flux_denom_masked.mask |= flux_denom.snr <= snr_min\n flux_denom_masked.mask |= flux_denom_masked <= 0.\n\n\n try: \n y_label = labels_dict[flux.datamodel.channel.name]\n except KeyError:\n y_label = r\"\" + flux.datamodel.channel.name + \"({})\"\n\n if ratios:\n try: \n y_label_denom = labels_dict[flux_denom.datamodel.channel.name]\n except KeyError:\n y_label_denom = r\"\" + flux_denom.datamodel.channel.name\n\n if y_label[-4:] == \"({})\":\n y_label = y_label[:-4]\n if y_label_denom[-4:] == \"({})\":\n y_label_denom = y_label_denom[:-4]\n\n y_label = y_label + \" / \" + y_label_denom\n if log10:\n y_label = r\"$Log_{{10}}$ \" + y_label\n\n x_label = labels_dict[radius.datamodel.channel.name]\n\n # Check if axes are created\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Default kwargs:\n if (\"color\" not in kwargs) | (\"c\" not in kwargs):\n color = pal[0]\n if ratios:\n flux_ratio = flux_masked / flux_denom_masked\n if log10:\n flux_ratio = np.log10(flux_ratio)\n ax.scatter(radius.value.flatten(), flux_ratio.flatten(), **kwargs)\n ax.set_ylabel(y_label, fontsize = 12)\n else:\n ax.scatter(radius.value.flatten(), flux_masked.flatten(), **kwargs)\n ax.set_ylabel(y_label.format(flux.unit.to_string(\"latex\")), fontsize = 12)\n\n ax.set_xlabel(x_label.format(radius.unit.to_string(\"latex\")), fontsize = 12)\n\n return ax\n\n def plot_violin_bpt_nii(self, ax = None,\n 
deredden = False, Re = True, \n snr_min = None, deredden_kwargs = {},\n **kwargs):\n \"\"\"\n Plots categorical BPT Classification as a funtion of R/Re\n\n Parameters\n ----------\n ax: 'matplotlib.pyplot.figure.axes': optional, must be keyword\n Matplotlib Axes instance to plot on\n deredden: 'bool', optional, must be keyword\n if True, dereddens flux first\n Re: 'bool', optional, must be keyword\n if True, plots x axis as R/Re\n if False, plots x axis as R\n snr_min: 'number', optional, must be keyword\n Minimum SNR to mask out\n deredden_kwargs: 'dict', optional, must be keywords:\n Keywords passed to dereddedn\n kwargs: 'optional', must be keyword\n keywords passed to sns.violinplot\n \"\"\"\n\n\n if Re:\n radius = self['spx ellcoo r_re']\n else:\n radius = self['spx ellcoo radius']\n\n bpt_classifications = self.plot_bpt_nii(return_figure = False, \n snr_min = snr_min, \n deredden = deredden, \n deredden_kwargs = deredden_kwargs)\n\n # Default kwargs\n if \"palette\" not in kwargs:\n kwargs[\"palette\"] = [pal[1], pal[0], pal[9], pal[4]]\n\n if \"saturation\" not in kwargs:\n kwargs[\"saturation\"] = 1.5\n\n if \"inner\" not in kwargs:\n kwargs[\"inner\"] = 'quartile'\n\n\n # Check if axes are created\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n \n # Violin Plots\n sf_x = radius[bpt_classifications[\"sf\"][\"nii\"]].flatten().value\n sf_y = [\"SF\"] * len(sf_x)\n\n comp_x = radius[bpt_classifications[\"comp\"][\"nii\"]].flatten().value\n comp_y = [\"Composite\"] * len(comp_x)\n\n agn_x = radius[bpt_classifications[\"agn\"][\"nii\"]].flatten().value\n agn_y = [\"AGN\"] * len(agn_x)\n\n liner_x = radius[bpt_classifications[\"liner\"][\"nii\"]].flatten().value\n liner_y = [\"LI(N)ER\"] * len(liner_x)\n\n # invalid_x = radius[bpt_classifications[\"invalid\"][\"nii\"]].flatten().value\n # invalid_y = [\"invalid\"] * len(invalid_x)\n\n x = np.concatenate((sf_x, comp_x, agn_x, liner_x), axis = None)\n y = np.concatenate((sf_y, comp_y, agn_y, liner_y), axis = None)\n\n data = {\"x\":x, \"y\":y}\n\n sns.violinplot(ax = ax, x = \"x\", \n y = \"y\", data = data, **kwargs,\n )\n x_label = labels_dict[radius.datamodel.channel.name]\n ax.set_xlabel(x_label.format(radius.unit.to_string(\"latex\")), fontsize = 12)\n\n if (\"alpha\" in kwargs) | (\"zorder\" in kwargs):\n artists = ax.get_default_bbox_extra_artists()\n for artist in artists:\n if artist.__class__ is matplotlib.collections.PolyCollection:\n if \"alpha\" in kwargs:\n artist.set_alpha(kwargs[\"alpha\"])\n if \"zorder\" in kwargs:\n artist.set_zorder(kwargs[\"zorder\"])\n\n \n\n\n\n\n\n\n return ax\n\n\n\n\n def get_radial_bpt_counts(self, radial_bin, binned_counts = False, bin_width = None,\n snr_min = None, deredden = False, deredden_kwargs = {}, radial_norm = 1.,\n pool = None, set_up = True, add_to = None, keep_pool = False):\n \"\"\"\n Get number of spaxels in each BPT classification within a specified radial bin\n\n Parameters\n ----------\n radial_bin: 'number', 'list'\n radius value in terms of R_e \n\n binned_counts: 'bool', optional, must be keyword\n if True, counts spaxels within bins, instead of cumulatively\n bin_width: 'number', optional, must be keyword\n bin width to use if binned_counts is True\n deredden: 'bool', optional, must be keyword\n if True, dereddens flux first\n snr_min: 'number', optional, must be keyword\n Minimum SNR to mask out\n deredden_kwargs: 'dict', optional, must be keyword\n Keywords passed to dereddedn\n radial_norm: 'number', 'u.Quantity', optional, must be keyword\n 
normalization value for radius in Re\n pool: 'multiprocessing.pool', optional, must be keyword\n pool with a map method for multiprocessing capabilities\n set_up: 'bool', optional, must be keyword\n Default is True\n used to regulate recursion - not usually used by end-user\n add_to: 'dict', optional, must be keyword\n Dictionary of counts to add values onto\n keep_pool: 'bool', optional, must be keyword\n if True, does not close pool\n not usually used by end-user\n\n Returns\n -------\n counts: 'dict'\n Dictionary of counts per each classification and Total spaxels count\n \"\"\"\n\n\n\n if (radial_bin.__class__ in [np.ndarray, list, tuple]) & (set_up):\n from functools import partial\n # Map multiple radial bin values as output\n partial_func = partial(self.get_radial_bpt_counts, \n snr_min = snr_min, \n binned_counts = binned_counts, \n bin_width = bin_width,\n deredden = deredden, \n deredden_kwargs = deredden_kwargs, \n set_up = False)\n if pool is not None:\n try: \n res = pool.map(partial_func, radial_bin)\n except AttributeError:\n logging.warning(\"Invalid Pool, pool has no map method.\")\n res = map(partial_func, radial_bin)\n else: \n if keep_pool is False:\n pool.close()\n else:\n res = map(partial_func, radial_bin)\n\n counts_sub = [*res]\n counts = {}\n for k in counts_sub[0]:\n counts[k] = np.array(list(d[k] for d in counts_sub))\n\n if add_to is not None:\n tot_counts = {}\n for k in counts:\n tot_counts[k] = np.sum(list(d[k] for d in [counts, add_to]), axis = 0)\n return tot_counts\n else:\n return counts\n else:\n # initialize dictionary\n counts = {}\n\n if binned_counts:\n if bin_width is None:\n bin_width = 0.2 #R_e\n within_radius = (self['spx ellcoo r_re'].value <= radial_bin / radial_norm + bin_width/2.) \n within_radius &= (self['spx ellcoo r_re'].value > radial_bin / radial_norm - bin_width/2.) 
\n else:\n # Get radius values from Map\n within_radius = self['spx ellcoo r_re'].value <= radial_bin * radial_norm\n\n # Get bpt_classificaitons\n bpt_classificaitons = self.plot_bpt_nii(return_figure = False, \n snr_min = snr_min, \n deredden = deredden, \n deredden_kwargs = deredden_kwargs)\n\n counts[\"sf\"] = (bpt_classificaitons[\"sf\"][\"nii\"] & within_radius).sum()\n counts[\"comp\"] = (bpt_classificaitons[\"comp\"][\"nii\"] & within_radius).sum()\n counts[\"agn\"] = (bpt_classificaitons[\"agn\"][\"nii\"] & within_radius).sum()\n counts[\"liner\"] = (bpt_classificaitons[\"liner\"][\"nii\"] & within_radius).sum()\n counts[\"invalid\"] = (bpt_classificaitons[\"invalid\"][\"nii\"] & within_radius).sum()\n counts[\"total\"] = counts[\"sf\"] + counts[\"comp\"] + counts[\"agn\"] + counts[\"liner\"] + counts[\"invalid\"]\n\n if add_to is not None:\n tot_counts = {}\n for k in counts:\n tot_counts[k] = np.sum(list(d[k] for d in [counts, add_to]), axis = 0)\n return add_to\n else:\n return counts\n\n\n\n\n\n def get_PCA_stellar_mass(self, pca_data_dir = None, **kwargs):\n \"\"\"\n Return PCA Stellar Mass map with errors in u.solmass\n \"\"\"\n if pca_data_dir is None:\n pca_data_dir = os.path.join(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'sandbox', 'mangapca', 'zachpace', 'CSPs_CKC14_MaNGA_20190215-1',\n self.dapall[\"versdrp3\"], self.dapall[\"versdap\"], 'results')\n return PCA_stellar_mass(self.dapall, pca_data_dir = pca_data_dir, **kwargs)\n\n def get_timeslice_mass(self, timeslice_dir = None, **kwargs):\n \"\"\"\n Return TimeSlice Mass Map in units of u.solMass\n \"\"\"\n if timeslice_dir is None:\n timeslice_dir = \"/Users/dk/sas/mangawork/manga/sandbox/starlight/MPL9_Spirals_noSpecs/\"\n\n ffn = \"{}{}_E-n.fits\".format(timeslice_dir, self.plateifu)\n\n tc = timecube(ffn, weight_type='current_mass')\n return tc.sum_im * u.solMass\n\n def get_deproj_pixel_area(self):\n \"\"\"\n Return deprojected pixel area of Galaxy in u.pc**2\n \"\"\"\n\n alpha = .13 # axis ratio of a perfectly edge-on system\n\n inclination = np.arccos(\n np.sqrt(\n (self.dapall['nsa_elpetro_ba']**2. 
- alpha**2.)/(1 - alpha**2.)))\n D = self.dapall[\"nsa_zdist\"] * constants.c / WMAP9.H0\n proj_pix_area = wcs.utils.proj_plane_pixel_area(self.wcs) *u.deg**2\n\n return (proj_pix_area.to(u.sr).value * D**2).to(u.pc**2) / np.cos(inclination)\n\n def get_PCA_stellar_mass_density(self, **kwargs):\n \"\"\"\n Return PCA stellar mass / deprojected pixel area in units of u.solMass * u.pc**-2\n \"\"\"\n m_star = self.get_PCA_stellar_mass(**kwargs)\n area = self.get_deproj_pixel_area()\n\n smsd = np.ma.masked_array(data = m_star/area, mask = m_star.mask)\n return m_star / area\n\n def get_timeslice_mass_density(self, **kwargs):\n \"\"\"\n Return TimeSlice stellar mass / deprojected pixel area in units of u.solMass * u.pc**-2\n \"\"\"\n m = self.get_timeslice_mass(**kwargs)\n area = self.get_deproj_pixel_area()\n return m/area\n\n def get_timeslice_mean_age(self, timeslice_dir = None, weight_type = \"light\"):\n \"\"\"\n Return TimeSlice mean weighted age\n \"\"\"\n if timeslice_dir is None:\n timeslice_dir = \"/Users/dk/sas/mangawork/manga/sandbox/starlight/MPL9_Spirals_noSpecs/\"\n\n ffn = \"{}{}_E-n.fits\".format(timeslice_dir, self.plateifu)\n tc = timecube(ffn, weight_type = weight_type)\n return agemap(Total(tc))\n\n def get_timeslice_metallicity(self, timeslice_dir = None, weight_type = \"light\"):\n \"\"\"\n Return TimeSlice mean weighted age\n \"\"\"\n if timeslice_dir is None:\n timeslice_dir = \"/Users/dk/sas/mangawork/manga/sandbox/starlight/MPL9_Spirals_noSpecs/\"\n\n ffn = \"{}{}_E-n.fits\".format(timeslice_dir, self.plateifu)\n tc = timecube(ffn, weight_type = weight_type)\n return metmap(Total(tc))\n def get_timeslice_SFH(self, timeslice_dir = None, weight_type = \"initial_mass\"):\n \"\"\"\n Return TimeSlice SFH_age and SFH_sfrs\n \"\"\"\n if timeslice_dir is None:\n timeslice_dir = \"/Users/dk/sas/mangawork/manga/sandbox/starlight/MPL9_Spirals_noSpecs/\"\n\n ffn = \"{}{}_E-n.fits\".format(timeslice_dir, self.plateifu)\n tc = timecube(ffn, weight_type = weight_type)\n return SFH(tc)\n\n\n\n def get_PCA_MLi(self, pca_data_dir = None, **kwargs):\n \"\"\"\n Return PCA Stellar Mass-to-Light ratio in the i-band\n \"\"\"\n if pca_data_dir is None:\n pca_data_dir = os.path.join(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'sandbox', 'mangapca', 'zachpace', 'CSPs_CKC14_MaNGA_20190215-1',\n self.dapall[\"versdrp3\"], self.dapall[\"versdap\"], 'results')\n return PCA_MLi(self.dapall, pca_data_dir = pca_data_dir, **kwargs)\n\n def get_PCA_zpres_info(self, name, pca_data_dir = None, **kwargs):\n \"\"\"\n Return additional specified info from PCA\n \"\"\"\n if pca_data_dir is None:\n pca_data_dir = os.path.join(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'sandbox', 'mangapca', 'zachpace', 'CSPs_CKC14_MaNGA_20190215-1',\n self.dapall[\"versdrp3\"], self.dapall[\"versdap\"], 'results')\n return PCA_zpres_info(self.dapall, name, pca_data_dir = pca_data_dir, **kwargs)\n\n def get_PCA_mag(self, filter_obs, pca_data_dir = None, **kwargs):\n \"\"\"\n Return PCA mag in specified filter\n \"\"\"\n if pca_data_dir is None:\n pca_data_dir = os.path.join(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'sandbox', 'mangapca', 'zachpace', 'CSPs_CKC14_MaNGA_20190215-1',\n self.dapall[\"versdrp3\"], self.dapall[\"versdap\"], 'results')\n return PCA_mag(self.dapall, filter_obs, pca_data_dir = pca_data_dir, **kwargs)\n\n\n def get_bar_mask(self, galaxyzoo3d_dir = None, vote_threshold = None, **kwargs):\n \"\"\"\n If available get Galaxy Zoo 3D Bar Spaxel Mask\n\n Parameters\n ----------\n 
galaxyzoo3d_dir: 'str', optional, must be keyword\n Directory to find data files\n vote_threshold: 'number', optional, must be keyword\n Vote threshold to consider; default of 0.2 (20%)\n \"\"\"\n\n\n if galaxyzoo3d_dir is None:\n galaxyzoo3d_dir = \"/Users/dk/sas/mangawork/manga/sandbox/galaxyzoo3d/v2_0_0/\"\n\n if vote_threshold is None:\n vote_threshold = 0.2\n\n\n filename = glob.glob(galaxyzoo3d_dir+\"{}*.fits.gz\".format(self.mangaid))\n if filename == []:\n logging.warning(\"No Galaxy Zoo 3D Data Available for this Galaxy!\")\n return np.zeros(self[\"emline gflux ha\"].value.shape[0:], dtype = bool)\n else:\n data = gz3d_fits(filename[0], maps = self)\n data.make_all_spaxel_masks()\n if np.all(data.bar_mask_spaxel == 0):\n logging.warning(\"No Bar Mask Available for this Galaxy!\")\n return np.zeros(self[\"emline gflux ha\"].value.shape[0:], dtype = bool)\n else:\n try:\n bar_mask = data.bar_mask_spaxel >= data.metadata[\"GZ2_bar_votes\"] * vote_threshold\n except KeyError:\n bar_mask = data.bar_mask_spaxel >= data.metadata[\"GZ_bar_votes\"] * vote_threshold\n\n if np.any(bar_mask):\n return bar_mask\n else:\n logging.warning(\"No Bar Mask Available for this Galaxy above vote threshold!\")\n return np.zeros(self[\"emline gflux ha\"].value.shape[0:], dtype = bool)\n\n def get_map(self, *args, snr_min = None, **kwargs):\n \"\"\"\n Retrives map with a minimum SNR cut applied\n \"\"\"\n if snr_min is None:\n snr_min = 3.0\n\n\n m = self.getMap(*args, **kwargs)\n\n m_masked = m.masked\n m_masked.mask |= m.snr <= snr_min\n\n return m_masked\n\n\n def get_bar_coords(self, bar_mask = None, flip = False, bar_radius = None, **kwargs):\n \"\"\"\n Determines bar angle based on min bounding box and returns Coordinate Frame in cylindrical \n coordinates scaled by the bar_length\n\n Parameters\n ----------\n\n bar_mask: `np.array`, optional\n bar_mask boolean array\n if not provided, will attempt to get\n **kwargs:\n passed onto self.get_bar_mask if used\n \"\"\"\n\n if bar_mask is None:\n bar_mask = self.get_bar_mask(**kwargs).flatten()\n\n try:\n assert bar_mask.sum() > 0\n except AssertionError:\n nan_arr = np.full(self.spx_ellcoo_r_re.shape, np.nan)\n bar_coords = CylindricalRepresentation(rho = nan_arr, phi = nan_arr*u.deg, z = nan_arr)\n return bar_coords\n # raise ValueError(\"bar_mask does not identify any spaxels in the bar!\")\n\n # Define Coordinates in Galaxy Frame\n cyl = CylindricalRepresentation(rho = self.spx_ellcoo_r_re, \n phi = self.spx_ellcoo_elliptical_azimuth * u.deg, \n z = np.zeros_like(self.spx_ellcoo_r_re))\n # Convert to Cartesian in Galaxy Frame\n cart = cyl.to_cartesian()\n\n bar_x = cart.x.flatten()[bar_mask]\n bar_y = cart.y.flatten()[bar_mask]\n\n # Determine Bar Angle\n points_cart = np.array([bar_x, bar_y]).T\n bbox = minimum_bounding_rectangle(points_cart)\n xx,yy = bbox[0,:]\n dists = np.array([((xx - xx2)**2 + (yy-yy2)**2) for (xx2,yy2) in bbox])\n args = np.argsort(dists)\n xx2,yy2= bbox[args,:][2,:]\n xx,yy = bbox[args,:][0,:]\n slope = (yy2 - yy) / (xx2 - xx)\n bar_angle = np.arctan2((yy2 - yy), (xx2-xx))\n\n\n # C = np.cov(np.vstack([bar_x,\n # bar_y]))\n # w, v = np.linalg.eig(C)\n # inx = w.argsort()[::-1]\n # w, v = w[inx], v[:, inx]\n\n # w_12 = w[:2]\n # v_12 = v[:, :2]\n\n # Determine new r_bar rho-frame\n l_or_w = np.sqrt((bbox[0,0] - bbox[1,0])**2 + (bbox[0,1] - bbox[1,1])**2)\n l_or_w2 = np.sqrt((bbox[1,0] - bbox[3,0])**2 + (bbox[1,1] - bbox[3,1])**2)\n \n if bar_radius is None:\n bar_radius = np.max([l_or_w, l_or_w2])/2.\n new_center_x, 
new_center_y = bar_x.mean(), bar_y.mean()\n new_x = cart.x - new_center_x\n new_y = cart.y - new_center_y\n\n new_cart = CartesianRepresentation(x = new_x, y = new_y, z = cart.z)\n new_bar_coords = CylindricalRepresentation.from_cartesian(new_cart)\n\n med_x, med_y = np.median(bar_x), np.median(bar_y)\n\n # Check Angle Values and Fix\n # bar_angle = np.arctan2(v_12[0,1],v_12[0,0])\n if (bar_x>med_x).sum() >= (bar_x<med_x).sum():\n if np.median(bar_y[bar_x>med_x]) >med_y:\n try:\n assert bar_angle > 0\n except AssertionError:\n bar_angle *= -1\n elif np.median(bar_y[bar_x>med_x]) < med_y:\n try:\n assert bar_angle < 0\n except AssertionError:\n bar_angle *= -1\n else:\n if np.median(bar_y[bar_x<med_x]) <med_y:\n try:\n assert bar_angle > 0\n except AssertionError:\n bar_angle *= -1\n elif np.median(bar_y[bar_x<med_x]) > med_y:\n try:\n assert bar_angle < 0\n except AssertionError:\n bar_angle *= -1\n\n if (np.abs(bar_angle) > np.pi/2.):\n bar_angle = np.pi - bar_angle\n\n # Determine New Phi-frame\n new_phi = Angle(new_bar_coords.phi - (bar_angle) * u.rad)\n\n if flip:\n new_phi *= -1\n\n \n # bar_radius = np.max(self.spx_ellcoo_r_re.value.flatten()[bar_mask])\n new_rho = new_bar_coords.rho / bar_radius\n\n bar_coords = CylindricalRepresentation(rho = new_rho, phi = new_phi, z = cyl.z)\n\n return bar_coords\n\n\n\n def mean_intensity_v_phi(self, map_name, \n bin_width_phi = None, step_size_phi = None, \n min_rho = None, max_rho = None, \n bar_coords = None, estimator = None, \n return_errors = False, snr_min = None, \n wrap = True,\n **kwargs):\n \"\"\"\n Find mean intensity of specified map along bins in azimuth\n \n Parameters\n ----------\n map_name: `str`\n name of map attribute to use\n bin_width_phi: `u.Quantity`, `number`, optional, must be keyword\n width of bins along azimuth angle\n defualt units of deg\n step_size_phi: `u.Quantity`, `number`, optional, must be keyword\n step size along azimuth angle\n defualt units of deg\n min_rho: `number`, optional, must be keyword\n minimum radius to consider \n default to 1 R_bar\n max_rho: `number`, optional, must be keyword\n maximum radius to consider\n default to 2 R_bar\n bar_coords: `astropy.coordinates.CylindricalRepresentation`, optional, must be keyword\n bar coordinate frame\n if not given, will try to get\n estimator: `str`, optional, must be keyword\n 'mean' or 'median'\n return_errors: `bool`, must be keyword\n if True, also returns errors\n snr_min: `number`, optional, must be keyword\n min SNR to use\n default to 3\n wrap: `bool`, optional, must be keyword\n if True, will only consider 0-180 degrees, wrapping\n if False, central_phis span 0 to 360 degrees\n kwargs:\n passed onto get_bar_coords if used\n \"\"\"\n if bin_width_phi is None:\n bin_width_phi = 10 * u.deg\n elif not hasattr(bin_width_phi, \"unit\"):\n bin_width_phi *= u.deg\n logging.warning(\"No units specified for bin_width_phi, assuming u.deg\")\n \n if step_size_phi is None:\n step_size_phi = 2.5 * u.deg\n elif not hasattr(step_size_phi, \"unit\"):\n step_size_phi *= u.deg\n logging.warning(\"No units specified for step_size_phi, assuming u.deg\")\n \n if min_rho is None:\n min_rho = 1.2 \n if max_rho is None:\n max_rho = 2.\n \n if bar_coords is None:\n bar_coords = self.get_bar_coords(**kwargs)\n \n if estimator is None:\n estimator = \"mean\"\n elif estimator not in [\"mean\", \"median\"]:\n estimator = \"mean\"\n logging.warning(\"estimator not recognized, using mean\")\n\n if estimator is \"mean\":\n estimator_function = np.ma.mean\n else:\n 
estimator_function = np.ma.median\n \n if snr_min is None:\n snr_min = 3.0\n\n\n if wrap: \n central_phi = np.arange(0, \n 180,\n step_size_phi.to(u.deg).value) * u.deg\n else:\n central_phi = np.arange(0, \n 360,\n step_size_phi.to(u.deg).value) * u.deg\n \n # Make radial mask\n radial_mask = bar_coords.rho < max_rho\n radial_mask &= bar_coords.rho > min_rho\n\n # Check map_name\n if map_name is \"stellar_mass\":\n map_data = self.get_PCA_stellar_mass()\n nan_mask = np.isnan(map_data)\n map_data.mask |= nan_mask\n map_unit = map_data.data.unit\n map_data = np.ma.masked_array(data = map_data.data.value[0,:,:], \n mask = map_data.mask[0,:,:])\n elif map_name is \"smsd\":\n map_data = self.get_PCA_stellar_mass_density()\n nan_mask = np.isnan(map_data)\n map_data.mask |= nan_mask\n map_unit = map_data.data.unit\n map_data = np.ma.masked_array(data = map_data.data.value[0,:,:], \n mask = map_data.mask[0,:,:])\n elif map_name is \"Av\":\n map_data = self.balmer_Av(snr_min = snr_min)\n map_unit = 1.\n else:\n try:\n # Get Map Attribute to average\n map_data = self.get_map(map_name, snr_min = snr_min)\n map_unit = self.datamodel[\"emline_gflux_ha\"].unit\n except ValueError:\n try:\n map_data = self.get_PCA_zpres_info(map_name)\n except FileNotFoundError:\n map_data = np.full(self.get_map(\"emline_gflux_ha\").shape, np.nan)\n map_data = np.ma.masked_array(data = map_data, mask = np.isnan(map_data))\n if map_data.shape[0] == 3:\n map_data = map_data[0,:,:]\n\n map_data = np.ma.masked_array(data= map_data.data, \n mask = map_data.mask | np.isnan(map_data.data))\n map_unit = 1.\n \n average_values = np.zeros_like(central_phi.value)\n if estimator is \"mean\":\n error_values = np.zeros_like(average_values)\n else:\n error_values = np.zeros((len(average_values),2))\n \n for ell, phi in enumerate(central_phi):\n if wrap:\n az_mask = bar_coords.phi.wrap_at(\"180d\") <= phi + bin_width_phi\n az_mask &= bar_coords.phi.wrap_at(\"180d\") > phi - bin_width_phi\n az_mask |= ((bar_coords.phi.wrap_at(\"180d\") <= -180*u.deg - (phi - bin_width_phi)) & \n (bar_coords.phi.wrap_at(\"180d\") > -180*u.deg + (phi + bin_width_phi)))\n\n else:\n az_mask = bar_coords.phi.wrap_at(\"360d\") <= phi + bin_width_phi\n az_mask &= bar_coords.phi.wrap_at(\"360d\") > phi - bin_width_phi\n \n current_mask = az_mask & radial_mask\n \n \n average_values[ell] = estimator_function(map_data[current_mask])\n if (estimator is \"mean\") & (return_errors):\n error_values[ell] = np.std(map_data[current_mask])\n elif (estimator is \"median\") & (return_errors):\n error_values[ell,:] = np.percentile(map_data[current_mask].flatten(), (16,84))\n \n if return_errors:\n return average_values * map_unit, central_phi, error_values * map_unit\n else:\n return average_values * map_unit, central_phi\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass DKMaps(DKMapsMixin, Maps):\n \"\"\"\n Wrapper Class for custom functionality with Marvin Maps\n\n Parameters\n ----------\n kwargs: 'dict', optional, must be keyword\n keywords passed to 'marvin.tools.maps.Maps'\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n"
] |
[
[
"numpy.nanmax",
"numpy.dot",
"numpy.sqrt",
"numpy.nanmin",
"numpy.arctan2",
"numpy.concatenate",
"numpy.all",
"numpy.max",
"numpy.argmin",
"numpy.zeros_like",
"numpy.any",
"numpy.unique",
"numpy.full",
"numpy.std",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.log",
"numpy.isnan",
"numpy.median",
"numpy.log10",
"scipy.spatial.ConvexHull",
"numpy.argsort",
"numpy.array",
"numpy.abs",
"numpy.cos",
"numpy.ma.masked_array",
"numpy.mod"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
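`dk_manga_tools/DKMaps.py` above estimates dust attenuation from the Balmer decrement as `AV = 2.68 * ln((Ha/Hb) / 2.92)` and masks out negative values. A tiny numerical sketch of that formula with made-up fluxes:

```python
import numpy as np

ha = np.array([12.0, 8.0, 3.0])        # H-alpha fluxes (arbitrary units)
hb = np.array([3.5, 2.9, 1.2])         # H-beta fluxes

av = 2.68 * np.log((ha / hb) / 2.92)   # attenuation A_V in magnitudes
av = np.ma.masked_less(av, 0.0)        # negative (unphysical) values masked, as in balmer_Av
print(av)
```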
BrettMontague/LightGBM
|
[
"3ad9cba0321a8ac2e5f625da6dcc6f7c1516a750"
] |
[
"python-package/lightgbm/sklearn.py"
] |
[
"# coding: utf-8\n# pylint: disable = invalid-name, W0105, C0111, C0301\n\"\"\"Scikit-learn wrapper interface for LightGBM.\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport warnings\n\nfrom .basic import Dataset, LightGBMError\nfrom .compat import (SKLEARN_INSTALLED, _LGBMClassifierBase,\n LGBMNotFittedError, _LGBMLabelEncoder, _LGBMModelBase,\n _LGBMRegressorBase, _LGBMCheckXY, _LGBMCheckArray, _LGBMCheckConsistentLength,\n _LGBMAssertAllFinite, _LGBMCheckClassificationTargets, _LGBMComputeSampleWeight,\n argc_, range_, string_type, DataFrame)\nfrom .engine import train\n\n\ndef _objective_function_wrapper(func):\n \"\"\"Decorate an objective function.\n\n Note\n ----\n For multi-class task, the y_pred is group by class_id first, then group by row_id.\n If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]\n and you should group grad and hess in this way as well.\n\n Parameters\n ----------\n func : callable\n Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group):\n\n y_true : array-like of shape = [n_samples]\n The target values.\n y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The predicted values.\n group : array-like\n Group/query data, used for ranking task.\n\n Returns\n -------\n new_func : callable\n The new objective function as expected by ``lightgbm.engine.train``.\n The signature is ``new_func(preds, dataset)``:\n\n preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The predicted values.\n dataset : Dataset\n The training set from which the labels will be extracted using ``dataset.get_label()``.\n \"\"\"\n def inner(preds, dataset):\n \"\"\"Call passed function with appropriate arguments.\"\"\"\n labels = dataset.get_label()\n argc = argc_(func)\n if argc == 2:\n grad, hess = func(labels, preds)\n elif argc == 3:\n grad, hess = func(labels, preds, dataset.get_group())\n else:\n raise TypeError(\"Self-defined objective function should have 2 or 3 arguments, got %d\" % argc)\n \"\"\"weighted for objective\"\"\"\n weight = dataset.get_weight()\n if weight is not None:\n \"\"\"only one class\"\"\"\n if len(weight) == len(grad):\n grad = np.multiply(grad, weight)\n hess = np.multiply(hess, weight)\n else:\n num_data = len(weight)\n num_class = len(grad) // num_data\n if num_class * num_data != len(grad):\n raise ValueError(\"Length of grad and hess should equal to num_class * num_data\")\n for k in range_(num_class):\n for i in range_(num_data):\n idx = k * num_data + i\n grad[idx] *= weight[i]\n hess[idx] *= weight[i]\n return grad, hess\n return inner\n\n\ndef _eval_function_wrapper(func):\n \"\"\"Decorate an eval function.\n\n Note\n ----\n For multi-class task, the y_pred is group by class_id first, then group by row_id.\n If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].\n\n Parameters\n ----------\n func : callable\n Expects a callable with following signatures:\n ``func(y_true, y_pred)``,\n ``func(y_true, y_pred, weight)``\n or ``func(y_true, y_pred, weight, group)``\n and returns (eval_name->string, eval_result->float, is_bigger_better->bool):\n\n y_true : array-like of shape = [n_samples]\n The target values.\n y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The predicted values.\n weight : array-like of shape = [n_samples]\n The weight of samples.\n group : 
array-like\n Group/query data, used for ranking task.\n\n Returns\n -------\n new_func : callable\n The new eval function as expected by ``lightgbm.engine.train``.\n The signature is ``new_func(preds, dataset)``:\n\n preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The predicted values.\n dataset : Dataset\n The training set from which the labels will be extracted using ``dataset.get_label()``.\n \"\"\"\n def inner(preds, dataset):\n \"\"\"Call passed function with appropriate arguments.\"\"\"\n labels = dataset.get_label()\n argc = argc_(func)\n if argc == 2:\n return func(labels, preds)\n elif argc == 3:\n return func(labels, preds, dataset.get_weight())\n elif argc == 4:\n return func(labels, preds, dataset.get_weight(), dataset.get_group())\n else:\n raise TypeError(\"Self-defined eval function should have 2, 3 or 4 arguments, got %d\" % argc)\n return inner\n\n\nclass LGBMModel(_LGBMModelBase):\n \"\"\"Implementation of the scikit-learn API for LightGBM.\"\"\"\n\n def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1,\n learning_rate=0.1, n_estimators=100,\n subsample_for_bin=200000, objective=None, class_weight=None,\n min_split_gain=0., min_child_weight=1e-3, min_child_samples=20,\n subsample=1., subsample_freq=0, colsample_bytree=1.,\n reg_alpha=0., reg_lambda=0., random_state=None,\n n_jobs=-1, silent=True, importance_type='split', **kwargs):\n r\"\"\"Construct a gradient boosting model.\n\n Parameters\n ----------\n boosting_type : string, optional (default='gbdt')\n 'gbdt', traditional Gradient Boosting Decision Tree.\n 'dart', Dropouts meet Multiple Additive Regression Trees.\n 'goss', Gradient-based One-Side Sampling.\n 'rf', Random Forest.\n num_leaves : int, optional (default=31)\n Maximum tree leaves for base learners.\n max_depth : int, optional (default=-1)\n Maximum tree depth for base learners, -1 means no limit.\n learning_rate : float, optional (default=0.1)\n Boosting learning rate.\n You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate\n in training using ``reset_parameter`` callback.\n Note, that this will ignore the ``learning_rate`` argument in training.\n n_estimators : int, optional (default=100)\n Number of boosted trees to fit.\n subsample_for_bin : int, optional (default=200000)\n Number of samples for constructing bins.\n objective : string, callable or None, optional (default=None)\n Specify the learning task and the corresponding learning objective or\n a custom objective function to be used (see note below).\n Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.\n class_weight : dict, 'balanced' or None, optional (default=None)\n Weights associated with classes in the form ``{class_label: weight}``.\n Use this parameter only for multi-class classification task;\n for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.\n The 'balanced' mode uses the values of y to automatically adjust weights\n inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.\n If None, all classes are supposed to have weight one.\n Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)\n if ``sample_weight`` is specified.\n min_split_gain : float, optional (default=0.)\n Minimum loss reduction required to make a further partition on a leaf node of the tree.\n min_child_weight : 
float, optional (default=1e-3)\n Minimum sum of instance weight (hessian) needed in a child (leaf).\n min_child_samples : int, optional (default=20)\n Minimum number of data needed in a child (leaf).\n subsample : float, optional (default=1.)\n Subsample ratio of the training instance.\n subsample_freq : int, optional (default=0)\n Frequence of subsample, <=0 means no enable.\n colsample_bytree : float, optional (default=1.)\n Subsample ratio of columns when constructing each tree.\n reg_alpha : float, optional (default=0.)\n L1 regularization term on weights.\n reg_lambda : float, optional (default=0.)\n L2 regularization term on weights.\n random_state : int or None, optional (default=None)\n Random number seed.\n If None, default seeds in C++ code will be used.\n n_jobs : int, optional (default=-1)\n Number of parallel threads.\n silent : bool, optional (default=True)\n Whether to print messages while running boosting.\n importance_type : string, optional (default='split')\n The type of feature importance to be filled into ``feature_importances_``.\n If 'split', result contains numbers of times the feature is used in a model.\n If 'gain', result contains total gains of splits which use the feature.\n **kwargs\n Other parameters for the model.\n Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.\n\n Note\n ----\n \\*\\*kwargs is not supported in sklearn, it may cause unexpected issues.\n\n Attributes\n ----------\n n_features_ : int\n The number of features of fitted model.\n classes_ : array of shape = [n_classes]\n The class label array (only for classification problem).\n n_classes_ : int\n The number of classes (only for classification problem).\n best_score_ : dict or None\n The best score of fitted model.\n best_iteration_ : int or None\n The best iteration of fitted model if ``early_stopping_rounds`` has been specified.\n objective_ : string or callable\n The concrete objective used while fitting this model.\n booster_ : Booster\n The underlying Booster of this model.\n evals_result_ : dict or None\n The evaluation results if ``early_stopping_rounds`` has been specified.\n feature_importances_ : array of shape = [n_features]\n The feature importances (the higher, the more important the feature).\n\n Note\n ----\n A custom objective function can be provided for the ``objective`` parameter.\n In this case, it should have the signature\n ``objective(y_true, y_pred) -> grad, hess`` or\n ``objective(y_true, y_pred, group) -> grad, hess``:\n\n y_true : array-like of shape = [n_samples]\n The target values.\n y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The predicted values.\n group : array-like\n Group/query data, used for ranking task.\n grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The value of the gradient for each sample point.\n hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The value of the second derivative for each sample point.\n\n For multi-class task, the y_pred is group by class_id first, then group by row_id.\n If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]\n and you should group grad and hess in this way as well.\n \"\"\"\n if not SKLEARN_INSTALLED:\n raise LightGBMError('Scikit-learn is required for this module')\n\n self.boosting_type = boosting_type\n self.objective = objective\n self.num_leaves = num_leaves\n self.max_depth = 
max_depth\n self.learning_rate = learning_rate\n self.n_estimators = n_estimators\n self.subsample_for_bin = subsample_for_bin\n self.min_split_gain = min_split_gain\n self.min_child_weight = min_child_weight\n self.min_child_samples = min_child_samples\n self.subsample = subsample\n self.subsample_freq = subsample_freq\n self.colsample_bytree = colsample_bytree\n self.reg_alpha = reg_alpha\n self.reg_lambda = reg_lambda\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.silent = silent\n self.importance_type = importance_type\n self._Booster = None\n self._evals_result = None\n self._best_score = None\n self._best_iteration = None\n self._other_params = {}\n self._objective = objective\n self.class_weight = class_weight\n self._n_features = None\n self._classes = None\n self._n_classes = None\n self.set_params(**kwargs)\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n\n Parameters\n ----------\n deep : bool, optional (default=True)\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n \"\"\"\n params = super(LGBMModel, self).get_params(deep=deep)\n params.update(self._other_params)\n return params\n\n # minor change to support `**kwargs`\n def set_params(self, **params):\n \"\"\"Set the parameters of this estimator.\n\n Parameters\n ----------\n **params\n Parameter names with their new values.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n for key, value in params.items():\n setattr(self, key, value)\n if hasattr(self, '_' + key):\n setattr(self, '_' + key, value)\n self._other_params[key] = value\n return self\n\n def fit(self, X, y,\n sample_weight=None, init_score=None, group=None,\n eval_set=None, eval_names=None, eval_sample_weight=None,\n eval_class_weight=None, eval_init_score=None, eval_group=None,\n eval_metric=None, early_stopping_rounds=None, verbose=True,\n feature_name='auto', categorical_feature='auto', callbacks=None):\n \"\"\"Build a gradient boosting model from the training set (X, y).\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n Input feature matrix.\n y : array-like of shape = [n_samples]\n The target values (class labels in classification, real numbers in regression).\n sample_weight : array-like of shape = [n_samples] or None, optional (default=None)\n Weights of training data.\n init_score : array-like of shape = [n_samples] or None, optional (default=None)\n Init score of training data.\n group : array-like or None, optional (default=None)\n Group data of training data.\n eval_set : list or None, optional (default=None)\n A list of (X, y) tuple pairs to use as validation sets.\n eval_names : list of strings or None, optional (default=None)\n Names of eval_set.\n eval_sample_weight : list of arrays or None, optional (default=None)\n Weights of eval data.\n eval_class_weight : list or None, optional (default=None)\n Class weights of eval data.\n eval_init_score : list of arrays or None, optional (default=None)\n Init score of eval data.\n eval_group : list of arrays or None, optional (default=None)\n Group data of eval data.\n eval_metric : string, list of strings, callable or None, optional (default=None)\n If string, it should be a built-in evaluation metric to use.\n If callable, it should be a custom evaluation metric, see note below for more details.\n In either case, the ``metric`` from the model parameters will 
be evaluated and used as well.\n Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.\n early_stopping_rounds : int or None, optional (default=None)\n Activates early stopping. The model will train until the validation score stops improving.\n Validation score needs to improve at least every ``early_stopping_rounds`` round(s)\n to continue training.\n Requires at least one validation data and one metric.\n If there's more than one, will check all of them. But the training data is ignored anyway.\n verbose : bool, optional (default=True)\n If True and an evaluation set is used, writes the evaluation progress.\n feature_name : list of strings or 'auto', optional (default='auto')\n Feature names.\n If 'auto' and data is pandas DataFrame, data columns names are used.\n categorical_feature : list of strings or int, or 'auto', optional (default='auto')\n Categorical features.\n If list of int, interpreted as indices.\n If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).\n If 'auto' and data is pandas DataFrame, pandas categorical columns are used.\n All values in categorical features should be less than int32 max value (2147483647).\n Large values could be memory consuming. Consider using consecutive integers starting from zero.\n All negative values in categorical features will be treated as missing values.\n callbacks : list of callback functions or None, optional (default=None)\n List of callback functions that are applied at each iteration.\n See Callbacks in Python API for more information.\n\n Returns\n -------\n self : object\n Returns self.\n\n Note\n ----\n Custom eval function expects a callable with following signatures:\n ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or\n ``func(y_true, y_pred, weight, group)``\n and returns (eval_name, eval_result, is_bigger_better) or\n list of (eval_name, eval_result, is_bigger_better):\n\n y_true : array-like of shape = [n_samples]\n The target values.\n y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)\n The predicted values.\n weight : array-like of shape = [n_samples]\n The weight of samples.\n group : array-like\n Group/query data, used for ranking task.\n eval_name : string\n The name of evaluation.\n eval_result : float\n The eval result.\n is_bigger_better : bool\n Is eval result bigger better, e.g. 
AUC is bigger_better.\n\n For multi-class task, the y_pred is group by class_id first, then group by row_id.\n If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].\n \"\"\"\n if self._objective is None:\n if isinstance(self, LGBMRegressor):\n self._objective = \"regression\"\n elif isinstance(self, LGBMClassifier):\n self._objective = \"binary\"\n elif isinstance(self, LGBMRanker):\n self._objective = \"lambdarank\"\n else:\n raise ValueError(\"Unknown LGBMModel type.\")\n if callable(self._objective):\n self._fobj = _objective_function_wrapper(self._objective)\n else:\n self._fobj = None\n evals_result = {}\n params = self.get_params()\n # user can set verbose with kwargs, it has higher priority\n if not any(verbose_alias in params for verbose_alias in ('verbose', 'verbosity')) and self.silent:\n params['verbose'] = -1\n params.pop('silent', None)\n params.pop('importance_type', None)\n params.pop('n_estimators', None)\n params.pop('class_weight', None)\n if self._n_classes is not None and self._n_classes > 2:\n params['num_class'] = self._n_classes\n if hasattr(self, '_eval_at'):\n params['eval_at'] = self._eval_at\n params['objective'] = self._objective\n if self._fobj:\n params['objective'] = 'None' # objective = nullptr for unknown objective\n\n if callable(eval_metric):\n feval = _eval_function_wrapper(eval_metric)\n else:\n feval = None\n # register default metric for consistency with callable eval_metric case\n original_metric = self._objective if isinstance(self._objective, string_type) else None\n if original_metric is None:\n # try to deduce from class instance\n if isinstance(self, LGBMRegressor):\n original_metric = \"l2\"\n elif isinstance(self, LGBMClassifier):\n original_metric = \"multi_logloss\" if self._n_classes > 2 else \"binary_logloss\"\n elif isinstance(self, LGBMRanker):\n original_metric = \"ndcg\"\n # overwrite default metric by explicitly set metric\n for metric_alias in ['metric', 'metrics', 'metric_types']:\n if metric_alias in params:\n original_metric = params.pop(metric_alias)\n # concatenate metric from params (or default if not provided in params) and eval_metric\n original_metric = [original_metric] if isinstance(original_metric, (string_type, type(None))) else original_metric\n eval_metric = [eval_metric] if isinstance(eval_metric, (string_type, type(None))) else eval_metric\n params['metric'] = set(original_metric + eval_metric)\n\n if not isinstance(X, DataFrame):\n X, y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)\n _LGBMCheckConsistentLength(X, y, sample_weight)\n\n if self.class_weight is not None:\n class_sample_weight = _LGBMComputeSampleWeight(self.class_weight, y)\n if sample_weight is None or len(sample_weight) == 0:\n sample_weight = class_sample_weight\n else:\n sample_weight = np.multiply(sample_weight, class_sample_weight)\n\n self._n_features = X.shape[1]\n\n def _construct_dataset(X, y, sample_weight, init_score, group, params):\n ret = Dataset(X, label=y, weight=sample_weight, group=group, params=params)\n return ret.set_init_score(init_score)\n\n train_set = _construct_dataset(X, y, sample_weight, init_score, group, params)\n\n valid_sets = []\n if eval_set is not None:\n\n def _get_meta_data(collection, i):\n if collection is None:\n return None\n elif isinstance(collection, list):\n return collection[i] if len(collection) > i else None\n elif isinstance(collection, dict):\n return collection.get(i, None)\n else:\n raise TypeError('eval_sample_weight, 
eval_class_weight, eval_init_score, and eval_group '\n 'should be dict or list')\n\n if isinstance(eval_set, tuple):\n eval_set = [eval_set]\n for i, valid_data in enumerate(eval_set):\n # reduce cost for prediction training data\n if valid_data[0] is X and valid_data[1] is y:\n valid_set = train_set\n else:\n valid_weight = _get_meta_data(eval_sample_weight, i)\n if _get_meta_data(eval_class_weight, i) is not None:\n valid_class_sample_weight = _LGBMComputeSampleWeight(_get_meta_data(eval_class_weight, i),\n valid_data[1])\n if valid_weight is None or len(valid_weight) == 0:\n valid_weight = valid_class_sample_weight\n else:\n valid_weight = np.multiply(valid_weight, valid_class_sample_weight)\n valid_init_score = _get_meta_data(eval_init_score, i)\n valid_group = _get_meta_data(eval_group, i)\n valid_set = _construct_dataset(valid_data[0], valid_data[1],\n valid_weight, valid_init_score, valid_group, params)\n valid_sets.append(valid_set)\n\n self._Booster = train(params, train_set,\n self.n_estimators, valid_sets=valid_sets, valid_names=eval_names,\n early_stopping_rounds=early_stopping_rounds,\n evals_result=evals_result, fobj=self._fobj, feval=feval,\n verbose_eval=verbose, feature_name=feature_name,\n categorical_feature=categorical_feature,\n callbacks=callbacks)\n\n if evals_result:\n self._evals_result = evals_result\n\n if early_stopping_rounds is not None:\n self._best_iteration = self._Booster.best_iteration\n\n self._best_score = self._Booster.best_score\n\n # free dataset\n self.booster_.free_dataset()\n del train_set, valid_sets\n return self\n\n def predict(self, X, raw_score=False, num_iteration=None,\n pred_leaf=False, pred_contrib=False, **kwargs):\n \"\"\"Return the predicted value for each sample.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n Input features matrix.\n raw_score : bool, optional (default=False)\n Whether to predict raw scores.\n num_iteration : int or None, optional (default=None)\n Limit number of iterations in the prediction.\n If None, if the best iteration exists, it is used; otherwise, all trees are used.\n If <= 0, all trees are used (no limits).\n pred_leaf : bool, optional (default=False)\n Whether to predict leaf index.\n pred_contrib : bool, optional (default=False)\n Whether to predict feature contributions.\n\n Note\n ----\n If you want to get more explanation for your model's predictions using SHAP values\n like SHAP interaction values,\n you can install shap package (https://github.com/slundberg/shap).\n\n **kwargs\n Other parameters for the prediction.\n\n Returns\n -------\n predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes]\n The predicted values.\n X_leaves : array-like of shape = [n_samples, n_trees] or shape [n_samples, n_trees * n_classes]\n If ``pred_leaf=True``, the predicted leaf every tree for each sample.\n X_SHAP_values : array-like of shape = [n_samples, n_features + 1] or shape [n_samples, (n_features + 1) * n_classes]\n If ``pred_contrib=True``, the each feature contributions for each sample.\n \"\"\"\n if self._n_features is None:\n raise LGBMNotFittedError(\"Estimator not fitted, call `fit` before exploiting the model.\")\n if not isinstance(X, DataFrame):\n X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)\n n_features = X.shape[1]\n if self._n_features != n_features:\n raise ValueError(\"Number of features of the model must \"\n \"match the input. 
Model n_features_ is %s and \"\n \"input n_features is %s \"\n % (self._n_features, n_features))\n return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration,\n pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)\n\n @property\n def n_features_(self):\n \"\"\"Get the number of features of fitted model.\"\"\"\n if self._n_features is None:\n raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')\n return self._n_features\n\n @property\n def best_score_(self):\n \"\"\"Get the best score of fitted model.\"\"\"\n if self._n_features is None:\n raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')\n return self._best_score\n\n @property\n def best_iteration_(self):\n \"\"\"Get the best iteration of fitted model.\"\"\"\n if self._n_features is None:\n raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping_rounds beforehand.')\n return self._best_iteration\n\n @property\n def objective_(self):\n \"\"\"Get the concrete objective used while fitting this model.\"\"\"\n if self._n_features is None:\n raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')\n return self._objective\n\n @property\n def booster_(self):\n \"\"\"Get the underlying lightgbm Booster of this model.\"\"\"\n if self._Booster is None:\n raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')\n return self._Booster\n\n @property\n def evals_result_(self):\n \"\"\"Get the evaluation results.\"\"\"\n if self._n_features is None:\n raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')\n return self._evals_result\n\n @property\n def feature_importances_(self):\n \"\"\"Get feature importances.\n\n Note\n ----\n Feature importance in sklearn interface used to normalize to 1,\n it's deprecated after 2.0.4 and is the same as Booster.feature_importance() now.\n ``importance_type`` attribute is passed to the function\n to configure the type of importance values to be extracted.\n \"\"\"\n if self._n_features is None:\n raise LGBMNotFittedError('No feature_importances found. 
Need to call fit beforehand.')\n return self.booster_.feature_importance(importance_type=self.importance_type)\n\n\nclass LGBMRegressor(LGBMModel, _LGBMRegressorBase):\n \"\"\"LightGBM regressor.\"\"\"\n\n def fit(self, X, y,\n sample_weight=None, init_score=None,\n eval_set=None, eval_names=None, eval_sample_weight=None,\n eval_init_score=None, eval_metric=None, early_stopping_rounds=None,\n verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None):\n \"\"\"Docstring is inherited from the LGBMModel.\"\"\"\n super(LGBMRegressor, self).fit(X, y, sample_weight=sample_weight,\n init_score=init_score, eval_set=eval_set,\n eval_names=eval_names,\n eval_sample_weight=eval_sample_weight,\n eval_init_score=eval_init_score,\n eval_metric=eval_metric,\n early_stopping_rounds=early_stopping_rounds,\n verbose=verbose, feature_name=feature_name,\n categorical_feature=categorical_feature,\n callbacks=callbacks)\n return self\n\n _base_doc = LGBMModel.fit.__doc__\n fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]\n + _base_doc[_base_doc.find('eval_init_score :'):])\n\n\nclass LGBMClassifier(LGBMModel, _LGBMClassifierBase):\n \"\"\"LightGBM classifier.\"\"\"\n\n def fit(self, X, y,\n sample_weight=None, init_score=None,\n eval_set=None, eval_names=None, eval_sample_weight=None,\n eval_class_weight=None, eval_init_score=None, eval_metric=None,\n early_stopping_rounds=None, verbose=True,\n feature_name='auto', categorical_feature='auto', callbacks=None):\n \"\"\"Docstring is inherited from the LGBMModel.\"\"\"\n _LGBMAssertAllFinite(y)\n _LGBMCheckClassificationTargets(y)\n self._le = _LGBMLabelEncoder().fit(y)\n _y = self._le.transform(y)\n\n self._classes = self._le.classes_\n self._n_classes = len(self._classes)\n if self._n_classes > 2:\n # Switch to using a multiclass objective in the underlying LGBM instance\n ova_aliases = (\"multiclassova\", \"multiclass_ova\", \"ova\", \"ovr\")\n if self._objective not in ova_aliases and not callable(self._objective):\n self._objective = \"multiclass\"\n if eval_metric in ('logloss', 'binary_logloss'):\n eval_metric = \"multi_logloss\"\n elif eval_metric in ('error', 'binary_error'):\n eval_metric = \"multi_error\"\n else:\n if eval_metric in ('logloss', 'multi_logloss'):\n eval_metric = 'binary_logloss'\n elif eval_metric in ('error', 'multi_error'):\n eval_metric = 'binary_error'\n\n if eval_set is not None:\n if isinstance(eval_set, tuple):\n eval_set = [eval_set]\n for i, (valid_x, valid_y) in enumerate(eval_set):\n if valid_x is X and valid_y is y:\n eval_set[i] = (valid_x, _y)\n else:\n eval_set[i] = (valid_x, self._le.transform(valid_y))\n\n super(LGBMClassifier, self).fit(X, _y, sample_weight=sample_weight,\n init_score=init_score, eval_set=eval_set,\n eval_names=eval_names,\n eval_sample_weight=eval_sample_weight,\n eval_class_weight=eval_class_weight,\n eval_init_score=eval_init_score,\n eval_metric=eval_metric,\n early_stopping_rounds=early_stopping_rounds,\n verbose=verbose, feature_name=feature_name,\n categorical_feature=categorical_feature,\n callbacks=callbacks)\n return self\n\n fit.__doc__ = LGBMModel.fit.__doc__\n\n def predict(self, X, raw_score=False, num_iteration=None,\n pred_leaf=False, pred_contrib=False, **kwargs):\n \"\"\"Docstring is inherited from the LGBMModel.\"\"\"\n result = self.predict_proba(X, raw_score, num_iteration,\n pred_leaf, pred_contrib, **kwargs)\n if raw_score or pred_leaf or pred_contrib:\n return result\n else:\n class_index = np.argmax(result, axis=1)\n return 
self._le.inverse_transform(class_index)\n\n predict.__doc__ = LGBMModel.predict.__doc__\n\n def predict_proba(self, X, raw_score=False, num_iteration=None,\n pred_leaf=False, pred_contrib=False, **kwargs):\n \"\"\"Return the predicted probability for each class for each sample.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n Input features matrix.\n raw_score : bool, optional (default=False)\n Whether to predict raw scores.\n num_iteration : int or None, optional (default=None)\n Limit number of iterations in the prediction.\n If None, if the best iteration exists, it is used; otherwise, all trees are used.\n If <= 0, all trees are used (no limits).\n pred_leaf : bool, optional (default=False)\n Whether to predict leaf index.\n pred_contrib : bool, optional (default=False)\n Whether to predict feature contributions.\n\n Note\n ----\n If you want to get more explanation for your model's predictions using SHAP values\n like SHAP interaction values,\n you can install shap package (https://github.com/slundberg/shap).\n\n **kwargs\n Other parameters for the prediction.\n\n Returns\n -------\n predicted_probability : array-like of shape = [n_samples, n_classes]\n The predicted probability for each class for each sample.\n X_leaves : array-like of shape = [n_samples, n_trees * n_classes]\n If ``pred_leaf=True``, the predicted leaf every tree for each sample.\n X_SHAP_values : array-like of shape = [n_samples, (n_features + 1) * n_classes]\n If ``pred_contrib=True``, the each feature contributions for each sample.\n \"\"\"\n result = super(LGBMClassifier, self).predict(X, raw_score, num_iteration,\n pred_leaf, pred_contrib, **kwargs)\n if self._n_classes > 2 or pred_leaf or pred_contrib:\n return result\n else:\n return np.vstack((1. - result, result)).transpose()\n\n @property\n def classes_(self):\n \"\"\"Get the class label array.\"\"\"\n if self._classes is None:\n raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')\n return self._classes\n\n @property\n def n_classes_(self):\n \"\"\"Get the number of classes.\"\"\"\n if self._n_classes is None:\n raise LGBMNotFittedError('No classes found. 
Need to call fit beforehand.')\n return self._n_classes\n\n\nclass LGBMRanker(LGBMModel):\n \"\"\"LightGBM ranker.\"\"\"\n\n def fit(self, X, y,\n sample_weight=None, init_score=None, group=None,\n eval_set=None, eval_names=None, eval_sample_weight=None,\n eval_init_score=None, eval_group=None, eval_metric=None,\n eval_at=[1], early_stopping_rounds=None, verbose=True,\n feature_name='auto', categorical_feature='auto', callbacks=None):\n \"\"\"Docstring is inherited from the LGBMModel.\"\"\"\n # check group data\n if group is None:\n raise ValueError(\"Should set group for ranking task\")\n\n if eval_set is not None:\n if eval_group is None:\n raise ValueError(\"Eval_group cannot be None when eval_set is not None\")\n elif len(eval_group) != len(eval_set):\n raise ValueError(\"Length of eval_group should be equal to eval_set\")\n elif (isinstance(eval_group, dict)\n and any(i not in eval_group or eval_group[i] is None for i in range_(len(eval_group)))\n or isinstance(eval_group, list)\n and any(group is None for group in eval_group)):\n raise ValueError(\"Should set group for all eval datasets for ranking task; \"\n \"if you use dict, the index should start from 0\")\n\n self._eval_at = eval_at\n super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,\n init_score=init_score, group=group,\n eval_set=eval_set, eval_names=eval_names,\n eval_sample_weight=eval_sample_weight,\n eval_init_score=eval_init_score, eval_group=eval_group,\n eval_metric=eval_metric,\n early_stopping_rounds=early_stopping_rounds,\n verbose=verbose, feature_name=feature_name,\n categorical_feature=categorical_feature,\n callbacks=callbacks)\n return self\n\n _base_doc = LGBMModel.fit.__doc__\n fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]\n + _base_doc[_base_doc.find('eval_init_score :'):])\n _base_doc = fit.__doc__\n _before_early_stop, _early_stop, _after_early_stop = _base_doc.partition('early_stopping_rounds :')\n fit.__doc__ = (_before_early_stop\n + 'eval_at : list of int, optional (default=[1])\\n'\n + ' ' * 12 + 'The evaluation positions of the specified metric.\\n'\n + ' ' * 8 + _early_stop + _after_early_stop)\n"
] |
[
[
"numpy.vstack",
"numpy.argmax",
"numpy.multiply"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mjacqu/FlatCreekProject
|
[
"3615501f51279389ef2fc6a97a2859e47a027a8c",
"3615501f51279389ef2fc6a97a2859e47a027a8c"
] |
[
"FC_GeologyFigure4.py",
"FC_BulgeSize.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib as mpl\nimport VariousFunctions_FC\n\nfc_all = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/meteo/FC_Water_all_Elevations.csv')\n\n#fc_all.plot(x='time', y=[\"2100\",'2200','2300','2400', \"2500\", '2600', '2700'], figsize = (12,6),\n# colormap = 'gist_earth', style = '--.', linewidth = 1, markersize =4)\nmean80s = fc_all['2100'][(fc_all['time'] > 1979) & (fc_all['time'] < 1990)].mean()\nmean90s = fc_all['2100'][(fc_all['time'] > 1989) & (fc_all['time'] < 2000)].mean()\nmean00s = fc_all['2100'][(fc_all['time'] > 1999) & (fc_all['time'] < 2010)].mean()\nmean10s = fc_all['2100'][(fc_all['time'] > 2009)].mean()\nmean_all = pd.DataFrame(fc_all.mean(axis = 0)[1:])\nmean_all.columns = ['mean']\n\nplt.style.use('ggplot')\nfc_all.plot(x='time', y=[\"2100\",'2300', \"2500\", '2700'], figsize = (12,6),\n colormap = 'copper', style = '--.', linewidth = 1, markersize =4)\nplt.hlines(mean80s,1980,1989.9, color = 'grey', label = 'decadal mean')\nplt.hlines(mean90s,1990,1999.9, color = 'grey')\nplt.hlines(mean00s,2000,2009.9, color = 'grey')\nplt.hlines(mean10s,2010,2015.9, color = 'grey')\nplt.xlim([1978,2016])\nplt.tick_params('both', labelsize = 18)\nplt.xlabel('Year', fontsize = 20)\nplt.ylabel('Water availability [mm]', fontsize = 20)\nplt.legend(fontsize = 16, loc = 2)\nplt.legend(title = 'Elevation [m asl]')\nplt.title('Pre-detachment total liquid water availability', fontsize = 22)\nplt.show()\n#plt.savefig('FC_all_elevations_decadalmean.pdf')\n\n# Analyze numbers:\n\nelevations = 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700\nyear = 2015\n#Print all Standard Deviations and factors by which H2O exceeds mean:\nfor e in elevations:\n print('elevation: '+ str(e))\n VariousFunctions_FC.get_std(fc_all, str(e), year)\n fc_all[str(e)][fc_all.time == year].values/mean_all['mean'][mean_all.index == str(e)].values\n\n\n'''\n# Data import for old figure with Aru and Flat Creek.\naru_water = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/Other_ERA/Aru_5500m_apr_jul_wateravailability.csv')\naru_melt = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/Other_ERA/Aru_5500m_apr_jul_melt.csv')\nfc_water = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/meteo/FC_water_apr_jul_2200m.csv')\nfc_melt = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/meteo/FC_melt_apr_jul_2200m.csv')\n\n# Old plot for Aru and Flat Creek\nplt.style.use('ggplot')\nfig,ax = plt.subplots(figsize = (16,10))\nmpl.rcParams['lines.linewidth'] = 1.8\nmpl.rcParams['lines.markersize'] = 8.5\nax.plot(aru_water.year, aru_water.total_water, '.-', color = 'skyblue', label = 'Aru (total)')\nax.plot(aru_melt.year, aru_melt['melt'],'.:', color = 'skyblue', label = 'Aru (from melt)')\nax.plot(fc_water.year, fc_water.total_water, '.-', color = 'grey', label = 'Flat Creek (total)')\nax.plot(fc_melt.year, fc_melt['melt'],'.:', color = 'grey', label = 'Flat Creek (from melt)')\nplt.plot([2013], [510.55928471], 'o', color = 'black', markersize = 5)\nplt.plot([2015], [285.17040509], 'o', color = 'black', markersize = 5)\nplt.plot([2016], [533.367536], 'o', color = 'steelblue', markersize = 5)\nax.tick_params('both', labelsize = 18)\nplt.xlabel('Year', fontsize = 20)\nplt.ylabel('Water availability [mm]', fontsize = 20)\nax.legend(fontsize = 16, loc = 2)\nplt.ylim([0,580])\nplt.title('Pre-detachment total liquid water availability', fontsize = 22)\nplt.text(2016.1,540, 
'2016', fontsize = 16, color = 'steelblue', fontweight = 'bold')\nplt.text(2015.1,290, '2015', fontsize = 16, color = 'black', fontweight = 'bold')\nplt.text(2013.2,515, '2013', fontsize = 16, color = 'black', fontweight = 'bold')\nplt.show()\n#plt.savefig('FC_and_Aru_water_availability.png')\n'''\n",
"import numpy as np\nimport pandas as pd\nimport pysolar\nimport datetime\nimport math\nimport json\nimport Taan_fjord_helpers\nimport matplotlib.pyplot as plt\nimport scipy.stats\n\nbulge2011 = '/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/BulgeSize/2011_ShadowLengths.geojson'\nbulge2011_adj = '/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/BulgeSize/2011_ShadowLengths_adj.geojson'\nbulge2009 = '/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/BulgeSize/ShadowLengths2009_168.28az.geojson'\n#Flat Creek Location\nlat = 61.642563\nlon = -141.554821\n\n# Image 1: Planet 2011-09-08\ndate1 = datetime.datetime(2011, 9, 8, 21, 55, 20, tzinfo = datetime.timezone.utc)\nim1_elev = pysolar.solar.get_altitude(lat,lon,date1)\nim1_azimuth = pysolar.solar.get_azimuth(lat,lon,date1)\nprint(im1_elev, im1_azimuth)\nim1_md_elev = 33.83808 # elevation from image meta data\nim1_md_azimuth = 189.3034 # azimuth from image meta data\n\n# Image 2: Planet 2012-08-15\ndate2 = datetime.datetime(2012, 8, 15, 21, 54, 46, tzinfo = datetime.timezone.utc)\nim2_elev = pysolar.solar.get_altitude(lat,lon,date2)\nim2_azimuth = pysolar.solar.get_azimuth(lat,lon,date2)\nprint(im2_elev, im2_azimuth)\nm2_md_elev = 42.08575 # elevation from image meta data\nim2_md_azimuth = 187.78853 # azimuth from image meta data\n\n# Image 3: Ikonos 2009-07-13\ndate3 = datetime.datetime(2009, 7, 13, 20, 59, 31, tzinfo = datetime.timezone.utc)\nim3_elev = pysolar.solar.get_altitude(lat,lon,date3)\nim3_azimuth = pysolar.solar.get_azimuth(lat,lon,date3)\nprint(im3_elev, im3_azimuth)\nim3_md_elev = 49.59662 # elevation from image meta data\nim3_md_azimuth = 167.9135 # azimuth from image meta data\n\n# Read GeoJson with digitized shadow lengths and turn into pandas dataframe with\n# id, length, start and end coordinates. 
Pixel size is in meters\ndef calculate_bulge_elevation(shadow_lengths, sun_elevation, pixel_size):\n with open(shadow_lengths) as f:\n data = json.load(f)\n #make shadows dataframe\n shadows = pd.DataFrame()\n # extract values from geojson\n for feature in data['features']:\n id = feature['properties']['id']\n length = feature['properties']['Length']\n start_x = feature['geometry']['coordinates'][0][0]\n start_y = feature['geometry']['coordinates'][0][1]\n end_x = feature['geometry']['coordinates'][1][0]\n end_y = feature['geometry']['coordinates'][1][1]\n #constrain uncertainty by adding/subtracting 2*pixel_size to/from length\n p2p = length+(2*pixel_size) #plus two pixels\n m2p = length-(2*pixel_size) #minus two pixels\n line = {'id': [id], 'length':[length], 'start_x': [start_x], 'start_y': [start_y],\n 'end_x':[end_x], 'end_y':[end_y], 'p2p':[p2p], 'm2p':[m2p]}\n df = pd.DataFrame(line)\n shadows = shadows.append(df,ignore_index=True)\n # print shadows head\n shadows.head()\n # Extract local elevation:\n altitudes = []\n for l in range(0, shadows.shape[0]):\n row, col, alt = Taan_fjord_helpers.median_of_square('/Users/mistral/Documents/CUBoulder/Science/Sulzer/VolumeCalculations/NewProcessingSummer2018/2012_Howat_dxdydz_EPSG3338.tif',\n [shadows.start_x[l], shadows.start_y[l]], 'EPSG:3338', pixel_size)\n altitudes.append(alt)\n #append elevations to shadows dataframe\n shadows['altitude'] = altitudes\n #calculate heights\n incidence_angle = sun_elevation\n bulge_height = []\n bulge_height_min = []\n bulge_height_max = []\n for l in range(0, shadows.shape[0]):\n h = np.tan(math.radians(incidence_angle)) * shadows.length[l]\n h_min = np.tan(math.radians(incidence_angle)) * shadows.m2p[l]\n h_max = np.tan(math.radians(incidence_angle)) * shadows.p2p[l]\n bulge_height.append(h)\n bulge_height_min.append(h_min)\n bulge_height_max.append(h_max)\n #add bulge height starting point altitude\n shadows['bulge_elevation'] = bulge_height + shadows.altitude\n shadows['bulge_elevation_min'] = bulge_height_min + shadows.altitude\n shadows['bulge_elevation_max'] = bulge_height_max + shadows.altitude\n shadows = shadows.sort_values(by=['id'])\n return(shadows)\n\nbulge_elevation_2009 = calculate_bulge_elevation(bulge2009, im3_elev, 2)\nbulge_elevation_2011 = calculate_bulge_elevation(bulge2011, im1_elev, 5)\nbulge_elevation_2011_adj = calculate_bulge_elevation(bulge2011_adj, im1_elev,5)\n\n\nplt.plot(bulge_elevation_2009.end_x, bulge_elevation_2009.bulge_elevation, label = '2009')\nplt.fill_between(bulge_elevation_2009.end_x, bulge_elevation_2009.bulge_elevation_min,\n bulge_elevation_2009.bulge_elevation_max, alpha = 0.2)\nplt.plot(bulge_elevation_2011.end_x, bulge_elevation_2011.bulge_elevation, label = '2011')\nplt.fill_between(bulge_elevation_2011.end_x, bulge_elevation_2011.bulge_elevation_min,\n bulge_elevation_2011.bulge_elevation_max, alpha = 0.2)\nplt.plot(bulge_elevation_2011_adj.end_x, bulge_elevation_2011_adj.bulge_elevation, label = '2011_adj')\nplt.fill_between(bulge_elevation_2011_adj.end_x, bulge_elevation_2011_adj.bulge_elevation_min,\n bulge_elevation_2011_adj.bulge_elevation_max, alpha = 0.2)\nplt.ylim([2100,2300])\nplt.xlabel('x [UTM]')\nplt.ylabel('Elevation [m asl]')\nplt.title('Glacier surface elevation on Flat Creek Glacier bulge')\nplt.legend()\nplt.show()\n\n#calcuate baseline elevations for \"bottom\" of bulge\nbaseline = pd.read_csv('/Users/mistral/Documents/CUBoulder/Science/Sulzer/data/BulgeSize/baseline_elevations.csv',\n names = ['dist', 'x', 'y', 'h'])\n\nbaseline_func = 
scipy.stats.linregress(baseline.x, baseline.h)\nbaseline_2009 = baseline_func[1]+baseline_func[0]*bulge_elevation_2009.end_x\nbaseline_2011_adj = baseline_func[1]+baseline_func[0]*bulge_elevation_2011_adj.end_x\n\nplt.plot(bulge_elevation_2009.end_x, bulge_elevation_2009.bulge_elevation - baseline_2009)\nplt.plot(bulge_elevation_2011_adj.end_x, bulge_elevation_2011_adj.bulge_elevation - baseline_2011_adj)\nplt.xlabel('x [UTM]')\nplt.ylabel('thickness [m]')\nplt.title('Bulge thickness')\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
realblack0/eft
|
[
"60d9757c5d7f22c9928c511d76549f086d420e42"
] |
[
"demo/demo_bbox_detector.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport os\nimport sys\nimport numpy as np\nimport cv2\n\n\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\n\n##Yolo related\nyolo_path = './PyTorch-YOLOv3'\nsys.path.append(yolo_path)\ntry:\n from models import Darknet\n from utils.utils import non_max_suppression, rescale_boxes\n from utils.datasets import pad_to_square,resize\nexcept ImportError:\n print(\"Cannot find PyTorch-YOLOv3\")\n\n\n##lightweight human pose\n# pose2d_estimator_path = '/home/hjoo/codes_test/lightweight-human-pose-estimation.pytorch/'\n# pose2d_checkpoint = \"/home/hjoo/codes_test/lightweight-human-pose-estimation.pytorch/pretrain/checkpoint_iter_370000.pth\"\npose2d_checkpoint = \"./lightweight-human-pose-estimation.pytorch/checkpoint_iter_370000.pth\"\npose2d_estimator_path = './lightweight-human-pose-estimation.pytorch/'\nsys.path.append(pose2d_estimator_path)\ntry:\n from pose2d_models.with_mobilenet import PoseEstimationWithMobileNet\n from modules.load_state import load_state\n from val import normalize, pad_width\n from modules.pose import Pose, track_poses\n from modules.keypoints import extract_keypoints, group_keypoints\nexcept ImportError:\n print(\"Cannot find lightweight-human-pose-estimation.pytorch\")\n\n\ndef Load_Yolo(device):\n \n #Load Darknet \n yolo_model_def= os.path.join(yolo_path, 'config/yolov3-tiny.cfg')\n yolo_img_size = 416\n yolo_weights_path = os.path.join(yolo_path, 'weights/yolov3-tiny.weights')\n model = Darknet(yolo_model_def, img_size=yolo_img_size).to(device)\n\n if yolo_weights_path.endswith(\".weights\"):\n # Load darknet weights\n model.load_darknet_weights(yolo_weights_path)\n else:\n # Load checkpoint weights\n model.load_state_dict(torch.load(yolo_weights_path))\n\n model.eval() # Set in evaluation mode\n return model\n\ndef Yolo_detect(model, camInputFrame, img_size = 416, conf_thres = 0.8, nms_thres = 0.4):\n \n img = transforms.ToTensor()(Image.fromarray(camInputFrame))\n # Pad to square resolution\n img, _ = pad_to_square(img, 0)\n # Resize\n img = resize(img, img_size)\n img = img.unsqueeze(0) #(1,3,416.419)\n\n input_imgs = img.cuda()\n with torch.no_grad():\n detections = model(input_imgs)\n detections = non_max_suppression(detections, conf_thres, nms_thres)\n \n \n if detections is not None:\n detections = detections[0]\n if detections is not None:\n detections = rescale_boxes(detections, img_size, camInputFrame.shape[:2])\n return detections\n\ndef Yolo_detectHuman(model, camInputFrame):\n \n detections = Yolo_detect(model,camInputFrame, conf_thres = 0.1, nms_thres = 0.3) #Modified to be better with yolo tiny\n\n bbr_list=[] #minX, minY, width, height\n if detections is not None:\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:\n if cls_pred!=0:\n continue\n box_w = x2 - x1\n box_h = y2 - y1\n # camInputFrame = viewer2D.Vis_Bbox_minmaxPt(camInputFrame,[x1,y1], [x2,y2])\n bbr_list.append( np.array([x1,y1,box_w,box_h]))\n\n return bbr_list\n\n#Code from https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/demo.py\ndef infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,\n pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256):\n height, width, _ = img.shape\n scale = net_input_height_size / height\n\n scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n scaled_img = normalize(scaled_img, img_mean, img_scale)\n min_dims = [net_input_height_size, max(scaled_img.shape[1], 
net_input_height_size)]\n padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)\n\n tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()\n if not cpu:\n tensor_img = tensor_img.cuda()\n\n stages_output = net(tensor_img)\n\n stage2_heatmaps = stages_output[-2]\n heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))\n heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)\n\n stage2_pafs = stages_output[-1]\n pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))\n pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)\n\n return heatmaps, pafs, scale, pad\n\n#Code from https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/demo.py\ndef pose2d_detectHuman(net, img, height_size =256, track = 1, smooth=1, bVis =True):\n\n stride = 8\n upsample_ratio = 4\n num_keypoints = Pose.num_kpts\n previous_poses = []\n delay = 33\n if True:\n # for img in image_provider:\n orig_img = img.copy()\n heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu=not torch.cuda.is_available())\n\n total_keypoints_num = 0\n all_keypoints_by_type = []\n for kpt_idx in range(num_keypoints): # 19th for bg\n total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)\n\n pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)\n for kpt_id in range(all_keypoints.shape[0]):\n all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale\n all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale\n current_poses = []\n for n in range(len(pose_entries)):\n if len(pose_entries[n]) == 0:\n continue\n pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1\n for kpt_id in range(num_keypoints):\n if pose_entries[n][kpt_id] != -1.0: # keypoint was found\n pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])\n pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])\n pose = Pose(pose_keypoints, pose_entries[n][18])\n current_poses.append(pose)\n\n if bVis:\n if track:\n track_poses(previous_poses, current_poses, smooth=smooth)\n previous_poses = current_poses\n for pose in current_poses:\n pose.draw(img)\n img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)\n for pose in current_poses:\n cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),\n (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))\n if track:\n cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))\n cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)\n key = cv2.waitKey(delay)\n if key == 27: # esc\n return\n elif key == 112: # 'p'\n if delay == 33:\n delay = 0\n else:\n delay = 33\n\n return current_poses\n\ndef Load_pose2d(device):\n \"\"\"\n This one runs in CPU\n \"\"\"\n net = PoseEstimationWithMobileNet()\n checkpoint = torch.load(pose2d_checkpoint, map_location='cpu')\n load_state(net, checkpoint)\n net = net.eval()\n net = net.to(device)\n\n return net\n\nclass BodyBboxDetector:\n def __init__(self, method=\"2dpose\", device = torch.device('cuda')):\n \"\"\"\n args:\n method: \"yolo\" or \"2dpose\"\n \"\"\"\n self.method = method\n\n if method ==\"yolo\":\n print(\"Loading Yolo Model...\")\n self.model = 
Load_Yolo(device)\n print(\"Done\")\n elif method==\"2dpose\":\n\n print(\"Loading Pose Estimation Model...\")\n self.model = Load_pose2d(device)\n print(\"Done\")\n else :\n print(\"invalid method\")\n assert False\n\n self.bboxXYWH_list = None\n \n def detectBbox(self, img_bgr):\n \"\"\"\n args:\n img_bgr: Raw image with BGR order (cv2 default). Currently assumes BGR #TODO: make sure the input type of each method\n output:\n bboxXYWH_list: list of bboxes. Each bbox has XYWH form (minX,minY,width,height)\n\n \"\"\"\n if self.method==\"yolo\":\n bboxXYWH_list = Yolo_detectHuman(self.model, img_bgr)\n elif self.method==\"2dpose\":\n poses_from2dPoseEst = pose2d_detectHuman(self.model, img_bgr, bVis=False)\n bboxXYWH_list =[]\n for poseEst in poses_from2dPoseEst:\n bboxXYWH_list.append(np.array (poseEst.bbox))\n else:\n print(\"Unknown bbox extimation method\")\n assert False\n\n self.bboxXYWH_list = bboxXYWH_list #Save this as member function\n return bboxXYWH_list\n"
] |
[
[
"torch.load",
"torch.from_numpy",
"numpy.ones",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
happykbs/udacity-drl-bongsang
|
[
"4a5f9c0698543cf80e83020d333cb8589a179243"
] |
[
"p2_continuous-control/ddpg/ddpg_agent.py"
] |
[
"import numpy as np\nimport random\nimport copy\nfrom collections import namedtuple, deque\n\nfrom .model import Actor, Critic\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nBUFFER_SIZE = 100000 #int(1e5) # replay buffer size\nBATCH_SIZE = 256 #128 # minibatch size\nGAMMA = 0.9 #0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR_ACTOR = 0.001 #1e-4 # learning rate of the actor \nLR_CRITIC = 0.001 #1e-3 # learning rate of the critic\nWEIGHT_DECAY = 1e-6 #0 # L2 weight decay\n\nSIGMA = 0.1\n# GPU = 0 # GPU ID\n# device = torch.device(f\"cuda:{GPU}\" if torch.cuda.is_available() else \"cpu\")\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nclass Agent():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n \n def __init__(self, state_size, action_size, random_seed):\n \"\"\"Initialize an Agent object.\n \n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n random_seed (int): random seed\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(random_seed)\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(state_size, action_size, random_seed).to(device)\n self.actor_target = Actor(state_size, action_size, random_seed).to(device)\n self.hard_copy(self.actor_target, self.actor_local) ## !\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)\n\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(state_size, action_size, random_seed).to(device)\n self.critic_target = Critic(state_size, action_size, random_seed).to(device)\n self.hard_copy(self.critic_target, self.critic_local) ## !\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)\n\n # Noise process\n self.noise = OUNoise(action_size, random_seed)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)\n \n def hard_copy(self, target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)\n \n def step(self, state, action, reward, next_state, done):\n \"\"\"Save experience in replay memory, and use random sample from buffer to learn.\"\"\"\n # Save experience / reward\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn, if enough samples are available in memory\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)\n\n def act(self, state, add_noise=True):\n \"\"\"Returns actions for given state as per current policy.\"\"\"\n state = torch.from_numpy(state).float().to(device)\n self.actor_local.eval()\n with torch.no_grad():\n action = self.actor_local(state).cpu().data.numpy()\n self.actor_local.train()\n if add_noise:\n action += self.noise.sample()\n return np.clip(action, -1, 1)\n\n def reset(self):\n self.noise.reset()\n\n def learn(self, experiences, gamma):\n \"\"\"Update policy and value parameters using given batch of experience tuples.\n Q_targets = r + γ * critic_target(next_state, actor_target(next_state))\n where:\n actor_target(state) -> action\n critic_target(state, action) -> Q-value\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n # ---------------------------- update critic 
---------------------------- #\n # Get predicted next-state actions and Q values from target models\n actions_next = self.actor_target(next_states)\n Q_targets_next = self.critic_target(next_states, actions_next)\n # Compute Q targets for current states (y_i)\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n # Compute critic loss\n Q_expected = self.critic_local(states, actions)\n critic_loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # ---------------------------- update actor ---------------------------- #\n # Compute actor loss\n actions_pred = self.actor_local(states)\n actor_loss = -self.critic_local(states, actions_pred).mean()\n # Minimize the loss\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # ----------------------- update target networks ----------------------- #\n self.soft_update(self.critic_local, self.critic_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU) \n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model: PyTorch model (weights will be copied from)\n target_model: PyTorch model (weights will be copied to)\n tau (float): interpolation parameter \n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\nclass OUNoise:\n \"\"\"Ornstein-Uhlenbeck process.\"\"\"\n\n def __init__(self, size, seed, mu=0., theta=0.15, sigma=SIGMA):\n \"\"\"Initialize parameters and noise process.\"\"\"\n self.mu = mu * np.ones(size)\n self.theta = theta\n self.sigma = sigma\n self.seed = random.seed(seed)\n self.reset()\n\n def reset(self):\n \"\"\"Reset the internal state (= noise) to mean (mu).\"\"\"\n self.state = copy.copy(self.mu)\n\n def sample(self):\n \"\"\"Update internal state and return it as a noise sample.\"\"\"\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state\n\nclass ReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n \"\"\"Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n \"\"\"\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n \n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n \n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n experiences = random.sample(self.memory, k=self.batch_size)\n\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)\n next_states = 
torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)\n\n return (states, actions, rewards, next_states, dones)\n\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)"
] |
[
[
"numpy.clip",
"torch.from_numpy",
"numpy.ones",
"torch.nn.functional.mse_loss",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
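The record above embeds a DDPG agent whose target networks are blended toward the local networks with the soft-update rule θ_target = τ·θ_local + (1 − τ)·θ_target (see `Agent.soft_update` in the code cell). As a quick illustration, here is that update exercised in isolation; the two `nn.Linear` modules, their sizes, and the seed are throwaway stand-ins chosen for the sketch and are not taken from the source.

```python
import torch
from torch import nn


def soft_update(local_model: nn.Module, target_model: nn.Module, tau: float) -> None:
    # theta_target = tau * theta_local + (1 - tau) * theta_target
    for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
        target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)


if __name__ == "__main__":
    torch.manual_seed(0)
    local, target = nn.Linear(4, 2), nn.Linear(4, 2)  # toy stand-ins for the actor/critic pairs
    soft_update(local, target, tau=1e-3)              # same TAU as the agent above
    # repeated calls make the target parameters slowly track the local ones
```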
zawlin/multi-modal-regression
|
[
"61aa6c066834ab1373275decc38e361db5c2cf04",
"61aa6c066834ab1373275decc38e361db5c2cf04",
"61aa6c066834ab1373275decc38e361db5c2cf04",
"61aa6c066834ab1373275decc38e361db5c2cf04"
] |
[
"learnCategorizationModel.py",
"zl_simplebd.py",
"learnRiemannianBDModel.py",
"learnElhoseinyRegressionModel.py"
] |
[
"import torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom helperFunctions import classes, get_accuracy\nfrom dataGenerators import ImagesAll, TestImages, my_collate\nfrom featureModels import resnet_model\n\nimport numpy as np\nimport scipy.io as spio\nimport gc\nimport os\nimport progressbar\nimport time\nimport sys\n\nif len(sys.argv) > 1:\n\tos.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]\n\n\n# relevant paths\ntrain_path = 'data/flipped_new/train/'\ntest_path = 'data/flipped_new/test/'\n\n# save things here\nsave_str = 'category_all_10'\nresults_file = os.path.join('results', save_str)\nmodel_file = os.path.join('models', save_str + '.tar')\nplots_file = os.path.join('plots', save_str)\n\n# relevant variables\nnum_workers = 8\nnum_classes = len(classes)\ninit_lr = 0.0001\nnum_epochs = 50\nN0 = 2048\nbatch_size = 8\n\n# datasets\ntrain_data = ImagesAll(train_path, 'real')\ntest_data = TestImages(test_path)\n# setup data loaders\ntrain_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, collate_fn=my_collate, num_workers=num_workers)\ntest_loader = DataLoader(test_data, batch_size=32)\nprint('Train: {0} \\t Test: {1}'.format(len(train_loader), len(test_loader)))\n\n\n# MODEL\n# my model for pose estimation: feature model + 1layer pose model x 12\nclass my_model(nn.Module):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.feature_model = resnet_model('resnet50', 'layer4').cuda()\n\t\tself.fc = nn.Linear(N0, num_classes).cuda()\n\n\tdef forward(self, x):\n\t\tx = self.feature_model(x)\n\t\tx = self.fc(x)\n\t\treturn x\n\n\nmodel = my_model()\nfor param in model.feature_model.parameters():\n\tparam.requires_grad = False\nmodel.eval()\n# print(model)\noptimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=init_lr)\nscheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda ep: 1./(1. 
+ ep))\ncriterion = nn.CrossEntropyLoss().cuda()\n\n\n# OPTIMIZATION functions\ndef training():\n\t# model.train()\n\tbar = progressbar.ProgressBar(max_value=len(train_loader))\n\tfor i, sample in enumerate(train_loader):\n\t\t# forward steps\n\t\txdata = Variable(sample['xdata'].cuda())\n\t\tydata = Variable(sample['label'].cuda()).squeeze()\n\t\toutput = model(xdata)\n\t\tloss = criterion(output, ydata)\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\t# store\n\t\tbar.update(i)\n\t\t# cleanup\n\t\tdel xdata, ydata, output, loss, sample\n\t\tgc.collect()\n\ttrain_loader.dataset.shuffle_images()\n\n\ndef testing():\n\t# model.eval()\n\typred = []\n\tytrue = []\n\tfor i, sample in enumerate(test_loader):\n\t\txdata = Variable(sample['xdata'].cuda())\n\t\toutput = model(xdata)\n\t\ttmp_labels = torch.argmax(output, dim=1)\n\t\typred.append(tmp_labels.data.cpu().numpy())\n\t\tytrue.append(sample['label'].squeeze().numpy())\n\t\tdel xdata, output, sample, tmp_labels\n\t\tgc.collect()\n\typred = np.concatenate(ypred)\n\tytrue = np.concatenate(ytrue)\n\t# model.train()\n\treturn ytrue, ypred\n\n\ndef save_checkpoint(filename):\n\ttorch.save(model.state_dict(), filename)\n\n\nfor epoch in range(num_epochs):\n\ttic = time.time()\n\tscheduler.step()\n\t# training step\n\ttraining()\n\t# save model at end of epoch\n\tsave_checkpoint(model_file)\n\t# evaluate\n\tygt, ypred = testing()\n\tprint('Acc: {0}'.format(get_accuracy(ygt, ypred, num_classes)))\n\tspio.savemat(results_file, {'ygt': ygt, 'ypred': ypred})\n\t# time and output\n\ttoc = time.time() - tic\n\tprint('Epoch: {0} in time {1}s'.format(epoch, toc))\n\t# cleanup\n\tgc.collect()\n\n# evaluate the model\nygt, ypred = testing()\nprint('Acc: {0}'.format(get_accuracy(ygt, ypred, num_classes)))\nspio.savemat(results_file, {'ygt': ygt, 'ypred': ypred})\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nGeodesic Bin and Delta model for the axis-angle representation\n\"\"\"\n\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom dataGenerators import TestImages, my_collate\nfrom binDeltaGenerators import GBDGenerator\nfrom axisAngle import get_error2, geodesic_loss\nfrom binDeltaModels import OneBinDeltaModel, OneDeltaPerBinModel\nfrom helperFunctions import classes\n\nimport numpy as np\nimport math\nimport scipy.io as spio\nimport gc\nimport os\nimport time\nimport progressbar\nimport pickle\nimport argparse\nfrom tensorboardX import SummaryWriter\n\nparser = argparse.ArgumentParser(description='Geodesic Bin & Delta Model')\nparser.add_argument('--gpu_id', type=str, default='0')\nparser.add_argument('--render_path', type=str, default='data/renderforcnn/')\nparser.add_argument('--augmented_path', type=str, default='data/augmented2/')\nparser.add_argument('--pascal3d_path', type=str, default='data/flipped_new/test/')\nparser.add_argument('--save_str', type=str, default='model')\nparser.add_argument('--dict_size', type=int, default=16)\nparser.add_argument('--num_workers', type=int, default=2)\nparser.add_argument('--feature_network', type=str, default='resnet')\nparser.add_argument('--N0', type=int, default=2048)\nparser.add_argument('--N1', type=int, default=1000)\nparser.add_argument('--N2', type=int, default=500)\nparser.add_argument('--N3', type=int, default=100)\nparser.add_argument('--init_lr', type=float, default=1e-4)\nparser.add_argument('--num_epochs', type=int, default=3)\nparser.add_argument('--max_iterations', type=float, default=np.inf)\nparser.add_argument('--multires', type=bool, default=False)\nargs = parser.parse_args()\nprint(args)\n# assign GPU\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n\n# save stuff here\nresults_file = os.path.join('results', args.save_str)\nmodel_file = os.path.join('models', args.save_str + '.tar')\nplots_file = os.path.join('plots', args.save_str)\nlog_dir = os.path.join('logs', args.save_str)\n\n# kmeans data\nkmeans_file = 'data/kmeans_dictionary_axis_angle_' + str(args.dict_size) + '.pkl'\nkmeans = pickle.load(open(kmeans_file, 'rb'))\nkmeans_dict = kmeans.cluster_centers_\ncluster_centers_ = Variable(torch.from_numpy(kmeans_dict).float()).cuda()\nnum_clusters = kmeans.n_clusters\n\n# relevant variables\nndim = 3\nnum_classes = len(classes)\n\n# loss\nmse_loss = nn.MSELoss().cuda()\nce_loss = nn.CrossEntropyLoss().cuda()\ngve_loss = geodesic_loss().cuda()\n\n# DATA\n# datasets\nreal_data = GBDGenerator(args.augmented_path, 'real', kmeans_file)\nrender_data = GBDGenerator(args.render_path, 'render', kmeans_file)\ntest_data = TestImages(args.pascal3d_path)\n# setup data loaders\nreal_loader = DataLoader(real_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\nrender_loader = DataLoader(render_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\ntest_loader = DataLoader(test_data, batch_size=32)\nprint('Real: {0} \\t Render: {1} \\t Test: {2}'.format(len(real_loader), len(render_loader), len(test_loader)))\n\nif np.isinf(args.max_iterations):\n\tmax_iterations = min(len(real_loader), len(render_loader))\nelse:\n\tmax_iterations = args.max_iterations\n\n# my_model\nif not args.multires:\n\tmodel = OneBinDeltaModel(args.feature_network, num_classes, num_clusters, args.N0, args.N1, args.N2, 
ndim)\nelse:\n\tmodel = OneDeltaPerBinModel(args.feature_network, num_classes, num_clusters, args.N0, args.N1, args.N2, args.N3, ndim)\n\n# print(model)\n# loss and optimizer\noptimizer = optim.Adam(model.parameters(), lr=args.init_lr)\n# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)\n# store stuff\nwriter = SummaryWriter(log_dir)\ncount = 0\nval_loss = []\ns = 0\n\n\n# OPTIMIZATION functions\ndef training_init():\n\tglobal count, val_loss, s\n\tmodel.train()\n\tbar = progressbar.ProgressBar(max_value=max_iterations)\n\tfor i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):\n\t\t# forward steps\n\t\t# outputs\n\t\txdata_real = Variable(sample_real['xdata'].cuda())\n\t\tlabel_real = Variable(sample_real['label'].cuda())\n\t\tydata_real = [Variable(sample_real['ydata_bin'].cuda()), Variable(sample_real['ydata_res'].cuda())]\n\t\toutput_real = model(xdata_real, label_real)\n\t\txdata_render = Variable(sample_render['xdata'].cuda())\n\t\tlabel_render = Variable(sample_render['label'].cuda())\n\t\tydata_render = [Variable(sample_render['ydata_bin'].cuda()), Variable(sample_render['ydata_res'].cuda())]\n\t\toutput_render = model(xdata_render, label_render)\n\t\t# loss\n\t\tydata_bin = torch.cat((ydata_real[0], ydata_render[0]))\n\t\tydata_res = torch.cat((ydata_real[1], ydata_render[1]))\n\t\toutput_bin = torch.cat((output_real[0], output_render[0]))\n\t\toutput_res = torch.cat((output_real[1], output_render[1]))\n\t\tLc = ce_loss(output_bin, ydata_bin)\n\t\tLr = mse_loss(output_res, ydata_res)\n\t\tloss = Lc + 0.5*math.exp(-2*s)*Lr + s\n\t\t# parameter updates\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\ts = 0.5*math.log(Lr)\n\n\t\t# store\n\t\tcount += 1\n\t\twriter.add_scalar('train_loss', loss.item(), count)\n\t\twriter.add_scalar('alpha', 0.5*math.exp(-2*s), count)\n\t\tif i % 1000 == 0:\n\t\t\tytest, yhat_test, test_labels = testing()\n\t\t\tspio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})\n\t\t\ttmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)\n\t\t\twriter.add_scalar('val_loss', tmp_val_loss, count)\n\t\t\tval_loss.append(tmp_val_loss)\n\t\t# cleanup\n\t\tdel xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render\n\t\tdel ydata_bin, ydata_res, output_bin, output_res\n\t\tdel output_real, output_render, loss, sample_real, sample_render\n\t\tbar.update(i)\n\t\t# stop\n\t\tif i == max_iterations:\n\t\t\tbreak\n\trender_loader.dataset.shuffle_images()\n\treal_loader.dataset.shuffle_images()\n\n\ndef training():\n\tglobal count, val_loss, s\n\tmodel.train()\n\tbar = progressbar.ProgressBar(max_value=max_iterations)\n\tfor i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):\n\t\t# forward steps\n\t\t# output\n\t\txdata_real = Variable(sample_real['xdata'].cuda())\n\t\tlabel_real = Variable(sample_real['label'].cuda())\n\t\tydata_real = [Variable(sample_real['ydata_bin'].cuda()), Variable(sample_real['ydata'].cuda())]\n\t\toutput_real = model(xdata_real, label_real)\n\t\txdata_render = Variable(sample_render['xdata'].cuda())\n\t\tlabel_render = Variable(sample_render['label'].cuda())\n\t\tydata_render = [Variable(sample_render['ydata_bin'].cuda()), Variable(sample_render['ydata'].cuda())]\n\t\toutput_render = model(xdata_render, label_render)\n\t\t# loss\n\t\tydata_bin = torch.cat((ydata_real[0], ydata_render[0]))\n\t\tydata = torch.cat((ydata_real[1], ydata_render[1]))\n\t\toutput_bin = 
torch.cat((output_real[0], output_render[0]))\n\t\t_, ind = torch.max(output_bin, dim=1)\n\t\ty = torch.index_select(cluster_centers_, 0, ind)\n\t\toutput = y + torch.cat((output_real[1], output_render[1]))\n\t\tLc = ce_loss(output_bin, ydata_bin)\n\t\tLr = gve_loss(output, ydata)\n\t\tloss = Lc + math.exp(-s)*Lr + s\n\t\t# parameter updates\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\ts = math.log(Lr)\n\t\t# store\n\t\tcount += 1\n\t\twriter.add_scalar('train_loss', loss.item(), count)\n\t\twriter.add_scalar('alpha', math.exp(-s), count)\n\t\tif i % 1000 == 0:\n\t\t\tytest, yhat_test, test_labels = testing()\n\t\t\tspio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})\n\t\t\ttmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)\n\t\t\twriter.add_scalar('val_loss', tmp_val_loss, count)\n\t\t\tval_loss.append(tmp_val_loss)\n\t\t# cleanup\n\t\tdel xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render\n\t\tdel ydata_bin, ydata, output_bin, output\n\t\tdel output_real, output_render, sample_real, sample_render, loss\n\t\tbar.update(i)\n\t\t# stop\n\t\tif i == max_iterations:\n\t\t\tbreak\n\trender_loader.dataset.shuffle_images()\n\treal_loader.dataset.shuffle_images()\n\n\ndef testing():\n\tmodel.eval()\n\typred = []\n\tytrue = []\n\tlabels = []\n\tfor i, sample in enumerate(test_loader):\n\t\txdata = Variable(sample['xdata'].cuda())\n\t\tlabel = Variable(sample['label'].cuda())\n\t\toutput = model(xdata, label)\n\t\typred_bin = np.argmax(output[0].data.cpu().numpy(), axis=1)\n\t\typred_res = output[1].data.cpu().numpy()\n\t\typred.append(kmeans_dict[ypred_bin, :] + ypred_res)\n\t\tytrue.append(sample['ydata'].numpy())\n\t\tlabels.append(sample['label'].numpy())\n\t\tdel xdata, label, output, sample\n\t\tgc.collect()\n\typred = np.concatenate(ypred)\n\tytrue = np.concatenate(ytrue)\n\tlabels = np.concatenate(labels)\n\tmodel.train()\n\treturn ytrue, ypred, labels\n\n\ndef save_checkpoint(filename):\n\ttorch.save(model.state_dict(), filename)\n\n\n# initialization\ntraining_init()\nytest, yhat_test, test_labels = testing()\nprint('\\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))\n\ns = 0 # reset\nfor epoch in range(args.num_epochs):\n\ttic = time.time()\n\t# scheduler.step()\n\t# training step\n\ttraining()\n\t# save model at end of epoch\n\tsave_checkpoint(model_file)\n\t# validation\n\tytest, yhat_test, test_labels = testing()\n\tprint('\\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))\n\t# time and output\n\ttoc = time.time() - tic\n\tprint('Epoch: {0} done in time {1}s'.format(epoch, toc))\n\t# cleanup\n\tgc.collect()\nwriter.close()\nval_loss = np.stack(val_loss)\nspio.savemat(plots_file, {'val_loss': val_loss})\n\n# evaluate the model\nytest, yhat_test, test_labels = testing()\nprint('\\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))\nspio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})",
"# -*- coding: utf-8 -*-\n\"\"\"\nRiemannian Bin and Delta model for the axis-angle representation\n\"\"\"\n\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nfrom dataGenerators import TestImages, my_collate\nfrom binDeltaGenerators import RBDGenerator\nfrom axisAngle import get_error2, get_R, get_y\nfrom binDeltaModels import OneBinDeltaModel, OneDeltaPerBinModel\nfrom helperFunctions import classes, eps\n\nimport numpy as np\nimport scipy.io as spio\nimport math\nimport gc\nimport os\nimport time\nimport progressbar\nimport pickle\nimport argparse\nfrom tensorboardX import SummaryWriter\n\nparser = argparse.ArgumentParser(description='Riemannian Bin & Delta Model')\nparser.add_argument('--gpu_id', type=str, default='0')\nparser.add_argument('--render_path', type=str, default='data/renderforcnn/')\nparser.add_argument('--augmented_path', type=str, default='data/augmented2/')\nparser.add_argument('--pascal3d_path', type=str, default='data/flipped_new/test')\nparser.add_argument('--save_str', type=str)\nparser.add_argument('--dict_size', type=int, default=200)\nparser.add_argument('--num_workers', type=int, default=4)\nparser.add_argument('--feature_network', type=str, default='resnet')\nparser.add_argument('--N0', type=int, default=2048)\nparser.add_argument('--N1', type=int, default=1000)\nparser.add_argument('--N2', type=int, default=500)\nparser.add_argument('--N3', type=int, default=100)\nparser.add_argument('--init_lr', type=float, default=1e-4)\nparser.add_argument('--num_epochs', type=int, default=3)\nparser.add_argument('--max_iterations', type=float, default=np.inf)\nparser.add_argument('--multires', type=bool, default=False)\nargs = parser.parse_args()\nprint(args)\n# assign GPU\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n\n# save stuff here\nresults_file = os.path.join('results', args.save_str)\nmodel_file = os.path.join('models', args.save_str + '.tar')\nplots_file = os.path.join('plots', args.save_str)\nlog_dir = os.path.join('logs', args.save_str)\n\n# kmeans data\nkmeans_file = 'data/kmeans_dictionary_axis_angle_' + str(args.dict_size) + '.pkl'\nkmeans = pickle.load(open(kmeans_file, 'rb'))\nnum_clusters = kmeans.n_clusters\nrotations_dict = np.stack([get_R(kmeans.cluster_centers_[i]) for i in range(kmeans.n_clusters)])\n\n# relevant variables\nndim = 3\nnum_classes = len(classes)\n\n\n# Loss\nclass riemannian_exp(nn.Module):\n\tdef __init__(self, pose_dict):\n\t\tsuper().__init__()\n\t\tself.key_poses = torch.from_numpy(pose_dict).float().cuda()\n\t\tproj = np.array([[0,0,0,0,0,-1,0,1,0], [0,0,1,0,0,0,-1,0,0], [0,-1,0,1,0,0,0,0,0]])\n\t\tself.proj = torch.from_numpy(proj).float().cuda()\n\t\tself.Id = torch.eye(3).float().cuda()\n\n\tdef forward(self, ybin, yres):\n\t\t_, ind = torch.max(ybin, dim=1)\n\t\tangle = torch.norm(yres, 2, 1)\n\t\taxis = F.normalize(yres)\n\t\taxis = torch.mm(axis, self.proj).view(-1, 3, 3)\n\t\ty = torch.stack([self.Id + torch.sin(angle[i])*axis[i] + (1.0 - torch.cos(angle[i]))*torch.mm(axis[i], axis[i]) for i in range(angle.size(0))])\n\t\ty = torch.bmm(torch.index_select(self.key_poses, 0, ind), y)\n\t\treturn y\n\n\nclass geodesic_loss(nn.Module):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\tdef forward(self, ypred, ytrue):\n\t\t# geodesic loss between predicted and gt rotations\n\t\ttmp = torch.stack([torch.trace(torch.mm(ypred[i].t(), ytrue[i])) for i in range(ytrue.size(0))])\n\t\tangle = torch.acos(torch.clamp((tmp - 
1.0) / 2, -1 + eps, 1 - eps))\n\t\treturn torch.mean(angle)\n\n\nmse_loss = nn.MSELoss().cuda()\nce_loss = nn.CrossEntropyLoss().cuda()\nmy_exp = riemannian_exp(rotations_dict).cuda()\ngve_loss = geodesic_loss().cuda()\n\n# DATA\n# datasets\nreal_data = RBDGenerator(args.augmented_path, 'real', kmeans_file)\nrender_data = RBDGenerator(args.render_path, 'render', kmeans_file)\ntest_data = TestImages(args.pascal3d_path)\n# setup data loaders\nreal_loader = DataLoader(real_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\nrender_loader = DataLoader(render_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\ntest_loader = DataLoader(test_data, batch_size=32)\nprint('Real: {0} \\t Render: {1} \\t Test: {2}'.format(len(real_loader), len(render_loader), len(test_loader)))\n\nif np.isinf(args.max_iterations):\n\tmax_iterations = min(len(real_loader), len(render_loader))\nelse:\n\tmax_iterations = args.max_iterations\n\n# my_model\nif not args.multires:\n\tmodel = OneBinDeltaModel(args.feature_network, num_classes, num_clusters, args.N0, args.N1, args.N2, ndim)\nelse:\n\tmodel = OneDeltaPerBinModel(args.feature_network, num_classes, num_clusters, args.N0, args.N1, args.N2, args.N3, ndim)\n\n# print(model)\n# loss and optimizer\noptimizer = optim.Adam(model.parameters(), lr=args.init_lr)\n# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)\n# store stuff\nwriter = SummaryWriter(log_dir)\ncount = 0\nval_loss = []\ns = 0\n\n\n# OPTIMIZATION functions\ndef training_init():\n\tglobal count, val_loss, s\n\tmodel.train()\n\tbar = progressbar.ProgressBar(max_value=max_iterations)\n\tfor i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):\n\t\t# forward steps\n\t\txdata_real = Variable(sample_real['xdata'].cuda())\n\t\tlabel_real = Variable(sample_real['label'].cuda())\n\t\tydata_real = [Variable(sample_real['ydata_bin'].cuda()), Variable(sample_real['ydata_res'].cuda())]\n\t\toutput_real = model(xdata_real, label_real)\n\t\txdata_render = Variable(sample_render['xdata'].cuda())\n\t\tlabel_render = Variable(sample_render['label'].cuda())\n\t\tydata_render = [Variable(sample_render['ydata_bin'].cuda()), Variable(sample_render['ydata_res'].cuda())]\n\t\toutput_render = model(xdata_render, label_render)\n\t\t# loss\n\t\tydata_bin = torch.cat((ydata_real[0], ydata_render[0]))\n\t\tydata_res = torch.cat((ydata_real[1], ydata_render[1]))\n\t\toutput_bin = torch.cat((output_real[0], output_render[0]))\n\t\toutput_res = torch.cat((output_real[1], output_render[1]))\n\t\tLc = ce_loss(output_bin, ydata_bin)\n\t\tLr = mse_loss(output_res, ydata_res)\n\t\tloss = Lc + 0.5*math.exp(-2*s)*Lr + s\n\t\t# parameter updates\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\ts = 0.5*math.log(Lr)\n\t\t# store\n\t\tcount += 1\n\t\twriter.add_scalar('train_loss', loss.item(), count)\n\t\twriter.add_scalar('alpha', 0.5*math.exp(-2*s), count)\n\t\tif i % 1000 == 0:\n\t\t\tytest, yhat_test, test_labels = testing()\n\t\t\tspio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})\n\t\t\ttmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)\n\t\t\twriter.add_scalar('val_loss', tmp_val_loss, count)\n\t\t\tval_loss.append(tmp_val_loss)\n\t\t# cleanup\n\t\tdel xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render\n\t\tdel ydata_bin, ydata_res, output_bin, 
output_res\n\t\tdel output_real, output_render, loss, sample_real, sample_render\n\t\tbar.update(i)\n\t\t# stop\n\t\tif i == max_iterations:\n\t\t\tbreak\n\trender_loader.dataset.shuffle_images()\n\treal_loader.dataset.shuffle_images()\n\n\ndef training():\n\tglobal count, val_loss, s\n\tmodel.train()\n\tbar = progressbar.ProgressBar(max_value=max_iterations)\n\tfor i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):\n\t\t# forward steps\n\t\txdata_real = Variable(sample_real['xdata'].cuda())\n\t\tlabel_real = Variable(sample_real['label'].cuda())\n\t\tydata_real = [Variable(sample_real['ydata_bin'].cuda()), Variable(sample_real['ydata_rot'].cuda())]\n\t\toutput_real = model(xdata_real, label_real)\n\t\txdata_render = Variable(sample_render['xdata'].cuda())\n\t\tlabel_render = Variable(sample_render['label'].cuda())\n\t\tydata_render = [Variable(sample_render['ydata_bin'].cuda()), Variable(sample_render['ydata_rot'].cuda())]\n\t\toutput_render = model(xdata_render, label_render)\n\t\t# loss\n\t\tydata_bin = torch.cat((ydata_real[0], ydata_render[0]))\n\t\tydata_rot = torch.cat((ydata_real[1], ydata_render[1]))\n\t\toutput_bin = torch.cat((output_real[0], output_render[0]))\n\t\toutput_res = torch.cat((output_real[1], output_render[1]))\n\t\toutput_rot = my_exp(output_bin, output_res)\n\t\tLc = ce_loss(output_bin, ydata_bin)\n\t\tLr = gve_loss(output_rot, ydata_rot)\n\t\tloss = Lc + math.exp(-s)*Lr + s\n\t\t# parameter updates\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\ts = math.log(Lr)\n\t\t# store\n\t\tcount += 1\n\t\twriter.add_scalar('train_loss', loss.item(), count)\n\t\twriter.add_scalar('alpha', math.exp(-s), count)\n\t\tif i % 1000 == 0:\n\t\t\tytest, yhat_test, test_labels = testing()\n\t\t\tspio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})\n\t\t\ttmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)\n\t\t\twriter.add_scalar('val_loss', tmp_val_loss, count)\n\t\t\tval_loss.append(tmp_val_loss)\n\t\t# cleanup\n\t\tdel xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render\n\t\tdel output_bin, output_res, output_rot, ydata_rot, ydata_bin\n\t\tdel output_real, output_render, sample_real, sample_render, loss\n\t\tbar.update(i)\n\t\t# stop\n\t\tif i == max_iterations:\n\t\t\tbreak\n\trender_loader.dataset.shuffle_images()\n\treal_loader.dataset.shuffle_images()\n\n\ndef testing():\n\tmodel.eval()\n\typred = []\n\tytrue = []\n\tlabels = []\n\tfor i, sample in enumerate(test_loader):\n\t\txdata = Variable(sample['xdata'].cuda())\n\t\tlabel = Variable(sample['label'].cuda())\n\t\toutput = model(xdata, label)\n\t\typred_bin = np.argmax(output[0].data.cpu().numpy(), axis=1)\n\t\typred_res = output[1].data.cpu().numpy()\n\t\ty = [get_y(np.dot(rotations_dict[ypred_bin[j]], get_R(ypred_res[j]))) for j in range(ypred_bin.shape[0])]\n\t\typred.append(y)\n\t\tytrue.append(sample['ydata'].numpy())\n\t\tlabels.append(sample['label'].numpy())\n\t\tdel xdata, label, output, sample\n\t\tgc.collect()\n\typred = np.concatenate(ypred)\n\tytrue = np.concatenate(ytrue)\n\tlabels = np.concatenate(labels)\n\tmodel.train()\n\treturn ytrue, ypred, labels\n\n\ndef save_checkpoint(filename):\n\ttorch.save(model.state_dict(), filename)\n\n\n# initialization\ntraining_init()\nytest, yhat_test, test_labels = testing()\nprint('\\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))\n\nfor epoch in range(args.num_epochs):\n\ttic = time.time()\n\t# 
scheduler.step()\n\t# training step\n\ttraining()\n\t# save model at end of epoch\n\tsave_checkpoint(model_file)\n\t# validation\n\tytest, yhat_test, test_labels = testing()\n\tprint('\\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))\n\t# time and output\n\ttoc = time.time() - tic\n\tprint('Epoch: {0} done in time {1}s'.format(epoch, toc))\n\t# cleanup\n\tgc.collect()\nwriter.close()\nval_loss = np.stack(val_loss)\nspio.savemat(plots_file, {'val_loss': val_loss})\n\n# evaluate the model\nytest, yhat_test, test_labels = testing()\nprint('\\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))\nspio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nElhoseiny model based on Geodesic Regression model R_G\n\"\"\"\n\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nfrom dataGenerators import ImagesAll, TestImages, my_collate\nfrom axisAngle import get_error2, geodesic_loss\nfrom poseModels import model_3layer\nfrom helperFunctions import classes, get_accuracy\nfrom featureModels import resnet_model, vgg_model\n\nimport numpy as np\nimport scipy.io as spio\nimport gc\nimport os\nimport time\nimport progressbar\nimport argparse\nfrom tensorboardX import SummaryWriter\n\nparser = argparse.ArgumentParser(description='Pure Regression Models')\nparser.add_argument('--gpu_id', type=str, default='0')\nparser.add_argument('--render_path', type=str, default='data/renderforcnn/')\nparser.add_argument('--augmented_path', type=str, default='data/augmented2/')\nparser.add_argument('--pascal3d_path', type=str, default='data/flipped_new/test/')\nparser.add_argument('--save_str', type=str)\nparser.add_argument('--num_workers', type=int, default=4)\nparser.add_argument('--feature_network', type=str, default='resnet')\nparser.add_argument('--N0', type=int, default=2048)\nparser.add_argument('--N1', type=int, default=1000)\nparser.add_argument('--N2', type=int, default=500)\nparser.add_argument('--init_lr', type=float, default=1e-4)\nparser.add_argument('--num_epochs', type=int, default=3)\nargs = parser.parse_args()\nprint(args)\n# assign GPU\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n\n# save stuff here\nresults_file = os.path.join('results', args.save_str)\nmodel_file = os.path.join('models', args.save_str + '.tar')\nplots_file = os.path.join('plots', args.save_str)\nlog_dir = os.path.join('logs', args.save_str)\n\n# relevant variables\nydata_type = 'axis_angle'\nndim = 3\nnum_classes = len(classes)\n\nmse_loss = nn.MSELoss().cuda()\ngve_loss = geodesic_loss().cuda()\nce_loss = nn.CrossEntropyLoss().cuda()\n\n# DATA\n# datasets\nreal_data = ImagesAll(args.augmented_path, 'real', ydata_type)\nrender_data = ImagesAll(args.render_path, 'render', ydata_type)\ntest_data = TestImages(args.pascal3d_path, ydata_type)\n# setup data loaders\nreal_loader = DataLoader(real_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\nrender_loader = DataLoader(render_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\ntest_loader = DataLoader(test_data, batch_size=32)\nprint('Real: {0} \\t Render: {1} \\t Test: {2}'.format(len(real_loader), len(render_loader), len(test_loader)))\nmax_iterations = min(len(real_loader), len(render_loader))\n\n\n# my_model\nclass ElhoseinyModel(nn.Module):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.num_classes = num_classes\n\t\tif args.feature_network == 'resnet':\n\t\t\tself.feature_model = resnet_model('resnet50', 'layer4').cuda()\n\t\telif args.feature_network == 'vgg':\n\t\t\tself.feature_model = vgg_model('vgg13', 'fc6').cuda()\n\t\tself.pose_model = model_3layer(args.N0, args.N1, args.N2, ndim).cuda()\n\t\tself.category_model = nn.Linear(args.N0, num_classes).cuda()\n\n\tdef forward(self, x):\n\t\tx = self.feature_model(x)\n\t\ty0 = self.category_model(x)\n\t\ty1 = self.pose_model(x)\n\t\ty1 = np.pi*F.tanh(y1)\n\t\tdel x\n\t\treturn [y0, y1] # cat, pose\n\n\nmodel = ElhoseinyModel()\n# print(model)\n# loss and optimizer\noptimizer = 
optim.Adam(model.parameters(), lr=args.init_lr)\nscheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)\n# store stuff\nwriter = SummaryWriter(log_dir)\ncount = 0\nval_err, val_acc = [], []\n\n\n# OPTIMIZATION functions\ndef training_init():\n\tglobal count, val_err, val_acc\n\tmodel.train()\n\tbar = progressbar.ProgressBar(max_value=max_iterations)\n\tfor i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):\n\t\t# forward steps\n\t\txdata_real = Variable(sample_real['xdata'].cuda())\n\t\tlabel_real = Variable(sample_real['label'].cuda())\n\t\tydata_real = Variable(sample_real['ydata'].cuda())\n\t\toutput_real = model(xdata_real)\n\t\txdata_render = Variable(sample_render['xdata'].cuda())\n\t\tlabel_render = Variable(sample_render['label'].cuda())\n\t\tydata_render = Variable(sample_render['ydata'].cuda())\n\t\toutput_render = model(xdata_render)\n\t\toutput_pose = torch.cat((output_real[1], output_render[1]))\n\t\tgt_pose = torch.cat((ydata_real, ydata_render))\n\t\tLr = mse_loss(output_pose, gt_pose)\n\t\tLc = ce_loss(output_real[0], label_real.squeeze())\n\t\tloss = Lc + Lr\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\t# store\n\t\tcount += 1\n\t\twriter.add_scalar('train_loss', loss.item(), count)\n\t\tif i % 1000 == 0:\n\t\t\tytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\n\t\t\tspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})\n\t\t\ttmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\n\t\t\ttmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\n\t\t\twriter.add_scalar('val_acc', tmp_acc, count)\n\t\t\twriter.add_scalar('val_err', tmp_err, count)\n\t\t\tval_acc.append(tmp_acc)\n\t\t\tval_err.append(tmp_err)\n\t\t# cleanup\n\t\tdel xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render, Lr, Lc\n\t\tdel output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose\n\t\tbar.update(i)\n\t\t# stop\n\t\tif i == max_iterations:\n\t\t\tbreak\n\trender_loader.dataset.shuffle_images()\n\treal_loader.dataset.shuffle_images()\n\n\ndef training():\n\tglobal count, val_err, val_acc\n\tmodel.train()\n\tbar = progressbar.ProgressBar(max_value=max_iterations)\n\tfor i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):\n\t\t# forward steps\n\t\txdata_real = Variable(sample_real['xdata'].cuda())\n\t\tlabel_real = Variable(sample_real['label'].cuda())\n\t\tydata_real = Variable(sample_real['ydata'].cuda())\n\t\toutput_real = model(xdata_real)\n\t\txdata_render = Variable(sample_render['xdata'].cuda())\n\t\tlabel_render = Variable(sample_render['label'].cuda())\n\t\tydata_render = Variable(sample_render['ydata'].cuda())\n\t\toutput_render = model(xdata_render)\n\t\toutput_pose = torch.cat((output_real[1], output_render[1]))\n\t\tgt_pose = torch.cat((ydata_real, ydata_render))\n\t\tLr = gve_loss(output_pose, gt_pose)\n\t\tLc = ce_loss(output_real[0], label_real.squeeze())\n\t\tloss = 0.1*Lc + Lr\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\t# store\n\t\tcount += 1\n\t\twriter.add_scalar('train_loss', loss.item(), count)\n\t\tif i % 1000 == 0:\n\t\t\tytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\n\t\t\tspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})\n\t\t\ttmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\n\t\t\ttmp_err = 
get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\n\t\t\twriter.add_scalar('val_acc', tmp_acc, count)\n\t\t\twriter.add_scalar('val_err', tmp_err, count)\n\t\t\tval_acc.append(tmp_acc)\n\t\t\tval_err.append(tmp_err)\n\t\t# cleanup\n\t\tdel xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render, Lr, Lc\n\t\tdel output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose\n\t\tbar.update(i)\n\t\t# stop\n\t\tif i == max_iterations:\n\t\t\tbreak\n\trender_loader.dataset.shuffle_images()\n\treal_loader.dataset.shuffle_images()\n\n\ndef testing():\n\tmodel.eval()\n\tytrue_cat, ytrue_pose = [], []\n\typred_cat, ypred_pose = [], []\n\tfor i, sample in enumerate(test_loader):\n\t\txdata = Variable(sample['xdata'].cuda())\n\t\toutput = model(xdata)\n\t\toutput_cat = output[0]\n\t\toutput_pose = output[1]\n\t\ttmp_labels = np.argmax(output_cat.data.cpu().numpy(), axis=1)\n\t\typred_cat.append(tmp_labels)\n\t\tlabel = Variable(sample['label'])\n\t\tytrue_cat.append(sample['label'].squeeze().numpy())\n\t\typred_pose.append(output_pose.data.cpu().numpy())\n\t\tytrue_pose.append(sample['ydata'].numpy())\n\t\tdel xdata, label, output, sample, output_cat, output_pose\n\t\tgc.collect()\n\tytrue_cat = np.concatenate(ytrue_cat)\n\typred_cat = np.concatenate(ypred_cat)\n\tytrue_pose = np.concatenate(ytrue_pose)\n\typred_pose = np.concatenate(ypred_pose)\n\tmodel.train()\n\treturn ytrue_cat, ytrue_pose, ypred_cat, ypred_pose\n\n\ndef save_checkpoint(filename):\n\ttorch.save(model.state_dict(), filename)\n\n\n# initialization\ntraining_init()\nytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\nspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})\ntmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\ntmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\nprint('Acc: {0} \\t Err: {1}'.format(tmp_acc, tmp_err))\n\nfor epoch in range(args.num_epochs):\n\ttic = time.time()\n\tscheduler.step()\n\t# training step\n\ttraining()\n\t# save model at end of epoch\n\tsave_checkpoint(model_file)\n\t# validation\n\tytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\n\tspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})\n\ttmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\n\ttmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\n\tprint('Acc: {0} \\t Err: {1}'.format(tmp_acc, tmp_err))\n\t# time and output\n\ttoc = time.time() - tic\n\tprint('Epoch: {0} done in time {1}s'.format(epoch, toc))\n\t# cleanup\n\tgc.collect()\nwriter.close()\nval_acc = np.stack(val_acc)\nval_err = np.stack(val_err)\nspio.savemat(plots_file, {'val_acc': val_acc, 'val_err': val_err})\n\n# evaluate the model\nytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\nspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})\ntmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\ntmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\nprint('Acc: {0} \\t Err: {1}'.format(tmp_acc, tmp_err))\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.LambdaLR",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"torch.nn.Linear",
"scipy.io.savemat",
"torch.argmax"
],
[
"torch.nn.MSELoss",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.stack",
"numpy.concatenate",
"scipy.io.savemat",
"torch.index_select",
"numpy.isinf"
],
[
"torch.mean",
"torch.max",
"torch.cat",
"torch.sin",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"torch.nn.CrossEntropyLoss",
"torch.norm",
"torch.mm",
"torch.eye",
"torch.from_numpy",
"numpy.stack",
"torch.index_select",
"torch.cos",
"scipy.io.savemat",
"numpy.array",
"torch.nn.functional.normalize",
"numpy.isinf",
"torch.clamp",
"torch.nn.MSELoss"
],
[
"torch.nn.CrossEntropyLoss",
"torch.cat",
"torch.utils.data.DataLoader",
"numpy.stack",
"numpy.concatenate",
"torch.autograd.Variable",
"torch.nn.Linear",
"scipy.io.savemat",
"torch.nn.functional.tanh",
"torch.nn.MSELoss",
"torch.optim.lr_scheduler.StepLR"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
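The `learnRiemannianBDModel.py` cell in the record above trains with a geodesic loss between rotation matrices, angle = arccos((trace(R_predᵀ·R_true) − 1) / 2), clamped away from ±1 before the arccos. A minimal stand-alone sketch of that distance is given below; the function name, batch shape, and the `eps` default are illustrative assumptions (the original takes `eps` from `helperFunctions`).

```python
import torch


def geodesic_distance(r_pred: torch.Tensor, r_true: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    """Mean geodesic angle between two batches of 3x3 rotation matrices, shape [N, 3, 3]."""
    # trace(R_pred^T @ R_true) for every pair in the batch
    tmp = torch.einsum('bij,bij->b', r_pred, r_true)
    # clamp to the valid arccos domain, then take the rotation angle
    angle = torch.acos(torch.clamp((tmp - 1.0) / 2.0, -1.0 + eps, 1.0 - eps))
    return torch.mean(angle)


if __name__ == "__main__":
    eye = torch.eye(3).expand(4, 3, 3)  # identity rotations, so the angle is ~0
    print(geodesic_distance(eye, eye))
```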
vikaskurapati/pysph
|
[
"3e14c7d25d7d4726b6e5b69d4ba4db4fcc38ab6f"
] |
[
"pysph/base/tests/test_nnps.py"
] |
[
"\"\"\"unittests for the serial NNPS\n\nYou can run the tests like so:\n\n $ pytest -v test_nnps.py\n\"\"\"\nimport numpy\nfrom numpy import random\n\n# PySPH imports\nfrom pysph.base.point import IntPoint, Point\nfrom pysph.base.utils import get_particle_array\nfrom pysph.base import nnps\nfrom compyle.config import get_config\n\n# Carrays from PyZoltan\nfrom cyarray.carray import UIntArray, IntArray\n\n# Python testing framework\nimport unittest\nimport pytest\nfrom pytest import importorskip\n\n\nclass SimpleNNPSTestCase(unittest.TestCase):\n \"\"\"Simplified NNPS test case\n\n We distribute particles manually and perform sanity checks on NNPS\n\n \"\"\"\n\n def setUp(self):\n \"\"\"Default set-up used by all the tests\n\n Particles with the following coordinates (x, y, z) are placed in a box\n\n 0 : -1.5 , 0.25 , 0.5\n 1 : 0.33 , -0.25, 0.25\n 2 : 1.25 , -1.25, 1.25\n 3 : 0.05 , 1.25 , -0.5\n 4 : -0.5 , 0.5 , -1.25\n 5 : -0.75, 0.75 , -1.25\n 6 : -1.25, 0.5 , 0.5\n 7 : 0.5 , 1.5 , -0.5\n 8 : 0.5 , -0.5 , 0.5\n 9 : 0.5 , 1.75 , -0.75\n\n The cell size is set to 1. Valid cell indices and the\n particles they contain are given below:\n\n (-2, 0, 0) : particle 0, 6\n (0, -1, 0) : particle 1, 8\n (1, -2, 1) : particle 2\n (0, 1, -1) : particle 3, 7, 9\n (-1, 0, -2): particle 4, 5\n\n \"\"\"\n x = numpy.array([\n -1.5, 0.33, 1.25, 0.05, -0.5, -0.75, -1.25, 0.5, 0.5, 0.5])\n\n y = numpy.array([\n 0.25, -0.25, -1.25, 1.25, 0.5, 0.75, 0.5, 1.5, -0.5, 1.75])\n\n z = numpy.array([\n 0.5, 0.25, 1.25, -0.5, -1.25, -1.25, 0.5, -0.5, 0.5, -0.75])\n\n # using a degenrate (h=0) array will set cell size to 1 for NNPS\n h = numpy.zeros_like(x)\n\n pa = get_particle_array(x=x, y=y, z=z, h=h)\n\n self.dict_box_sort_nnps = nnps.DictBoxSortNNPS(\n dim=3, particles=[pa], radius_scale=1.0\n )\n\n self.box_sort_nnps = nnps.BoxSortNNPS(\n dim=3, particles=[pa], radius_scale=1.0\n )\n\n self.ll_nnps = nnps.LinkedListNNPS(\n dim=3, particles=[pa], radius_scale=1.0\n )\n\n self.sp_hash_nnps = nnps.SpatialHashNNPS(\n dim=3, particles=[pa], radius_scale=1.0\n )\n\n self.ext_sp_hash_nnps = nnps.ExtendedSpatialHashNNPS(\n dim=3, particles=[pa], radius_scale=1.0\n )\n\n self.strat_radius_nnps = nnps.StratifiedHashNNPS(\n dim=3, particles=[pa], radius_scale=1.0\n )\n\n # these are the expected cells\n self.expected_cells = {\n IntPoint(-2, 0, 0): [0, 6],\n IntPoint(0, -1, 0): [1, 8],\n IntPoint(1, -2, 1): [2],\n IntPoint(0, 1, -1): [3, 7, 9],\n IntPoint(-1, 0, -2): [4, 5]\n }\n\n def test_cell_size(self):\n \"SimpleNNPS :: test cell_size\"\n nnps = self.dict_box_sort_nnps\n self.assertAlmostEqual(nnps.cell_size, 1.0, 14)\n\n nnps = self.box_sort_nnps\n self.assertAlmostEqual(nnps.cell_size, 1.0, 14)\n\n nnps = self.ll_nnps\n self.assertAlmostEqual(nnps.cell_size, 1.0, 14)\n\n nnps = self.sp_hash_nnps\n self.assertAlmostEqual(nnps.cell_size, 1.0, 14)\n\n nnps = self.ext_sp_hash_nnps\n self.assertAlmostEqual(nnps.cell_size, 1.0, 14)\n\n nnps = self.strat_radius_nnps\n self.assertAlmostEqual(nnps.cell_size, 1.0, 14)\n\n def test_cells(self):\n \"SimpleNNPS :: test cells\"\n nnps = self.dict_box_sort_nnps\n cells = self.expected_cells\n\n # check each cell for it's contents\n for key in cells:\n self.assertTrue(key in nnps.cells)\n\n cell = nnps.cells.get(key)\n\n cell_indices = list(cell.lindices[0].get_npy_array())\n expected_indices = cells.get(key)\n\n self.assertTrue(cell_indices == expected_indices)\n\n\nclass NNPS2DTestCase(unittest.TestCase):\n def setUp(self):\n \"\"\"Default set-up used by all the tests\n\n Two 
sets of particle arrays (a & b) are created and neighbors\n are checked from a -> b, b -> a , a -> a and b -> b\n\n \"\"\"\n numpy.random.seed(123)\n self.numPoints1 = numPoints1 = 1 << 11\n self.numPoints2 = numPoints2 = 1 << 10\n\n self.pa1 = pa1 = self._create_random(numPoints1)\n self.pa2 = pa2 = self._create_random(numPoints2)\n\n # the list of particles\n self.particles = [pa1, pa2]\n\n def _create_random(self, numPoints):\n # average particle spacing and volume in the unit cube\n dx = pow(1.0 / numPoints, 1. / 2.)\n\n # create random points in the interval [-1, 1]^3\n x1, y1 = random.random((2, numPoints)) * 2.0 - 1.0\n z1 = numpy.zeros_like(x1)\n\n h1 = numpy.ones_like(x1) * 1.2 * dx\n gid1 = numpy.arange(numPoints).astype(numpy.uint32)\n\n # first particle array\n pa = get_particle_array(\n x=x1, y=y1, z=z1, h=h1, gid=gid1)\n\n return pa\n\n def _assert_neighbors(self, nbrs_nnps, nbrs_brute_force):\n # ensure that the lengths of the arrays are the same\n self.assertEqual(nbrs_nnps.length, nbrs_brute_force.length)\n nnbrs = nbrs_nnps.length\n\n _nbrs1 = nbrs_nnps.get_npy_array()\n _nbrs2 = nbrs_brute_force.get_npy_array()\n\n # sort the neighbors\n nbrs1 = sorted(_nbrs1[:nnbrs])\n nbrs2 = _nbrs2\n nbrs2.sort()\n\n # check each neighbor\n for i in range(nnbrs):\n self.assertEqual(nbrs1[i], nbrs2[i])\n\n def _test_neighbors_by_particle(self, src_index, dst_index, dst_numPoints):\n # nnps and the two neighbor lists\n nps = self.nps\n nbrs1 = UIntArray()\n nbrs2 = UIntArray()\n\n nps.set_context(src_index, dst_index)\n\n # get the neighbors and sort the result\n for i in range(dst_numPoints):\n nps.get_nearest_particles(src_index, dst_index, i, nbrs1)\n nps.brute_force_neighbors(src_index, dst_index, i, nbrs2)\n\n # ensure that the neighbor lists are the same\n self._assert_neighbors(nbrs1, nbrs2)\n\n\nclass DictBoxSortNNPS2DTestCase(NNPS2DTestCase):\n \"\"\"Test for the original box-sort algorithm\"\"\"\n\n def setUp(self):\n NNPS2DTestCase.setUp(self)\n self.nps = nnps.DictBoxSortNNPS(\n dim=2, particles=self.particles, radius_scale=2.0\n )\n\n def test_neighbors_aa(self):\n self._test_neighbors_by_particle(src_index=0, dst_index=0,\n dst_numPoints=self.numPoints1)\n\n def test_neighbors_ab(self):\n self._test_neighbors_by_particle(src_index=0, dst_index=1,\n dst_numPoints=self.numPoints2)\n\n def test_neighbors_ba(self):\n self._test_neighbors_by_particle(src_index=1, dst_index=0,\n dst_numPoints=self.numPoints1)\n\n def test_neighbors_bb(self):\n self._test_neighbors_by_particle(src_index=1, dst_index=1,\n dst_numPoints=self.numPoints2)\n\n def test_repeated(self):\n self.test_neighbors_aa()\n self.test_neighbors_ab()\n self.test_neighbors_ba()\n self.test_neighbors_bb()\n\n\nclass OctreeGPUNNPS2DTestCase(DictBoxSortNNPS2DTestCase):\n \"\"\"Test for Z-Order SFC based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPS2DTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n\n self.nps = gpu_nnps.OctreeGPUNNPS(\n dim=2, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n\n def tearDown(self):\n super(OctreeGPUNNPS2DTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass OctreeGPUNNPSDouble2DTestCase(DictBoxSortNNPS2DTestCase):\n \"\"\"Test for Z-Order SFC based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPS2DTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = 
get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = True\n\n self.nps = gpu_nnps.OctreeGPUNNPS(\n dim=2, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n\n def tearDown(self):\n super(OctreeGPUNNPSDouble2DTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass NNPSTestCase(unittest.TestCase):\n \"\"\"Standard nearest neighbor queries and comparison with the brute\n force approach.\n\n We randomly distribute particles in 3-space and compare the list\n of neighbors using the NNPS algorithms and the brute force\n approach.\n\n The following particle arrays are set up for testing\n 1) pa1, pa2: uniformly distributed distribution with a constant h. pa1\n and p2 have a different number of particles and hence, a different h.\n 2) pa3: Uniformly distributed distribution for both the coordinates and\n for h.\n 3) pa4: h varies along with spatial coordinates.\n \"\"\"\n\n def setUp(self):\n \"\"\"Default set-up used by all the tests\n \"\"\"\n numpy.random.seed(123)\n # Datasets with constant h\n self.numPoints1 = numPoints1 = 1 << 11\n self.numPoints2 = numPoints2 = 1 << 10\n\n # Datasets with varying h\n self.numPoints3 = numPoints3 = 1 << 10\n\n # FIXME: Tets fail with m4=9\n # Looks like the issue arises due to rounding errors which should be\n # acceptable to a degree. Need to modify tests or brute force NNPS to\n # handle such cases appropriately\n m4 = 8\n self.numPoints4 = numPoints4 = m4 ** 3\n\n self.pa1 = pa1 = self._create_random(numPoints1)\n self.pa2 = pa2 = self._create_random(numPoints2)\n self.pa3 = pa3 = self._create_random_variable_h(numPoints3)\n self.pa4 = pa4 = self._create_linear_radius(0.1, 0.4, m4)\n\n # the list of particles\n self.particles = [pa1, pa2, pa3, pa4]\n\n def _create_random(self, numPoints):\n # average particle spacing and volume in the unit cube\n dx = pow(1.0 / numPoints, 1. / 3.)\n\n # create random points in the interval [-1, 1]^3\n x1, y1, z1 = random.random((3, numPoints)) * 2.0 - 1.0\n h1 = numpy.ones_like(x1) * 1.2 * dx\n gid1 = numpy.arange(numPoints).astype(numpy.uint32)\n\n # first particle array\n pa = get_particle_array(\n x=x1, y=y1, z=z1, h=h1, gid=gid1)\n\n return pa\n\n def _create_linear_radius(self, dx_min, dx_max, m):\n n = m ** 3\n base = numpy.linspace(1., (dx_max / dx_min), m)\n hl = base * dx_min\n xl = numpy.cumsum(hl)\n x, y, z = numpy.meshgrid(xl, xl, xl)\n x, y, z = x.ravel(), y.ravel(), z.ravel()\n h1, h2, h3 = numpy.meshgrid(hl, hl, hl)\n h = (h1 ** 2 + h2 ** 2 + h3 ** 2) ** 0.5\n h = h.ravel()\n gid = numpy.arange(n).astype(numpy.uint32)\n\n pa = get_particle_array(\n x=x, y=y, z=z, h=h, gid=gid\n )\n\n return pa\n\n def _create_random_variable_h(self, num_points):\n # average particle spacing and volume in the unit cube\n dx = pow(1.0 / num_points, 1. 
/ 3.)\n\n # create random points in the interval [-1, 1]^3\n x1, y1, z1 = random.random((3, num_points)) * 2.0 - 1.0\n h1 = numpy.ones_like(x1) * \\\n numpy.random.uniform(1, 4, size=num_points) * 1.2 * dx\n gid1 = numpy.arange(num_points).astype(numpy.uint32)\n\n # first particle array\n pa = get_particle_array(\n x=x1, y=y1, z=z1, h=h1, gid=gid1)\n\n return pa\n\n def _assert_neighbors(self, nbrs_nnps, nbrs_brute_force):\n # ensure that the lengths of the arrays are the same\n if nbrs_nnps.length != nbrs_brute_force.length:\n print(nbrs_nnps.get_npy_array(), nbrs_brute_force.get_npy_array())\n self.assertEqual(nbrs_nnps.length, nbrs_brute_force.length)\n nnbrs = nbrs_nnps.length\n\n _nbrs1 = nbrs_nnps.get_npy_array()\n _nbrs2 = nbrs_brute_force.get_npy_array()\n\n # sort the neighbors\n nbrs1 = sorted(_nbrs1[:nnbrs])\n nbrs2 = _nbrs2\n nbrs2.sort()\n\n # check each neighbor\n for i in range(nnbrs):\n self.assertEqual(nbrs1[i], nbrs2[i])\n\n def _test_neighbors_by_particle(self, src_index, dst_index, dst_numPoints):\n # nnps and the two neighbor lists\n nps = self.nps\n nbrs1 = UIntArray()\n nbrs2 = UIntArray()\n\n nps.set_context(src_index, dst_index)\n\n # get the neighbors and sort the result\n for i in range(dst_numPoints):\n nps.get_nearest_particles(src_index, dst_index, i, nbrs1)\n nps.brute_force_neighbors(src_index, dst_index, i, nbrs2)\n\n # ensure that the neighbor lists are the same\n\n self._assert_neighbors(nbrs1, nbrs2)\n\n\nclass DictBoxSortNNPSTestCase(NNPSTestCase):\n \"\"\"Test for the original box-sort algorithm\"\"\"\n\n def setUp(self):\n \"\"\"\n Default setup and tests used for 3D NNPS tests\n\n We run the tests on the following pairs of particle arrays:\n\n Set 1) Same particle arrays. Both have constant h.\n 1) a -> a\n 2) b -> b\n\n Set 2) Different particle arrays with constant h.\n 1) a -> b\n 2) b -> a\n\n Set 3) Variable h\n 1) c -> c\n 2) d -> d\n\n We then repeat the above tests again to ensure that we get the\n correct results even when running NNPS repeatedly\n \"\"\"\n NNPSTestCase.setUp(self)\n self.nps = nnps.DictBoxSortNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n def test_neighbors_aa(self):\n self._test_neighbors_by_particle(src_index=0, dst_index=0,\n dst_numPoints=self.numPoints1)\n\n def test_neighbors_ab(self):\n self._test_neighbors_by_particle(src_index=0, dst_index=1,\n dst_numPoints=self.numPoints2)\n\n def test_neighbors_ba(self):\n self._test_neighbors_by_particle(src_index=1, dst_index=0,\n dst_numPoints=self.numPoints1)\n\n def test_neighbors_bb(self):\n self._test_neighbors_by_particle(src_index=1, dst_index=1,\n dst_numPoints=self.numPoints2)\n\n def test_neighbors_cc(self):\n self._test_neighbors_by_particle(src_index=2, dst_index=2,\n dst_numPoints=self.numPoints3)\n\n def test_neighbors_dd(self):\n self._test_neighbors_by_particle(src_index=3, dst_index=3,\n dst_numPoints=self.numPoints4)\n\n def test_repeated(self):\n self.test_neighbors_aa()\n self.test_neighbors_ab()\n self.test_neighbors_ba()\n self.test_neighbors_bb()\n self.test_neighbors_cc()\n self.test_neighbors_dd()\n\n\nclass BoxSortNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for the original box-sort algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.BoxSortNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass SpatialHashNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Spatial Hash algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.SpatialHashNNPS(\n 
dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass SingleLevelStratifiedHashNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Stratified hash algorithm with num_levels = 1\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.StratifiedHashNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass MultipleLevelsStratifiedHashNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Stratified hash algorithm with num_levels = 2\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.StratifiedHashNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n num_levels=2\n )\n\n\nclass SingleLevelStratifiedSFCNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Stratified SFC algorithm with num_levels = 1\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.StratifiedSFCNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass MultipleLevelsStratifiedSFCNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Stratified SFC algorithm with num_levels = 2\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.StratifiedSFCNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n num_levels=2\n )\n\n\nclass ExtendedSpatialHashNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Extended Spatial Hash algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.ExtendedSpatialHashNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass OctreeNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Octree based algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.OctreeNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass CellIndexingNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Cell Indexing based algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.CellIndexingNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass ZOrderNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Z-Order SFC based algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.ZOrderNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass ExtendedZOrderNNPSAsymmetricTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for asymmetric Extended Z-Order SFC based algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.ExtendedZOrderNNPS(\n dim=3, particles=self.particles, radius_scale=2.0, H=1,\n asymmetric=True\n )\n\n\nclass ExtendedZOrderNNPSSymmetricTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for symmetric Extended Z-Order SFC based algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.ExtendedZOrderNNPS(\n dim=3, particles=self.particles, radius_scale=2.0, H=1,\n asymmetric=False\n )\n\n\nclass ExtendedZOrderNNPSSubdividedAsymTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for asymmetric Extended Z-Order SFC based algorithm with\n subdivision\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.ExtendedZOrderNNPS(\n dim=3, particles=self.particles, radius_scale=2.0, H=3,\n asymmetric=True\n )\n\n @pytest.mark.xfail(reason=\"ExtendedZOrderNNPS failing for \\\n different dest and src index\")\n def test_neighbors_ab(self):\n self._test_neighbors_by_particle(src_index=0, dst_index=1,\n dst_numPoints=self.numPoints2)\n\n @pytest.mark.xfail(reason=\"ExtendedZOrderNNPS failing for \\\n different dest and src index\")\n def test_neighbors_ba(self):\n 
self._test_neighbors_by_particle(src_index=1, dst_index=0,\n dst_numPoints=self.numPoints1)\n\n @pytest.mark.xfail(reason=\"ExtendedZOrderNNPS failing for \\\n different dest and src index\")\n def test_repeated(self):\n self.test_neighbors_aa()\n self.test_neighbors_ab()\n self.test_neighbors_ba()\n self.test_neighbors_bb()\n self.test_neighbors_cc()\n self.test_neighbors_dd()\n\n\nclass ExtendedZOrderNNPSSubdividedSymTestCase(\n ExtendedZOrderNNPSSubdividedAsymTestCase):\n \"\"\"Test for symmetric Extended Z-Order SFC based algorithm with\n subdivision\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.ExtendedZOrderNNPS(\n dim=3, particles=self.particles, radius_scale=2.0, H=3,\n asymmetric=False\n )\n\n\nclass ZOrderGPUNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Z-Order SFC based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n\n self.nps = gpu_nnps.ZOrderGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n\n def tearDown(self):\n super(ZOrderGPUNNPSTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass ZOrderGPUNNPSTestCaseCUDA(ZOrderGPUNNPSTestCase):\n def setUp(self):\n NNPSTestCase.setUp(self)\n importorskip(\"pycuda\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n\n self.nps = gpu_nnps.ZOrderGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='cuda'\n )\n\n def tearDown(self):\n super(ZOrderGPUNNPSTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass BruteForceNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for OpenCL brute force algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n\n self.nps = gpu_nnps.BruteForceNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n\n def tearDown(self):\n super(BruteForceNNPSTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass OctreeGPUNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Z-Order SFC based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n\n self.nps = gpu_nnps.OctreeGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n\n def tearDown(self):\n super(OctreeGPUNNPSTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass ZOrderGPUDoubleNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Z-Order SFC based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = True\n self.nps = gpu_nnps.ZOrderGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n\n def tearDown(self):\n super(ZOrderGPUDoubleNNPSTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass ZOrderGPUDoubleNNPSTestCaseCUDA(ZOrderGPUDoubleNNPSTestCase):\n \"\"\"Test for 
Z-Order SFC based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n importorskip(\"pycuda\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = True\n self.nps = gpu_nnps.ZOrderGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='cuda'\n )\n\n def tearDown(self):\n super(ZOrderGPUDoubleNNPSTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass OctreeGPUDoubleNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Octree based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = True\n self.nps = gpu_nnps.OctreeGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n\n def tearDown(self):\n super(OctreeGPUDoubleNNPSTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass TestZOrderGPUNNPSWithSorting(DictBoxSortNNPSTestCase):\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n self.nps = gpu_nnps.ZOrderGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n self.nps.spatially_order_particles(0)\n self.nps.spatially_order_particles(1)\n\n for pa in self.particles:\n pa.gpu.pull()\n\n def tearDown(self):\n super(TestZOrderGPUNNPSWithSorting, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass TestZOrderGPUNNPSWithSortingCUDA(TestZOrderGPUNNPSWithSorting):\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pycuda\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n self.nps = gpu_nnps.ZOrderGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='cuda'\n )\n self.nps.spatially_order_particles(0)\n self.nps.spatially_order_particles(1)\n\n for pa in self.particles:\n pa.gpu.pull()\n\n def tearDown(self):\n super(TestZOrderGPUNNPSWithSorting, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass OctreeGPUNNPSWithSortingTestCase(DictBoxSortNNPSTestCase):\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n self.nps = gpu_nnps.OctreeGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n backend='opencl'\n )\n self.nps.spatially_order_particles(0)\n self.nps.spatially_order_particles(1)\n\n for pa in self.particles:\n pa.gpu.pull()\n\n def tearDown(self):\n super(OctreeGPUNNPSWithSortingTestCase, self).tearDown()\n get_config().use_double = self._orig_use_double\n\n\nclass OctreeGPUNNPSWithPartitioningTestCase(DictBoxSortNNPSTestCase):\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n cfg = get_config()\n self._orig_use_double = cfg.use_double\n cfg.use_double = False\n self.nps = gpu_nnps.OctreeGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n use_partitions=True, backend='opencl'\n )\n\n for pa in self.particles:\n pa.gpu.pull()\n\n def tearDown(self):\n super(OctreeGPUNNPSWithPartitioningTestCase, self).tearDown()\n get_config().use_double = 
self._orig_use_double\n\n\nclass StratifiedSFCGPUNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Stratified SFC based OpenCL algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n cl = importorskip(\"pyopencl\")\n from pysph.base import gpu_nnps\n\n self.nps = gpu_nnps.StratifiedSFCGPUNNPS(\n dim=3, particles=self.particles, radius_scale=2.0,\n num_levels=2, backend='opencl'\n )\n\n @pytest.mark.xfail(reason=\"StratifiedSFCGPUNNPS failing for \\\n variable h cases\")\n def test_neighbors_dd(self):\n self._test_neighbors_by_particle(src_index=3, dst_index=3,\n dst_numPoints=self.numPoints4)\n\n @pytest.mark.xfail(reason=\"StratifiedSFCGPUNNPS failing for \\\n variable h cases\")\n def test_repeated(self):\n self.test_neighbors_aa()\n self.test_neighbors_ab()\n self.test_neighbors_ba()\n self.test_neighbors_bb()\n self.test_neighbors_cc()\n self.test_neighbors_dd()\n\n\nclass CompressedOctreeNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for Compressed Octree based algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.CompressedOctreeNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n\nclass LinkedListNNPSTestCase(DictBoxSortNNPSTestCase):\n \"\"\"Test for the original box-sort algorithm\"\"\"\n\n def setUp(self):\n NNPSTestCase.setUp(self)\n self.nps = nnps.LinkedListNNPS(\n dim=3, particles=self.particles, radius_scale=2.0\n )\n\n def test_cell_index_positivity(self):\n nps = self.nps\n ncells_tot = nps.ncells_tot\n ncells_per_dim = nps.ncells_per_dim\n dim = nps.dim\n\n # cell indices should be positive. We iterate over the\n # flattened indices, get the unflattened version and check\n # that each component remains positive\n for cell_index in range(ncells_tot):\n cid = nnps.py_unflatten(cell_index, ncells_per_dim, dim)\n\n self.assertTrue(cid.x > -1)\n self.assertTrue(cid.y > -1)\n self.assertTrue(cid.z > -1)\n\n\nclass TestNNPSOnLargeDomain(unittest.TestCase):\n def _make_particles(self, nx=20):\n x, y, z = numpy.random.random((3, nx, nx, nx))\n x = numpy.ravel(x)\n y = numpy.ravel(y)\n z = numpy.ravel(z)\n h = numpy.ones_like(x) * 1.3 / nx\n\n pa = get_particle_array(name='fluid', x=x, y=y, z=z, h=h)\n # Place one particle far far away\n # On Linux and OSX this works even if sz is 100000.\n # However, on Windows this fails but works with 1000,\n # hence we set it to 1000.\n sz = 1000.0\n pa.add_particles(x=[sz], y=[sz], z=[sz])\n return pa\n\n def test_linked_list_nnps_raises_exception_for_large_domain(self):\n # Given/When\n pa = self._make_particles(20)\n # Then\n self.assertRaises(\n RuntimeError, nnps.LinkedListNNPS, dim=3, particles=[pa],\n cache=True\n )\n\n def test_box_sort_works_for_large_domain(self):\n # Given\n pa = self._make_particles(20)\n # We turn on cache so it computes all the neighbors quickly for us.\n nps = nnps.BoxSortNNPS(dim=3, particles=[pa], cache=True)\n nbrs = UIntArray()\n direct = UIntArray()\n nps.set_context(0, 0)\n for i in range(pa.get_number_of_particles()):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nps.brute_force_neighbors(0, 0, i, direct)\n x = nbrs.get_npy_array()\n y = direct.get_npy_array()\n x.sort()\n y.sort()\n assert numpy.all(x == y)\n\n def test_spatial_hash_works_for_large_domain(self):\n # Given\n pa = self._make_particles(20)\n # We turn on cache so it computes all the neighbors quickly for us.\n nps = nnps.SpatialHashNNPS(dim=3, particles=[pa], cache=True)\n nbrs = UIntArray()\n direct = UIntArray()\n nps.set_context(0, 0)\n for i in 
range(pa.get_number_of_particles()):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nps.brute_force_neighbors(0, 0, i, direct)\n x = nbrs.get_npy_array()\n y = direct.get_npy_array()\n x.sort()\n y.sort()\n assert numpy.all(x == y)\n\n def test_extended_spatial_hash_works_for_large_domain(self):\n # Given\n pa = self._make_particles(20)\n # We turn on cache so it computes all the neighbors quickly for us.\n nps = nnps.ExtendedSpatialHashNNPS(dim=3, particles=[pa], cache=True)\n nbrs = UIntArray()\n direct = UIntArray()\n nps.set_context(0, 0)\n for i in range(pa.get_number_of_particles()):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nps.brute_force_neighbors(0, 0, i, direct)\n x = nbrs.get_npy_array()\n y = direct.get_npy_array()\n x.sort()\n y.sort()\n assert numpy.all(x == y)\n\n def test_octree_works_for_large_domain(self):\n # Given\n pa = self._make_particles(20)\n # We turn on cache so it computes all the neighbors quickly for us.\n nps = nnps.OctreeNNPS(dim=3, particles=[pa], cache=True)\n nbrs = UIntArray()\n direct = UIntArray()\n nps.set_context(0, 0)\n for i in range(pa.get_number_of_particles()):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nps.brute_force_neighbors(0, 0, i, direct)\n x = nbrs.get_npy_array()\n y = direct.get_npy_array()\n x.sort()\n y.sort()\n assert numpy.all(x == y)\n\n def test_compressed_octree_works_for_large_domain(self):\n # Given\n pa = self._make_particles(20)\n # We turn on cache so it computes all the neighbors quickly for us.\n nps = nnps.CompressedOctreeNNPS(dim=3, particles=[pa], cache=True)\n nbrs = UIntArray()\n direct = UIntArray()\n nps.set_context(0, 0)\n for i in range(pa.get_number_of_particles()):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nps.brute_force_neighbors(0, 0, i, direct)\n x = nbrs.get_npy_array()\n y = direct.get_npy_array()\n x.sort()\n y.sort()\n assert numpy.all(x == y)\n\n\nclass TestLinkedListNNPSWithSorting(unittest.TestCase):\n def _make_particles(self, nx=20):\n x = numpy.linspace(0, 1, nx)\n h = numpy.ones_like(x) / (nx - 1)\n\n pa = get_particle_array(name='fluid', x=x, h=h)\n nps = nnps.LinkedListNNPS(dim=1, particles=[pa], sort_gids=True)\n return pa, nps\n\n def test_nnps_sorts_without_gids(self):\n # Given\n pa, nps = self._make_particles(10)\n\n # When\n nps.set_context(0, 0)\n # Test the that gids are actually huge and invalid.\n self.assertEqual(numpy.max(pa.gid), numpy.min(pa.gid))\n self.assertTrue(numpy.max(pa.gid) > pa.gid.size)\n\n # Then\n nbrs = UIntArray()\n for i in range(pa.get_number_of_particles()):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nb = nbrs.get_npy_array()\n sorted_nbrs = sorted(nb.copy())\n self.assertTrue(numpy.all(nb == sorted_nbrs))\n\n def test_nnps_sorts_with_valid_gids(self):\n # Given\n pa, nps = self._make_particles(10)\n pa.gid[:] = numpy.arange(pa.x.size)\n nps.update()\n\n # When\n nps.set_context(0, 0)\n # Test the that gids are actually valid.\n self.assertEqual(numpy.max(pa.gid), pa.gid.size - 1)\n self.assertEqual(numpy.min(pa.gid), 0)\n\n # Then\n nbrs = UIntArray()\n for i in range(pa.get_number_of_particles()):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nb = nbrs.get_npy_array()\n sorted_nbrs = sorted(nb.copy())\n self.assertTrue(numpy.all(nb == sorted_nbrs))\n\n\nclass TestSpatialHashNNPSWithSorting(TestLinkedListNNPSWithSorting):\n def _make_particles(self, nx=20):\n x = numpy.linspace(0, 1, nx)\n h = numpy.ones_like(x) / (nx - 1)\n\n pa = get_particle_array(name='fluid', x=x, h=h)\n nps = nnps.SpatialHashNNPS(dim=1, particles=[pa], sort_gids=True)\n return pa, 
nps\n\n\nclass TestMultipleLevelsStratifiedHashNNPSWithSorting(\n TestLinkedListNNPSWithSorting):\n def _make_particles(self, nx=20):\n x = numpy.linspace(0, 1, nx)\n h = numpy.ones_like(x) / (nx - 1)\n\n pa = get_particle_array(name='fluid', x=x, h=h)\n nps = nnps.StratifiedHashNNPS(dim=1, particles=[pa], num_levels=2,\n sort_gids=True)\n return pa, nps\n\n\nclass TestMultipleLevelsStratifiedSFCNNPSWithSorting(\n TestLinkedListNNPSWithSorting):\n def _make_particles(self, nx=20):\n x = numpy.linspace(0, 1, nx)\n h = numpy.ones_like(x) / (nx - 1)\n\n pa = get_particle_array(name='fluid', x=x, h=h)\n nps = nnps.StratifiedSFCNNPS(dim=1, particles=[pa], num_levels=2,\n sort_gids=True)\n return pa, nps\n\n\ndef test_large_number_of_neighbors_linked_list():\n x = numpy.random.random(1 << 14) * 0.1\n y = x.copy()\n z = x.copy()\n h = numpy.ones_like(x)\n pa = get_particle_array(name='fluid', x=x, y=y, z=z, h=h)\n\n nps = nnps.LinkedListNNPS(dim=3, particles=[pa], cache=False)\n nbrs = UIntArray()\n nps.get_nearest_particles(0, 0, 0, nbrs)\n # print(nbrs.length)\n assert nbrs.length == len(x)\n\n\ndef test_neighbor_cache_update_doesnt_leak():\n # Given\n x, y, z = numpy.random.random((3, 1000))\n pa = get_particle_array(name='fluid', x=x, y=y, z=z, h=0.05)\n\n nps = nnps.LinkedListNNPS(dim=3, particles=[pa], cache=True)\n nps.set_context(0, 0)\n nps.cache[0].find_all_neighbors()\n old_length = sum(x.length for x in nps.cache[0]._neighbor_arrays)\n\n # When\n nps.update()\n nps.set_context(0, 0)\n nps.cache[0].find_all_neighbors()\n\n # Then\n new_length = sum(x.length for x in nps.cache[0]._neighbor_arrays)\n assert new_length == old_length\n\n\nnnps_classes = [\n nnps.BoxSortNNPS,\n nnps.CellIndexingNNPS,\n nnps.CompressedOctreeNNPS,\n nnps.ExtendedSpatialHashNNPS,\n nnps.LinkedListNNPS,\n nnps.OctreeNNPS,\n nnps.SpatialHashNNPS,\n nnps.StratifiedHashNNPS,\n nnps.StratifiedSFCNNPS,\n nnps.ZOrderNNPS\n]\n\n\[email protected](\"cls\", nnps_classes)\ndef test_corner_case_1d_few_cells(cls):\n x, y, z = [0.131, 0.359], [1.544, 1.809], [-3.6489999, -2.8559999]\n pa = get_particle_array(name='fluid', x=x, y=y, z=z, h=1.0)\n nbrs = UIntArray()\n bf_nbrs = UIntArray()\n nps = cls(dim=3, particles=[pa], radius_scale=0.7)\n for i in range(2):\n nps.get_nearest_particles(0, 0, i, nbrs)\n nps.brute_force_neighbors(0, 0, i, bf_nbrs)\n assert sorted(nbrs) == sorted(bf_nbrs), 'Failed for particle: %d' % i\n\n\ndef test_use_2d_for_1d_data_with_llnps():\n y = numpy.array([1.0, 1.5])\n h = numpy.ones_like(y)\n pa = get_particle_array(name='fluid', y=y, h=h)\n nps = nnps.LinkedListNNPS(dim=2, particles=[pa], cache=False)\n nbrs = UIntArray()\n nps.get_nearest_particles(0, 0, 0, nbrs)\n print(nbrs.length)\n assert nbrs.length == len(y)\n\n\ndef test_use_3d_for_1d_data_with_llnps():\n y = numpy.array([1.0, 1.5])\n h = numpy.ones_like(y)\n pa = get_particle_array(name='fluid', y=y, h=h)\n nps = nnps.LinkedListNNPS(dim=3, particles=[pa], cache=False)\n nbrs = UIntArray()\n nps.get_nearest_particles(0, 0, 0, nbrs)\n print(nbrs.length)\n assert nbrs.length == len(y)\n\n\ndef test_large_number_of_neighbors_spatial_hash():\n x = numpy.random.random(1 << 14) * 0.1\n y = x.copy()\n z = x.copy()\n h = numpy.ones_like(x)\n pa = get_particle_array(name='fluid', x=x, y=y, z=z, h=h)\n\n nps = nnps.SpatialHashNNPS(dim=3, particles=[pa], cache=False)\n nbrs = UIntArray()\n nps.get_nearest_particles(0, 0, 0, nbrs)\n # print(nbrs.length)\n assert nbrs.length == len(x)\n\n\ndef test_large_number_of_neighbors_octree():\n x = 
numpy.random.random(1 << 14) * 0.1\n y = x.copy()\n z = x.copy()\n h = numpy.ones_like(x)\n pa = get_particle_array(name='fluid', x=x, y=y, z=z, h=h)\n\n nps = nnps.OctreeNNPS(dim=3, particles=[pa], cache=False)\n nbrs = UIntArray()\n nps.get_nearest_particles(0, 0, 0, nbrs)\n # print(nbrs.length)\n assert nbrs.length == len(x)\n\n\ndef test_flatten_unflatten():\n # first consider the 2D case where we assume a 4 X 5 grid of cells\n dim = 2\n ncells_per_dim = IntArray(3)\n ncells_per_dim[0] = 4\n ncells_per_dim[1] = 5\n ncells_per_dim[2] = 0\n\n # valid un-flattened cell indices\n cids = [[i, j] for i in range(4) for j in range(5)]\n for _cid in cids:\n cid = IntPoint(_cid[0], _cid[1], 0)\n flattened = nnps.py_flatten(cid, ncells_per_dim, dim)\n unflattened = nnps.py_unflatten(flattened, ncells_per_dim, dim)\n\n # the unflattened index should match with cid\n assert (cid == unflattened)\n\n # 3D\n dim = 3\n ncells_per_dim = IntArray(3)\n ncells_per_dim[0] = 4\n ncells_per_dim[1] = 5\n ncells_per_dim[2] = 2\n\n # valid un-flattened indices\n cids = [[i, j, k] for i in range(4) for j in range(5) for k in range(2)]\n for _cid in cids:\n cid = IntPoint(_cid[0], _cid[1], _cid[2])\n flattened = nnps.py_flatten(cid, ncells_per_dim, dim)\n unflattened = nnps.py_unflatten(flattened, ncells_per_dim, dim)\n\n # the unflattened index should match with cid\n assert (cid == unflattened)\n\n\ndef test_1D_get_valid_cell_index():\n dim = 1\n\n # simulate a dummy distribution such that 10 cells are along the\n # 'x' direction\n n_cells = 10\n ncells_per_dim = IntArray(3)\n\n ncells_per_dim[0] = n_cells\n ncells_per_dim[1] = 1\n ncells_per_dim[2] = 1\n\n # target cell\n cx = 1\n cy = cz = 0\n\n # as long as cy and cz are 0, the function should return the valid\n # flattened cell index for the cell\n for i in [-1, 0, 1]:\n index = nnps.py_get_valid_cell_index(\n IntPoint(cx + i, cy, cz), ncells_per_dim, dim, n_cells)\n assert index != -1\n\n # index should be -1 whenever cy and cz are > 1. 
This is\n # specifically the case that was failing earlier.\n for j in [-1, 1]:\n for k in [-1, 1]:\n index = nnps.py_get_valid_cell_index(\n IntPoint(cx, cy + j, cz + k), ncells_per_dim, dim, n_cells)\n assert index == -1\n\n # When the cx > n_cells or < -1 it should be invalid\n for i in [-2, -1, n_cells, n_cells + 1]:\n index = nnps.py_get_valid_cell_index(\n IntPoint(i, cy, cz), ncells_per_dim, dim, n_cells)\n assert index == -1\n\n\ndef test_get_centroid():\n cell = nnps.Cell(IntPoint(0, 0, 0), cell_size=0.1, narrays=1)\n centroid = Point()\n cell.get_centroid(centroid)\n\n assert (abs(centroid.x - 0.05) < 1e-10)\n assert (abs(centroid.y - 0.05) < 1e-10)\n assert (abs(centroid.z - 0.05) < 1e-10)\n\n cell = nnps.Cell(IntPoint(1, 2, 3), cell_size=0.5, narrays=1)\n cell.get_centroid(centroid)\n\n assert (abs(centroid.x - 0.75) < 1e-10)\n assert (abs(centroid.y - 1.25) < 1e-10)\n assert (abs(centroid.z - 1.75) < 1e-10)\n\n\ndef test_get_bbox():\n cell_size = 0.1\n cell = nnps.Cell(IntPoint(0, 0, 0), cell_size=cell_size, narrays=1)\n centroid = Point()\n boxmin = Point()\n boxmax = Point()\n\n cell.get_centroid(centroid)\n cell.get_bounding_box(boxmin, boxmax)\n\n assert (abs(boxmin.x - (centroid.x - 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmin.y - (centroid.y - 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmin.z - (centroid.z - 1.5 * cell_size)) < 1e-10)\n\n assert (abs(boxmax.x - (centroid.x + 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmax.y - (centroid.y + 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmax.z - (centroid.z + 1.5 * cell_size)) < 1e-10)\n\n cell_size = 0.5\n cell = nnps.Cell(IntPoint(1, 2, 0), cell_size=cell_size, narrays=1)\n\n cell.get_centroid(centroid)\n cell.get_bounding_box(boxmin, boxmax)\n\n assert (abs(boxmin.x - (centroid.x - 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmin.y - (centroid.y - 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmin.z - (centroid.z - 1.5 * cell_size)) < 1e-10)\n\n assert (abs(boxmax.x - (centroid.x + 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmax.y - (centroid.y + 1.5 * cell_size)) < 1e-10)\n assert (abs(boxmax.z - (centroid.z + 1.5 * cell_size)) < 1e-10)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.random",
"numpy.ones_like",
"numpy.random.seed",
"numpy.linspace",
"numpy.meshgrid",
"numpy.arange",
"numpy.min",
"numpy.cumsum",
"numpy.all",
"numpy.max",
"numpy.random.uniform",
"numpy.zeros_like",
"numpy.ravel",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
macrosynergy/macrosynergy
|
[
"6806573c21fb8035cbbdf2a3c591fe4de80fb18f",
"6806573c21fb8035cbbdf2a3c591fe4de80fb18f"
] |
[
"macrosynergy/panel/view_correlations.py",
"macrosynergy/management/shape_dfs.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom typing import List, Union, Tuple\n\nfrom macrosynergy.management.simulate_quantamental_data import make_qdf\nfrom macrosynergy.management.check_availability import reduce_df\n\n\ndef correl_matrix(df: pd.DataFrame, xcats: List[str] = None, cids: List[str] = None,\n start: str = '2000-01-01', end: str = None, val: str = 'value',\n title: str = None, size: Tuple[float] = (14, 8), max_color: float=None):\n\n \"\"\"Display correlation matrix either across xcats (if more than one xcat) or cids\n\n :param <pd.Dataframe> df: standardized dataframe with the following necessary columns:\n 'cid', 'xcats', 'real_date' and at least one column with values of interest.\n :param <List[str]> xcats: extended categories to be correlated. Default is all in the dataframe.\n If xcats contains only one category the correlation coefficients across sections are displayed.\n If xcats contains more than one category the correlation coefficients across categories are displayed.\n :param <List[str]> cids: cross sections to be correlated. Default is all in the dataframe.\n :param <str> start: earliest date in ISO format. Default is None and earliest date in df is used.\n :param <str> end: latest date in ISO format. Default is None and latest date in df is used.\n :param <str> val: name of column that contains the values of interest. Default is 'value'.\n :param <str> title: chart heading. If none is given, a default title is used.\n :param <Tuple[float]> size: two-element tuple setting width/height of figure. Default is (14, 8).\n :param <float> max_color: maximum values of positive/negative correlation coefficients for color scale\n Default is none. If a value is given it applies symmetrically to positive and negative values.\n\n \"\"\"\n\n xcats = xcats if isinstance(xcats, list) else [xcats]\n min_color = None if max_color is None else -max_color # define minimum of color scale\n\n df, xcats, cids = reduce_df(df, xcats, cids, start, end, out_all=True)\n\n s_date = df['real_date'].min().strftime('%Y-%m-%d')\n e_date = df['real_date'].max().strftime('%Y-%m-%d')\n\n if len(xcats) == 1:\n\n df_w = df.pivot(index='real_date', columns='cid', values=val)\n if title is None:\n title = f'Cross-sectional correlation of {xcats[0]} from {s_date} to {e_date}'\n\n else:\n\n df_w = df.pivot(index=('cid', 'real_date'), columns='xcat', values=val)\n if title is None:\n title = f'Cross-category correlation from {s_date} to {e_date}'\n\n sns.set(style=\"ticks\")\n corr = df_w.corr()\n mask = np.triu(np.ones_like(corr, dtype=bool)) # generate mask for upper triangle\n fig, ax = plt.subplots(figsize=size) # set up figure\n sns.heatmap(corr, mask=mask, cmap='vlag_r', center=0, vmin=min_color, vmax=max_color,\n square=False, linewidths=.5, cbar_kws={\"shrink\": .5})\n ax.set(xlabel='', ylabel='')\n ax.set_title(title, fontsize=14)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n\n cids = ['AUD', 'CAD', 'GBP', 'NZD']\n xcats = ['XR', 'CRY', 'GROWTH', 'INFL']\n df_cids = pd.DataFrame(index=cids, columns=['earliest', 'latest', 'mean_add', 'sd_mult'])\n df_cids.loc['AUD', ] = ['2010-01-01', '2020-12-31', 0.1, 1]\n df_cids.loc['CAD', ] = ['2011-01-01', '2020-11-30', 0, 1]\n df_cids.loc['GBP', ] = ['2012-01-01', '2020-11-30', 0, 2]\n df_cids.loc['NZD', ] = ['2012-01-01', '2020-09-30', -0.1, 2]\n\n df_xcats = pd.DataFrame(index=xcats, columns=['earliest', 'latest', 'mean_add', 'sd_mult', 'ar_coef', 'back_coef'])\n df_xcats.loc['XR', ] = 
['2010-01-01', '2020-12-31', 0.1, 1, 0, 0.3]\n df_xcats.loc['CRY', ] = ['2011-01-01', '2020-10-30', 1, 2, 0.95, 0.5]\n df_xcats.loc['GROWTH', ] = ['2011-01-01', '2020-10-30', 1, 2, 0.9, 0.5]\n df_xcats.loc['INFL', ] = ['2011-01-01', '2020-10-30', 1, 2, 0.8, 0.5]\n\n dfd = make_qdf(df_cids, df_xcats, back_ar=0.75)\n\n correl_matrix(dfd, xcats=xcats, cids=cids, max_color=0.2)\n correl_matrix(dfd, xcats=xcats[0], cids=cids, title='Correlation')\n\n correl_matrix(dfd, xcats=xcats, cids=cids)\n\n",
"import numpy as np\nimport pandas as pd\nfrom typing import List, Union, Tuple\nimport random\n\nfrom macrosynergy.management.simulate_quantamental_data import make_qdf\n\n\ndef reduce_df(df: pd.DataFrame, xcats: List[str] = None, cids: List[str] = None,\n start: str = None, end: str = None, blacklist: dict = None,\n out_all: bool = False, intersect: bool = False):\n \"\"\"\n Filter dataframe by xcats and cids and notify about missing xcats and cids.\n\n :param <pd.Dataframe> df: standardized dataframe with the following necessary columns:\n 'cid', 'xcats', 'real_date'.\n :param <List[str]> xcats: extended categories to be checked on. Default is all in the\n dataframe.\n :param <List[str]> cids: cross sections to be checked on. Default is all in the\n dataframe.\n :param <str> start: string representing earliest date. Default is None.\n :param <str> end: string representing the latest date. Default is None.\n :param <dict> blacklist: cross sections with date ranges that should be excluded from\n the data frame. If one cross section has several blacklist periods append numbers\n to the cross-section code.\n :param <bool> out_all: if True the function returns reduced dataframe and selected/\n available xcats and cids.\n Default is False, i.e. only the dataframe is returned\n :param <bool> intersect: if True only retains cids that are available for all xcats.\n Default is False.\n\n :return <pd.Dataframe>: reduced dataframe that also removes duplicates or\n (for out_all True) dataframe and available and selected xcats and cids.\n \"\"\"\n\n dfx = df[df['real_date'] >= pd.to_datetime(start)] if start is not None else df\n dfx = dfx[dfx['real_date'] <= pd.to_datetime(end)] if end is not None else dfx\n\n if blacklist is not None:\n for key, value in blacklist.items():\n filt1 = dfx['cid'] == key[:3]\n filt2 = dfx['real_date'] >= pd.to_datetime(value[0])\n filt3 = dfx['real_date'] <= pd.to_datetime(value[1])\n dfx = dfx[~(filt1 & filt2 & filt3)]\n\n xcats_in_df = dfx['xcat'].unique()\n if xcats is None:\n xcats = sorted(xcats_in_df)\n else:\n missing = sorted(set(xcats) - set(xcats_in_df))\n if len(missing) > 0:\n print(f\"Missing categories: {missing}.\")\n xcats.remove(missing)\n\n dfx = dfx[dfx['xcat'].isin(xcats)]\n\n if intersect:\n df_uns = dfx.groupby('xcat')['cid'].unique()\n cids_in_df = list(df_uns[0])\n for i in range(1, len(df_uns)):\n cids_in_df = [cid for cid in df_uns[i] if cid in cids_in_df]\n else:\n cids_in_df = dfx['cid'].unique()\n\n if cids is None:\n cids = sorted(cids_in_df)\n else:\n if not isinstance(cids, list):\n cids = [cids]\n missing = sorted(set(cids) - set(cids_in_df))\n if len(missing) > 0:\n print(f'Missing cross sections: {missing}')\n cids = sorted(list(set(cids).intersection(set(cids_in_df))))\n dfx = dfx[dfx['cid'].isin(cids)]\n\n if out_all:\n return dfx.drop_duplicates(), xcats, cids\n else:\n return dfx.drop_duplicates()\n\n\ndef reduce_df_by_ticker(df: pd.DataFrame, ticks: List[str] = None, start: str = None,\n end: str = None, blacklist: dict = None):\n \"\"\"\n Filter dataframe by xcats and cids and notify about missing xcats and cids\n\n :param <pd.Dataframe> df: standardized dataframe with the following columns:\n 'cid', 'xcats', 'real_date'.\n :param <List[str]> ticks: tickers (cross sections + base categories)\n :param <str> start: string in ISO 8601 representing earliest date. Default is None.\n :param <str> end: string ISO 8601 representing the latest date. 
Default is None.\n :param <dict> blacklist: cross sections with date ranges that should be excluded from\n the dataframe. If one cross section has several blacklist\n periods append numbers to the cross section code.\n\n :return <pd.Dataframe>: reduced dataframe that also removes duplicates\n \"\"\"\n\n dfx = df[df[\"real_date\"] >= pd.to_datetime(start)] if start is not None else df\n dfx = dfx[dfx[\"real_date\"] <= pd.to_datetime(end)] if end is not None else dfx\n\n if blacklist is not None: # blacklisting by cross-section\n for key, value in blacklist.items():\n filt1 = dfx[\"cid\"] == key[:3]\n filt2 = dfx[\"real_date\"] >= pd.to_datetime(value[0])\n filt3 = dfx[\"real_date\"] <= pd.to_datetime(value[1])\n dfx = dfx[~(filt1 & filt2 & filt3)]\n\n dfx[\"ticker\"] = df[\"cid\"] + '_' + df[\"xcat\"]\n ticks_in_df = dfx[\"ticker\"].unique()\n if ticks is None:\n ticks = sorted(ticks_in_df)\n else:\n missing = sorted(set(ticks) - set(ticks_in_df))\n if len(missing) > 0:\n print(f'Missing tickers: {missing}')\n ticks.remove(missing)\n\n dfx = dfx[dfx[\"ticker\"].isin(ticks)]\n\n return dfx.drop_duplicates()\n\ndef categories_df(df: pd.DataFrame, xcats: List[str], cids: List[str] = None,\n val: str = 'value', start: str = None, end: str = None,\n blacklist: dict = None, years: int = None, freq: str = 'M',\n lag: int = 0, fwin: int = 1, xcat_aggs: List[str] = ('mean', 'mean')):\n\n \"\"\"Create custom two-categories dataframe with appropriate frequency and lags\n suitable for analysis.\n\n :param <pd.Dataframe> df: standardized dataframe with the following necessary columns:\n 'cid', 'xcats', 'real_date' and at least one column with values of interest.\n :param <List[str]> xcats: exactly two extended categories whose relationship is to be\n analyzed.\n :param <List[str]> cids: cross sections to be included. Default is all in the\n dataframe.\n :param <str> start: earliest date in ISO 8601 format. Default is None, i.e. earliest\n date in data frame is used.\n :param <str> end: latest date in ISO 8601 format. Default is None, i.e. latest date\n in data frame is used.\n :param <dict> blacklist: cross sections with date ranges that should be excluded from\n the data frame. If one cross section has several blacklist periods append numbers\n to the cross section code.\n :param <int> years: Number of years over which data are aggregated. Supersedes freq\n and does not allow lags, Default is None, i.e. no multi-year aggregation.\n :param <str> val: name of column that contains the values of interest. Default is\n 'value'.\n :param <str> freq: letter denoting frequency at which the series are to be sampled.\n This must be one of 'D', 'W', 'M', 'Q', 'A'. Default is 'M'.\n :param <int> lag: Lag (delay of arrival) of second category in periods as set by\n freq. Default is 0.\n Note: for analyses with dependent and explanatory categories, the second takes\n the role of the explanatory.\n :param <int> fwin: Forward moving average window of first category. Default is 1,\n i.e no average.\n Note: This parameter is used mainly for target returns as dependent variables.\n :param <List[str]> xcat_aggs: Exactly two aggregation methods. 
Default is 'mean' for\n both.\n\n :return <pd.Dataframe>: custom data frame with two category columns\n \"\"\"\n\n assert freq in ['D', 'W', 'M', 'Q', 'A']\n assert not (years is not None) & (lag != 0), 'Lags cannot be applied to year groups.'\n if years is not None:\n assert isinstance(start, str), 'Year aggregation requires a start date.'\n\n df, xcats, cids = reduce_df(df, xcats, cids, start, end, blacklist, out_all=True)\n\n col_names = ['cid', 'xcat', 'real_date', val]\n dfc = pd.DataFrame(columns=col_names)\n\n if years is None:\n for i in range(2):\n dfw = df[df['xcat'] == xcats[i]].pivot(index='real_date', columns='cid',\n values=val)\n dfw = dfw.resample(freq).agg(xcat_aggs[i])\n if (i == 0) & (fwin > 1):\n dfw = dfw.rolling(window=fwin).mean().shift(1 - fwin)\n if (i == 1) & (lag > 0):\n dfw = dfw.shift(lag)\n dfx = pd.melt(dfw.reset_index(), id_vars=['real_date'],\n value_vars=cids, value_name=val)\n dfx['xcat'] = xcats[i]\n dfc = dfc.append(dfx[col_names])\n else:\n s_year = pd.to_datetime(start).year\n e_year = df['real_date'].max().year + 1\n\n grouping = int((e_year - s_year) / years)\n remainder = (e_year - s_year) % years\n\n year_groups = {}\n for group in range(grouping):\n value = [i for i in range(s_year, s_year + years)]\n key = f\"{s_year} - {s_year + (years - 1)}\"\n year_groups[key] = value\n\n s_year += years\n\n v = [i for i in range(s_year, s_year + (remainder + 1))]\n year_groups[f\"{s_year} - now\"] = v\n list_y_groups = list(year_groups.keys())\n\n translate_ = lambda year: list_y_groups[int((year % 2000) / years)]\n df['custom_date'] = df['real_date'].dt.year.apply(translate_)\n for i in range(2):\n dfx = df[df['xcat'] == xcats[i]]\n dfx = dfx.groupby(['xcat', 'cid',\n 'custom_date']).agg(xcat_aggs[i]).reset_index()\n dfx = dfx.rename(columns={\"custom_date\": \"real_date\"})\n dfc = dfc.append(dfx[col_names])\n\n return dfc.pivot(index=('cid', 'real_date'), columns='xcat',\n values=val).dropna()[xcats]\n\n\nif __name__ == \"__main__\":\n\n cids = ['NZD', 'AUD', 'GBP', 'CAD']\n xcats = ['XR', 'CRY', 'GROWTH', 'INFL']\n df_cids = pd.DataFrame(index=cids, columns=['earliest', 'latest', 'mean_add',\n 'sd_mult'])\n df_cids.loc['AUD'] = ['2000-01-01', '2020-12-31', 0.1, 1]\n df_cids.loc['CAD'] = ['2001-01-01', '2020-11-30', 0, 1]\n df_cids.loc['GBP'] = ['2002-01-01', '2020-11-30', 0, 2]\n df_cids.loc['NZD'] = ['2002-01-01', '2020-09-30', -0.1, 2]\n\n df_xcats = pd.DataFrame(index=xcats, columns=['earliest', 'latest', 'mean_add',\n 'sd_mult', 'ar_coef', 'back_coef'])\n df_xcats.loc['XR'] = ['2000-01-01', '2020-12-31', 0.1, 1, 0, 0.3]\n df_xcats.loc['CRY'] = ['2000-01-01', '2020-10-30', 1, 2, 0.95, 1]\n df_xcats.loc['GROWTH'] = ['2001-01-01', '2020-10-30', 1, 2, 0.9, 1]\n df_xcats.loc['INFL'] = ['2001-01-01', '2020-10-30', 1, 2, 0.8, 0.5]\n\n black = {'AUD': ['2000-01-01', '2003-12-31'], 'GBP': ['2018-01-01', '2100-01-01']}\n\n random.seed(2)\n dfd = make_qdf(df_cids, df_xcats, back_ar=0.75)\n\n dfd_x1 = reduce_df(dfd, xcats=xcats[:-1], cids=cids[0],\n start='2012-01-01', end='2018-01-31')\n print(dfd_x1['xcat'].unique())\n\n dfd_x2 = reduce_df(dfd, xcats=xcats, cids=cids, start='2012-01-01', end='2018-01-31')\n dfd_x3 = reduce_df(dfd, xcats=xcats, cids=cids, blacklist=black)\n\n tickers = [cid + \"_XR\" for cid in cids]\n dfd_xt = reduce_df_by_ticker(dfd, ticks=tickers, blacklist=black)\n\n # Testing categories_df().\n dfc1 = categories_df(dfd, xcats=['GROWTH', 'CRY'], cids=cids, freq='M', lag=0,\n xcat_aggs=['mean', 'mean'], start='2000-01-01', 
blacklist=black)\n\n dfc2 = categories_df(dfd, xcats=['GROWTH', 'CRY'], cids=cids, freq='M', lag=0,\n fwin=3, xcat_aggs=['mean', 'mean'],\n start='2000-01-01', blacklist=black)\n\n dfc3 = categories_df(dfd, xcats=['GROWTH', 'CRY'], cids=cids, freq='M', lag=0,\n xcat_aggs=['mean', 'mean'], start='2000-01-01', blacklist=black,\n years=10)\n\n # Testing reduce_df()\n filt1 = ~((dfd['cid'] == 'AUD') & (dfd['xcat'] == 'XR'))\n filt2 = ~((dfd['cid'] == 'NZD') & (dfd['xcat'] == 'INFL'))\n dfdx = dfd[filt1 & filt2] # simulate missing cross sections\n dfd_x1, xctx, cidx = reduce_df(dfdx, xcats=['XR', 'CRY', 'INFL'], cids=cids,\n intersect=True, out_all=True)"
] |
[
[
"matplotlib.pyplot.show",
"numpy.ones_like",
"matplotlib.pyplot.subplots",
"pandas.DataFrame"
],
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
BrefCool/SimpleImgClassifier
|
[
"f8839aa9d52bf0a4dcfe61e824955ed1e2d6de9f"
] |
[
"model.py"
] |
[
"import tensorflow as tf\r\nimport tensorflow as tf\r\nimport tensorflow.contrib as tfcontrib\r\nfrom tensorflow.python import keras\r\nfrom tensorflow.python.keras import layers\r\nfrom tensorflow.python.keras import losses\r\nfrom tensorflow.python.keras import models\r\nfrom tensorflow.python.keras import backend as K\r\n\r\n# -------------------------------Unet Model( image segmentation )---------------------------------------------\r\nclass UnetModelGenerator(object):\r\n\r\n @staticmethod\r\n def dice_coeff(y_true, y_pred):\r\n smooth = 1.\r\n # Flatten\r\n y_true_f = tf.reshape(y_true, [-1])\r\n y_pred_f = tf.reshape(y_pred, [-1])\r\n intersection = tf.reduce_sum(y_true_f * y_pred_f)\r\n score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)\r\n return score\r\n\r\n @staticmethod\r\n def bce_dice_loss(y_true, y_pred):\r\n loss = losses.binary_crossentropy(y_true, y_pred) + UnetModelGenerator.dice_loss(y_true, y_pred)\r\n return loss\r\n\r\n @staticmethod\r\n def dice_loss(y_true, y_pred):\r\n loss = 1 - UnetModelGenerator.dice_coeff(y_true, y_pred)\r\n return loss\r\n\r\n @staticmethod\r\n def conv_block(input_tensor, num_filters):\r\n encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)\r\n encoder = layers.BatchNormalization()(encoder)\r\n encoder = layers.Activation('relu')(encoder)\r\n encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)\r\n encoder = layers.BatchNormalization()(encoder)\r\n encoder = layers.Activation('relu')(encoder)\r\n return encoder\r\n\r\n @staticmethod\r\n def encoder_block(input_tensor, num_filters):\r\n encoder = UnetModelGenerator.conv_block(input_tensor, num_filters)\r\n encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)\r\n\r\n return encoder_pool, encoder\r\n\r\n @staticmethod\r\n def decoder_block(input_tensor, concat_tensor, num_filters):\r\n decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)\r\n decoder = layers.concatenate([concat_tensor, decoder], axis=-1)\r\n decoder = layers.BatchNormalization()(decoder)\r\n decoder = layers.Activation('relu')(decoder)\r\n decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)\r\n decoder = layers.BatchNormalization()(decoder)\r\n decoder = layers.Activation('relu')(decoder)\r\n decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)\r\n decoder = layers.BatchNormalization()(decoder)\r\n decoder = layers.Activation('relu')(decoder)\r\n return decoder\r\n\r\n @staticmethod\r\n def generate_outputs(inputs):\r\n encoder0_pool, encoder0 = UnetModelGenerator.encoder_block(inputs, 32) # 128\r\n encoder1_pool, encoder1 = UnetModelGenerator.encoder_block(encoder0_pool, 64) # 64\r\n encoder2_pool, encoder2 = UnetModelGenerator.encoder_block(encoder1_pool, 128) # 32\r\n encoder3_pool, encoder3 = UnetModelGenerator.encoder_block(encoder2_pool, 256) # 16\r\n encoder4_pool, encoder4 = UnetModelGenerator.encoder_block(encoder3_pool, 512) # 8\r\n center = UnetModelGenerator.conv_block(encoder4_pool, 1024) # center\r\n decoder4 = UnetModelGenerator.decoder_block(center, encoder4, 512) # 16\r\n decoder3 = UnetModelGenerator.decoder_block(decoder4, encoder3, 256) # 32\r\n decoder2 = UnetModelGenerator.decoder_block(decoder3, encoder2, 128) # 64\r\n decoder1 = UnetModelGenerator.decoder_block(decoder2, encoder1, 64) # 128\r\n decoder0 = UnetModelGenerator.decoder_block(decoder1, encoder0, 32) # 256\r\n outputs = layers.Conv2D(1, (1, 1), 
activation='sigmoid')(decoder0)\r\n return outputs\r\n\r\n def __init__(self, input_img_shape):\r\n self.inputs = layers.Input(shape=input_img_shape)\r\n self.outputs = UnetModelGenerator.generate_outputs(self.inputs)\r\n self.keras_model = models.Model(inputs=[self.inputs], outputs=[self.outputs])\r\n\r\n def summary(self):\r\n self.keras_model.summary()\r\n\r\n def get_model(self):\r\n return self.keras_model\r\n# -------------------------------------------------------------------------------------------------------------\r\n# --------------------- self-designed sequential nn model( image classification )-----------------------------\r\nclass SeqNnModelGenerator(object):\r\n\r\n @staticmethod\r\n def loss(y_true, y_pred):\r\n return losses.categorical_crossentropy(y_true, y_pred)\r\n\r\n def __init__(self, input_img_shape, num_classes):\r\n self.num_classes = num_classes\r\n self.inputs = layers.Input(shape=input_img_shape)\r\n self.outputs = self.generate_outputs()\r\n self.keras_model = models.Model(inputs=[self.inputs], outputs=[self.outputs])\r\n\r\n def generate_outputs(self):\r\n flatterned = layers.Flatten()(self.inputs)\r\n d1 = layers.Dense(64, activation='relu')(flatterned)\r\n d1_dropout = layers.Dropout(rate=0.25)(d1)\r\n d2 = layers.Dense(128, activation='relu')(d1_dropout)\r\n d2_dropout = layers.Dropout(rate=0.5)(d2)\r\n outputs = layers.Dense(self.num_classes, activation='softmax')(d2_dropout)\r\n return outputs\r\n\r\n def summary(self):\r\n self.keras_model.summary()\r\n\r\n def get_model(self):\r\n return self.keras_model\r\n# -------------------------------------------------------------------------------------------------------\r\n# ---------------------------------- CNN model( image classification ) ----------------------------------\r\nclass CnnModel(object):\r\n\r\n def __init__(self, input_img_shape, num_classes):\r\n self.num_classes = num_classes\r\n self.inputs = layers.Input(shape=input_img_shape)\r\n self.outputs = self.generate_outputs()\r\n self.keras_model = models.Model(inputs=[self.inputs], outputs=[self.outputs])\r\n\r\n def generate_outputs(self):\r\n conv_l1 = layers.Conv2D(32, (3, 3), padding='same', activation='relu')(self.inputs)\r\n conv_l1 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l1)\r\n conv_l2 = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(conv_l1)\r\n conv_l2 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l2)\r\n conv_l3 = layers.Conv2D(128, (3, 3), padding='same', activation='relu')(conv_l2)\r\n conv_l3 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l3)\r\n l3_dropout = layers.Dropout(rate=0.5)(conv_l3)\r\n flatterned = layers.Flatten()(l3_dropout)\r\n l4 = layers.Dense(1024, activation='relu')(flatterned)\r\n l4_droupout = layers.Dropout(rate=0.5)(l4)\r\n outputs = layers.Dense(self.num_classes, activation='softmax')(l4_droupout)\r\n return outputs\r\n\r\n def summary(self):\r\n self.keras_model.summary()\r\n\r\n def get_model(self):\r\n return self.keras_model\r\n# ----------------------------------------------------------------------------------------------------------\r\n# ---------------------------------------Inception V3 Model-------------------------------------------------\r\n# class InceptionModel(object):\r\n#\r\n# def __init__(self, input_img_shape, num_classes):\r\n# self.num_classes = num_classes\r\n# self.inputs = layers.Input(shape=input_img_shape)\r\n# self.outputs = self.generate_outputs()\r\n# self.keras_model = models.\r\n#\r\n# def generate_outputs(self):\r\n# conv_l1 = layers.Conv2D(32, 
(3, 3), padding='same', activation='relu')(self.inputs)\r\n# conv_l1 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l1)\r\n# conv_l2 = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(conv_l1)\r\n# conv_l2 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l2)\r\n# conv_l3 = layers.Conv2D(128, (3, 3), padding='same', activation='relu')(conv_l2)\r\n# conv_l3 = layers.MaxPooling2D((2, 2), strides=(2, 2))(conv_l3)\r\n# l3_dropout = layers.Dropout(rate=0.5)(conv_l3)\r\n# flatterned = layers.Flatten()(l3_dropout)\r\n# l4 = layers.Dense(1024, activation='relu')(flatterned)\r\n# l4_droupout = layers.Dropout(rate=0.5)(l4)\r\n# outputs = layers.Dense(self.num_classes, activation='softmax')(l4_droupout)\r\n# return outputs\r\n#\r\n# def summary(self):\r\n# self.keras_model.summary()\r\n#\r\n# def get_model(self):\r\n# return self.keras_model"
] |
[
[
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.keras.layers.Activation",
"tensorflow.python.keras.layers.Flatten",
"tensorflow.python.keras.layers.MaxPooling2D",
"tensorflow.python.keras.layers.concatenate",
"tensorflow.python.keras.layers.Dense",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.python.keras.layers.Conv2DTranspose",
"tensorflow.python.keras.losses.categorical_crossentropy",
"tensorflow.python.keras.models.Model",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.keras.layers.Conv2D",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.losses.binary_crossentropy"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.4"
]
}
] |
CognitiveDave/e2eSensorNetwork
|
[
"81fc5405563809def7b7fdd6fbf8b6276185b634"
] |
[
"Sensors_MongoAgg.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom pymongo import MongoClient\nimport os\nimport json\nfrom bson.json_util import dumps\nimport datetime\n\n\ndbuser = \"\"\n_id = \"\"\ndbpassword = \"\"\n\nuri = f\"mongodb://{dbuser}:{dbpassword}@ds043972.mlab.com:43972/{_id}\"\n \nstable = {\n 'Oxidising Gas': 400,\n 'Reducing Gas': 3.0,\n 'nh3 Gas': 3.0\n}\n \n\n\n# In[2]:\n\n\nclient = MongoClient(uri, retryWrites=False)\ndb = client[_id]\n\n\n# In[3]:\n\n\ncursor = db.readings.find({})\ndocs = []\nfor document in cursor:\n docs.append(document)\n\n\n# In[ ]:\n\n\nimport pandas as pd\ndf = pd.DataFrame(docs)\ndf['Volts'] = df['CPU Volts'].astype(float)\nformat = '%d/%m/%Y %H:%M'\ndf['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'], format=format)\ndf = df.set_index(pd.DatetimeIndex(df['Datetime']))\n\n\n# In[ ]:\n\n\nx = df['2020-06-29 18:00:00' : '2020-06-30 23:59:59' ] \n\n\n# In[ ]:\n\n\nx[x['Device']=='Enviro']['Reducing Gas'].plot()\n\n\n# In[71]:\n\n\nfrom datetime import date\ntoday = date.today()\n\n\ndef day_summary(present=True, target=[None]): \n if present:\n today = date.today()\n d1 = today.strftime(\"%d/%m/%Y\") \n match = {\"$match\" : {\"Date\" : d1}} \n else:\n if (len(target) == 1):\n d1 = target[0]\n match = {\"$match\" : {\"Date\" : d1}} \n \n else:\n d1 = target[0]\n d2 = target[1]\n match = {\"$match\" : {\"$and\" : [{\"Date\" : { \"$gte\":d1, \"$lte\": d2}}]}} \n \n group = {\n \"$group\" : {'_id': {\n \"Host\": \"$Host\",\n \"Device\": \"$Device\",\n \"Date\": \"$Date\"\n }, \n 'Avg Temp': {'$avg':'$CPU Core'},\n 'Avg Freq': {'$avg':'$CPU Clock'},\n 'Obs': {'$sum': 1},\n 'Humidity' : {'$avg': '$Room Humidity'},\n 'Pressure' : {'$avg': '$Room Pressure'},\n 'Ox Gas' : {'$avg': '$Oxidising Gas'},\n 'Red Gas' : {'$avg': '$Reducing Gas'},\n 'nh3 Gas' : {'$avg' : '$nh3 Gas'}\n } \n } \n \n proj = {\n \"$project\": {\n \"Date\" : { \n \"$dateFromString\" : \n {\n \"dateString\": '$Date',\n \"timezone\": 'Europe/Dublin'\n }\n },\n \"Host\":1,\n \"Device\":1,\n \"CPU Core\":1,\n \"Room Pressure\":1,\n \"Room Pressure\":1,\n \"Oxidising Gas\":1,\n \"Reducing Gas\":1,\n \"nh3 Gas\":1,\n \"CPU Clock\":1\n }\n } \n \n pipeline = [\n match,\n group\n ]\n\n agg_sum = db.readings.aggregate(pipeline) \n docs = []\n for b in agg_sum:\n rec = {}\n rec['Host'] = b['_id']['Host']\n rec['Device'] = b['_id']['Device']\n rec['Date'] = b['_id']['Date']\n del b['_id'] \n for key in b.keys():\n rec[key] = b[key] \n docs.append(rec) \n \n return docs\n \n \n\n\n# In[72]:\n\n\nd = day_summary(False, ['26/06/2020', '30/06/2020'])\n\n\n# In[73]:\n\n\nd\n\n\n# In[75]:\n\n\nimport pandas as pd\nff = pd.DataFrame(d)\n\n\n# In[76]:\n\n\nff\n\n\n# In[77]:\n\n\nff['Date'].unique()\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"pandas.DatetimeIndex",
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
trentford/iem
|
[
"7264d24f2d79a3cd69251a09758e6531233a732f",
"7264d24f2d79a3cd69251a09758e6531233a732f",
"7264d24f2d79a3cd69251a09758e6531233a732f",
"7264d24f2d79a3cd69251a09758e6531233a732f",
"7264d24f2d79a3cd69251a09758e6531233a732f",
"7264d24f2d79a3cd69251a09758e6531233a732f"
] |
[
"scripts/prism/ingest_prism.py",
"htdocs/frost/frost_ts.py",
"htdocs/plotting/auto/scripts/p13.py",
"htdocs/plotting/auto/scripts100/p193.py",
"scripts/iemre/stage4_12z_adjust.py",
"htdocs/plotting/auto/scripts/p74.py"
] |
[
"\"\"\"Ingest the PRISM data into a local yearly netCDF file\n\n1. Download from their FTP site\n2. Unzip into /mesonet/tmp\n3. Open the actual BIL file with rasterio\n4. Copy data into netcdf file\n5. Cleanup\n\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport datetime\nimport os\nimport subprocess\n\nimport rasterio\nimport numpy as np\nfrom pyiem.iemre import daily_offset\nfrom pyiem.util import ncopen\n\n\ndef do_process(valid, fn):\n \"\"\"Process this file, please \"\"\"\n # shape of data is (1, 621, 1405)\n data = rasterio.open(fn).read()\n varname = fn.split(\"_\")[1]\n nc = ncopen(\"/mesonet/data/prism/%s_daily.nc\" % (valid.year,), 'a')\n idx = daily_offset(valid)\n nc.variables[varname][idx] = np.flipud(data[0])\n nc.close()\n\n\ndef do_download(valid):\n \"\"\"Make the download happen!\"\"\"\n files = []\n for varname in ['ppt', 'tmax', 'tmin']:\n d = \"2\" if varname == 'ppt' else '1'\n for classify in ['stable', 'provisional', 'early']:\n localfn = valid.strftime((\"PRISM_\" + varname + \"_\" + classify +\n \"_4kmD\" + d +\n \"_%Y%m%d_bil\"))\n subprocess.call(\"rm -f %s*\" % (localfn,), shell=True)\n\n uri = valid.strftime((\"ftp://prism.nacse.org/daily/\" + varname +\n \"/%Y/\" + localfn + \".zip\"))\n # prevent zero byte files\n subprocess.call((\"wget -q --timeout=120 -O %s.zip %s || \"\n \" rm -f %s.zip\"\n ) % (localfn, uri, localfn), shell=True)\n if os.path.isfile(localfn + \".zip\"):\n break\n\n subprocess.call(\"unzip -q %s.zip\" % (localfn, ), shell=True)\n files.append(localfn + \".bil\")\n\n return files\n\n\ndef do_cleanup(valid):\n \"\"\"do cleanup\"\"\"\n subprocess.call(\"rm -f PRISM*%s*\" % (valid.strftime(\"%Y%m%d\"), ),\n shell=True)\n\n\ndef main(argv):\n \"\"\"Do Something\"\"\"\n os.chdir(\"/mesonet/tmp\")\n valid = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))\n files = do_download(valid)\n for fn in files:\n do_process(valid, fn)\n\n do_cleanup(valid)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n",
"#!/usr/bin/env python\n\"\"\"Generate some line charts from ISU Frost Model Output\"\"\"\nimport sys\nimport os\nimport cgi\nimport datetime\n\nimport numpy as np\nimport pytz\nimport matplotlib.dates as mdates\nfrom pyiem.util import ncopen, ssw\nfrom pyiem.plot.use_agg import plt\nfrom pyiem.datatypes import temperature\n\n\ndef get_latest_time(model):\n ''' Figure out the latest model runtime '''\n utc = datetime.datetime.utcnow()\n utc = utc.replace(tzinfo=pytz.UTC)\n utc = utc.replace(hour=12, minute=0, second=0, microsecond=0)\n limit = 24\n while not os.path.isfile(\n utc.strftime((\"/mesonet/share/frost/\" +\n model + \"/%Y%m%d%H%M_iaoutput.nc\"))):\n utc -= datetime.timedelta(hours=12)\n limit -= 1\n if limit < 0:\n return None\n return utc\n\n\ndef get_times(nc):\n ''' Return array of datetimes for the time array '''\n tm = nc.variables['time']\n sts = datetime.datetime.strptime(tm.units.replace('minutes since ', ''),\n '%Y-%m-%d %H:%M:%S')\n sts = sts.replace(tzinfo=pytz.utc)\n res = []\n for t in tm[:]:\n res.append(sts + datetime.timedelta(minutes=float(t)))\n return res\n\n\ndef get_ij(lon, lat, nc):\n ''' Figure out the closest grid cell '''\n dist = ((nc.variables['lon'][:] - lon)**2 +\n (nc.variables['lat'][:] - lat)**2)**.5\n return np.unravel_index(np.argmin(dist), dist.shape)\n\n\ndef add_labels(fig):\n \"\"\"Create a legend for the condition variable\"\"\"\n fig.text(0.85, 0.8, \"Frost\", color='red')\n fig.text(0.85, 0.75, \"Ice/Snow\", color='orange')\n fig.text(0.85, 0.7, \"Wet\", color='green')\n fig.text(0.85, 0.65, \"Dew\", color=\"brown\")\n fig.text(0.85, 0.6, \"Frz Rain\", color=\"purple\")\n\n\ndef get_icond_color(model, val):\n \"\"\" Get the color for this Model and icond\n\n METRO: 1-8 dry, wet, ice/snow, mix, dew, melting snow, blk ice, icing rain\n BRIDGET: 0-5 dry, frosty, icy/snowy, melting, freezing, wet\n \"\"\"\n if val is None or val < 0:\n return 'none'\n if model == 'metro':\n colors = ['white', 'white', 'green', 'orange', 'orange', 'brown',\n 'blue', 'orange', 'purple']\n else:\n colors = ['white', 'tan', 'orange', 'blue', 'purple', 'green']\n if val > (len(colors) - 1):\n return 'none'\n return colors[val]\n\n\ndef get_ifrost_color(val):\n \"\"\"Which color to use\"\"\"\n if val is None or val == -1:\n return 'none'\n colors = ['#EEEEEE', 'r']\n try:\n return colors[val]\n except Exception as _exp:\n return 'none'\n\n\ndef process(model, lon, lat):\n \"\"\" Generate a plot for this given combination \"\"\"\n fig = plt.figure()\n ax = fig.add_axes([0.1, 0.1, 0.7, 0.8])\n modelts = get_latest_time(model)\n if modelts is None:\n ax.text(0.5, 0.5, \"No Data Found to Plot!\", ha='center')\n ssw(\"Content-Type: image/png\\n\\n\")\n fig.savefig(getattr(sys.stdout, 'buffer', sys.stdout), format=\"png\")\n return\n nc = ncopen(\n modelts.strftime((\"/mesonet/share/frost/\" +\n model + \"/%Y%m%d%H%M_iaoutput.nc\")))\n times = get_times(nc)\n i, j = get_ij(lon, lat, nc)\n\n ax.plot(times,\n temperature(nc.variables['bdeckt'][:, i, j], 'K').value('F'),\n color='k',\n label='Bridge Deck Temp' if model == 'bridget' else 'Pavement')\n ax.plot(times, temperature(nc.variables['tmpk'][:, i, j], 'K').value(\"F\"),\n color='r', label='Air Temp')\n ax.plot(times, temperature(nc.variables['dwpk'][:, i, j], 'K').value(\"F\"),\n color='g', label='Dew Point')\n # ax.set_ylim(-30,150)\n ax.set_title((\"ISUMM5 %s Timeseries\\n\"\n \"i: %s j:%s lon: %.2f lat: %.2f Model Run: %s\"\n ) % (model, i, j, nc.variables['lon'][i, j],\n nc.variables['lat'][i, j],\n 
modelts.astimezone(pytz.timezone(\"America/Chicago\")\n ).strftime(\"%-d %b %Y %-I:%M %p\")))\n\n ax.xaxis.set_major_locator(\n mdates.DayLocator(interval=1, tz=pytz.timezone(\"America/Chicago\")))\n ax.xaxis.set_major_formatter(\n mdates.DateFormatter('%d %b\\n%Y', tz=pytz.timezone(\"America/Chicago\")))\n ax.axhline(32, linestyle='-.')\n ax.grid(True)\n ax.set_ylabel(r\"Temperature $^\\circ$F\")\n\n (ymin, ymax) = ax.get_ylim()\n\n for i2, ifrost in enumerate(nc.variables['ifrost'][:-1, i, j]):\n ax.barh(ymax-1, 1.0/24.0/4.0, left=times[i2],\n fc=get_ifrost_color(ifrost), ec='none')\n for i2, icond in enumerate(nc.variables['icond'][:-1, i, j]):\n ax.barh(ymax-2, 1.0/24.0/4.0, left=times[i2],\n fc=get_icond_color(model, icond), ec='none')\n\n # Shrink current axis's height by 10% on the bottom\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n ax.legend(loc='upper center',\n bbox_to_anchor=(0.5, -0.12), fancybox=True, shadow=True, ncol=3)\n add_labels(fig)\n\n ssw(\"Content-Type: image/png\\n\\n\")\n fig.savefig(getattr(sys.stdout, 'buffer', sys.stdout), format=\"png\")\n\n\ndef main():\n \"\"\" Go Main Go \"\"\"\n form = cgi.FieldStorage()\n if 'lon' in form and 'lat' in form:\n process(form.getfirst('model'), float(form.getfirst('lon')),\n float(form.getfirst('lat')))\n\n\nif __name__ == '__main__':\n # main\n main()\n",
"\"\"\"warmest 91 days\"\"\"\nimport datetime\n\nimport psycopg2.extras\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom pyiem.network import Table as NetworkTable\nfrom pyiem.plot.use_agg import plt\nfrom pyiem.util import get_autoplot_context, get_dbconn\n\n\nPDICT = {'end_summer': 'End of Summer', 'start_summer': 'Start of Summer'}\n\n\ndef get_description():\n \"\"\" Return a dict describing how to call this plotter \"\"\"\n desc = dict()\n desc['data'] = True\n desc['description'] = \"\"\"This chart presents the start or end date of the\n warmest 91 day period each year.\n \"\"\"\n desc['arguments'] = [\n dict(type='station', name='station', default='IA2203',\n label='Select Station:', network='IACLIMATE'),\n dict(type='select', name='which', default='end_summer',\n label='Which value to plot:', options=PDICT),\n ]\n return desc\n\n\ndef plotter(fdict):\n \"\"\" Go \"\"\"\n pgconn = get_dbconn('coop')\n cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n ctx = get_autoplot_context(fdict, get_description())\n which = ctx['which']\n station = ctx['station']\n network = \"%sCLIMATE\" % (station[:2],)\n nt = NetworkTable(network)\n\n table = \"alldata_%s\" % (station[:2],)\n\n cursor.execute(\"\"\"\n select year, extract(doy from day) as d from\n (select day, year, rank() OVER (PARTITION by year ORDER by avg DESC)\n from\n (select day, year, avg((high+low)/2.) OVER\n (ORDER by day ASC rows 91 preceding) from \"\"\" + table + \"\"\"\n where station = %s and day > '1893-01-01') as foo)\n as foo2 where rank = 1\n ORDER by year ASC\n \"\"\", (station, ))\n years = []\n maxsday = []\n today = datetime.date.today()\n delta = 0 if which == 'end_summer' else 91\n for row in cursor:\n if row['year'] == today.year and row['d'] < 270:\n continue\n maxsday.append(row['d'] - delta)\n years.append(row['year'])\n\n df = pd.DataFrame(dict(year=pd.Series(years),\n doy=pd.Series(maxsday)))\n maxsday = np.array(maxsday)\n\n (fig, ax) = plt.subplots(1, 1)\n ax.scatter(years, maxsday)\n ax.grid(True)\n ax.set_ylabel(\"%s Date\" % ('End' if delta == 0 else 'Start',))\n ax.set_title((\"%s [%s] %s\\n\"\n \"%s Date of Warmest (Avg Temp) 91 Day Period\"\n ) % (nt.sts[station]['name'], station, PDICT.get(which),\n 'End' if delta == 0 else 'Start'))\n\n yticks = []\n yticklabels = []\n for i in np.arange(min(maxsday)-5, max(maxsday)+5, 1):\n ts = datetime.datetime(2000, 1, 1) + datetime.timedelta(days=i)\n if ts.day in [1, 8, 15, 22, 29]:\n yticks.append(i)\n yticklabels.append(ts.strftime(\"%-d %b\"))\n ax.set_yticks(yticks)\n ax.set_yticklabels(yticklabels)\n\n h_slope, intercept, r_value, _, _ = stats.linregress(years, maxsday)\n ax.plot(years, h_slope * np.array(years) + intercept, lw=2, color='r')\n\n avgd = datetime.datetime(2000, 1, 1) + datetime.timedelta(\n days=int(np.average(maxsday)))\n ax.text(0.1, 0.03, \"Avg Date: %s, slope: %.2f days/century, R$^2$=%.2f\" % (\n avgd.strftime(\"%-d %b\"), h_slope * 100., r_value ** 2),\n transform=ax.transAxes, va='bottom')\n ax.set_xlim(min(years)-1, max(years)+1)\n ax.set_ylim(min(maxsday)-5, max(maxsday)+5)\n\n return fig, df\n\n\nif __name__ == '__main__':\n plotter(dict())\n",
"\"\"\"Generalized mapper of AZOS data\"\"\"\nimport datetime\nimport os\n\nimport numpy as np\nimport pygrib\nfrom pyiem.datatypes import distance\nfrom pyiem.plot.use_agg import plt\nfrom pyiem.plot import MapPlot\nfrom pyiem.util import get_autoplot_context, utc\n\nPDICT = {'120': 'Five Day',\n '168': 'Seven Day'}\nPDICT2 = {'0': '0z (7 PM CDT)',\n '12': '12z (7 AM CDT)'}\nPDICT3 = {'both': 'Plot both USDM + WPC Forecast',\n 'wpc': 'Plot just WPC Forecast'}\nPDICT4 = {'auto': 'Auto-scale',\n '10': '10 inch max',\n '7': '7 inch max',\n '3.5': '3.5 inch max',\n }\n\n\ndef get_description():\n \"\"\" Return a dict describing how to call this plotter \"\"\"\n desc = dict()\n desc['data'] = False\n desc['cache'] = 600\n desc['description'] = \"\"\"Generates a map of WPC Quantitative Precipitation\n Forecast (QPF) and most recent US Drought Monitor to the date choosen to\n plot the WPC forecast.\"\"\"\n utcnow = datetime.datetime.utcnow()\n desc['arguments'] = [\n dict(type='csector', name='csector', default='IA',\n label='Select state/sector to plot'),\n dict(type='date', name='date', default=utcnow.strftime(\"%Y/%m/%d\"),\n label='Select WPC Issuance Date:', min=\"2018/05/11\",\n max=utcnow.strftime(\"%Y/%m/%d\")),\n dict(type='select', name='z', default='0', options=PDICT2,\n label='Select WPC Issuance Time'),\n dict(type='select', name='f', default='120', options=PDICT,\n label='Select WPC Forecast Period:'),\n dict(type='select', name='opt', default='both', options=PDICT3,\n label='Plotting Options:'),\n dict(type='select', name='scale', default='auto', options=PDICT4,\n label='WPC Plotting Max Value for Color Ramp:'),\n ]\n return desc\n\n\ndef plotter(fdict):\n \"\"\" Go \"\"\"\n ctx = get_autoplot_context(fdict, get_description())\n csector = ctx['csector']\n date = ctx['date']\n z = ctx['z']\n period = ctx['f']\n scale = ctx['scale']\n valid = utc(date.year, date.month, date.day, int(z))\n gribfn = valid.strftime((\"/mesonet/ARCHIVE/data/%Y/%m/%d/model/wpc/\"\n \"p\" + period + \"m_%Y%m%d%Hf\" + period + \".grb\"))\n if not os.path.isfile(gribfn):\n raise ValueError(\"gribfn %s missing\" % (gribfn, ))\n\n grbs = pygrib.open(gribfn)\n grb = grbs[1]\n precip = distance(grb.values, 'MM').value('IN')\n lats, lons = grb.latlons()\n\n title = (\"Weather Prediction Center %s Quantitative \"\n \"Precipitation Forecast\") % (PDICT[period])\n subtitle = (\"%sWPC Forcast %s UTC to %s UTC\"\n ) % ((\"US Drought Monitor Overlaid, \"\n if ctx['opt'] == 'both' else ''),\n valid.strftime(\"%d %b %Y %H\"),\n (valid +\n datetime.timedelta(hours=int(period))\n ).strftime(\"%d %b %Y %H\"))\n mp = MapPlot(sector=('state' if len(csector) == 2 else csector),\n state=ctx['csector'],\n title=title,\n subtitle=subtitle,\n continentalcolor='white',\n titlefontsize=16)\n cmap = plt.get_cmap('gist_ncar')\n cmap.set_under('#EEEEEE')\n cmap.set_over('black')\n if scale == 'auto':\n levs = np.linspace(0, np.max(precip) * 1.1, 10)\n levs = [round(lev, 2) for lev in levs]\n levs[0] = 0.01\n elif scale == '10':\n levs = np.arange(0, 10.1, 1.)\n levs[0] = 0.01\n elif scale == '7':\n levs = np.arange(0, 7.1, 0.5)\n levs[0] = 0.01\n elif scale == '3.5':\n levs = np.arange(0, 3.6, 0.25)\n levs[0] = 0.01\n mp.pcolormesh(lons, lats, precip,\n levs, cmap=cmap, units='inch', clip_on=False)\n if ctx['opt'] == 'both':\n mp.draw_usdm(valid=valid, filled=False, hatched=True)\n\n return mp.fig\n\n\nif __name__ == '__main__':\n plotter(dict())\n",
"\"\"\"\nWe need to use the QC'd 24h 12z total to fix the 1h problems :(\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport datetime\n\nimport numpy as np\nimport pytz\nfrom scipy.interpolate import NearestNDInterpolator\nimport pygrib\nfrom pyiem import iemre\nfrom pyiem.util import ncopen\n\n\ndef merge(ts):\n \"\"\"\n Process an hour's worth of stage4 data into the hourly RE\n \"\"\"\n\n # Load up the 12z 24h total, this is what we base our deltas on\n fn = (\"/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.24h.grib\"\n ) % (ts.strftime(\"%Y/%m/%d\"), ts.strftime(\"%Y%m%d%H\"))\n\n grbs = pygrib.open(fn)\n grb = grbs[1]\n val = grb.values\n lats, lons = grb.latlons()\n # can save a bit of memory as we don't need all data\n stride = slice(None, None, 3)\n lats = np.ravel(lats[stride, stride])\n lons = np.ravel(lons[stride, stride])\n vals = np.ravel(val[stride, stride])\n # Clip large values\n vals = np.where(vals > 250., 0, vals)\n nn = NearestNDInterpolator((lons, lats), vals)\n xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)\n stage4 = nn(xi, yi)\n # Prevent Large numbers, negative numbers\n stage4 = np.where(stage4 < 10000., stage4, 0.)\n stage4 = np.where(stage4 < 0., 0., stage4)\n\n # Open up our RE file\n nc = ncopen(iemre.get_hourly_ncname(ts.year), 'a', timeout=300)\n ts0 = ts - datetime.timedelta(days=1)\n offset0 = iemre.hourly_offset(ts0)\n offset1 = iemre.hourly_offset(ts)\n # Running at 12 UTC 1 Jan\n if offset0 > offset1:\n offset0 = 0\n iemre_total = np.sum(nc.variables[\"p01m\"][offset0:offset1, :, :], axis=0)\n iemre_total = np.where(iemre_total > 0., iemre_total, 0.00024)\n iemre_total = np.where(iemre_total < 10000., iemre_total, 0.00024)\n multiplier = stage4 / iemre_total\n for offset in range(offset0, offset1):\n # Get the unmasked dadta\n data = nc.variables[\"p01m\"][offset, :, :]\n\n # Keep data within reason\n data = np.where(data > 10000., 0., data)\n # 0.00024 / 24\n adjust = np.where(data > 0, data, 0.00001) * multiplier\n adjust = np.where(adjust > 250.0, 0, adjust)\n nc.variables[\"p01m\"][offset, :, :] = np.where(adjust < 0.01, 0, adjust)\n ts = ts0 + datetime.timedelta(hours=offset-offset0)\n nc.sync()\n nc.close()\n\n\ndef main(argv):\n \"\"\"Go Main Go\"\"\"\n if len(argv) == 4:\n ts = datetime.datetime(int(argv[1]), int(argv[2]),\n int(argv[3]), 12)\n else:\n ts = datetime.datetime.utcnow()\n ts = ts - datetime.timedelta(days=1)\n ts = ts.replace(hour=12, minute=0, second=0, microsecond=0)\n ts = ts.replace(tzinfo=pytz.utc)\n merge(ts)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n",
"\"\"\"Days below threshold\"\"\"\n\nfrom scipy import stats\nfrom pandas.io.sql import read_sql\nfrom pyiem import network\nfrom pyiem.plot.use_agg import plt\nfrom pyiem.util import get_autoplot_context, get_dbconn\n\nPDICT = {'above': 'At or Above Threshold',\n 'below': 'Below Threshold'}\nPDICT2 = {'winter': 'Winter (Dec, Jan, Feb)',\n 'spring': 'Spring (Mar, Apr, May)',\n 'summer': 'Summer (Jun, Jul, Aug)',\n 'fall': 'Fall (Sep, Oct, Nov)',\n 'all': 'Entire Year'}\nPDICT3 = {'high': 'High Temperature',\n 'low': 'Low Temperature',\n 'precip': 'Precipitation'}\n\n\ndef get_description():\n \"\"\" Return a dict describing how to call this plotter \"\"\"\n desc = dict()\n desc['data'] = True\n desc['description'] = \"\"\"The number of days for a given season that are\n either above or below some temperature threshold.\"\"\"\n desc['arguments'] = [\n dict(type='station', name='station', default='IA2203',\n label='Select Station', network='IACLIMATE'),\n dict(type='select', name='season', default='winter',\n label='Select Season:', options=PDICT2),\n dict(type='select', name='dir', default='below',\n label='Threshold Direction:', options=PDICT),\n dict(type='select', name='var', default='low',\n label='Which Daily Variable:', options=PDICT3),\n dict(type='float', name='threshold', default=0,\n label='Temperature (F) or Precip (in) Threshold:'),\n dict(type=\"year\", name=\"year\", default=1893,\n label=\"Start Year of Plot\"),\n\n ]\n return desc\n\n\ndef plotter(fdict):\n \"\"\" Go \"\"\"\n pgconn = get_dbconn('coop')\n ctx = get_autoplot_context(fdict, get_description())\n station = ctx['station']\n season = ctx['season']\n direction = ctx['dir']\n varname = ctx['var']\n threshold = ctx['threshold']\n startyear = ctx['year']\n\n table = \"alldata_%s\" % (station[:2],)\n nt = network.Table(\"%sCLIMATE\" % (station[:2],))\n\n b = \"%s %s %s\" % (varname, \">=\" if direction == 'above' else '<',\n threshold)\n\n df = read_sql(\"\"\"\n SELECT extract(year from day + '%s month'::interval) as yr,\n sum(case when month in (12, 1, 2) and \"\"\" + b + \"\"\"\n then 1 else 0 end) as winter,\n sum(case when month in (3, 4, 5) and \"\"\" + b + \"\"\"\n then 1 else 0 end) as spring,\n sum(case when month in (6, 7, 8) and \"\"\" + b + \"\"\"\n then 1 else 0 end) as summer,\n sum(case when month in (9, 10, 11) and \"\"\" + b + \"\"\"\n then 1 else 0 end) as fall,\n sum(case when \"\"\" + b + \"\"\" then 1 else 0 end) as all\n from \"\"\" + table + \"\"\" WHERE station = %s and year >= %s\n GROUP by yr ORDER by yr ASC\n \"\"\", pgconn, params=(1 if season != 'all' else 0, station, startyear),\n index_col='yr')\n if df.empty:\n raise ValueError(\"No data found for query\")\n\n (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))\n avgv = df[season].mean()\n\n colorabove = 'r'\n colorbelow = 'b'\n if direction == 'below':\n colorabove = 'b'\n colorbelow = 'r'\n bars = ax.bar(df.index.values, df[season], fc=colorabove,\n ec=colorabove, align='center')\n for i, bar in enumerate(bars):\n if df[season].values[i] < avgv:\n bar.set_facecolor(colorbelow)\n bar.set_edgecolor(colorbelow)\n ax.axhline(avgv, lw=2, color='k', zorder=2, label='Average')\n h_slope, intercept, r_value, _, _ = stats.linregress(df.index.values,\n df[season])\n ax.plot(df.index.values, h_slope * df.index.values + intercept, '--',\n lw=2, color='k', label='Trend')\n ax.text(0.01, 0.99, \"Avg: %.1f, slope: %.2f days/century, R$^2$=%.2f\" % (\n avgv, h_slope * 100., r_value ** 2),\n transform=ax.transAxes, va='top', bbox=dict(color='white'))\n 
ax.set_xlabel(\"Year\")\n ax.set_xlim(df.index.min() - 1, df.index.max() + 1)\n ax.set_ylim(0, max([df[season].max() + df[season].max() / 7., 3]))\n ax.set_ylabel(\"Number of Days\")\n ax.grid(True)\n msg = (\"[%s] %s %.0f-%.0f Number of Days [%s] \"\n \"with %s %s %g%s\"\n ) % (station, nt.sts[station]['name'],\n df.index.min(), df.index.max(), PDICT2[season],\n PDICT3[varname], PDICT[direction],\n threshold, \"$^\\circ$F\" if varname != 'precip' else 'inch')\n tokens = msg.split()\n sz = int(len(tokens) / 2)\n ax.set_title(\" \".join(tokens[:sz]) + \"\\n\" + \" \".join(tokens[sz:]))\n ax.legend(ncol=1)\n\n return fig, df\n\n\nif __name__ == '__main__':\n plotter(dict())\n"
] |
[
[
"numpy.flipud"
],
[
"numpy.argmin"
],
[
"scipy.stats.linregress",
"numpy.array",
"pandas.Series",
"numpy.average"
],
[
"numpy.arange",
"numpy.max"
],
[
"scipy.interpolate.NearestNDInterpolator",
"numpy.ravel",
"numpy.meshgrid",
"numpy.where",
"numpy.sum"
],
[
"scipy.stats.linregress",
"pandas.io.sql.read_sql"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
Swall0w/clib
|
[
"46f659783d5a0a6ec5994c3c707c1cc8a7934385"
] |
[
"clib/converts/format_image_size.py"
] |
[
"import numpy as np\nfrom skimage.transform import resize as imresize\n\n\ndef batch(batch):\n format_size = batch[0][0].shape[1]\n format_batch = []\n\n for index, item in enumerate(batch):\n original_image = item[0]\n transpose_image = np.transpose(original_image, (1, 2, 0))\n resized_image = imresize(transpose_image,\n (format_size, format_size),\n mode='reflect')\n resized_image = resized_image.transpose(2, 0, 1).astype(np.float32)\n format_batch.append((resized_image, batch[index][1]))\n return format_batch\n\n\ndef resize_to_yolo(img):\n if img.ndim == 2:\n raise ValueError(\n \"image shoule be RGB format. But image is {}\".format(img.ndim))\n input_height, input_width, _ = img.shape\n min_pixel = 320\n max_pixel = 448\n\n min_edge = np.minimum(input_width, input_height)\n if min_edge < min_pixel:\n input_width *= min_pixel / min_edge\n input_height *= min_pixel / min_edge\n max_edge = np.maximum(input_width, input_height)\n if max_edge > max_pixel:\n input_width *= max_pixel / max_edge\n input_height *= max_pixel / max_edge\n\n input_width = int(input_width / 32 + round(input_width % 32 / 32)) * 32\n input_height = int(input_height / 32 + round(input_height % 32 / 32)) * 32\n img = imresize(img, (input_height, input_width), mode='reflect')\n\n return img\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"numpy.transpose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alejandro-ariza/scikit-fda
|
[
"a3626eeaac81aac14660233ff7554ae9a1550434",
"a3626eeaac81aac14660233ff7554ae9a1550434"
] |
[
"skfda/preprocessing/registration/_shift_registration.py",
"skfda/representation/basis/_bspline.py"
] |
[
"\"\"\"Class to apply Shift Registration to functional data\"\"\"\n\n# Pablo Marcos Manchón\n# [email protected]\n\nfrom scipy.integrate import simps\nfrom sklearn.utils.validation import check_is_fitted\n\nimport numpy as np\n\nfrom ... import FData, FDataGrid\nfrom ..._utils import constants, check_is_univariate\nfrom .base import RegistrationTransformer\n\n\nclass ShiftRegistration(RegistrationTransformer):\n r\"\"\"Register a functional dataset using shift alignment.\n\n Realizes the registration of a set of curves using a shift aligment\n [RaSi2005-7-2]_. Let :math:`\\{x_i(t)\\}_{i=1}^{N}` be a functional dataset,\n calculates :math:`\\delta_{i}` for each sample such that\n :math:`x_i(t + \\delta_{i})` minimizes the least squares criterion:\n\n .. math::\n \\text{REGSSE} = \\sum_{i=1}^{N} \\int_{\\mathcal{T}}\n [x_i(t + \\delta_i) - \\hat\\mu(t)]^2 ds\n\n Estimates each shift parameter :math:`\\delta_i` iteratively by\n using a modified Newton-Raphson algorithm, updating the template\n :math:`\\mu` in each iteration as is described in detail in\n [RaSi2005-7-9-1]_.\n\n Method only implemented for univariate functional data.\n\n Args:\n max_iter (int, optional): Maximun number of iterations.\n Defaults sets to 5. Generally 2 or 3 iterations are sufficient to\n obtain a good alignment.\n tol (float, optional): Tolerance allowable. The process will stop if\n :math:`\\max_{i}|\\delta_{i}^{(\\nu)}-\\delta_{i}^{(\\nu-1)}|<tol`.\n Default sets to 1e-2.\n template (str, callable or FData, optional): Template to use in the\n least squares criterion. If template=\"mean\" it is use the\n functional mean as in the original paper. The template can be a\n callable that will receive an FDataGrid with the samples and will\n return another FDataGrid as a template, such as any of the means or\n medians of the module `skfda.explotatory.stats`.\n If the template is an FData is used directly as the final\n template to the registration, if it is a callable or \"mean\" the\n template is computed iteratively constructing a temporal template\n in each iteration. In [RaSi2005-7-9-1]_ is described in detail this\n procedure. Defaults to \"mean\".\n extrapolation (str or :class:`Extrapolation`, optional): Controls the\n extrapolation mode for points outside the :term:`domain` range.\n By default uses the method defined in the data to be transformed.\n See the `extrapolation` documentation to obtain more information.\n step_size (int or float, optional): Parameter to adjust the rate of\n convergence in the Newton-Raphson algorithm, see [RaSi2005-7-9-1]_.\n Defaults to 1.\n restrict_domain (bool, optional): If True restricts the :term:`domain`\n to avoid the need of using extrapolation, in which\n case only the fit_transform method will be available, as training\n and transformation must be done together. Defaults to False.\n initial (str or array_like, optional): Array with an initial estimation\n of shifts. Default uses a list of zeros for the initial shifts.\n output_points (array_like, optional): Set of points where the\n functions are evaluated to obtain the discrete\n representation of the object to integrate. 
If None is\n passed it calls numpy.linspace in FDataBasis and uses the\n `grid_points` in FDataGrids.\n\n Attributes:\n template_ (FData): Template :math:`\\mu` learned during the fitting\n used to the transformation.\n deltas_ (numpy.ndarray): List of shifts :math:`\\delta_i` applied\n during the last transformation.\n n_iter_ (int): Number of iterations performed during the last\n transformation.\n\n Note:\n Due to the use of derivatives for the estimation of the shifts, the\n samples to be registered may be smooth for the correct convergence of\n the method.\n\n Examples:\n\n >>> from skfda.preprocessing.registration import ShiftRegistration\n >>> from skfda.datasets import make_sinusoidal_process\n >>> from skfda.representation.basis import Fourier\n\n\n Registration and creation of dataset in discretized form:\n\n >>> fd = make_sinusoidal_process(n_samples=10, error_std=0,\n ... random_state=1)\n >>> reg = ShiftRegistration(extrapolation=\"periodic\")\n >>> fd_registered = reg.fit_transform(fd)\n >>> fd_registered\n FDataGrid(...)\n\n Shifts applied during the transformation\n\n >>> reg.deltas_.round(3)\n array([-0.128, 0.187, 0.027, 0.034, -0.106, 0.114, ..., -0.06 ])\n\n\n Registration and creation of a dataset in basis form using the\n transformation previosly fitted:\n\n >>> fd = make_sinusoidal_process(n_samples=2, error_std=0,\n ... random_state=2)\n >>> fd_basis = fd.to_basis(Fourier())\n >>> reg.transform(fd_basis)\n FDataBasis(...)\n\n\n References:\n .. [RaSi2005-7-2] Ramsay, J., Silverman, B. W. (2005). Shift\n registration. In *Functional Data Analysis* (pp. 129-132).\n Springer.\n .. [RaSi2005-7-9-1] Ramsay, J., Silverman, B. W. (2005). Shift\n registration by the Newton-Raphson algorithm. In *Functional\n Data Analysis* (pp. 142-144). Springer.\n \"\"\"\n\n def __init__(self, max_iter=5, tol=1e-2, template=\"mean\",\n extrapolation=None, step_size=1, restrict_domain=False,\n initial=\"zeros\", output_points=None):\n self.max_iter = max_iter\n self.tol = tol\n self.template = template\n self.restrict_domain = restrict_domain\n self.extrapolation = extrapolation\n self.step_size = step_size\n self.initial = initial\n self.output_points = output_points\n\n def _compute_deltas(self, fd, template):\n r\"\"\"Compute the shifts to perform the registration.\n\n Args:\n fd (FData: Functional object to be registered.\n template (str, FData or callable): Template to align the\n the samples. 
\"mean\" to compute the mean iteratively as in\n the original paper, an FData with the templated calculated or\n a callable wich constructs the template.\n\n Returns:\n tuple: A tuple with an array of deltas and an FDataGrid with the\n template.\n\n \"\"\"\n check_is_univariate(fd)\n\n domain_range = fd.domain_range[0]\n\n # Initial estimation of the shifts\n if self.initial is \"zeros\":\n delta = np.zeros(fd.n_samples)\n\n elif len(self.initial) != fd.n_samples:\n raise ValueError(f\"the initial shift ({len(self.initial)}) must \"\n f\"have the same length than the number of samples\"\n f\" ({fd.n_samples})\")\n else:\n delta = np.asarray(self.initial)\n\n # Fine equispaced mesh to evaluate the samples\n if self.output_points is None:\n\n try:\n output_points = fd.grid_points[0]\n nfine = len(output_points)\n except AttributeError:\n nfine = max(fd.n_basis * constants.BASIS_MIN_FACTOR + 1,\n constants.N_POINTS_COARSE_MESH)\n output_points = np.linspace(*domain_range, nfine)\n\n else:\n nfine = len(self.output_points)\n output_points = np.asarray(self.output_points)\n\n # Auxiliar array to avoid multiple memory allocations\n delta_aux = np.empty(fd.n_samples)\n\n # Computes the derivate of originals curves in the mesh points\n fd_deriv = fd.derivative(order=1)\n D1x = fd_deriv(output_points)[..., 0]\n\n # Second term of the second derivate estimation of REGSSE. The\n # first term has been dropped to improve convergence (see references)\n d2_regsse = simps(np.square(D1x), output_points, axis=1)\n\n max_diff = self.tol + 1\n self.n_iter_ = 0\n\n # Case template fixed\n if isinstance(template, FData):\n original_template = template\n tfine_aux = template.evaluate(output_points)[0, ..., 0]\n\n if self.restrict_domain:\n template_points_aux = tfine_aux\n\n template = \"fixed\"\n else:\n tfine_aux = np.empty(nfine)\n\n # Auxiliar array if the domain will be restricted\n if self.restrict_domain:\n D1x_tmp = D1x\n tfine_tmp = output_points\n tfine_aux_tmp = tfine_aux\n domain = np.empty(nfine, dtype=np.dtype(bool))\n\n ones = np.ones(fd.n_samples)\n output_points_rep = np.outer(ones, output_points)\n\n # Newton-Rhapson iteration\n while max_diff > self.tol and self.n_iter_ < self.max_iter:\n\n # Updates the limits for non periodic functions ignoring the ends\n if self.restrict_domain:\n # Calculates the new limits\n a = domain_range[0] - min(np.min(delta), 0)\n b = domain_range[1] - max(np.max(delta), 0)\n\n # New interval is (a,b)\n np.logical_and(tfine_tmp >= a, tfine_tmp <= b, out=domain)\n output_points = tfine_tmp[domain]\n tfine_aux = tfine_aux_tmp[domain]\n D1x = D1x_tmp[:, domain]\n # Reescale the second derivate could be other approach\n # d2_regsse =\n # d2_regsse_original * ( 1 + (a - b) / (domain[1] - domain[0]))\n d2_regsse = simps(np.square(D1x), output_points, axis=1)\n\n # Recompute base points for evaluation\n output_points_rep = np.outer(ones, output_points)\n\n # Computes the new values shifted\n x = fd(output_points_rep + np.atleast_2d(delta).T,\n aligned=False,\n extrapolation=self.extrapolation)[..., 0]\n\n if template == \"mean\":\n x.mean(axis=0, out=tfine_aux)\n elif template == \"fixed\" and self.restrict_domain:\n tfine_aux = template_points_aux[domain]\n elif callable(template): # Callable\n fd_x = FDataGrid(x, grid_points=output_points)\n fd_tfine = template(fd_x)\n tfine_aux = fd_tfine.data_matrix.ravel()\n\n # Calculates x - mean\n np.subtract(x, tfine_aux, out=x)\n\n d1_regsse = simps(np.multiply(x, D1x, out=x),\n output_points, axis=1)\n # Updates the shifts by 
the Newton-Rhapson iteration\n # delta = delta - step_size * d1_regsse / d2_regsse\n np.divide(d1_regsse, d2_regsse, out=delta_aux)\n np.multiply(delta_aux, self.step_size, out=delta_aux)\n np.subtract(delta, delta_aux, out=delta)\n\n # Updates convergence criterions\n max_diff = np.abs(delta_aux, out=delta_aux).max()\n self.n_iter_ += 1\n\n if template == \"fixed\":\n\n # Stores the original template instead of building it again\n template = original_template\n else:\n\n # Stores the template in an FDataGrid\n template = FDataGrid(tfine_aux, grid_points=output_points)\n\n return delta, template\n\n def fit_transform(self, X: FData, y=None):\n \"\"\"Fit the estimator and transform the data.\n\n Args:\n X (FData): Functional dataset to be transformed.\n y (ignored): not used, present for API consistency by convention.\n\n Returns:\n FData: Functional data registered.\n\n \"\"\"\n self.deltas_, self.template_ = self._compute_deltas(X, self.template)\n\n return X.shift(self.deltas_, restrict_domain=self.restrict_domain,\n extrapolation=self.extrapolation,\n eval_points=self.output_points)\n\n def fit(self, X: FData, y=None):\n \"\"\"Fit the estimator.\n\n Args:\n X (FData): Functional dataset used to construct the template for\n the alignment.\n y (ignored): not used, present for API consistency by convention.\n\n Returns:\n RegistrationTransformer: self\n\n Raises:\n AttributeError: If this method is call when restrict_domain=True.\n\n \"\"\"\n if self.restrict_domain:\n raise AttributeError(\"fit and predict are not available when \"\n \"restrict_domain=True, fitting and \"\n \"transformation should be done together. Use \"\n \"an extrapolation method with \"\n \"restrict_domain=False or fit_predict\")\n\n # If the template is an FData, fit doesnt learn anything\n if isinstance(self.template, FData):\n self.template_ = self.template\n\n else:\n _, self.template_ = self._compute_deltas(X, self.template)\n\n return self\n\n def transform(self, X: FData, y=None):\n \"\"\"Register the data.\n\n Transforms the data using the template previously learned during\n fitting.\n\n Args:\n X (FData): Functional dataset to be transformed.\n y (ignored): not used, present for API consistency by convention.\n\n Returns:\n FData: Functional data registered.\n\n Raises:\n AttributeError: If this method is call when restrict_domain=True.\n\n \"\"\"\n\n if self.restrict_domain:\n raise AttributeError(\"fit and predict are not available when \"\n \"restrict_domain=True, fitting and \"\n \"transformation should be done together. 
Use \"\n \"an extrapolation method with \"\n \"restrict_domain=False or fit_predict\")\n\n # Check is fitted\n check_is_fitted(self, 'template_')\n\n deltas, template = self._compute_deltas(X, self.template_)\n self.template_ = template\n self.deltas_ = deltas\n\n return X.shift(deltas, restrict_domain=self.restrict_domain,\n extrapolation=self.extrapolation,\n eval_points=self.output_points)\n\n def inverse_transform(self, X: FData, y=None):\n \"\"\"Applies the inverse transformation.\n\n Applies the opossite shift used in the last call to `transform`.\n\n Args:\n X (FData): Functional dataset to be transformed.\n y (ignored): not used, present for API consistency by convention.\n\n Returns:\n FData: Functional data registered.\n\n Examples:\n\n Creates a synthetic functional dataset.\n\n >>> from skfda.preprocessing.registration import ShiftRegistration\n >>> from skfda.datasets import make_sinusoidal_process\n >>> fd = make_sinusoidal_process(error_std=0, random_state=1)\n >>> fd.extrapolation = 'periodic'\n\n Dataset registration and centering.\n\n >>> reg = ShiftRegistration()\n >>> fd_registered = reg.fit_transform(fd)\n >>> fd_centered = fd_registered - fd_registered.mean()\n\n Reverse the translation applied during the registration.\n\n >>> reg.inverse_transform(fd_centered)\n FDataGrid(...)\n\n \"\"\"\n if not hasattr(self, \"deltas_\"):\n raise AttributeError(\"Data must be previously transformed to learn\"\n \" the inverse transformation\")\n elif len(X) != len(self.deltas_):\n raise ValueError(\"Data must contain the same number of samples \"\n \"than the dataset previously transformed\")\n\n return X.shift(-self.deltas_, restrict_domain=self.restrict_domain,\n extrapolation=self.extrapolation,\n eval_points=self.output_points)\n",
"import numpy as np\nimport scipy.interpolate\nfrom numpy import polyint, polymul, polyval\nfrom scipy.interpolate import BSpline as SciBSpline, PPoly\n\nfrom ..._utils import _domain_range\nfrom ._basis import Basis\n\n\nclass BSpline(Basis):\n r\"\"\"BSpline basis.\n\n BSpline basis elements are defined recursively as:\n\n .. math::\n B_{i, 1}(x) = 1 \\quad \\text{if } t_i \\le x < t_{i+1},\n \\quad 0 \\text{ otherwise}\n\n .. math::\n B_{i, k}(x) = \\frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)\n + \\frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)\n\n Where k indicates the order of the spline.\n\n Implementation details: In order to allow a discontinuous behaviour at\n the boundaries of the domain it is necessary to placing m knots at the\n boundaries [RS05]_. This is automatically done so that the user only has to\n specify a single knot at the boundaries.\n\n Attributes:\n domain_range (tuple): A tuple of length 2 containing the initial and\n end values of the interval over which the basis can be evaluated.\n n_basis (int): Number of functions in the basis.\n order (int): Order of the splines. One greather than their degree.\n knots (list): List of knots of the spline functions.\n\n Examples:\n Constructs specifying number of basis and order.\n\n >>> bss = BSpline(n_basis=8, order=4)\n\n If no order is specified defaults to 4 because cubic splines are\n the most used. So the previous example is the same as:\n\n >>> bss = BSpline(n_basis=8)\n\n It is also possible to create a BSpline basis specifying the knots.\n\n >>> bss = BSpline(knots=[0, 0.2, 0.4, 0.6, 0.8, 1])\n\n Once we create a basis we can evaluate each of its functions at a\n set of points.\n\n >>> bss = BSpline(n_basis=3, order=3)\n >>> bss([0, 0.5, 1])\n array([[[ 1. ],\n [ 0.25],\n [ 0. ]],\n [[ 0. ],\n [ 0.5 ],\n [ 0. ]],\n [[ 0. ],\n [ 0.25],\n [ 1. ]]])\n\n And evaluates first derivative\n\n >>> deriv = bss.derivative()\n >>> deriv([0, 0.5, 1])\n array([[[-2.],\n [-1.],\n [ 0.]],\n [[ 2.],\n [ 0.],\n [-2.]],\n [[ 0.],\n [ 1.],\n [ 2.]]])\n\n References:\n .. [RS05] Ramsay, J., Silverman, B. W. (2005). *Functional Data\n Analysis*. Springer. 50-51.\n\n \"\"\"\n\n def __init__(self, domain_range=None, n_basis=None, order=4, knots=None):\n \"\"\"Bspline basis constructor.\n\n Args:\n domain_range (tuple, optional): Definition of the interval where\n the basis defines a space. Defaults to (0,1) if knots are not\n specified. If knots are specified defaults to the first and\n last element of the knots.\n n_basis (int, optional): Number of splines that form the basis.\n order (int, optional): Order of the splines. One greater that\n their degree. Defaults to 4 which mean cubic splines.\n knots (array_like): List of knots of the splines. 
If domain_range\n is specified the first and last elements of the knots have to\n match with it.\n\n \"\"\"\n\n if domain_range is not None:\n domain_range = _domain_range(domain_range)\n\n if len(domain_range) != 1:\n raise ValueError(\"Domain range should be unidimensional.\")\n\n domain_range = domain_range[0]\n\n # Knots default to equally space points in the domain_range\n if knots is None:\n if n_basis is None:\n raise ValueError(\"Must provide either a list of knots or the\"\n \"number of basis.\")\n else:\n knots = tuple(knots)\n knots = sorted(knots)\n if domain_range is None:\n domain_range = (knots[0], knots[-1])\n else:\n if domain_range[0] != knots[0] or domain_range[1] != knots[-1]:\n raise ValueError(\"The ends of the knots must be the same \"\n \"as the domain_range.\")\n\n # n_basis default to number of knots + order of the splines - 2\n if n_basis is None:\n n_basis = len(knots) + order - 2\n\n if (n_basis - order + 2) < 2:\n raise ValueError(f\"The number of basis ({n_basis}) minus the \"\n f\"order of the bspline ({order}) should be \"\n f\"greater than 3.\")\n\n self._order = order\n self._knots = None if knots is None else tuple(knots)\n super().__init__(domain_range=domain_range, n_basis=n_basis)\n\n # Checks\n if self.n_basis != self.order + len(self.knots) - 2:\n raise ValueError(f\"The number of basis ({self.n_basis}) has to \"\n f\"equal the order ({self.order}) plus the \"\n f\"number of knots ({len(self.knots)}) minus 2.\")\n\n @property\n def knots(self):\n if self._knots is None:\n return tuple(np.linspace(*self.domain_range[0],\n self.n_basis - self.order + 2))\n else:\n return self._knots\n\n @property\n def order(self):\n return self._order\n\n def _evaluation_knots(self):\n \"\"\"\n Get the knots adding m knots to the boundary in order to allow a\n discontinuous behaviour at the boundaries of the domain [RS05]_.\n\n References:\n .. [RS05] Ramsay, J., Silverman, B. W. (2005). *Functional Data\n Analysis*. Springer. 
50-51.\n \"\"\"\n return np.array((self.knots[0],) * (self.order - 1) + self.knots +\n (self.knots[-1],) * (self.order - 1))\n\n def _evaluate(self, eval_points):\n\n # Input is scalar\n eval_points = eval_points[..., 0]\n\n # Places m knots at the boundaries\n knots = self._evaluation_knots()\n\n # c is used the select which spline the function splev below computes\n c = np.zeros(len(knots))\n\n # Initialise empty matrix\n mat = np.empty((self.n_basis, len(eval_points)))\n\n # For each basis computes its value for each evaluation point\n for i in range(self.n_basis):\n # write a 1 in c in the position of the spline calculated in each\n # iteration\n c[i] = 1\n # compute the spline\n mat[i] = scipy.interpolate.splev(eval_points,\n (knots, c, self.order - 1))\n c[i] = 0\n\n return mat\n\n def _derivative_basis_and_coefs(self, coefs, order=1):\n if order >= self.order:\n return (\n BSpline(n_basis=1, domain_range=self.domain_range, order=1),\n np.zeros((len(coefs), 1)))\n\n deriv_splines = [self._to_scipy_BSpline(coefs[i]).derivative(order)\n for i in range(coefs.shape[0])]\n\n deriv_coefs = [BSpline._from_scipy_BSpline(spline)[1]\n for spline in deriv_splines]\n\n deriv_basis = BSpline._from_scipy_BSpline(deriv_splines[0])[0]\n\n return deriv_basis, np.array(deriv_coefs)[:, 0:deriv_basis.n_basis]\n\n def rescale(self, domain_range=None):\n r\"\"\"Return a copy of the basis with a new domain range, with the\n corresponding values rescaled to the new bounds.\n The knots of the BSpline will be rescaled in the new interval.\n\n Args:\n domain_range (tuple, optional): Definition of the interval\n where the basis defines a space. Defaults uses the same as\n the original basis.\n \"\"\"\n\n knots = np.array(self.knots, dtype=np.dtype('float'))\n\n if domain_range is not None: # Rescales the knots\n knots -= knots[0]\n knots *= ((domain_range[1] - domain_range[0]\n ) / (self.knots[-1] - self.knots[0]))\n knots += domain_range[0]\n\n # Fix possible round error\n knots[0] = domain_range[0]\n knots[-1] = domain_range[1]\n\n else:\n # TODO: Allow multiple dimensions\n domain_range = self.domain_range[0]\n\n return BSpline(domain_range, self.n_basis, self.order, knots)\n\n def __repr__(self):\n \"\"\"Representation of a BSpline basis.\"\"\"\n return (f\"{self.__class__.__name__}(domain_range={self.domain_range}, \"\n f\"n_basis={self.n_basis}, order={self.order}, \"\n f\"knots={self.knots})\")\n\n def _gram_matrix(self):\n # Places m knots at the boundaries\n knots = self._evaluation_knots()\n\n # c is used the select which spline the function\n # PPoly.from_spline below computes\n c = np.zeros(len(knots))\n\n # Initialise empty list to store the piecewise polynomials\n ppoly_lst = []\n\n no_0_intervals = np.where(np.diff(knots) > 0)[0]\n\n # For each basis gets its piecewise polynomial representation\n for i in range(self.n_basis):\n\n # Write a 1 in c in the position of the spline\n # transformed in each iteration\n c[i] = 1\n\n # Gets the piecewise polynomial representation and gets\n # only the positions for no zero length intervals\n # This polynomial are defined relatively to the knots\n # meaning that the column i corresponds to the ith knot.\n # Let the ith knot be a\n # Then f(x) = pp(x - a)\n pp = PPoly.from_spline((knots, c, self.order - 1))\n pp_coefs = pp.c[:, no_0_intervals]\n\n # We have the coefficients for each interval in coordinates\n # (x - a), so we will need to subtract a when computing the\n # definite integral\n ppoly_lst.append(pp_coefs)\n c[i] = 0\n\n # Now for each pair of 
basis computes the inner product after\n # applying the linear differential operator\n matrix = np.zeros((self.n_basis, self.n_basis))\n\n for interval in range(len(no_0_intervals)):\n for i in range(self.n_basis):\n poly_i = np.trim_zeros(ppoly_lst[i][:,\n interval], 'f')\n # Indefinite integral\n square = polymul(poly_i, poly_i)\n integral = polyint(square)\n\n # Definite integral\n matrix[i, i] += np.diff(polyval(\n integral, self.knots[interval: interval + 2]\n - self.knots[interval]))[0]\n\n # The Gram matrix is banded, so not all intervals are used\n for j in range(i + 1, min(i + self.order, self.n_basis)):\n poly_j = np.trim_zeros(ppoly_lst[j][:, interval], 'f')\n\n # Indefinite integral\n integral = polyint(polymul(poly_i, poly_j))\n\n # Definite integral\n matrix[i, j] += np.diff(polyval(\n integral, self.knots[interval: interval + 2]\n - self.knots[interval])\n )[0]\n\n # The matrix is symmetric\n matrix[j, i] = matrix[i, j]\n\n return matrix\n\n def _to_scipy_BSpline(self, coefs):\n\n knots = np.concatenate((\n np.repeat(self.knots[0], self.order - 1),\n self.knots,\n np.repeat(self.knots[-1], self.order - 1)))\n\n return SciBSpline(knots, coefs, self.order - 1)\n\n @staticmethod\n def _from_scipy_BSpline(bspline):\n order = bspline.k\n knots = bspline.t\n\n # Remove additional knots at the borders\n if order != 0:\n knots = knots[order: -order]\n\n coefs = bspline.c\n domain_range = [knots[0], knots[-1]]\n\n return BSpline(domain_range, order=order + 1, knots=knots), coefs\n\n @property\n def inknots(self):\n \"\"\"Return number of basis.\"\"\"\n return self.knots[1:len(self.knots) - 1]\n\n def __eq__(self, other):\n return (super().__eq__(other)\n and self.order == other.order\n and self.knots == other.knots)\n\n def __hash__(self):\n return hash((super().__hash__(), self.order, self.knots))\n"
] |
[
[
"numpy.square",
"sklearn.utils.validation.check_is_fitted",
"numpy.abs",
"numpy.multiply",
"numpy.linspace",
"numpy.asarray",
"numpy.min",
"numpy.divide",
"numpy.subtract",
"numpy.dtype",
"numpy.ones",
"numpy.max",
"numpy.atleast_2d",
"numpy.outer",
"numpy.logical_and",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.linspace",
"numpy.polymul",
"numpy.polyint",
"numpy.dtype",
"scipy.interpolate.PPoly.from_spline",
"numpy.diff",
"numpy.trim_zeros",
"scipy.interpolate.BSpline",
"numpy.polyval",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
das-projects/deepOCR
|
[
"ffc6db691605b7b4837da9619ab6e918fa1c18de",
"ffc6db691605b7b4837da9619ab6e918fa1c18de",
"ffc6db691605b7b4837da9619ab6e918fa1c18de"
] |
[
"deepocr/datasets/ic13.py",
"deepocr/utils/metrics.py",
"deepocr/io/image/base.py"
] |
[
"# Copyright (C) 2022, Arijit Das.\n# Code adapted from doctr and huggingface\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport csv\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Tuple\n\nimport numpy as np\n\nfrom .datasets import AbstractDataset\nfrom .utils import convert_target_to_relative\n\n__all__ = [\"IC13\"]\n\n\nclass IC13(AbstractDataset):\n \"\"\"IC13 dataset from `\"ICDAR 2013 Robust Reading Competition\" <https://rrc.cvc.uab.es/>`_.\n\n Example::\n >>> # NOTE: You need to download both image and label parts from Focused Scene Text challenge Task2.1 2013-2015.\n >>> from deepocr.datasets import IC13\n >>> train_set = IC13(img_folder=\"/path/to/Challenge2_Training_Task12_Images\",\n >>> label_folder=\"/path/to/Challenge2_Training_Task1_GT\")\n >>> img, target = train_set[0]\n >>> test_set = IC13(img_folder=\"/path/to/Challenge2_Test_Task12_Images\",\n >>> label_folder=\"/path/to/Challenge2_Test_Task1_GT\")\n >>> img, target = test_set[0]\n\n Args:\n img_folder: folder with all the images of the dataset\n label_folder: folder with all annotation files for the images\n use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)\n **kwargs: keyword arguments from `AbstractDataset`.\n \"\"\"\n\n def __init__(\n self,\n img_folder: str,\n label_folder: str,\n use_polygons: bool = False,\n **kwargs: Any,\n ) -> None:\n super().__init__(img_folder, pre_transforms=convert_target_to_relative, **kwargs)\n\n # File existence check\n if not os.path.exists(label_folder) or not os.path.exists(img_folder):\n raise FileNotFoundError(\n f\"unable to locate {label_folder if not os.path.exists(label_folder) else img_folder}\")\n\n self.data: List[Tuple[Path, Dict[str, Any]]] = []\n np_dtype = np.float32\n\n img_names = os.listdir(img_folder)\n\n for img_name in img_names:\n\n img_path = Path(img_folder, img_name)\n label_path = Path(label_folder, \"gt_\" + Path(img_name).stem + \".txt\")\n\n with open(label_path, newline='\\n') as f:\n _lines = [\n [val[:-1] if val.endswith(\",\") else val for val in row]\n for row in csv.reader(f, delimiter=' ', quotechar=\"'\")\n ]\n labels = [line[-1] for line in _lines]\n # xmin, ymin, xmax, ymax\n box_targets = np.array([list(map(int, line[:4])) for line in _lines], dtype=np_dtype)\n if use_polygons:\n # (x, y) coordinates of top left, top right, bottom right, bottom left corners\n box_targets = np.array(\n [\n [\n [coords[0], coords[1]],\n [coords[2], coords[1]],\n [coords[2], coords[3]],\n [coords[0], coords[3]],\n ] for coords in box_targets\n ], dtype=np_dtype\n )\n self.data.append((img_path, dict(boxes=box_targets, labels=labels)))\n",
"# Copyright (C) 2022, Arijit Das.\n# Code adapted from doctr and huggingface\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom typing import Dict, List, Optional, Tuple\n\nimport cv2\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom unidecode import unidecode\n\n__all__ = ['TextMatch', 'box_iou', 'box_ioa', 'mask_iou', 'polygon_iou',\n 'nms', 'LocalizationConfusion', 'OCRMetric', 'DetectionMetric']\n\n\ndef string_match(word1: str, word2: str) -> Tuple[bool, bool, bool, bool]:\n \"\"\"Performs string comparison with multiple levels of tolerance\n\n Args:\n word1: a string\n word2: another string\n\n Returns:\n a tuple with booleans specifying respectively whether the raw strings, their lower-case counterparts, their\n unidecode counterparts and their lower-case unidecode counterparts match\n \"\"\"\n raw_match = (word1 == word2)\n caseless_match = (word1.lower() == word2.lower())\n unidecode_match = (unidecode(word1) == unidecode(word2))\n\n # Warning: the order is important here otherwise the pair (\"EUR\", \"€\") cannot be matched\n unicase_match = (unidecode(word1).lower() == unidecode(word2).lower())\n\n return raw_match, caseless_match, unidecode_match, unicase_match\n\n\nclass TextMatch:\n r\"\"\"Implements text match metric (word-level accuracy) for recognition task.\n\n The raw aggregated metric is computed as follows:\n\n .. math::\n \\forall X, Y \\in \\mathcal{W}^N,\n TextMatch(X, Y) = \\frac{1}{N} \\sum\\limits_{i=1}^N f_{Y_i}(X_i)\n\n with the indicator function :math:`f_{a}` defined as:\n\n .. math::\n \\forall a, x \\in \\mathcal{W},\n f_a(x) = \\left\\{\n \\begin{array}{ll}\n 1 & \\mbox{if } x = a \\\\\n 0 & \\mbox{otherwise.}\n \\end{array}\n \\right.\n\n where :math:`\\mathcal{W}` is the set of all possible character sequences,\n :math:`N` is a strictly positive integer.\n\n Example::\n >>> from deepocr.utils import TextMatch\n >>> metric = TextMatch()\n >>> metric.update(['Hello', 'world'], ['hello', 'world'])\n >>> metric.summary()\n \"\"\"\n\n def __init__(self) -> None:\n self.reset()\n\n def update(\n self,\n gt: List[str],\n pred: List[str],\n ) -> None:\n \"\"\"Update the state of the metric with new predictions\n\n Args:\n gt: list of groung-truth character sequences\n pred: list of predicted character sequences\n \"\"\"\n\n if len(gt) != len(pred):\n raise AssertionError(\"prediction size does not match with ground-truth labels size\")\n\n for gt_word, pred_word in zip(gt, pred):\n _raw, _caseless, _unidecode, _unicase = string_match(gt_word, pred_word)\n self.raw += int(_raw)\n self.caseless += int(_caseless)\n self.unidecode += int(_unidecode)\n self.unicase += int(_unicase)\n\n self.total += len(gt)\n\n def summary(self) -> Dict[str, float]:\n \"\"\"Computes the aggregated metrics\n\n Returns:\n a dictionary with the exact match score for the raw data, its lower-case counterpart, its unidecode\n counterpart and its lower-case unidecode counterpart\n \"\"\"\n if self.total == 0:\n raise AssertionError(\"you need to update the metric before getting the summary\")\n\n return dict(\n raw=self.raw / self.total,\n caseless=self.caseless / self.total,\n unidecode=self.unidecode / self.total,\n unicase=self.unicase / self.total,\n )\n\n def reset(self) -> None:\n self.raw = 0\n self.caseless = 0\n self.unidecode = 0\n self.unicase = 0\n self.total = 0\n\n\ndef box_iou(boxes_1: np.ndarray, boxes_2: np.ndarray) -> 
np.ndarray:\n \"\"\"Computes the IoU between two sets of bounding boxes\n\n Args:\n boxes_1: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)\n boxes_2: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)\n Returns:\n the IoU matrix of shape (N, M)\n \"\"\"\n\n iou_mat = np.zeros((boxes_1.shape[0], boxes_2.shape[0]), dtype=np.float32)\n\n if boxes_1.shape[0] > 0 and boxes_2.shape[0] > 0:\n l1, t1, r1, b1 = np.split(boxes_1, 4, axis=1)\n l2, t2, r2, b2 = np.split(boxes_2, 4, axis=1)\n\n left = np.maximum(l1, l2.T)\n top = np.maximum(t1, t2.T)\n right = np.minimum(r1, r2.T)\n bot = np.minimum(b1, b2.T)\n\n intersection = np.clip(right - left, 0, np.Inf) * np.clip(bot - top, 0, np.Inf)\n union = (r1 - l1) * (b1 - t1) + ((r2 - l2) * (b2 - t2)).T - intersection\n iou_mat = intersection / union\n\n return iou_mat\n\n\ndef box_ioa(boxes_1: np.ndarray, boxes_2: np.ndarray) -> np.ndarray:\n \"\"\"Computes the IoA (intersection over area) between two sets of bounding boxes:\n ioa(i, j) = inter(i, j) / area(i)\n\n Args:\n boxes_1: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)\n boxes_2: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)\n Returns:\n the IoA matrix of shape (N, M)\n \"\"\"\n\n ioa_mat = np.zeros((boxes_1.shape[0], boxes_2.shape[0]), dtype=np.float32)\n\n if boxes_1.shape[0] > 0 and boxes_2.shape[0] > 0:\n l1, t1, r1, b1 = np.split(boxes_1, 4, axis=1)\n l2, t2, r2, b2 = np.split(boxes_2, 4, axis=1)\n\n left = np.maximum(l1, l2.T)\n top = np.maximum(t1, t2.T)\n right = np.minimum(r1, r2.T)\n bot = np.minimum(b1, b2.T)\n\n intersection = np.clip(right - left, 0, np.Inf) * np.clip(bot - top, 0, np.Inf)\n area = (r1 - l1) * (b1 - t1)\n ioa_mat = intersection / area\n\n return ioa_mat\n\n\ndef mask_iou(masks_1: np.ndarray, masks_2: np.ndarray) -> np.ndarray:\n \"\"\"Computes the IoU between two sets of boolean masks\n\n Args:\n masks_1: boolean masks of shape (N, H, W)\n masks_2: boolean masks of shape (M, H, W)\n\n Returns:\n the IoU matrix of shape (N, M)\n \"\"\"\n\n if masks_1.shape[1:] != masks_2.shape[1:]:\n raise AssertionError(\"both boolean masks should have the same spatial shape\")\n\n iou_mat = np.zeros((masks_1.shape[0], masks_2.shape[0]), dtype=np.float32)\n\n if masks_1.shape[0] > 0 and masks_2.shape[0] > 0:\n axes = tuple(range(2, masks_1.ndim + 1))\n intersection = np.logical_and(masks_1[:, None, ...], masks_2[None, ...]).sum(axis=axes)\n union = np.logical_or(masks_1[:, None, ...], masks_2[None, ...]).sum(axis=axes)\n iou_mat = intersection / union\n\n return iou_mat\n\n\ndef polygon_iou(\n polys_1: np.ndarray,\n polys_2: np.ndarray,\n mask_shape: Tuple[int, int],\n use_broadcasting: bool = False\n) -> np.ndarray:\n \"\"\"Computes the IoU between two sets of rotated bounding boxes\n\n Args:\n polys_1: rotated bounding boxes of shape (N, 4, 2)\n polys_2: rotated bounding boxes of shape (M, 4, 2)\n mask_shape: spatial shape of the intermediate masks\n use_broadcasting: if set to True, leverage broadcasting speedup by consuming more memory\n\n Returns:\n the IoU matrix of shape (N, M)\n \"\"\"\n\n if polys_1.ndim != 3 or polys_2.ndim != 3:\n raise AssertionError(\"expects boxes to be in format (N, 4, 2)\")\n\n iou_mat = np.zeros((polys_1.shape[0], polys_2.shape[0]), dtype=np.float32)\n\n if polys_1.shape[0] > 0 and polys_2.shape[0] > 0:\n if use_broadcasting:\n masks_1 = rbox_to_mask(polys_1, shape=mask_shape)\n masks_2 = rbox_to_mask(polys_2, shape=mask_shape)\n iou_mat = mask_iou(masks_1, masks_2)\n else:\n # 
Save memory by doing the computation for each pair\n for idx, b1 in enumerate(polys_1):\n m1 = _rbox_to_mask(b1, mask_shape)\n for _idx, b2 in enumerate(polys_2):\n m2 = _rbox_to_mask(b2, mask_shape)\n iou_mat[idx, _idx] = np.logical_and(m1, m2).sum() / np.logical_or(m1, m2).sum()\n\n return iou_mat\n\n\ndef _rbox_to_mask(box: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:\n \"\"\"Converts a rotated bounding box to a boolean mask\n\n Args:\n box: rotated bounding box of shape (4, 2)\n shape: spatial shapes of the output masks\n\n Returns:\n the boolean mask of the specified shape\n \"\"\"\n\n mask = np.zeros(shape, dtype=np.uint8)\n # Get absolute coords\n if box.dtype != int:\n abs_box = box.copy()\n abs_box[:, 0] = abs_box[:, 0] * shape[1]\n abs_box[:, 1] = abs_box[:, 1] * shape[0]\n abs_box = abs_box.round().astype(int)\n else:\n abs_box = box\n abs_box[2:] = abs_box[2:] + 1\n cv2.fillPoly(mask, [abs_box - 1], 1)\n\n return mask.astype(bool)\n\n\ndef rbox_to_mask(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:\n \"\"\"Converts rotated bounding boxes to boolean masks\n\n Args:\n boxes: rotated bounding boxes of shape (N, 4, 2)\n shape: spatial shapes of the output masks\n\n Returns:\n the boolean masks of shape (N, H, W)\n \"\"\"\n\n masks = np.zeros((boxes.shape[0], *shape), dtype=np.uint8)\n\n if boxes.shape[0] > 0:\n # Get absolute coordinates\n if boxes.dtype != np.int:\n abs_boxes = boxes.copy()\n abs_boxes[:, :, 0] = abs_boxes[:, :, 0] * shape[1]\n abs_boxes[:, :, 1] = abs_boxes[:, :, 1] * shape[0]\n abs_boxes = abs_boxes.round().astype(np.int)\n else:\n abs_boxes = boxes\n abs_boxes[:, 2:] = abs_boxes[:, 2:] + 1\n\n # TODO: optimize slicing to improve vectorization\n for idx, _box in enumerate(abs_boxes):\n cv2.fillPoly(masks[idx], [_box - 1], 1)\n return masks.astype(bool)\n\n\ndef nms(boxes: np.ndarray, thresh: float = .5) -> List[int]:\n \"\"\"Perform non-max suppression, borrowed from <https://github.com/rbgirshick/fast-rcnn>`_.\n\n Args:\n boxes: np array of straight boxes: (*, 5), (xmin, ymin, xmax, ymax, score)\n thresh: iou threshold to perform box suppression.\n\n Returns:\n A list of box indexes to keep\n \"\"\"\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n scores = boxes[:, 4]\n\n areas = (x2 - x1) * (y2 - y1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1)\n h = np.maximum(0.0, yy2 - yy1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n return keep\n\n\nclass LocalizationConfusion:\n r\"\"\"Implements common confusion metrics and mean IoU for localization evaluation.\n\n The aggregated metrics are computed as follows:\n\n .. math::\n \\forall Y \\in \\mathcal{B}^N, \\forall X \\in \\mathcal{B}^M, \\\\\n Recall(X, Y) = \\frac{1}{N} \\sum\\limits_{i=1}^N g_{X}(Y_i) \\\\\n Precision(X, Y) = \\frac{1}{M} \\sum\\limits_{i=1}^M g_{X}(Y_i) \\\\\n meanIoU(X, Y) = \\frac{1}{M} \\sum\\limits_{i=1}^M \\max\\limits_{j \\in [1, N]} IoU(X_i, Y_j)\n\n with the function :math:`IoU(x, y)` being the Intersection over Union between bounding boxes :math:`x` and\n :math:`y`, and the function :math:`g_{X}` defined as:\n\n .. 
math::\n \\forall y \\in \\mathcal{B},\n g_X(y) = \\left\\{\n \\begin{array}{ll}\n 1 & \\mbox{if } y\\mbox{ has been assigned to any }(X_i)_i\\mbox{ with an }IoU \\geq 0.5 \\\\\n 0 & \\mbox{otherwise.}\n \\end{array}\n \\right.\n\n where :math:`\\mathcal{B}` is the set of possible bounding boxes,\n :math:`N` (number of ground truths) and :math:`M` (number of predictions) are strictly positive integers.\n\n Example::\n >>> import numpy as np\n >>> from deepocr.utils import LocalizationConfusion\n >>> metric = LocalizationConfusion(iou_thresh=0.5)\n >>> metric.update(np.asarray([[0, 0, 100, 100]]), np.asarray([[0, 0, 70, 70], [110, 95, 200, 150]]))\n >>> metric.summary()\n\n Args:\n iou_thresh: minimum IoU to consider a pair of prediction and ground truth as a match\n use_polygons: if set to True, predictions and targets will be expected to have rotated format\n mask_shape: if use_polygons is True, describes the spatial shape of the image used\n use_broadcasting: if use_polygons is True, use broadcasting for IoU computation by consuming more memory\n \"\"\"\n\n def __init__(\n self,\n iou_thresh: float = 0.5,\n use_polygons: bool = False,\n mask_shape: Tuple[int, int] = (1024, 1024),\n use_broadcasting: bool = True,\n ) -> None:\n self.iou_thresh = iou_thresh\n self.use_polygons = use_polygons\n self.mask_shape = mask_shape\n self.use_broadcasting = use_broadcasting\n self.reset()\n\n def update(self, gts: np.ndarray, preds: np.ndarray) -> None:\n \"\"\"Updates the metric\n\n Args:\n gts: a set of relative bounding boxes either of shape (N, 4) or (N, 5) if they are rotated ones\n preds: a set of relative bounding boxes either of shape (M, 4) or (M, 5) if they are rotated ones\n \"\"\"\n\n if preds.shape[0] > 0:\n # Compute IoU\n if self.use_polygons:\n iou_mat = polygon_iou(gts, preds, self.mask_shape, self.use_broadcasting)\n else:\n iou_mat = box_iou(gts, preds)\n self.tot_iou += float(iou_mat.max(axis=0).sum())\n\n # Assign pairs\n gt_indices, pred_indices = linear_sum_assignment(-iou_mat)\n self.matches += int((iou_mat[gt_indices, pred_indices] >= self.iou_thresh).sum())\n\n # Update counts\n self.num_gts += gts.shape[0]\n self.num_preds += preds.shape[0]\n\n def summary(self) -> Tuple[Optional[float], Optional[float], Optional[float]]:\n \"\"\"Computes the aggregated metrics\n\n Returns:\n a tuple with the recall, precision and meanIoU scores\n \"\"\"\n\n # Recall\n recall = self.matches / self.num_gts if self.num_gts > 0 else None\n\n # Precision\n precision = self.matches / self.num_preds if self.num_preds > 0 else None\n\n # mean IoU\n mean_iou = self.tot_iou / self.num_preds if self.num_preds > 0 else None\n\n return recall, precision, mean_iou\n\n def reset(self) -> None:\n self.num_gts = 0\n self.num_preds = 0\n self.matches = 0\n self.tot_iou = 0.\n\n\nclass OCRMetric:\n r\"\"\"Implements an end-to-end OCR metric.\n\n The aggregated metrics are computed as follows:\n\n .. 
math::\n \\forall (B, L) \\in \\mathcal{B}^N \\times \\mathcal{L}^N,\n \\forall (\\hat{B}, \\hat{L}) \\in \\mathcal{B}^M \\times \\mathcal{L}^M, \\\\\n Recall(B, \\hat{B}, L, \\hat{L}) = \\frac{1}{N} \\sum\\limits_{i=1}^N h_{B,L}(\\hat{B}_i, \\hat{L}_i) \\\\\n Precision(B, \\hat{B}, L, \\hat{L}) = \\frac{1}{M} \\sum\\limits_{i=1}^M h_{B,L}(\\hat{B}_i, \\hat{L}_i) \\\\\n meanIoU(B, \\hat{B}) = \\frac{1}{M} \\sum\\limits_{i=1}^M \\max\\limits_{j \\in [1, N]} IoU(\\hat{B}_i, B_j)\n\n with the function :math:`IoU(x, y)` being the Intersection over Union between bounding boxes :math:`x` and\n :math:`y`, and the function :math:`h_{B, L}` defined as:\n\n .. math::\n \\forall (b, l) \\in \\mathcal{B} \\times \\mathcal{L},\n h_{B,L}(b, l) = \\left\\{\n \\begin{array}{ll}\n 1 & \\mbox{if } b\\mbox{ has been assigned to a given }B_j\\mbox{ with an } \\\\\n & IoU \\geq 0.5 \\mbox{ and that for this assignment, } l = L_j\\\\\n 0 & \\mbox{otherwise.}\n \\end{array}\n \\right.\n\n where :math:`\\mathcal{B}` is the set of possible bounding boxes,\n :math:`\\mathcal{L}` is the set of possible character sequences,\n :math:`N` (number of ground truths) and :math:`M` (number of predictions) are strictly positive integers.\n\n Example::\n >>> import numpy as np\n >>> from deepocr.utils import OCRMetric\n >>> metric = OCRMetric(iou_thresh=0.5)\n >>> metric.update(np.asarray([[0, 0, 100, 100]]), np.asarray([[0, 0, 70, 70], [110, 95, 200, 150]]),\n ['hello'], ['hello', 'world'])\n >>> metric.summary()\n\n Args:\n iou_thresh: minimum IoU to consider a pair of prediction and ground truth as a match\n use_polygons: if set to True, predictions and targets will be expected to have rotated format\n mask_shape: if use_polygons is True, describes the spatial shape of the image used\n use_broadcasting: if use_polygons is True, use broadcasting for IoU computation by consuming more memory\n \"\"\"\n\n def __init__(\n self,\n iou_thresh: float = 0.5,\n use_polygons: bool = False,\n mask_shape: Tuple[int, int] = (1024, 1024),\n use_broadcasting: bool = True,\n ) -> None:\n self.iou_thresh = iou_thresh\n self.use_polygons = use_polygons\n self.mask_shape = mask_shape\n self.use_broadcasting = use_broadcasting\n self.reset()\n\n def update(\n self,\n gt_boxes: np.ndarray,\n pred_boxes: np.ndarray,\n gt_labels: List[str],\n pred_labels: List[str],\n ) -> None:\n \"\"\"Updates the metric\n\n Args:\n gt_boxes: a set of relative bounding boxes either of shape (N, 4) or (N, 5) if they are rotated ones\n pred_boxes: a set of relative bounding boxes either of shape (M, 4) or (M, 5) if they are rotated ones\n gt_labels: a list of N string labels\n pred_labels: a list of M string labels\n \"\"\"\n\n if gt_boxes.shape[0] != len(gt_labels) or pred_boxes.shape[0] != len(pred_labels):\n raise AssertionError(\"there should be the same number of boxes and string both for the ground truth \"\n \"and the predictions\")\n\n # Compute IoU\n if pred_boxes.shape[0] > 0:\n if self.use_polygons:\n iou_mat = polygon_iou(gt_boxes, pred_boxes, self.mask_shape, self.use_broadcasting)\n else:\n iou_mat = box_iou(gt_boxes, pred_boxes)\n\n self.tot_iou += float(iou_mat.max(axis=0).sum())\n\n # Assign pairs\n gt_indices, pred_indices = linear_sum_assignment(-iou_mat)\n is_kept = iou_mat[gt_indices, pred_indices] >= self.iou_thresh\n # String comparison\n for gt_idx, pred_idx in zip(gt_indices[is_kept], pred_indices[is_kept]):\n _raw, _caseless, _unidecode, _unicase = string_match(gt_labels[gt_idx], pred_labels[pred_idx])\n self.raw_matches += int(_raw)\n 
self.caseless_matches += int(_caseless)\n self.unidecode_matches += int(_unidecode)\n self.unicase_matches += int(_unicase)\n\n self.num_gts += gt_boxes.shape[0]\n self.num_preds += pred_boxes.shape[0]\n\n def summary(self) -> Tuple[Dict[str, Optional[float]], Dict[str, Optional[float]], Optional[float]]:\n \"\"\"Computes the aggregated metrics\n\n Returns:\n a tuple with the recall & precision for each string comparison and the mean IoU\n \"\"\"\n\n # Recall\n recall = dict(\n raw=self.raw_matches / self.num_gts if self.num_gts > 0 else None,\n caseless=self.caseless_matches / self.num_gts if self.num_gts > 0 else None,\n unidecode=self.unidecode_matches / self.num_gts if self.num_gts > 0 else None,\n unicase=self.unicase_matches / self.num_gts if self.num_gts > 0 else None,\n )\n\n # Precision\n precision = dict(\n raw=self.raw_matches / self.num_preds if self.num_preds > 0 else None,\n caseless=self.caseless_matches / self.num_preds if self.num_preds > 0 else None,\n unidecode=self.unidecode_matches / self.num_preds if self.num_preds > 0 else None,\n unicase=self.unicase_matches / self.num_preds if self.num_preds > 0 else None,\n )\n\n # mean IoU (overall detected boxes)\n mean_iou = self.tot_iou / self.num_preds if self.num_preds > 0 else None\n\n return recall, precision, mean_iou\n\n def reset(self) -> None:\n self.num_gts = 0\n self.num_preds = 0\n self.tot_iou = 0.\n self.raw_matches = 0\n self.caseless_matches = 0\n self.unidecode_matches = 0\n self.unicase_matches = 0\n\n\nclass DetectionMetric:\n r\"\"\"Implements an object detection metric.\n\n The aggregated metrics are computed as follows:\n\n .. math::\n \\forall (B, C) \\in \\mathcal{B}^N \\times \\mathcal{C}^N,\n \\forall (\\hat{B}, \\hat{C}) \\in \\mathcal{B}^M \\times \\mathcal{C}^M, \\\\\n Recall(B, \\hat{B}, C, \\hat{C}) = \\frac{1}{N} \\sum\\limits_{i=1}^N h_{B,C}(\\hat{B}_i, \\hat{C}_i) \\\\\n Precision(B, \\hat{B}, C, \\hat{C}) = \\frac{1}{M} \\sum\\limits_{i=1}^M h_{B,C}(\\hat{B}_i, \\hat{C}_i) \\\\\n meanIoU(B, \\hat{B}) = \\frac{1}{M} \\sum\\limits_{i=1}^M \\max\\limits_{j \\in [1, N]} IoU(\\hat{B}_i, B_j)\n\n with the function :math:`IoU(x, y)` being the Intersection over Union between bounding boxes :math:`x` and\n :math:`y`, and the function :math:`h_{B, C}` defined as:\n\n .. 
math::\n \\forall (b, c) \\in \\mathcal{B} \\times \\mathcal{C},\n h_{B,C}(b, c) = \\left\\{\n \\begin{array}{ll}\n 1 & \\mbox{if } b\\mbox{ has been assigned to a given }B_j\\mbox{ with an } \\\\\n & IoU \\geq 0.5 \\mbox{ and that for this assignment, } c = C_j\\\\\n 0 & \\mbox{otherwise.}\n \\end{array}\n \\right.\n\n where :math:`\\mathcal{B}` is the set of possible bounding boxes,\n :math:`\\mathcal{C}` is the set of possible class indices,\n :math:`N` (number of ground truths) and :math:`M` (number of predictions) are strictly positive integers.\n\n Example::\n >>> import numpy as np\n >>> from deepocr.utils import DetectionMetric\n >>> metric = DetectionMetric(iou_thresh=0.5)\n >>> metric.update(np.asarray([[0, 0, 100, 100]]), np.asarray([[0, 0, 70, 70], [110, 95, 200, 150]]),\n np.zeros(1, dtype=np.int64), np.array([0, 1], dtype=np.int64))\n >>> metric.summary()\n\n Args:\n iou_thresh: minimum IoU to consider a pair of prediction and ground truth as a match\n use_polygons: if set to True, predictions and targets will be expected to have rotated format\n mask_shape: if use_polygons is True, describes the spatial shape of the image used\n use_broadcasting: if use_polygons is True, use broadcasting for IoU computation by consuming more memory\n \"\"\"\n\n def __init__(\n self,\n iou_thresh: float = 0.5,\n use_polygons: bool = False,\n mask_shape: Tuple[int, int] = (1024, 1024),\n use_broadcasting: bool = True,\n ) -> None:\n self.iou_thresh = iou_thresh\n self.use_polygons = use_polygons\n self.mask_shape = mask_shape\n self.use_broadcasting = use_broadcasting\n self.reset()\n\n def update(\n self,\n gt_boxes: np.ndarray,\n pred_boxes: np.ndarray,\n gt_labels: np.ndarray,\n pred_labels: np.ndarray,\n ) -> None:\n \"\"\"Updates the metric\n\n Args:\n gt_boxes: a set of relative bounding boxes either of shape (N, 4) or (N, 5) if they are rotated ones\n pred_boxes: a set of relative bounding boxes either of shape (M, 4) or (M, 5) if they are rotated ones\n gt_labels: an array of class indices of shape (N,)\n pred_labels: an array of class indices of shape (M,)\n \"\"\"\n\n if gt_boxes.shape[0] != gt_labels.shape[0] or pred_boxes.shape[0] != pred_labels.shape[0]:\n raise AssertionError(\"there should be the same number of boxes and string both for the ground truth \"\n \"and the predictions\")\n\n # Compute IoU\n if pred_boxes.shape[0] > 0:\n if self.use_polygons:\n iou_mat = polygon_iou(gt_boxes, pred_boxes, self.mask_shape, self.use_broadcasting)\n else:\n iou_mat = box_iou(gt_boxes, pred_boxes)\n\n self.tot_iou += float(iou_mat.max(axis=0).sum())\n\n # Assign pairs\n gt_indices, pred_indices = linear_sum_assignment(-iou_mat)\n is_kept = iou_mat[gt_indices, pred_indices] >= self.iou_thresh\n # Category comparison\n self.num_matches += int((gt_labels[gt_indices[is_kept]] == pred_labels[pred_indices[is_kept]]).sum())\n\n self.num_gts += gt_boxes.shape[0]\n self.num_preds += pred_boxes.shape[0]\n\n def summary(self) -> Tuple[Optional[float], Optional[float], Optional[float]]:\n \"\"\"Computes the aggregated metrics\n\n Returns:\n a tuple with the recall & precision for each class prediction and the mean IoU\n \"\"\"\n\n # Recall\n recall = self.num_matches / self.num_gts if self.num_gts > 0 else None\n\n # Precision\n precision = self.num_matches / self.num_preds if self.num_preds > 0 else None\n\n # mean IoU (overall detected boxes)\n mean_iou = self.tot_iou / self.num_preds if self.num_preds > 0 else None\n\n return recall, precision, mean_iou\n\n def reset(self) -> None:\n self.num_gts = 
0\n self.num_preds = 0\n self.tot_iou = 0.\n self.num_matches = 0\n",
"# Copyright (C) 2022, Arijit Das.\n# Code adapted from doctr and huggingface\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport cv2\nimport numpy as np\n\nfrom deepocr.utils.common_types import AbstractFile\n\n__all__ = ['read_img_as_numpy']\n\n\ndef read_img_as_numpy(\n file: AbstractFile,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Read an image file into numpy format\n\n Example::\n >>> from deepocr.io.image import read_img_as_numpy as read_img\n >>> page = read_img(\"path/to/your/doc.jpg\")\n\n Args:\n file: the path to the image file\n output_size: the expected output size of each page in format H x W\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n Returns:\n the page decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n if isinstance(file, (str, Path)):\n if not Path(file).is_file():\n raise FileNotFoundError(f\"unable to access {file}\")\n img = cv2.imread(str(file), cv2.IMREAD_COLOR)\n elif isinstance(file, bytes):\n file = np.frombuffer(file, np.uint8)\n img = cv2.imdecode(file, cv2.IMREAD_COLOR)\n else:\n raise TypeError(\"unsupported object type for argument 'file'\")\n\n # Validity check\n if img is None:\n raise ValueError(\"unable to read file.\")\n # Resizing\n if isinstance(output_size, tuple):\n img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n"
] |
[
[
"numpy.array"
],
[
"numpy.split",
"numpy.maximum",
"numpy.minimum",
"numpy.clip",
"numpy.logical_or",
"scipy.optimize.linear_sum_assignment",
"numpy.logical_and",
"numpy.zeros",
"numpy.where"
],
[
"numpy.frombuffer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
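The detection/OCR metrics stored in the row above all delegate pairwise box overlap to a `box_iou` helper that is not part of this excerpt. Below is a minimal sketch of what such an axis-aligned IoU matrix computation could look like, assuming boxes in `(xmin, ymin, xmax, ymax)` format; the function name and implementation are illustrative, not the deepocr code.

import numpy as np

def box_iou_sketch(gts: np.ndarray, preds: np.ndarray) -> np.ndarray:
    """Illustrative IoU matrix for axis-aligned boxes in (xmin, ymin, xmax, ymax) format.

    Returns an array of shape (N, M) where entry (i, j) is IoU(gts[i], preds[j]).
    This is an assumption about the helper's behaviour, not the library code.
    """
    # Broadcast corners: ground truths along axis 0, predictions along axis 1
    left = np.maximum(gts[:, None, 0], preds[None, :, 0])
    top = np.maximum(gts[:, None, 1], preds[None, :, 1])
    right = np.minimum(gts[:, None, 2], preds[None, :, 2])
    bottom = np.minimum(gts[:, None, 3], preds[None, :, 3])

    # Clamp negative widths/heights to zero for non-overlapping pairs
    inter = np.clip(right - left, 0, None) * np.clip(bottom - top, 0, None)
    area_gt = (gts[:, 2] - gts[:, 0]) * (gts[:, 3] - gts[:, 1])
    area_pred = (preds[:, 2] - preds[:, 0]) * (preds[:, 3] - preds[:, 1])
    union = area_gt[:, None] + area_pred[None, :] - inter
    return inter / np.maximum(union, 1e-12)

With the boxes from the docstring example (`[[0, 0, 100, 100]]` against `[[0, 0, 70, 70], [110, 95, 200, 150]]`) this gives IoUs of 0.49 and 0.0, so neither pair clears the default 0.5 threshold.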
rajsemlawat02/NER-using-Deep-Learning
|
[
"56f9248e890b579eb1a93352fb7420fd2a137c83",
"56f9248e890b579eb1a93352fb7420fd2a137c83"
] |
[
"Task 3: Hindi data/process_data.py",
"Task 3: Hindi data/get_word_vectors.py"
] |
[
"import numpy as np\nfrom keras.preprocessing import sequence\n# For getting English word vectors\nfrom get_word_vectors import get_word_vector, get_sentence_vectors\nimport codecs\n\n\nclass DataHandler():\n\t\"\"\"\n\tClass for handling all data processing and preparing training/testing data\"\"\"\n\n\tdef __init__(self, datapath):\n\t\t# Default values\n\t\tself.LEN_NAMED_CLASSES = 12 # 4 names and 1 null class\n\t\tself.NULL_CLASS = \"O\"\n\t\tself.LEN_WORD_VECTORS = 50\n\n\t\tself.tags = []\n\t\t# string tags mapped to int and one hot vectors \n\t\tself.tag_id_map = {}\n\t\tself.tag_to_one_hot_map = {}\n\n\t\t# All data(to be filled by read_data method)\n\t\tself.x = []\n\t\tself.y = []\n\n\t\tself.read_data(datapath)\n\n\tdef read_data(self, datapath):\n\t\t_id = 0\n\t\tsentence = []\n\t\tsentence_tags = []\n\t\tall_data = []\n\t\tpos = 0\n\t\tis_entity = False\n\t\twith codecs.open(datapath, 'r') as f:\n\t\t\tfor l in f:\t\t\t\t\n\t\t\t\tline = l.strip().split()\n\t\t\t\tif line:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tword, named_tag = line[0], line[1]\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif named_tag != self.NULL_CLASS:\n\t\t\t\t\t\tis_entity = True\n\t\t\t\t\tif named_tag not in self.tags:\n\t\t\t\t\t\tself.tags.append(named_tag)\n\t\t\t\t\t\tself.tag_id_map[_id] = named_tag\n\t\t\t\t\t\tone_hot_vec = np.zeros(self.LEN_NAMED_CLASSES, dtype = np.int32)\n\t\t\t\t\t\tone_hot_vec[_id] = 1\n\t\t\t\t\t\tself.tag_to_one_hot_map[named_tag] = one_hot_vec\n\n\t\t\t\t\t\t_id+=1;\n\n\t\t\t\t\t# Get word vectors for given word\t\n\t\t\t\t\tsentence.append(get_word_vector(word)[:self.LEN_WORD_VECTORS])\n\t\t\t\t\tsentence_tags.append(self.tag_to_one_hot_map[named_tag])\n\t\t\t\telse:\n\t\t\t\t\tif not is_entity:\n\t\t\t\t\t\tis_entity = False\n\t\t\t\t\t\tsentence_tags = []\n\t\t\t\t\t\tsentence = []\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tall_data.append( (sentence, sentence_tags) );\n\t\t\t\t\tsentence_tags = []\n\t\t\t\t\tsentence = []\n\t\t\t\t\tis_entity = False\n\n\t\t\t\tif pos > 1000000:\n\t\t\t\t\tbreak;\n\t\t\t\tpos+=1\n\n\t\t#Find length of largest sentence\n\t\tself.max_len = 0\n\t\tfor pair in all_data:\n\t\t\tif self.max_len < len(pair[0]):\n\t\t\t\tself.max_len = len(pair[0])\n\n\t\tfor vectors, one_hot_tags in all_data:\n\t\t\t# Pad the sequences and make them all of same length\n\t\t\ttemp_X = np.zeros(self.LEN_WORD_VECTORS, dtype = np.int32)\n\t\t\ttemp_Y = np.array(self.tag_to_one_hot_map[self.NULL_CLASS])\n\t\t\tpad_length = self.max_len - len(vectors)\n\n\t\t\t#Insert into main data list\n\t\t\tself.x.append( ((pad_length)*[temp_X]) + vectors)\n\t\t\tself.y.append( ((pad_length)*[temp_Y]) + one_hot_tags)\n\n\t\tself.x = np.array(self.x)\n\t\tself.y = np.array(self.y)\n\n\tdef get_data(self):\n\t\t# Returns proper data for training/testing\n\t\treturn (self.x, self.y)\n\n\tdef encode_sentence(self, sentence):\n\t\tvectors = get_sentence_vectors(sentence)\n\t\tvectors = [v[:self.LEN_WORD_VECTORS] for v in vectors]\n\t\treturn sequence.pad_sequences([vectors], maxlen=self.max_len, dtype=np.float32)\n\n\tdef decode_result(self, result_sequence):\n\t\tpred_named_tags = []\n\t\tfor pred in result_sequence:\n\t\t\t_id = np.argmax(pred)\n\t\t\tpred_named_tags.append(self.tag_id_map[_id])\n\t\treturn pred_named_tags\n\n\n\n\n\n\n",
"import gensim.models.word2vec as w2v\nimport numpy as np\nimport os\n\ntrained_model = w2v.Word2Vec.load(os.path.join(\"../data/\", \"hindi_word2Vec_small.w2v\"))\n\ndef get_sentence_vectors(sentence):\n\t\"\"\"\n\tReturns word vectors for complete sentence as a python list\"\"\"\n\ts = sentence.strip().split()\n\tvec = [ get_word_vector(word) for word in s ]\n\treturn vec\n\ndef get_word_vector(word):\n\t\"\"\"\n\tReturns word vectors for a single word as a python list\"\"\"\n\ts = word.decode(\"utf-8\")\n\ttry:\n\t\tvect = trained_model.wv[s]\n\texcept:\n\t\tvect = np.zeros(50, dtype = np.float32)\n\treturn vect\n\n\t"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.argmax"
],
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
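The NER pipeline stored in the row above left-pads every sentence to the longest sentence length with a zero word vector and the one-hot vector of the null class before stacking the training tensors. Below is a standalone numpy illustration of that padding scheme; the sizes are the ones hard-coded above (50-dim word vectors, 12 tag classes), the sentences are random placeholders, and the null-class index is assumed to be 0.

import numpy as np

LEN_WORD_VECTORS, LEN_NAMED_CLASSES = 50, 12
null_tag = np.zeros(LEN_NAMED_CLASSES, dtype=np.int32)
null_tag[0] = 1  # assume index 0 was assigned to the "O" class

# Two made-up sentences of 3 and 5 tokens, each token a 50-dim vector with an "O" tag
sentences = [np.random.rand(3, LEN_WORD_VECTORS), np.random.rand(5, LEN_WORD_VECTORS)]
tags = [np.tile(null_tag, (3, 1)), np.tile(null_tag, (5, 1))]

max_len = max(len(s) for s in sentences)
# Left-pad with zero word vectors / null-class tags so all sequences share max_len
x = np.stack([np.vstack([np.zeros((max_len - len(s), LEN_WORD_VECTORS)), s]) for s in sentences])
y = np.stack([np.vstack([np.tile(null_tag, (max_len - len(t), 1)), t]) for t in tags])
print(x.shape, y.shape)  # (2, max_len, 50) and (2, max_len, 12)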
gtesei/fast-furious
|
[
"b974e6b71be92ad8892864794af57631291ebac1",
"b974e6b71be92ad8892864794af57631291ebac1",
"b974e6b71be92ad8892864794af57631291ebac1",
"b974e6b71be92ad8892864794af57631291ebac1",
"b974e6b71be92ad8892864794af57631291ebac1",
"b974e6b71be92ad8892864794af57631291ebac1"
] |
[
"competitions/deloitte/base3_dev1_no_outlier.py",
"competitions/deloitte/base4.py",
"dataset/images2/simple_classification.py",
"dataset/images2/serializerDogsCatsSURF_Train.py",
"competitions/jigsaw-toxic-comment-classification-challenge/gru9___all_data.py",
"competitions/quora-question-pairs/pv.py"
] |
[
"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nimport re \nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nfrom sklearn.cluster import MiniBatchKMeans\n\n\ndef process_am(x):\n aa = ''\n if type(x) == pd.core.series.Series:\n x = x.values\n aa = [aa + x[i] for i in range(len(x))]\n aa = aa[0]\n aa = re.sub('\"',\" \", aa)\n elif type(x) == str:\n aa = x\n aa = re.sub('\"',\" \", aa)\n aal = []\n _aal = aa.split(',')\n for aa in _aal:\n aa = re.sub(\"{\",\" \", aa)\n aa = re.sub(\"}\",\" \", aa)\n aa = re.sub(\",\",\" \", aa)\n aa = re.sub(\":\",\" \", aa)\n aa = re.sub('’n',\"\", aa)\n aa = aa.strip()\n aa = re.sub('\\s+',\"_\", aa)\n aa = aa.lower()\n if len(aa)>0: \n aal.append(aa)\n return dict.fromkeys(set(aal), 1)\n\ndef perc2float(x):\n return float(x.strip('%'))/100\n\n\n########################\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\n\nprint(\"train:\",train.shape)\nprint(\"test:\",test.shape)\n\n# 1. log_price\nprint(\"1. log_price\")\ny_train = train['log_price']\ntrain = train.drop(['log_price'],axis=1)\nassert train.shape[1] == test.shape[1]\nfor i in range(train.shape[1]):\n assert train.columns[i] == test.columns[i]\n\ntrain_obs = len(train)\nall_data = pd.concat([train,test],axis=0)\n\n# 2. property_type, room_type, bed_type\nprint('--------------> Feature Engineering ... ')\nprint(\"2. property_type, room_type, bed_type\")\nencoder = LabelEncoder()\nencoder.fit(all_data['property_type']) \nall_data['property_type'] = encoder.transform(all_data['property_type'])\n\nall_data['room_type'] = all_data['room_type'].map( {'Entire home/apt':5, 'Private room':3, 'Shared room':1})\n\nall_data.bed_type = all_data.bed_type.fillna('missing')\nencoder = LabelEncoder()\nencoder.fit(all_data['bed_type']) \nall_data['bed_type'] = encoder.transform(all_data['bed_type'])\n\n# 3. amenities \nprint(\"3. amenities\")\nam_list = [process_am( all_data.iloc[i]['amenities']) for i in range(len(all_data))]\nassert len(am_list) == len(all_data)\nv = DictVectorizer(sparse=False)\nX = v.fit_transform(am_list)\namenities_df = pd.DataFrame(data=X,columns=v.feature_names_)\namenities_df.index = all_data.index\nall_data = pd.concat([all_data,amenities_df],axis=1)\nall_data = all_data.drop(['amenities'],axis=1)\ndel amenities_df\n\n#4. accommodates , bathrooms\n\n#5. cancellation_policy, cleaning_fee\nprint(\"5. cancellation_policy, cleaning_fee\")\nall_data['cancellation_policy'] = all_data['cancellation_policy'].map( {\n 'super_strict_60':20, \n 'super_strict_30':30, \n 'strict':50,\n 'moderate':10,\n 'flexible':5,\n 'long_term':1,\n})\n\nall_data['cleaning_fee'] = all_data['cleaning_fee'].map( {\n True:1, \n False:0\n})\n\n# 6. city\nprint(\"6. city\")\nencoder = LabelEncoder()\nencoder.fit(all_data['city']) \nall_data['city'] = encoder.transform(all_data['city'])\n\n# 7. description TODO\nprint(\"7. description ... TODO\")\nall_data['description'] = all_data['description'].fillna('')\nall_data = all_data.drop(['description'],axis=1)\n\n\n# 8. first_review , last_review , number_of_reviews , review_scores_rating\nprint(\"7. 8. first_review , last_review , number_of_reviews , review_scores_rating ... 
TODO better\")\nmost_recent_review = pd.to_datetime(all_data.last_review).max()\ndelta_last_review = most_recent_review - pd.to_datetime(all_data.last_review)\ndelta_last_review = delta_last_review.fillna(-1)\ndelta_last_review = delta_last_review.map(lambda x: x.total_seconds()/(60*60*24))\nall_data['delta_most_recent_review'] = delta_last_review\n\ndelta_rev = pd.to_datetime(all_data.last_review) - pd.to_datetime(all_data.first_review)\ndelta_rev = delta_rev.fillna(-1)\ndelta_rev = delta_rev.map(lambda x: x.total_seconds()/(60*60*24))\nall_data['delta_rev'] = delta_rev\n\nall_data = all_data.drop(['first_review','last_review'],axis=1)\nall_data['review_scores_rating'] = all_data['review_scores_rating'].fillna(-1)\n\n# 9. host_has_profile_pic, host_identity_verified, host_since\nprint(\"9. host_has_profile_pic, host_identity_verified, host_since \")\nall_data['host_has_profile_pic'] = all_data['host_has_profile_pic'].fillna('f')\nall_data['host_identity_verified'] = all_data['host_identity_verified'].fillna('f')\nall_data['host_has_profile_pic'] = all_data['host_has_profile_pic'].map({'t':1,'f':0})\nall_data['host_identity_verified'] = all_data['host_identity_verified'].map({'t':1,'f':0})\n\nall_data['is_host_since_na'] = all_data['host_since'].isnull().map({True: 1 , False: 0 }) \nhost_oldest = pd.to_datetime(all_data.host_since).min()\ndelta_host = pd.to_datetime(all_data.host_since) - host_oldest \ndelta_host = delta_host.fillna(-1)\ndelta_host = delta_host.map(lambda x: x.total_seconds()/(60*60*24))\nall_data['delta_host'] = delta_host\n\ndelta_host_lev = np.zeros(len(all_data))\nfor i in range(len(all_data)):\n if all_data.iloc[i]['is_host_since_na'] == 1:\n delta_host_lev[i] = -1\n elif all_data.iloc[i]['delta_host'] < 1871.0:\n delta_host_lev[i] = 1\n elif all_data.iloc[i]['delta_host'] < 2398.0: \n delta_host_lev[i] = 2\n else:\n delta_host_lev[i] = 3\nall_data['delta_host_lev'] = delta_host_lev \n\nhost_since_year = pd.to_datetime(all_data['host_since']).dt.year\nhost_since_year = host_since_year.fillna(2018)\nall_data['host_since_year'] = host_since_year\nhost_since_month = pd.to_datetime(all_data['host_since']).dt.month\nhost_since_month = host_since_month.fillna(-1)\nall_data['host_since_month'] = host_since_month\n\nall_data = all_data.drop(['host_since'],axis=1)\n\n# 10. host_response_rate , instant_bookable\nprint(\"10. host_response_rate , instant_bookable \")\nall_data['instant_bookable'] = all_data['instant_bookable'].map({'t':1,'f':0})\nall_data.host_response_rate = all_data.host_response_rate.fillna('0%')\nall_data.host_response_rate = all_data.host_response_rate.apply(perc2float)\n\n\n# 11. latitude,longitude TODO ... leave as-is for now \nprint(\"11. latitude,longitude .......... 
TODO \")\nkmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_100'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=1000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_1000'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=1500, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_1500'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=2000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_2000'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=3000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_3000'] = kmeans.predict(all_data[['latitude','longitude']])\n\n# 12. name, neighbourhood, thumbnail_url, zipcode \nprint(\"11. name, neighbourhood, thumbnail_url, zipcode .......... TODO better \")\nall_data['thumbnail_url_ok'] = 0 \nall_data['thumbnail_url_ok'] [all_data.thumbnail_url.isnull() == False ] = 1\n\n# neighbourhood\nall_data['is_neighbourhood_na'] = all_data['neighbourhood'].isnull().map({True: 1 , False: 0 }) \nall_data['neighbourhood'] = all_data['neighbourhood'].fillna('UKN')\nencoder = LabelEncoder()\nencoder.fit(all_data['neighbourhood']) \nall_data['neighbourhood'] = encoder.transform(all_data['neighbourhood'])\n\n# zipcode\nall_data['is_zipcode_na'] = all_data['zipcode'].isnull().map({True: 1 , False: 0 }) \nall_data['zipcode'] = all_data['zipcode'].fillna('UKN')\nencoder = LabelEncoder()\nencoder.fit(all_data['zipcode']) \nall_data['zipcode'] = encoder.transform(all_data['zipcode'])\n\n\nall_data = all_data.drop(['name','thumbnail_url',],axis=1)\n\n\n# 12. bedrooms, beds , bed_type , bathrooms\nall_data['is_bedrooms_na'] = all_data['bedrooms'].isnull().map({True: 1 , False: 0 }) \nall_data['is_beds_na'] = all_data['beds'].isnull().map({True: 1 , False: 0 }) \nall_data['is_bathrooms_na'] = all_data['bathrooms'].isnull().map({True: 1 , False: 0 }) \nall_data.bedrooms = all_data.bedrooms.fillna(0)\nall_data.beds = all_data.beds.fillna(0)\nall_data.bathrooms = all_data.bathrooms.fillna(0)\n\n## remove 2 outliers \nprint(\">> removing 2 outliers ... \")\nall_data = all_data.drop((y_train).argsort()[:2])\ny_train = y_train.drop((y_train).argsort()[:2])\nall_data.index = range(len(all_data))\ntrain_obs = train_obs - 2 \n\n\n## rem sequnece \nall_data = all_data.drop(['id'],axis=1)\n\nassert np.sum(all_data.isnull()).sum() == 0 \n\n################## \nprint('--------------> Modeling ... ')\nXtr, Xv, ytr, yv = train_test_split(all_data[:train_obs].values, y_train, test_size=0.1, random_state=1973)\ndtrain = xgb.DMatrix(Xtr, label=ytr)\ndvalid = xgb.DMatrix(Xv, label=yv)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nwatchlist = [(dtrain, 'train'), (dvalid, 'valid')]\n\n#Try different parameters! 
My favorite is random search :)\nxgb_pars = {'min_child_weight': 50,\n 'eta': 0.005,\n 'colsample_bytree': 0.3,\n 'max_depth': 10,\n 'subsample': 0.8,\n 'lambda': 0.5,\n 'nthread': -1,\n 'booster' : 'gbtree',\n 'silent': 1,\n 'eval_metric': 'rmse',\n 'objective': 'reg:linear'}\n\nmodel = xgb.train(xgb_pars, dtrain, 10000, watchlist, early_stopping_rounds=50,maximize=False, verbose_eval=10)\n\nprint('Modeling RMSE %.5f' % model.best_score)\n\nprint('--------------> Submission ... ')\ntest['log_price'] = model.predict(dtest)\nsubfn = \"base3dev1_noOut_val_\"+str(model.best_score)+\"__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[['id', 'log_price']].to_csv(subfn, index=False)\n\nprint('--------------> Retrain all data + Feature importance ... ')\ndtrain = xgb.DMatrix(all_data[:train_obs].values, label=y_train)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nmodel = xgb.train(xgb_pars, dtrain, model.best_iteration+5, maximize=False, verbose_eval=10)\nprint('-----> Submission ... ')\ntest['log_price'] = model.predict(dtest)\nsubfn = \"base3dev1_noOut_all_data__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[['id', 'log_price']].to_csv(subfn, index=False)\n\nprint('-----> Feature importance ... ')\nfeature_names = all_data.columns\nfeature_importance_dict = model.get_fscore()\nfs = ['f%i' % i for i in range(len(feature_names))]\nf1 = pd.DataFrame({'f': list(feature_importance_dict.keys()), 'importance': list(feature_importance_dict.values())})\nf2 = pd.DataFrame({'f': fs, 'feature_name': feature_names})\nfeature_importance = pd.merge(f1, f2, how='right', on='f')\nfeature_importance = feature_importance.fillna(0)\nfeature_importance.sort_values(by='importance', ascending=False)\nprint(feature_importance.sort_values)\nsubfn = \"error__feat_importance_base3dev1_noOut.csv\" \nfeature_importance.to_csv(subfn, index=False) \n\n\n\n\n\n",
"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nimport re \nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nfrom sklearn.cluster import MiniBatchKMeans\nimport nltk \nfrom nltk.corpus import stopwords\nimport os \n\nstops = set(stopwords.words(\"english\"))\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\ndef load_w2v_model(dir = './' , we_fn = 'glove.840B.300d.txt'):\n print(' >> Indexing word vectors ...')\n embeddings_index = {}\n f = open(os.path.join(dir, we_fn))\n for line in f:\n values = line.split(' ')\n word = values[0] #print(\"values:\",values)\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print(' >> Found %s word vectors. [1]' % len(embeddings_index))\n return embeddings_index\n\ndef count_desc_len(x):\n return len(review_to_sentences(x, tokenizer=tokenizer, stops=stops, remove_stopwords=True))\n\ndef string_to_wordlist( review, stops, remove_stopwords ):\n # Function to convert a document to a sequence of words,\n # optionally removing stop words. Returns a list of words.\n #\n # 1. Remove HTML\n #review_text = BeautifulSoup(review,'html.parser').get_text()\n #\n # 2. Remove non-letters\n review_text = re.sub(\"[^a-zA-Z]\",\" \", review)\n #\n # 3. Convert words to lower case and split them\n words = review_text.lower().split()\n #\n # 4. Optionally remove stop words (false by default)\n if remove_stopwords:\n #stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n #\n # 5. Return a list of words\n return(words)\n\n\n# Define a function to split a review into parsed sentences\ndef review_to_sentences( review, tokenizer, stops , remove_stopwords ):\n # Function to split a review into parsed sentences. Returns a\n # list of sentences, where each sentence is a list of words\n #\n # 1. Use the NLTK tokenizer to split the paragraph into sentences\n raw_sentences = tokenizer.tokenize(review.strip())\n #\n # 2. Loop over each sentence\n sentences = []\n for raw_sentence in raw_sentences:\n # If a sentence is empty, skip it\n if len(raw_sentence) > 0:\n # Otherwise, call review_to_wordlist to get a list of words\n sentences.extend( string_to_wordlist( raw_sentence, stops = stops , remove_stopwords=remove_stopwords ))\n #\n # Return the list of sentences (each sentence is a list of words,\n # so this returns a list of lists\n return sentences\n\n\ndef process_am(x):\n aa = ''\n if type(x) == pd.core.series.Series:\n x = x.values\n aa = [aa + x[i] for i in range(len(x))]\n aa = aa[0]\n aa = re.sub('\"',\" \", aa)\n elif type(x) == str:\n aa = x\n aa = re.sub('\"',\" \", aa)\n aal = []\n _aal = aa.split(',')\n for aa in _aal:\n aa = re.sub(\"{\",\" \", aa)\n aa = re.sub(\"}\",\" \", aa)\n aa = re.sub(\",\",\" \", aa)\n aa = re.sub(\":\",\" \", aa)\n aa = re.sub('’n',\"\", aa)\n aa = aa.strip()\n aa = re.sub('\\s+',\"_\", aa)\n aa = aa.lower()\n if len(aa)>0: \n aal.append(aa)\n return dict.fromkeys(set(aal), 1)\n\ndef perc2float(x):\n return float(x.strip('%'))/100\n\n\n########################\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\n\nprint(\"train:\",train.shape)\nprint(\"test:\",test.shape)\n\n# 1. log_price\nprint(\"1. 
log_price\")\ny_train = train['log_price']\ntrain = train.drop(['log_price'],axis=1)\nassert train.shape[1] == test.shape[1]\nfor i in range(train.shape[1]):\n assert train.columns[i] == test.columns[i]\n\ntrain_obs = len(train)\nall_data = pd.concat([train,test],axis=0)\n\n# 2. property_type, room_type, bed_type\nprint('--------------> Feature Engineering ... ')\nprint(\"2. property_type, room_type, bed_type\")\nencoder = LabelEncoder()\nencoder.fit(all_data['property_type']) \nall_data['property_type'] = encoder.transform(all_data['property_type'])\n\nall_data['room_type'] = all_data['room_type'].map( {'Entire home/apt':5, 'Private room':3, 'Shared room':1})\n\nall_data.bed_type = all_data.bed_type.fillna('missing')\nencoder = LabelEncoder()\nencoder.fit(all_data['bed_type']) \nall_data['bed_type'] = encoder.transform(all_data['bed_type'])\n\n# 3. amenities \nprint(\"3. amenities\")\nam_list = [process_am( all_data.iloc[i]['amenities']) for i in range(len(all_data))]\nassert len(am_list) == len(all_data)\nv = DictVectorizer(sparse=False)\nX = v.fit_transform(am_list)\namenities_df = pd.DataFrame(data=X,columns=v.feature_names_)\namenities_df.index = all_data.index\nall_data = pd.concat([all_data,amenities_df],axis=1)\nall_data = all_data.drop(['amenities'],axis=1)\ndel amenities_df\n\n#4. accommodates , bathrooms\nall_data.bathrooms = all_data.bathrooms.fillna(0)\n\n#5. cancellation_policy, cleaning_fee\nprint(\"5. cancellation_policy, cleaning_fee\")\nall_data['cancellation_policy'] = all_data['cancellation_policy'].map( {\n 'super_strict_60':20, \n 'super_strict_30':30, \n 'strict':50,\n 'moderate':10,\n 'flexible':5,\n 'long_term':1,\n})\n\nall_data['cleaning_fee'] = all_data['cleaning_fee'].map( {\n True:1, \n False:0\n})\n\n# 6. city\nprint(\"6. city\")\nencoder = LabelEncoder()\nencoder.fit(all_data['city']) \nall_data['city'] = encoder.transform(all_data['city'])\n\n# 7. description TODO\nprint(\"7. description ... 
TODO\")\nall_data['description'] = all_data['description'].fillna('')\n\nall_data['description_len'] = all_data['description'].apply(count_desc_len)\n\nembeddings_index = load_w2v_model()\n\nfeatureVec = np.zeros((len(all_data),300),dtype=\"float32\")\nwarn_w2v = 0 \nfor i in range(len(all_data)):\n words = review_to_sentences(all_data.iloc[i]['description'], tokenizer=tokenizer, stops=stops, remove_stopwords=True)\n featureVec_i = np.zeros((300),dtype=\"float32\")\n #\n nwords = 0.\n # \n #\n # Loop over each word in the review and, if it is in the model's\n # vocaublary, add its feature vector to the total\n for word in words:\n if word in embeddings_index.keys(): \n nwords = nwords + 1.\n featureVec_i = np.add(featureVec_i,embeddings_index[word])\n # \n # Divide the result by the number of words to get the average\n if nwords > 0: \n featureVec_i = np.divide(featureVec_i,nwords)\n else:\n #print(\">>> WARNING <<< No words in vocaublary\")\n warn_w2v = warn_w2v + 1 \n #print(str(words))\n featureVec[i] = featureVec_i\n\nprint(\" >> No words in vocaublary for \",warn_w2v,\"cases\")\n\ndesc_w2v = pd.DataFrame(data=featureVec , columns=['desc_w2v_'+str(i) for i in range(300)])\ndesc_w2v.index = all_data.index\nall_data = pd.concat([all_data,desc_w2v],axis=1)\n\nkmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(featureVec) ## TODO: tune the number of cluster \nall_data.loc[:, 'w2v_desc_cluster_100'] = kmeans.predict(featureVec)\n\nkmeans = MiniBatchKMeans(n_clusters=1000, batch_size=10000).fit(featureVec) ## TODO: tune the number of cluster \nall_data.loc[:, 'w2v_desc_cluster_1000'] = kmeans.predict(featureVec)\n\nkmeans = MiniBatchKMeans(n_clusters=3000, batch_size=10000).fit(featureVec) ## TODO: tune the number of cluster \nall_data.loc[:, 'w2v_desc_cluster_3000'] = kmeans.predict(featureVec)\n\nall_data = all_data.drop(['description'],axis=1)\n\n\n# 8. first_review , last_review , number_of_reviews , review_scores_rating\nprint(\"7. 8. first_review , last_review , number_of_reviews , review_scores_rating ... TODO better\")\nmost_recent_review = pd.to_datetime(all_data.last_review).max()\ndelta_last_review = most_recent_review - pd.to_datetime(all_data.last_review)\ndelta_last_review = delta_last_review.fillna(-1)\ndelta_last_review = delta_last_review.map(lambda x: x.total_seconds()/(60*60*24))\nall_data['delta_most_recent_review'] = delta_last_review\n\ndelta_rev = pd.to_datetime(all_data.last_review) - pd.to_datetime(all_data.first_review)\ndelta_rev = delta_rev.fillna(-1)\ndelta_rev = delta_rev.map(lambda x: x.total_seconds()/(60*60*24))\nall_data['delta_rev'] = delta_rev\n\ndelta_rev_density = all_data.number_of_reviews+0.0000000000000001 / delta_rev\ndelta_rev_density = delta_rev_density.fillna(0)\nall_data['delta_rev_density'] = delta_rev_density\n\nall_data = all_data.drop(['first_review','last_review'],axis=1)\nall_data['review_scores_rating'] = all_data['review_scores_rating'].fillna(-1)\n\n# 9. host_has_profile_pic, host_identity_verified, host_since\nprint(\"9. 
host_has_profile_pic, host_identity_verified, host_since \")\nall_data['host_has_profile_pic'] = all_data['host_has_profile_pic'].fillna('f')\nall_data['host_identity_verified'] = all_data['host_identity_verified'].fillna('f')\nall_data['host_has_profile_pic'] = all_data['host_has_profile_pic'].map({'t':1,'f':0})\nall_data['host_identity_verified'] = all_data['host_identity_verified'].map({'t':1,'f':0})\n\nhost_oldest = pd.to_datetime(all_data.host_since).min()\ndelta_host = pd.to_datetime(all_data.host_since) - host_oldest \ndelta_host = delta_host.fillna(-1)\ndelta_host = delta_host.map(lambda x: x.total_seconds()/(60*60*24))\nall_data['delta_host'] = delta_host\n\nall_data = all_data.drop(['host_since'],axis=1)\n\n# 10. host_response_rate , instant_bookable\nprint(\"10. host_response_rate , instant_bookable \")\nall_data['instant_bookable'] = all_data['instant_bookable'].map({'t':1,'f':0})\nall_data.host_response_rate = all_data.host_response_rate.fillna('0%')\nall_data.host_response_rate = all_data.host_response_rate.apply(perc2float)\n\n\n# 11. latitude,longitude TODO ... leave as-is for now \nprint(\"11. latitude,longitude .......... TODO \")\nkmeans = MiniBatchKMeans(n_clusters=100, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_100'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=1000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_1000'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=3000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_3000'] = kmeans.predict(all_data[['latitude','longitude']])\nkmeans = MiniBatchKMeans(n_clusters=5000, batch_size=10000).fit(all_data[['latitude','longitude']]) ## TODO: tune the number of cluster \nall_data.loc[:, 'geo_cluster_5000'] = kmeans.predict(all_data[['latitude','longitude']])\n\n# 12. name, neighbourhood, thumbnail_url, zipcode \nprint(\"11. name, neighbourhood, thumbnail_url, zipcode .......... TODO better \")\nall_data['thumbnail_url_ok'] = 0 \nall_data['thumbnail_url_ok'] [all_data.thumbnail_url.isnull() == False ] = 1\n\nall_data['neighbourhood'] = all_data['neighbourhood'].fillna('UKN')\nencoder = LabelEncoder()\nencoder.fit(all_data['neighbourhood']) \nall_data['neighbourhood'] = encoder.transform(all_data['neighbourhood'])\n\nall_data['zipcode'] = all_data['zipcode'].fillna('UKN')\nencoder = LabelEncoder()\nencoder.fit(all_data['zipcode']) \nall_data['zipcode'] = encoder.transform(all_data['zipcode'])\n\nall_data = all_data.drop(['name','thumbnail_url',],axis=1)\n\n\n# 12. bedrooms, beds , bed_type \nall_data.bedrooms = all_data.bedrooms.fillna(0)\nall_data.beds = all_data.beds.fillna(0)\n\n\n## rem sequnece \nall_data = all_data.drop(['id'],axis=1)\n\nassert np.sum(all_data.isnull()).sum() == 0 \n\n################## \nprint('--------------> Modeling ... ')\nXtr, Xv, ytr, yv = train_test_split(all_data[:train_obs].values, y_train, test_size=0.1, random_state=1973)\ndtrain = xgb.DMatrix(Xtr, label=ytr)\ndvalid = xgb.DMatrix(Xv, label=yv)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nwatchlist = [(dtrain, 'train'), (dvalid, 'valid')]\n\n#Try different parameters! 
My favorite is random search :)\nxgb_pars = {'min_child_weight': 50,\n 'eta': 0.01,\n 'colsample_bytree': 0.3,\n 'max_depth': 10,\n 'subsample': 0.8,\n 'lambda': 0.5,\n 'nthread': -1,\n 'booster' : 'gbtree',\n 'silent': 1,\n 'eval_metric': 'rmse',\n 'objective': 'reg:linear'}\n\nmodel = xgb.train(xgb_pars, dtrain, 10000, watchlist, early_stopping_rounds=50,maximize=False, verbose_eval=10)\n\nprint('Modeling RMSE %.5f' % model.best_score)\n\nprint('--------------> Submission ... ')\ntest['log_price'] = model.predict(dtest)\nsubfn = \"base4plus__val_\"+str(model.best_score)+\"__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[['id', 'log_price']].to_csv(subfn, index=False)\n\nprint('--------------> Retrain all data + Feature importance ... ')\ndtrain = xgb.DMatrix(all_data[:train_obs].values, label=y_train)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nmodel = xgb.train(xgb_pars, dtrain, model.best_iteration+5, maximize=False, verbose_eval=10)\nprint('-----> Submission ... ')\ntest['log_price'] = model.predict(dtest)\nsubfn = \"base4plus__all_data__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[['id', 'log_price']].to_csv(subfn, index=False)\n\nprint('-----> Feature importance ... ')\nfeature_names = all_data.columns\nfeature_importance_dict = model.get_fscore()\nfs = ['f%i' % i for i in range(len(feature_names))]\nf1 = pd.DataFrame({'f': list(feature_importance_dict.keys()), 'importance': list(feature_importance_dict.values())})\nf2 = pd.DataFrame({'f': fs, 'feature_name': feature_names})\nfeature_importance = pd.merge(f1, f2, how='right', on='f')\nfeature_importance = feature_importance.fillna(0)\nfeature_importance.sort_values(by='importance', ascending=False)\nprint(feature_importance.sort_values)\nsubfn = \"error__feat_importance_base4plus.csv\" \nfeature_importance.to_csv(subfn, index=False) \n\n\n\n\n\n",
"import mahotas as mh\nfrom sklearn import cross_validation\nfrom sklearn.linear_model.logistic import LogisticRegression\nimport numpy as np\nfrom glob import glob\nfrom edginess import edginess_sobel\n\n#basedir = 'simple-dataset'\n\nbasedir = 'simple-dataset/'\n\ndef features_for(im):\n im = mh.imread(im,as_grey=True).astype(np.uint8)\n return mh.features.haralick(im).mean(0)\n\nfeatures = []\nsobels = []\nlabels = []\nimages = glob('{}/*.jpg'.format(basedir))\nfor im in images:\n features.append(features_for(im))\n sobels.append(edginess_sobel(mh.imread(im, as_grey=True)))\n labels.append(im[:-len('00.jpg')])\n\nfeatures = np.array(features)\nlabels = np.array(labels)\n\nn = features.shape;\nnl = labels.shape;\n\nprint('features='+str(n))\nprint(str(features))\nprint ('labels='+str(nl))\nprint(str(labels))\n\nscores = cross_validation.cross_val_score(LogisticRegression(), features, labels, cv=5)\nprint('Accuracy (5 fold x-val) with Logistic Regrssion [std features]: {}%'.format(0.1* round(1000*scores.mean())))\n\nscores = cross_validation.cross_val_score(LogisticRegression(), np.hstack([np.atleast_2d(sobels).T,features]), labels, cv=5).mean()\nprint('Accuracy (5 fold x-val) with Logistic Regrssion [std features + sobel]: {}%'.format(0.1* round(1000*scores.mean())))\n\n",
"import mahotas as mh\nfrom sklearn import cross_validation\nfrom sklearn.linear_model.logistic import LogisticRegression\nimport numpy as np\nfrom glob import glob\nfrom edginess import edginess_sobel\n\n\ndef features_for(im):\n im = mh.imread(im,as_grey=True).astype(np.uint8)\n #return mh.features.haralick(im).mean(0)\n return np.squeeze(mh.features.haralick(im)).reshape(-1)\n\nfeatures = []\nsobels = []\nlabels = []\n\n####################################################\nprint('SURFing ...')\ntfeatures = features\nfrom sklearn.cluster import KMeans\nfrom mahotas.features import surf\n\n\nbasedir = 'small_train-dogs-cats'\nimages = glob('{}/*.jpg'.format(basedir))\nalldescriptors = []\ni = 0;\nfor im in images:\n im = mh.imread(im, as_grey=1)\n im = im.astype(np.uint8)\n alldescriptors.append(surf.surf(im, descriptor_only=True))\n i += 1\n print ('image:'+str(i))\n\nprint('Descriptors done')\nk = 256\nkm = KMeans(k)\n\nconcatenated = np.concatenate(alldescriptors)\nconcatenated = concatenated[::64]\nprint('k-meaning...')\nkm.fit(concatenated)\nfeatures = []\n\n\nbasedir = 'train-dogs-vs-cats'\nimages = glob('{}/*.jpg'.format(basedir))\nfor im in images:\n im = mh.imread(im, as_grey=1)\n im = im.astype(np.uint8)\n d = surf.surf(im, descriptor_only=True)\n c = km.predict(d)\n features.append(\n np.array([np.sum(c == i) for i in xrange(k)])\n )\n if (\"train-dogs-vs-cats/cat\" in im[:-len('.jpg')]):\n labels.append('cat')\n elif (\"train-dogs-vs-cats/dog\" in im[:-len('.jpg')]):\n labels.append('dog')\n else:\n raise Exception (\"unrecognized label:\"+str(im[:-len('.jpg')]))\n\nfeatures = np.array(features)\n\nnp.savetxt(\"featuresDogsCatsSURF.zat\", features, delimiter=\",\")\nnp.savetxt(\"SURF_concatenated.zat\", concatenated, delimiter=\",\")\n\nprint('predicting...')\nscoreSURFlr = cross_validation.cross_val_score(LogisticRegression(), features, labels, cv=5).mean()\nprint('Accuracy (5 fold x-val) with Log. Reg [SURF features]: %s%%' % (0.1* round(1000*scoreSURFlr.mean())))\n\n\n\n\n\n\n\n",
"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom subprocess import check_output\nfrom keras.models import Model\nfrom keras.layers import Dense, Embedding, Input , Activation\nfrom keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GRU\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Flatten , Conv1D , GlobalMaxPooling1D , GlobalAveragePooling1D, MaxPooling1D\nfrom keras.models import Sequential\nimport re , os \nimport logging, gensim , random\nfrom gensim.models import word2vec\nfrom keras.layers.merge import concatenate\n\n# conf \nlist_classes = [\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]\nmax_features = 20000\n#ax_features = 15000\n\n\n######## ARMONY #####################################\n# maxlen 200 (2x)\n# EMBEDDING_DIM 100 (x) <--- \n# GRU 100 (layers = 1) (x) \n# num_dense 100 (x) \n#####################################################\n\n## Epochs on x-val: 1 \n\nmaxlen = 600\n \nEMBEDDING_DIM = 300\nwe_fn='glove.840B.300d.txt'\n\nnum_lstm = 300 \nlstm_layers = 1\nrate_drop_dense = 0.1\nnum_dense = 300\n\n#batch_size = 32\nbatch_size = 512\nepochs = 10\n\n# load data \ntrain = pd.read_csv(\"data/train.csv\")\n#train = train[:2000]\ntest = pd.read_csv(\"data/test.csv\")\n#test = test[:2000]\ntrain = train.sample(frac=1)\n\n\n# pre-processing \ndef pre_process_pre_trained_embed(train,test,we_fn='glove.6B.300d.txt'):\n\tprint('>> Indexing word vectors ...')\n\tembeddings_index = {}\n\tf = open(os.path.join('data', we_fn))\n\tfor line in f:\n\t values = line.split(' ')\n\t word = values[0] #print(\"values:\",values)\n\t coefs = np.asarray(values[1:], dtype='float32')\n\t embeddings_index[word] = coefs\n\tf.close()\n\t#model = gensim.models.Word2Vec.load(os.path.join('data', we_fn))\n\t#for k,v in model.wv.vocab.items():\n # embeddings_index[k] = model[k]\n\tprint('Found %s word vectors.' % len(embeddings_index))\n\n\tprint(\">> pre-processing ... \")\n\tlist_sentences_train = train[\"comment_text\"].fillna(\"__NA__\").values\n\ty = train[list_classes].values\n\tlist_sentences_test = test[\"comment_text\"].fillna(\"__NA__\").values\n\ttokenizer = text.Tokenizer(num_words=max_features)\n\ttokenizer.fit_on_texts(list(list_sentences_train) + list(list_sentences_test))\n\tlist_tokenized_train = tokenizer.texts_to_sequences(list(list_sentences_train))\n\tlist_tokenized_test = tokenizer.texts_to_sequences(list(list_sentences_test))\n\tword_index = tokenizer.word_index\n\tX_t = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)\n\tX_te = sequence.pad_sequences(list_tokenized_test, maxlen=maxlen)\n\n\t# prepare embedding matrix\n\tprint('>> Preparing embedding matrix...')\n\tnum_words = min(max_features, len(word_index))\n\tembedding_matrix = np.zeros((num_words, EMBEDDING_DIM))\n\tfor word, i in word_index.items():\n\t if i >= max_features:\n\t continue\n\t embedding_vector = embeddings_index.get(word)\n\t if embedding_vector is not None:\n\t # words not found in embedding index will be all-zeros.\n\t embedding_matrix[i] = embedding_vector\n\n\treturn X_t, X_te, y , embedding_matrix\n\ndef pre_process(train,test):\n\tprint(\">> pre-processing ... 
\")\n\tlist_sentences_train = train[\"comment_text\"].fillna(\"__NA__\").values\n\ty = train[list_classes].values\n\tlist_sentences_test = test[\"comment_text\"].fillna(\"__NA__\").values\n\ttokenizer = text.Tokenizer(num_words=max_features)\n\ttokenizer.fit_on_texts(list(list_sentences_train) + list(list_sentences_test))\n\tlist_tokenized_train = tokenizer.texts_to_sequences(list(list_sentences_train))\n\tlist_tokenized_test = tokenizer.texts_to_sequences(list(list_sentences_test))\n\tX_t = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)\n\tX_te = sequence.pad_sequences(list_tokenized_test, maxlen=maxlen)\n\treturn X_t, X_te, y\n\ndef get_gru_bidirectional_avg(embed_size = 200 , \n\t\t\t embedding_matrix = None, \n num_lstm = 50 , \n rate_drop_dense = 0.1,\n num_dense = 50):\n \n if embedding_matrix is None: \n\t print(\">> get_model_bidirectional_avg [no pre-trained word embeddings]<<\")\n\t inp = Input(shape=(maxlen, ))\n\t x = Embedding(max_features, embed_size)(inp)\n else:\n print(\">> get_model_bidirectional_avg [pre-trained word embeddings]<<\")\n embedding_layer = Embedding(max_features,embed_size,weights=[embedding_matrix],input_length=maxlen,trainable=False)\n inp = Input(shape=(maxlen, ) , dtype='int32')\n x = embedding_layer(inp)\n x = Bidirectional(GRU(num_lstm, return_sequences=True))(x)\n x = Dropout(rate_drop_dense)(x)\n\n #add a GlobalAveragePooling1D, which will average the embeddings of all words in the document\n x = GlobalAveragePooling1D()(x)\n\n x = Dense(num_dense, activation=\"relu\")(x)\n x = Dropout(rate_drop_dense)(x)\n #x = BatchNormalization()(x)\n x = Dense(6, activation=\"sigmoid\")(x)\n\n model = Model(inputs=inp, outputs=x)\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model\n\ndef get_bidirectional(embed_size = 200 , \n embedding_matrix = None, \n num_lstm = 50 , \n rate_drop_dense = 0.1,\n num_dense = 50):\n \n if embedding_matrix is None: \n print(\">> get_model_bidirectional_avg [no pre-trained word embeddings]<<\")\n inp = Input(shape=(maxlen, ))\n x = Embedding(max_features, embed_size)(inp)\n else:\n print(\">> get_model_bidirectional_avg [pre-trained word embeddings]<<\")\n #embedding_layer = Embedding(max_features,embed_size,weights=[embedding_matrix],input_length=maxlen,trainable=True)\n embedding_layer = Embedding(max_features,embed_size,weights=[embedding_matrix],input_length=maxlen)\n inp = Input(shape=(maxlen, ) , dtype='int32')\n x = embedding_layer(inp)\n #x = Bidirectional(LSTM(num_lstm, return_sequences=True, dropout=rate_drop_dense, recurrent_dropout=rate_drop_dense))(x)\n for i in range(lstm_layers):\n x = Bidirectional(GRU(num_lstm, return_sequences=True, dropout=rate_drop_dense, recurrent_dropout=rate_drop_dense,trainable=True))(x)\n #x = Dropout(rate_drop_dense)(x)\n\n #add a GlobalAveragePooling1D, which will average the embeddings of all words in the document\n #x1 = GlobalAveragePooling1D()(x)\n #x2 = GlobalMaxPool1D()(x)\n #x = concatenate([x1, x2])\n\n x = GlobalMaxPool1D()(x)\n \n #x = BatchNormalization()(x)\n #x = Dropout(rate_drop_dense)(x)\n\n ## 1 layer\n #x = Dense(num_dense, activation=\"relu\")(x)\n\n #x = BatchNormalization()(x)\n #x = Dropout(rate_drop_dense)(x)\n\n # 2 layer \n x = Dense(num_dense, activation=\"relu\")(x)\n\n #x = BatchNormalization()(x)\n x = Dropout(rate_drop_dense)(x)\n\n # output \n #x = Dropout(rate_drop_dense)(x)\n #x = BatchNormalization()(x)\n x = Dense(6, activation=\"sigmoid\")(x)\n\n model = Model(inputs=inp, outputs=x)\n 
model.compile(loss='binary_crossentropy',\n optimizer='adam',\n #optimizer='nadam',\n metrics=['accuracy'])\n\n return model\n\n\ndef get_model_conv(embed_size = 200 , \n rate_drop_dense = 0.2,\n filters = 250, \n kernel_size = 3, \n num_dense = 50):\n print(\">> Conv1D <<\")\n\n model = Sequential()\n model.add(Embedding(max_features, embed_size, input_length=maxlen))\n model.add(Dropout(rate_drop_dense))\n model.add(Conv1D(filters,kernel_size,padding='valid',activation='relu',strides=1))\n \n # we use max pooling:\n model.add(GlobalMaxPooling1D())\n \n # We add a vanilla hidden layer:\n model.add(Dense(num_dense))\n model.add(Dropout(rate_drop_dense))\n model.add(Activation('relu'))\n\n # output layer \n model.add(Dense(6, activation='sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model\n\ndef get_model_bidirectional(embed_size = 200 , \n num_lstm = 64 , \n rate_drop_lstm = 0, \n rate_drop_dense = 0.1,\n num_dense = 50):\n model = Sequential()\n model.add(Embedding(max_features, embed_size, input_length=maxlen))\n model.add(Bidirectional(GRU(num_lstm)))\n model.add(Dropout(rate_drop_dense))\n model.add(Dense(num_dense, activation='relu'))\n model.add(Dropout(rate_drop_dense))\n model.add(Dense(6, activation='sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model\n\ndef get_cnn_lstm(embed_size = 200 , \n rate_drop_dense = 0.2,\n filters = 64, \n lstm_output_size = 70, \n kernel_size = 5, \n num_dense = 50):\n print(\">>> cnn + gru <<\")\n\n model = Sequential()\n model.add(Embedding(max_features, embed_size, input_length=maxlen))\n model.add(Dropout(rate_drop_dense))\n model.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\n\n model.add(MaxPooling1D(pool_size=4))\n model.add(Bidirectional(GRU(lstm_output_size)))\n\n model.add(Dense(num_dense, activation='relu'))\n model.add(Dropout(rate_drop_dense))\n\n model.add(Dense(6, activation='sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n return model\n\n\n# train \n#X_t, X_te, y = pre_process(train=train,test=test)\nX_t, X_te, y , embedding_matrix = pre_process_pre_trained_embed(train=train,test=test,we_fn=we_fn)\nmodel = get_bidirectional(embed_size = EMBEDDING_DIM , embedding_matrix = embedding_matrix,num_lstm = num_lstm,rate_drop_dense = rate_drop_dense,num_dense = num_dense)\nprint(model.summary())\n#file_path=\"weights_base.best.hdf5\"\n#checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n#early = EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=0)\n#callbacks_list = [checkpoint, early] #early\n#model.fit(X_t, y, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=callbacks_list)\nmodel.fit(X_t, y, batch_size=batch_size, epochs=2)\n\n# predict\nprint(\">>> predicting on test set ... \")\n#model.load_weights(file_path)\ny_test = model.predict(X_te)\n\n#sub\nsample_submission = pd.read_csv(\"data/sample_submission.csv\")\nsample_submission[list_classes] = y_test\nsample_submission.to_csv(\"sub_gru9_300e_in600_feat20000_poolMax_dense300_adam_all_data_1.csv.gz\", index=False , compression='gzip')\n",
"'''\nSingle model may achieve LB scores at around 0.29+ ~ 0.30+\nAverage ensembles can easily get 0.28+ or less\nDon't need to be an expert of feature engineering\nAll you need is a GPU!!!!!!!\n'''\n\n########################################\n## import packages\n########################################\nimport os\nimport re\nimport csv\nimport codecs\nimport numpy as np\nimport pandas as pd\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer\nfrom string import punctuation\n\nfrom gensim.models import KeyedVectors\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\nfrom keras.layers.merge import concatenate\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\nimport sys\n#reload(sys)\n#sys.setdefaultencoding('utf-8')\n\nimport gensim \nfrom numpy import linalg as LA\n\n########################################\n## set directories and parameters\n########################################\nBASE_DIR = './data/'\nTRAIN_DATA_FILE = BASE_DIR + 'train.csv'\nTEST_DATA_FILE = BASE_DIR + 'test.csv'\nEMBEDDING_DIM = 400\nTEXT_WINDOW = 9 \n\n########################################\n## process texts in datasets\n########################################\nprint('Processing text dataset')\n\n# The function \"text_to_wordlist\" is from\n# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text\ndef text_to_wordlist(text, remove_stopwords=False, stem_words=False):\n # Clean the text, with the option to remove stopwords and to stem words.\n # Convert words to lower case and split them\n text = text.lower().split()\n # Optionally, remove stop words\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n text = [w for w in text if not w in stops]\n text = \" \".join(text)\n # Clean the text\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\",\", \" \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! 
\", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"60k\", \" 60000 \", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text) \n # Optionally, shorten words to their stems\n if stem_words:\n text = text.split()\n stemmer = SnowballStemmer('english')\n stemmed_words = [stemmer.stem(word) for word in text]\n text = \" \".join(stemmed_words)\n \n # Return a list of words\n return(text)\n\ntexts_1 = [] \ntexts_2 = []\nlabels = []\nwith codecs.open(TRAIN_DATA_FILE, encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=',')\n header = next(reader)\n for values in reader:\n texts_1.append(text_to_wordlist(values[3]))\n texts_2.append(text_to_wordlist(values[4]))\n labels.append(int(values[5]))\n\nprint('Found %s texts in train.csv' % len(texts_1))\n\ntest_texts_1 = []\ntest_texts_2 = []\ntest_ids = []\nwith codecs.open(TEST_DATA_FILE, encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=',')\n header = next(reader)\n for values in reader:\n test_texts_1.append(text_to_wordlist(values[1]))\n test_texts_2.append(text_to_wordlist(values[2]))\n test_ids.append(values[0])\n\nprint('Found %s texts in test.csv' % len(test_texts_1))\n\n# \"[..] In our experiments, we cross validate the window size using the validation set, and the optimal window size is 8. The vector presented to the classifier is a concatenation of two vectors, one from PV-DBOW and one from PV-DM. In PV-DBOW, the learned vector representations have 400 dimensions. In PV-DM, the learned vector representations have 400 dimensions for both words and paragraphs. To predict the 8-th word, we concatenate the paragraph vectors and 7 word vectors. Special characters such as ,.!? are treated as a normal word. If the paragraph has less than 9 words, we pre-pad with a special NULL word symbol.[..]\"\n\nmaster_text_list = texts_1 + texts_2 + test_texts_1 + test_texts_2 \nmaster_corpus = list() \n \nfor i, line in enumerate(master_text_list):\n master_corpus.append(gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(line), [i]))\n\nprint(master_corpus[:2])\ndel texts_1 , texts_2 , test_texts_1 , test_texts_2 \ndel master_text_list\n\n# dm defines the training algorithm. By default (dm=1), ‘distributed memory’ (PV-DM) is used. 
Otherwise, distributed bag of words (PV-DBOW) is employed.\n\n# model #1: PV-DBOW\nmodel_pv_dbow = gensim.models.doc2vec.Doc2Vec(size=EMBEDDING_DIM,window=8, workers=4, iter=30, dm=2) \nmodel_pv_dbow.build_vocab(master_corpus)\n\nmodel_pv_dbow.save('doc2vec_pv_dbow')\n\ninferred_vector = model_pv_dbow.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])\n#model_pv_dbow.most_similar([inferred_vector], topn=len(model_pv_dbow.docvecs))\n\nmodel_pv_dbow = gensim.models.doc2vec.Doc2Vec.load('doc2vec_pv_dbow')\ninferred_vector_2 = model_pv_dbow.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])\nprint(\"norm-diff\",LA.norm(inferred_vector-inferred_vector_2)) # 0.0 \n\nassert LA.norm(inferred_vector-inferred_vector_2) < 0.1 \ndel model_pv_dbow \n\n\n# model #2: PV-DM\nmodel_pv_dm = gensim.models.doc2vec.Doc2Vec(size=EMBEDDING_DIM,window=8, workers=4, iter=30, dm=1) \nmodel_pv_dm.build_vocab(master_corpus)\n\nmodel_pv_dm.save('doc2vec_pv_dm')\n\ninferred_vector = model_pv_dm.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])\n\nmodel_pv_dm = gensim.models.doc2vec.Doc2Vec.load('doc2vec_pv_dm')\ninferred_vector_2 = model_pv_dm.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])\nprint(\"norm-diff\",LA.norm(inferred_vector-inferred_vector_2)) # 0.0 \n\nassert LA.norm(inferred_vector-inferred_vector_2) < 0.1 \n\ndel model_pv_dm\n\nprint('End.')\n\n\n\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.merge",
"pandas.to_datetime",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.feature_extraction.DictVectorizer",
"sklearn.cluster.MiniBatchKMeans",
"sklearn.preprocessing.LabelEncoder"
],
[
"pandas.concat",
"pandas.read_csv",
"pandas.merge",
"pandas.to_datetime",
"numpy.asarray",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.feature_extraction.DictVectorizer",
"numpy.add",
"sklearn.cluster.MiniBatchKMeans",
"sklearn.preprocessing.LabelEncoder",
"numpy.zeros",
"numpy.divide"
],
[
"sklearn.linear_model.logistic.LogisticRegression",
"numpy.array",
"numpy.atleast_2d"
],
[
"sklearn.cluster.KMeans",
"numpy.concatenate",
"numpy.savetxt",
"sklearn.linear_model.logistic.LogisticRegression",
"numpy.array",
"numpy.sum"
],
[
"numpy.asarray",
"pandas.read_csv",
"numpy.zeros"
],
[
"numpy.linalg.norm"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gmooers96/CBRAIN-CAM
|
[
"c5a26e415c031dea011d7cb0b8b4c1ca00751e2a",
"c5a26e415c031dea011d7cb0b8b4c1ca00751e2a"
] |
[
"MAPS/Latant_Space_Constrained_VAEs/sample_stats_constrained_squared.py",
"MAPS/autoencoder/ae_metrics.py"
] |
[
"import argparse \nimport json \n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nimport keras\nfrom keras import layers\nfrom keras import backend as K\nimport tensorflow as tf \n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA \nfrom sklearn.manifold import TSNE\n\nfrom train_latent_constraint_square import encoder_gen, decoder_gen\nimport numpy as np\nimport gc \nimport tensorflow_probability as tfp \nfrom scipy import spatial\n\ndef f_norm(true, pred):\n covariance_truth = tfp.stats.covariance(true)\n covariance_prediction = tfp.stats.covariance(pred)\n covariance_truth = tf.cast(covariance_truth, dtype=tf.float32)\n f_dist = tf.norm(covariance_prediction-covariance_truth, ord=\"euclidean\")\n return f_dist\n\n\ndef reconstruct_targets_paper(vae, test_data, targets, id, dataset_max, dataset_min):\n \"\"\"\n TODO\n \"\"\"\n original_samples = []\n recon_means = []\n recon_vars = []\n\n vmin = 1000\n vmax = -1\n\n vmin_var = 1000\n vmax_var = -1\n\n for target in targets:\n\n sample = test_data[target]\n sample_mean_var = vae.predict(np.expand_dims(sample, 0))\n sample_mean = sample_mean_var[0, :128*30]\n sample_log_var = sample_mean_var[0, 128*30:]\n\n # Sample reconstruction based on predicted mean and variance\n recon_mean = sample_mean\n recon_var = np.exp(sample_log_var)\n recon_sample = recon_mean + recon_var\n # recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30))\n \n # Rescale original sample and reconstruction to original scale\n sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))\n recon_mean = np.interp(recon_mean, (0, 1), (dataset_min, dataset_max))\n recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))\n recon_var = recon_sample - recon_mean\n\n # Get min and max of original and reconstructed \n max_reconstructed = np.max(recon_mean)\n max_recon_var = np.max(recon_var)\n print(\"max of reconstructed\", max_reconstructed)\n max_sample = np.max(sample.reshape((128*30,)))\n print(\"max of original\", max_sample)\n min_reconstructed = np.min(recon_mean)\n min_recon_var = np.min(recon_var)\n print(\"min of reconstructed\", min_reconstructed)\n min_sample = np.min(sample.reshape((128*30,)))\n print(\"min of original\", min_sample)\n\n # Reshape reconstructed sample \n recon_mean = recon_mean.reshape((30, 128))\n recon_var = recon_var.reshape((30, 128))\n\n original_samples.append(sample[:, :, 0])\n recon_means.append(recon_mean)\n recon_vars.append(recon_var)\n\n vmin = min(vmin, min_reconstructed, min_sample)\n vmax = max(vmax, max_reconstructed, max_sample)\n\n vmin_var = min(vmin_var, min_recon_var)\n vmax_var = max(vmax_var, max_recon_var)\n\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 10\n fig_size[1] = 8\n plt.rcParams[\"figure.figsize\"] = fig_size\n fig, axs = plt.subplots(len(targets), 2, sharex=True, sharey=True, constrained_layout=True)\n\n def fmt(x, pos):\n return \"{:.2f}\".format(x)\n #np.save(\"CI_Figure_Data/True_Means.npy\", original_samples)\n #np.save(\"CI_Figure_Data/Reconstruct_Means.npy\", recon_means)\n for i in range(len(targets)): \n y_ticks = np.arange(1400, 0, -400)\n #print(\"y ticks\", y_ticks)\n\n sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)\n axs[i, 0].invert_yaxis()\n axs[i, 0].set_yticklabels(y_ticks)\n\n if i == 2:\n axs[i, 0].set_ylabel(\"Pressure (hpa)\", fontsize=12, labelpad=10)\n \n sub_img = axs[i, 
1].imshow(recon_means[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)\n axs[i, 1].invert_yaxis()\n\n if i == 0:\n axs[i, 0].set_title(\"Original\", fontsize = 12)\n axs[i, 1].set_title(\"VAE Reconstruction Mean\",fontsize=12)\n\n if i == len(targets) - 1:\n axs[i, 0].set_xlabel('CRMs', fontsize=12, labelpad=5)\n axs[i, 1].set_xlabel('CRMs', fontsize=12, labelpad=5)\n fig.colorbar(sub_img, ax=axs[:, 1], label=\"Vertical Velocity\", shrink=0.6)\n #axs[i,1].set_yticks([])\n #if i < len(targets) - 2:\n #axs[i, 0].set_xticks([])\n #axs[i, 1].set_xticks([])\n\n\n # Hide x labels and tick labels for all but bottom plot.\n for row in axs:\n for ax in row:\n ax.label_outer()\n\n plt.savefig('./model_graphs/reconstructions/Paper_target_test_reconstructions_{}.png'.format(id))\n plt.savefig('./model_graphs/reconstructions/Paper_target_test_reconstructions_{}.pdf'.format(id))\n\n\ndef reconstruct_targets(vae, test_data, targets, id, dataset_max, dataset_min):\n \"\"\"\n TODO\n \"\"\"\n original_samples = []\n recon_means = []\n recon_vars = []\n\n vmin = 1000\n vmax = -1\n\n vmin_var = 1000\n vmax_var = -1\n\n for target in targets:\n\n sample = test_data[target]\n sample_mean_var = vae.predict(np.expand_dims(sample, 0))\n sample_mean = sample_mean_var[0, :128*30]\n sample_log_var = sample_mean_var[0, 128*30:]\n\n # Sample reconstruction based on predicted mean and variance\n recon_mean = sample_mean\n recon_var = np.exp(sample_log_var)\n recon_sample = recon_mean + recon_var\n # recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30))\n \n # Rescale original sample and reconstruction to original scale\n sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))\n recon_mean = np.interp(recon_mean, (0, 1), (dataset_min, dataset_max))\n recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))\n recon_var = recon_sample - recon_mean\n\n # Get min and max of original and reconstructed \n max_reconstructed = np.max(recon_mean)\n max_recon_var = np.max(recon_var)\n print(\"max of reconstructed\", max_reconstructed)\n max_sample = np.max(sample.reshape((128*30,)))\n print(\"max of original\", max_sample)\n min_reconstructed = np.min(recon_mean)\n min_recon_var = np.min(recon_var)\n print(\"min of reconstructed\", min_reconstructed)\n min_sample = np.min(sample.reshape((128*30,)))\n print(\"min of original\", min_sample)\n\n # Reshape reconstructed sample \n recon_mean = recon_mean.reshape((30, 128))\n recon_var = recon_var.reshape((30, 128))\n\n original_samples.append(sample[:, :, 0])\n recon_means.append(recon_mean)\n recon_vars.append(recon_var)\n\n vmin = min(vmin, min_reconstructed, min_sample)\n vmax = max(vmax, max_reconstructed, max_sample)\n\n vmin_var = min(vmin_var, min_recon_var)\n vmax_var = max(vmax_var, max_recon_var)\n\n fig_size = plt.rcParams[\"figure.figsize\"]\n fig_size[0] = 10\n fig_size[1] = 8\n plt.rcParams[\"figure.figsize\"] = fig_size\n fig, axs = plt.subplots(len(targets), 3, sharex=True, sharey=True, constrained_layout=True)\n\n def fmt(x, pos):\n return \"{:.2f}\".format(x)\n\n for i in range(len(targets)): \n y_ticks = np.arange(1800, 0, -800)\n print(\"y ticks\", y_ticks)\n\n sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)\n axs[i, 0].invert_yaxis()\n axs[i, 0].set_yticklabels(y_ticks)\n\n if i == 2:\n axs[i, 0].set_ylabel(\"Pressure (mbs)\", fontsize=12, labelpad=10)\n \n sub_img = axs[i, 1].imshow(recon_means[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)\n axs[i, 
1].invert_yaxis()\n\n sub_img_var = axs[i, 2].imshow(recon_vars[i], cmap='RdBu_r', vmin=vmin_var, vmax=vmax_var)\n axs[i, 2].invert_yaxis()\n\n if i == 0:\n axs[i, 0].set_title(\"Original\")\n axs[i, 1].set_title(\"Reconstruction Mean\")\n axs[i, 2].set_title(\"Reconstruction Variance\")\n\n if i == len(targets) - 1:\n axs[i, 0].set_xlabel('CRMs', fontsize=12, labelpad=5)\n axs[i, 1].set_xlabel('CRMs', fontsize=12, labelpad=5)\n axs[i, 2].set_xlabel('CRMs', fontsize=12, labelpad=5)\n fig.colorbar(sub_img, ax=axs[:, 1], label=\"Vertical Velocity\", shrink=0.6)\n cb = fig.colorbar(sub_img_var, ax=axs[:, 2], shrink=0.6, format=ticker.FuncFormatter(fmt))\n cb.set_label(\"Variance\", labelpad=10)\n\n\n # Hide x labels and tick labels for all but bottom plot.\n for row in axs:\n for ax in row:\n ax.label_outer()\n\n plt.savefig('./model_graphs/reconstructions/target_test_reconstructions_{}.png'.format(id))\n\ndef sample_reconstructions(vae, train_data, test_data, id, dataset_max, dataset_min): \n \"\"\"\n TODO \n \"\"\"\n \n original_samples = []\n recon_samples = []\n\n min_max = []\n\n for i in range(5):\n rand_sample = np.random.randint(0, len(train_data))\n\n sample = train_data[rand_sample]\n sample_mean_var = vae.predict(np.expand_dims(sample, 0))\n sample_mean = sample_mean_var[0, :128*30]\n sample_log_var = sample_mean_var[0, 128*30:]\n\n recon_sample = sample_mean\n \n sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))\n recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))\n\n print(\"original sample\", sample.reshape((128*30,)))\n print(\"reconstructed sample\", recon_sample)\n print(np.max(np.abs(sample.reshape((128*30,)) - recon_sample)))\n max_reconstructed = np.max(np.abs(recon_sample))\n print(\"max of reconstructed\", max_reconstructed)\n max_sample = np.max(sample.reshape((128*30,)))\n print(\"max of original\", max_sample)\n min_reconstructed = np.min(recon_sample)\n print(\"min of reconstructed\", min_reconstructed)\n min_sample = np.min(sample.reshape((128*30,)))\n print(\"min of original\", min_sample)\n recon_sample = recon_sample.reshape((30, 128))\n\n original_samples.append(sample[:, :, 0])\n recon_samples.append(recon_sample)\n\n min_max.append((min(min_reconstructed, min_sample), max(max_reconstructed, max_sample)))\n\n fig, axs = plt.subplots(5, 2)\n\n for i in range(5): \n vmin = min_max[i][0]\n vmax = min_max[i][1]\n \n sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)\n axs[i, 0].set_ylim(axs[i, 0].get_ylim()[::-1])\n fig.colorbar(sub_img, ax=axs[i, 0])\n\n sub_img = axs[i, 1].imshow(recon_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)\n axs[i, 1].set_ylim(axs[i, 1].get_ylim()[::-1])\n fig.colorbar(sub_img, ax=axs[i, 1])\n\n plt.savefig('./model_graphs/reconstructions/reconstructed_train_samples_{}.png'.format(id))\n\ndef sample_latent_space(vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type): \n \"\"\"\n Create a scatter plot of the latent space containing all test samples.\n \"\"\"\n\n # Predict latent train & test data\n _, _, z_test = vae_encoder.predict(test_data)\n _, _, z_train = vae_encoder.predict(train_data)\n\n # Apply scaling and tsne \n sc = StandardScaler()\n z_train_std = sc.fit_transform(z_train)\n \n z_test_std = sc.transform(z_test)\n\n # Instantiate PCA \n pca = PCA(n_components=32)\n pca.fit(z_train_std)\n\n z_test_pca = pca.transform(z_test_std)\n\n # Instantiate TSNE\n tsne = TSNE(n_components=2)\n\n z_test_tsne = 
tsne.fit_transform(z_test_pca)\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Saved_Data/2D_Latent_Space__{}\".format(id), z_test_tsne)\n if dataset_type == \"half_deep_convection\":\n colors = [\"#FF4940\", \"#3D9AD1\"]\n # Make plot of latent test data \n #plt.scatter(x=z_test_tsne[np.where(test_labels == 0), 0], y=z_test_tsne[np.where(test_labels == 0), 1], c=colors[0],s=1, label=\"Deep Convection\")\n #plt.scatter(x=z_test_tsne[np.where(test_labels == 1), 0], y=z_test_tsne[np.where(test_labels == 1), 1], c=colors[1], s=1, label=\"Shallow Convection\")\n print(\"made it here\")\n convection = np.squeeze(z_test_tsne[np.where(test_labels == 0),:])\n no_convection = np.squeeze(z_test_tsne[np.where(test_labels == 1),:])\n #fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:])\n plt.scatter(x=convection[:, 0], y=convection[:, 1], c=\"#FF4940\", s=0.4, label=\"N0 convective Activity\")\n plt.scatter(x=no_convection[:, 0], y=no_convection[:, 1], c=\"#3D9AD1\", s=0.4, label=\"Convective Activity\")\n #plt.scatter(x=fake[:, 0], y=fake[:, 1], c=\"yellow\", s=0.4, label=\"White Noise\")\n plt.legend()\n\n else:\n plt.scatter(x=z_test_tsne[:, 0], y=z_test_tsne[:, 1], s=1)\n plt.colorbar()\n\n plt.savefig('./model_graphs/latent_space/Amazon_binary_latent_space_with_pca_{}.png'.format(id))\n\n\ndef sample_latent_space_var(vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type): \n \"\"\"\n Create a scatter plot of the latent space containing all test samples.\n \"\"\"\n\n # Predict latent train & test data\n test_mean, test_log_var, z_test = vae_encoder.predict(test_data)\n train_mean, train_log_var, z_train = vae_encoder.predict(train_data)\n #np.save(\"PCA_Trials/Covariance_Test_Z_Samples.npy\", z_test)\n #np.save(\"PCA_Trials/Covariance_Test_Mean_Samples.npy\", test_mean)\n #np.save(\"PCA_Trials/Covariance_Test_Log_Var_Samples.npy\", test_log_var)\n train_mean_var = np.concatenate((train_mean, train_log_var), axis=1)\n test_mean_var = np.concatenate((test_mean, test_log_var), axis=1)\n\n #print(dfsdsdgsdg)\n #np.save(\"PCA_Trials/Covariance_Train_High_Dim_Latent_Space.npy\", train_mean_var)\n #np.save(\"PCA_Trials/Covariance_Test_High_Dim_Latent_Space.npy\", test_mean_var)\n # Apply scaling and tsne \n sc = StandardScaler()\n z_train_std = sc.fit_transform(train_mean_var)\n #z_train_std = sc.fit_transform(train_log_var)\n \n z_test_std = sc.transform(test_mean_var)\n #z_test_std = sc.transform(test_log_var)\n # Instantiate PCA \n pca = PCA(n_components=2)\n pca.fit(z_train_std)\n z_test_pca = pca.transform(z_test_std)\n\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Latant_Space_Constrained_VAEs/model_graphs/latant_space/2D_PCA_Latent_Space__{}\".format(id), z_test_pca)\n print(\"Made it to the save\")\n if dataset_type == \"half_deep_convection\":\n colors = [\"#FF4940\", \"#3D9AD1\"]\n print(\"made it here\")\n convection = np.squeeze(z_test_pca[np.where(test_labels == 0),:])\n no_convection = np.squeeze(z_test_pca[np.where(test_labels == 1),:])\n #fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:])\n plt.scatter(x=convection[:, 0], y=convection[:, 1], c=\"#FF4940\", s=0.4, label=\"No Convective Activity\")\n plt.scatter(x=no_convection[:, 0], y=no_convection[:, 1], c=\"#3D9AD1\", s=0.4, label=\"Convective Activity\")\n #plt.scatter(x=fake[:, 0], y=fake[:, 1], c=\"yellow\", s=0.4, label=\"Blue Noise\")\n plt.legend()\n\n else:\n #plt.scatter(x=z_test_tsne[:, 0], y=z_test_tsne[:, 1], c=test_labels, s=1)\n 
plt.scatter(x=z_test_pca[:, 0], y=z_test_pca[:, 1], s=0.1)\n plt.colorbar()\n\n plt.savefig('./model_graphs/latant_space/2D_PCA_latent_space_{}.png'.format(id)) \n\n\ndef interpolate_points(p1, p2, n_steps=100):\n \"linear interpolation -- https://openreview.net/pdf?id=S1fQSiCcYm\"\n ratios = np.linspace(0, 1, num=n_steps)\n vectors = list()\n for ratio in ratios:\n v = (1.0 - ratio) * p1 + ratio * p2\n vectors.append(v)\n return np.asarray(vectors)\n\ndef slerp(count, low, high):\n \"\"\"Spherical interpolation. val has a range of 0 to 1.\"\"\"\n values = np.linspace(0, 1, num=count)\n output_array = np.empty(shape=(count,low.size))\n for i in range(len(values)):\n val = values[i]\n omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))\n so = np.sin(omega)\n output_array[i,:] = np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high\n return output_array\n\n#https://arxiv.org/pdf/1803.05428.pdf\n#https://www.inference.vc/high-dimensional-gaussian-distributions-are-soap-bubble/\ndef original_slerp(val, low, high):\n \"\"\"Spherical interpolation. val has a range of 0 to 1. https://github.com/dribnet/plat/blob/master/plat/interpolate.py\"\"\"\n if val <= 0:\n return low\n elif val >= 1:\n return high\n elif np.allclose(low, high):\n return low\n omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))\n so = np.sin(omega)\n return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high\n\n\ndef numpy_slerp(t, p0, p1):\n omega = np.arccos(np.dot(p0/np.linalg.norm(p0), p1/np.linalg.norm(p1)))\n so = np.sin(omega)\n return np.sin((1.0-t)*omega) / so * p0 + np.sin(t*omega)/so * p1\n\ndef latent_space_interpolation(vae, decoder, vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):\n sample_one = np.expand_dims(test_data[15880,:,:], axis=0)\n sample_two = np.expand_dims(test_data[6548,:,:],axis=0)\n \n test_mean_one, test_log_var_one, z_test_one = vae_encoder.predict(sample_one)\n test_mean_two, test_log_var_two, z_test_two = vae_encoder.predict(sample_two)\n \n count = 100\n interpolated_images = np.empty(shape=(count,len(z_test_two[0])))\n interpolated_orig_images = np.empty(shape=(count,len(sample_one[0])*len(sample_one[0][0])))\n values = np.linspace(0, 1, num=count)\n for i in range(count):\n interpolated_images[i,:]= numpy_slerp(values[i], z_test_one.flatten(),z_test_two.flatten())\n interpolated_orig_images[i,:]= numpy_slerp(values[i], sample_one.flatten(),sample_two.flatten())\n \n reconstructed_Image_Series = decoder.predict(interpolated_images)\n reconstructed_Image_finals = reconstructed_Image_Series[:,:3840]\n\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Original_Images_W_Comp_15880_6548.npy\", interpolated_orig_images)\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Latent_Images_W_Comp_15880_6548.npy\", interpolated_images)\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Reconstructed_Images_W_Comp_15880_6548.npy\", reconstructed_Image_finals)\n \n print(\"Passed the saves\") \n interpolated_images.shape\n num_images = 10\n np.random.seed(42)\n plt.figure(figsize=(30, 8))\n\n for i, image_idx in enumerate(interpolated_images):\n \n ax = plt.subplot(5, num_images, i + 1)\n plt.imshow(interpolated_images[i].reshape(64, 16).T)\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.set_title(\"Encoded: {}\".format(i))\n \n ax = plt.subplot(5, 
num_images,num_images+ i + 1)\n reconstructed_image = decoder.predict(np.expand_dims(interpolated_images[i,:],axis=0))\n plt.imshow(np.squeeze(reconstructed_image)[:3840].reshape(128,30).T)\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.set_title(\"Latent: {}\".format(i))\n \n ax = plt.subplot(5, num_images,2*num_images+ i + 1)\n plt.imshow(interpolated_orig_images[i].reshape(128,30).T)\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.set_title(\"Image: {}\".format(i))\n plt.savefig(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/model_graphs/latent_space_interp/amazon_diurnal_trial.png\")\n \n \n \n \n \ndef sample_frob_norm(vae, decoder, vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type): \n \"\"\"\n Create a scatter plot of the latent space containing all test samples.\n \"\"\"\n\n # Predict latent train & test data\n test_mean, test_log_var, z_test = vae_encoder.predict(test_data)\n print(\"made it here\")\n sample_mean_var = decoder.predict(z_test) \n sample_mean = sample_mean_var[:, :128*30]\n truths = np.reshape(test_data, (len(test_data),30*128))\n \n\n Rough_Metric = f_norm(truths, sample_mean)\n \n sess = tf.InteractiveSession()\n RM = Rough_Metric.eval()\n gc.collect()\n print(RM.shape)\n print(RM)\n np.save(\"Saved_Data/Rough_Overall_FR_Norm__{}.npy\".format(id), RM)\n print(\"completed\") \n \n \n \ndef generate_samples(decoder, dataset_min, dataset_max, latent_dim: int, id):\n \"\"\"\n Sample points from prior and send through decoder to get \n sample images.\n \"\"\"\n # sample from prior \n num_samples = 3\n z = np.random.normal(size=(num_samples, latent_dim))\n\n # Get output from decoder \n sample_mean_var = decoder.predict(z)\n\n # Extract mean and variance \n sample_mean = sample_mean_var[:, :128*30]\n sample_log_var = sample_mean_var[:, 128*30:]\n\n fig, axs = plt.subplots(num_samples, 1)\n\n recon_samples = []\n for i in range(num_samples):\n print(sample_mean[i])\n print(sample_mean[i].shape)\n # Sample from gaussian decoder outputs \n recon_sample = np.random.multivariate_normal(sample_mean[i], np.exp(sample_log_var[i]) * np.identity(128*30))\n\n # Unnormalize sample \n recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))\n\n # Reshape\n recon_sample = recon_sample.reshape((30, 128))\n\n recon_samples.append(recon_sample)\n\n vmin = np.min(recon_samples)\n vmax = np.max(recon_samples)\n for i in range(num_samples):\n # Show image\n sub_img = axs[i].imshow(recon_samples[i], cmap='coolwarm', vmin=vmin, vmax=vmax)\n fig.colorbar(sub_img, ax=axs[i])\n # Flip y-axis\n axs[i].set_ylim(axs[i].get_ylim()[::-1])\n \n # fig.colorbar(sub_img, ax=axs)\n plt.tight_layout()\n plt.savefig('./model_graphs/generated/generated_samples_{}.png'.format(id))\n\n\n\ndef main():\n args = argument_parsing()\n print(\"Command line args:\", args)\n\n f = open(\"./model_config/config_{}.json\".format(args.id))\n model_config = json.load(f)\n f.close()\n\n train_data = np.load(model_config[\"data\"][\"training_data_path\"])\n test_data = np.load(model_config[\"data\"][\"test_data_path\"])\n\n # test_labels = np.load(model_config[\"data\"][\"test_labels\"])[:, 0, 0]\n test_labels = np.load(model_config[\"data\"][\"test_labels\"])\n print(\"Test labels shape:\", test_labels.shape, model_config[\"data\"][\"test_labels\"])\n\n dataset_max = np.load(model_config[\"data\"][\"max_scalar\"])\n dataset_min = np.load(model_config[\"data\"][\"min_scalar\"])\n\n 
print(\"dataset max\", dataset_max)\n print(\"dataset min\", dataset_min)\n\n img_width = train_data.shape[1]\n img_height = train_data.shape[2]\n\n print(\"Image shape:\", img_width, img_height)\n \n # Construct VAE Encoder \n encoder_result = encoder_gen((img_width, img_height), model_config[\"encoder\"], args.id)\n # Construct VAE Decoder \n vae_decoder = decoder_gen(\n (img_width, img_height), \n model_config[\"decoder\"]\n )\n _, _, z = encoder_result.vae_encoder(encoder_result.inputs)\n x_mu_var = vae_decoder(z)\n vae = keras.Model(inputs=[encoder_result.inputs], outputs=[x_mu_var])\n # load weights from file\n vae.load_weights('./models/model_{}.th'.format(args.id))\n print(\"weights loaded\")\n\n train_data = train_data.reshape(train_data.shape+(1,))\n test_data = test_data.reshape(test_data.shape+(1,))\n\n # get side by side plots of original vs. reconstructed\n # sample_reconstructions(vae, train_data, test_data, args.id, dataset_max, dataset_min)\n #reconstruct_targets(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min)\n #reconstruct_targets_paper(vae, test_data, [23506, 66 , 23746], args.id, dataset_max, dataset_min)\n #reconstruct_targets_paper(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min)\n #sample_latent_space(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)\n sample_latent_space_var(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)\n #latent_space_interpolation(vae, vae_decoder, encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)\n #sample_frob_norm(vae, vae_decoder, encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)\n #generate_samples(vae_decoder, dataset_min, dataset_max, model_config[\"encoder\"][\"latent_dim\"], args.id)\n\ndef argument_parsing():\n parser = argparse.ArgumentParser()\n parser.add_argument('--id', type=int, help='This option specifies the id of the config file to use to train the VAE.')\n parser.add_argument('--dataset_type', type=str, help='Name of the dataset that model was trained on.')\n\n args = parser.parse_args()\n return args \n\nif __name__ == \"__main__\":\n main()\n",
"import argparse \nimport json \n\nimport numpy as np\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\nimport keras\nfrom keras import layers\nfrom keras import backend as K\n#import tensorflow as tf \n#tf.enable_eager_execution()\n#tf.compat.v1.disable_eager_execution()\n\nfrom autoencoder import encoder_gen, decoder_gen\n\ndef spectrum_gen(h, dx):\n nx = len(h)\n\n # Get half the length of the series to avoid redudant information\n npositive = nx//2\n pslice = slice(1, npositive)\n\n # Get frequencies\n freqs = np.fft.fftfreq(nx, d=dx)[pslice] \n\n # Perform the fft \n ft = np.fft.fft(h)[pslice]\n\n # Remove imaginary componant of the fft and square\n psraw = np.conjugate(ft) *ft\n\n # Double to account for the negative half that was removed above\n psraw *= 2.0\n\n # Normalization for the power spectrum\n psraw /= nx**2\n\n # Go from the Power Spectrum to Power Density\n psdraw = psraw * dx * nx\n\n return freqs, psraw, psdraw\n\ndef spectrum_generator(targets, features, levels, time_space):\n targ_freqs, targ_psraw, targ_psdraw = spectrum_gen(np.squeeze(targets[1,:]), time_space)\n depth = len(targ_psdraw)\n target_collector = np.zeros(shape=(levels, depth))\n target_collector[:,:] = np.nan\n feature_collector = np.zeros(shape=(levels, depth))\n feature_collector[:,:] = np.nan\n counter = 0\n for i in range(levels):\n target = np.squeeze(targets[i, :])\n feature = np.squeeze(features[i, :])\n targ_freqs, targ_psraw, targ_psdraw = spectrum_gen(target, time_space)\n feat_freqs, feat_psraw, feat_psdraw = spectrum_gen(feature, time_space)\n target_collector[i, :] = targ_psdraw\n feature_collector[i, :] = feat_psdraw\n rep_target = np.nanmean(target_collector, axis = 0)\n rep_pred = np.nanmean(feature_collector, axis = 0)\n return rep_target, rep_pred, targ_freqs\n\ndef spectrum_level(targets, features, levels, time_space, level):\n targ_freqs, targ_psraw, targ_psdraw = spectrum_gen(np.squeeze(targets[1,:]), time_space)\n depth = len(targ_psdraw)\n target_collector = np.zeros(shape=(levels, depth))\n target_collector[:,:] = np.nan\n feature_collector = np.zeros(shape=(levels, depth))\n feature_collector[:,:] = np.nan\n counter = 0\n for i in range(levels):\n if i == level:\n target = np.squeeze(targets[i, :])\n feature = np.squeeze(features[i, :])\n targ_freqs, targ_psraw, targ_psdraw = spectrum_gen(target, time_space)\n feat_freqs, feat_psraw, feat_psdraw = spectrum_gen(feature, time_space)\n target_collector[i, :] = targ_psdraw\n feature_collector[i, :] = feat_psdraw\n rep_target = np.nanmean(target_collector, axis = 0)\n rep_pred = np.nanmean(feature_collector, axis = 0)\n return rep_target, rep_pred, targ_freqs\n\ndef spectral_plot(truth_array, pred_array, frequency, labeler, id):\n plt.plot(1/frequency, truth_array, label=\"Original\")\n plt.plot(1/frequency, pred_array, label=\"Our Reconstruction\")\n plt.legend()\n plt.xlabel(\"CRM Spacing\")\n plt.ylabel(r'$\\frac{m^2*crm}{s^2}$')\n plt.yscale('log')\n plt.xscale('log')\n plt.title(\"Signal at \"+labeler+\" HPa\")\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Saved_Data/\"+labeler+\"Spectral__{}\".format(id), pred_array)\n plt.savefig('./model_graphs/spectral/'+labeler+'_overall_fft_{}.png'.format(id))\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/CI_Figure_Data/Linear_Baseline_\"+labeler+\"_.npy\", pred_array)\n np.save(\"/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/CI_Figure_Data/Spectral_Frequency_\"+labeler+\"_.npy\", frequency)\n plt.close()\n\n \n\ndef mse_metric(p, q):\n mse = 
np.square(np.subtract(p,q)).mean()\n return mse\n\ndef pdf_gen(dist):\n mu, std = norm.fit(dist)\n #print(\"this is the dist\", dist.shape)\n if dist.ndim > 2:\n dist = np.reshape(dist, (len(dist)*len(dist[0]),len(dist[0][0])))\n plt.hist(dist, bins=25, density=True, alpha=0.6)\n #print(\"Graphed\")\n xmin, xmax = plt.xlim()\n #print(\"limits\")\n x = np.linspace(xmin, xmax, len(dist))\n #print(\"linspace\")\n pdf = norm.pdf(x, mu, std)\n #print(\"made it to pdf func end\")\n return pdf\n\ndef hellinger(p, q):\n # p = pdf_gen(p)\n # print(\"sum of pdf\", np.sum(p))\n # q = pdf_gen(q)\n p = p/np.sum(p)\n q = q/np.sum(q)\n hd = np.sqrt(np.sum((np.sqrt(p.ravel()) - np.sqrt(q.ravel())) ** 2)) / np.sqrt(2)\n return hd\n\ndef compute_metrics(vae, random_vae, train_data, test_data, id, dataset_max, dataset_min):\n hds = []\n hds_random = []\n\n mses = []\n mses_random = []\n\n spectrum_pred_random = []\n spectrum_pred = []\n spectrum_truth = []\n \n spectrum_850_pred = []\n spectrum_850_truth = []\n \n spectrum_750_pred = []\n spectrum_750_truth = []\n \n spectrum_500_pred = []\n spectrum_500_truth = []\n \n spectrum_250_pred = []\n spectrum_250_truth = []\n\n original_samples = []\n recon_samples = []\n\n i = 0\n #test_data = test_data[:100,:,:,:]\n for sample in test_data:\n if i%100 == 0:\n print(i)\n\n sample_mean_var = vae.predict(np.expand_dims(sample, 0))\n sample_mean = sample_mean_var[0, :128*30]\n sample_log_var = sample_mean_var[0, 128*30:]\n\n sample_mean_var_random = random_vae.predict(np.expand_dims(sample, 0))\n sample_mean_random = sample_mean_var_random[0, :128*30]\n\n # For the reconstruction, we take just the mean\n recon_sample = sample_mean\n recon_sample = recon_sample.reshape((30, 128))\n\n recon_sample_random = sample_mean_random\n recon_sample_random = recon_sample_random.reshape((30, 128))\n\n original_samples.append(sample[:, :, 0])\n recon_samples.append(recon_sample)\n\n # Compute hellinger\n h = hellinger(np.array(sample[:, :, 0]), np.array(recon_sample))\n hds.append(h)\n h_random = hellinger(np.array(sample[:, :, 0]), np.array(recon_sample_random))\n hds_random.append(h_random)\n\n # Compute MSE\n mse = mse_metric(np.array(sample[:, :, 0]), np.array(recon_sample))\n mses.append(mse)\n mse_random = mse_metric(np.array(sample[:, :, 0]), np.array(recon_sample_random))\n mses_random.append(mse_random)\n\n # Compute spectral \n rep_target, rep_pred, targ_freqs = spectrum_generator(sample[:, :, 0], recon_sample, 30, 1)\n _, rep_pred_random, _ = spectrum_generator(sample[:, :, 0], recon_sample_random, 30, 1)\n \n spectrum_pred_random.append(rep_pred_random)\n spectrum_pred.append(rep_pred)\n spectrum_truth.append(rep_target)\n \n #compute spectrum level\n #850\n rep_target_850, rep_pred_850, targ_freqs_850 = spectrum_level(sample[:, :, 0], recon_sample, 30, 1, 22)\n spectrum_850_pred.append(rep_pred_850)\n spectrum_850_truth.append(rep_target_850)\n #750\n rep_target_750, rep_pred_750, targ_freqs_750 = spectrum_level(sample[:, :, 0], recon_sample, 30, 1, 20)\n spectrum_750_pred.append(rep_pred_750)\n spectrum_750_truth.append(rep_target_750)\n #500\n rep_target_500, rep_pred_500, targ_freqs_500 = spectrum_level(sample[:, :, 0], recon_sample, 30, 1, 17)\n spectrum_500_pred.append(rep_pred_500)\n spectrum_500_truth.append(rep_target_500)\n #250\n rep_target_250, rep_pred_250, targ_freqs_250 = spectrum_level(sample[:, :, 0], recon_sample, 30, 1, 14)\n spectrum_250_pred.append(rep_pred_250)\n spectrum_250_truth.append(rep_target_250)\n\n i += 1\n\n #850\n overall_truth = 
np.nanmean(np.array(spectrum_850_truth), axis=0)\n overall_pred = np.nanmean(np.array(spectrum_850_pred), axis=0)\n print(\"truth\", overall_truth.shape)\n print(\"Prediction\", overall_pred.shape)\n print(\"frequency\", targ_freqs_850.shape)\n spectral_plot(overall_truth, overall_pred, targ_freqs_850,\"850\", id)\n #750\n overall_truth = np.nanmean(np.array(spectrum_750_truth), axis=0)\n overall_pred = np.nanmean(np.array(spectrum_750_pred), axis=0)\n spectral_plot(overall_truth, overall_pred, targ_freqs_750,\"750\", id)\n #500\n overall_truth = np.nanmean(np.array(spectrum_500_truth), axis=0)\n overall_pred = np.nanmean(np.array(spectrum_500_pred), axis=0)\n spectral_plot(overall_truth, overall_pred, targ_freqs_500,\"500\", id)\n #250\n overall_truth = np.nanmean(np.array(spectrum_250_truth), axis=0)\n overall_pred = np.nanmean(np.array(spectrum_250_pred), axis=0)\n spectral_plot(overall_truth, overall_pred, targ_freqs_250,\"250\", id)\n \n overall_truth = np.nanmean(np.array(spectrum_truth), axis=0)\n overall_pred = np.nanmean(np.array(spectrum_pred), axis=0)\n print(\"truth\", overall_truth.shape)\n print(\"Prediction\", overall_pred.shape)\n print(\"frequency\", targ_freqs.shape)\n overall_pred_random = np.nanmean(np.array(spectrum_pred_random), axis=0)\n\n print(\"Average Hellinger:\", np.mean(hds))\n print(\"Average Hellinger Random:\", np.mean(hds_random))\n print(\"Average MSE:\", np.mean(mses))\n print(\"Average MSE Random:\", np.mean(mses_random))\n\n plt.plot(1/targ_freqs, overall_truth, label=\"Original\")\n plt.plot(1/targ_freqs, overall_pred, label=\"Our Reconstruction\")\n plt.plot(1/targ_freqs, overall_pred_random, label=\"Random Reconstruction\")\n plt.legend()\n plt.xlabel(\"CRM Spacing\")\n plt.ylabel(r'$\\frac{m^2*crm}{s^2}$')\n plt.yscale('log')\n plt.xscale('log')\n plt.title(\"Overall signal\")\n plt.savefig('./model_graphs/spectral/overall_fft_{}.png'.format(id))\n plt.close()\n\ndef main():\n args = argument_parsing()\n print(\"Command line args:\", args)\n\n f = open(\"./model_config/config_{}.json\".format(args.id))\n model_config = json.load(f)\n f.close()\n\n train_data = np.load(model_config[\"data\"][\"training_data_path\"])\n test_data = np.load(model_config[\"data\"][\"test_data_path\"])\n\n dataset_max = np.load(model_config[\"data\"][\"max_scalar\"])\n dataset_min = np.load(model_config[\"data\"][\"min_scalar\"])\n\n print(\"dataset max\", dataset_max)\n print(\"dataset min\", dataset_min)\n\n img_width = train_data.shape[1]\n img_height = train_data.shape[2]\n\n print(\"Image shape:\", img_width, img_height)\n\n # Construct VAE Encoder \n encoder_result = encoder_gen((img_width, img_height), model_config[\"encoder\"])\n encoder_result_random = encoder_gen((img_width, img_height), model_config[\"encoder\"])\n\n # Construct VAE Decoder \n vae_decoder = decoder_gen(\n (img_width, img_height), \n model_config[\"decoder\"],\n encoder_result.shape_before_flattening\n )\n vae_decoder_random = decoder_gen(\n (img_width, img_height), \n model_config[\"decoder\"],\n encoder_result.shape_before_flattening\n )\n\n #_, _, z = encoder_result.vae_encoder(encoder_result.inputs)\n #_, _, z_random = encoder_result_random.vae_encoder(encoder_result_random.inputs)\n z = encoder_result.vae_encoder(encoder_result.inputs)\n z_random = encoder_result_random.vae_encoder(encoder_result_random.inputs)\n\n x_mu_var = vae_decoder(z)\n x_mu_var_random = vae_decoder_random(z_random)\n\n vae = keras.Model(inputs=[encoder_result.inputs], outputs=[x_mu_var])\n random_vae = 
keras.Model(inputs=[encoder_result_random.inputs], outputs=[x_mu_var_random])\n\n # load weights from file\n vae.load_weights('./models/model_{}.th'.format(args.id))\n print(\"weights loaded\")\n\n train_data = train_data.reshape(train_data.shape+(1,))\n test_data = test_data.reshape(test_data.shape+(1,))\n\n compute_metrics(vae, random_vae, train_data, test_data, args.id, dataset_max, dataset_min)\n\ndef argument_parsing():\n parser = argparse.ArgumentParser()\n parser.add_argument('--id', type=int, help='This option specifies the id of the config file to use to train the VAE.')\n\n args = parser.parse_args()\n return args \n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.expand_dims",
"numpy.linspace",
"numpy.asarray",
"numpy.squeeze",
"tensorflow.cast",
"numpy.concatenate",
"sklearn.manifold.TSNE",
"numpy.max",
"numpy.exp",
"numpy.where",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gray",
"numpy.allclose",
"numpy.arange",
"numpy.save",
"numpy.sin",
"matplotlib.pyplot.subplot",
"numpy.interp",
"matplotlib.ticker.FuncFormatter",
"numpy.load",
"matplotlib.pyplot.figure",
"tensorflow.norm",
"tensorflow.InteractiveSession",
"numpy.min",
"matplotlib.pyplot.savefig",
"numpy.identity",
"sklearn.decomposition.PCA",
"numpy.abs",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"matplotlib.pyplot.colorbar",
"numpy.random.normal",
"sklearn.preprocessing.StandardScaler",
"numpy.empty"
],
[
"matplotlib.pyplot.legend",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.squeeze",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.nanmean",
"numpy.conjugate",
"scipy.stats.norm.fit",
"numpy.subtract",
"numpy.save",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlabel",
"numpy.fft.fftfreq",
"numpy.array",
"matplotlib.pyplot.hist",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"scipy.stats.norm.pdf",
"numpy.fft.fft",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xscale"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.21",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
twicoder/BigDL
|
[
"ef4f4137965147e2bc59e41f40c4acbb50eeda97"
] |
[
"spark/dl/src/test/resources/tf/models/alexnet.py"
] |
[
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport tensorflow as tf\nfrom nets import alexnet\nfrom sys import argv\n\nfrom util import run_model\n\ndef main():\n \"\"\"\n You can also run these commands manually to generate the pb file\n 1. git clone https://github.com/tensorflow/models.git\n 2. export PYTHONPATH=Path_to_your_model_folder\n 3. python alexnet.py\n \"\"\"\n tf.set_random_seed(1)\n height, width = 224, 224\n inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name = 'input')\n inputs = tf.identity(inputs, \"input_node\")\n net, end_points = alexnet.alexnet_v2(inputs, is_training=False)\n print(\"nodes in the graph\")\n for n in end_points:\n print(n + \" => \" + str(end_points[n]))\n net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))\n run_model(net_outputs, argv[1], 'alexnet', argv[3] == 'True')\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"tensorflow.set_random_seed",
"tensorflow.random_uniform",
"tensorflow.get_default_graph",
"tensorflow.identity"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
trejsu/shaper
|
[
"03837d80f5818f807067dae51afd8b8180971d0e"
] |
[
"experiments/classify.py"
] |
[
"import argparse\nimport logging\nimport os\nfrom pathlib import Path\n\nimport pandas as pd\n\nARGS = None\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__name__)\n\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nCLASSIFY_COMMANDS_PATH = os.path.join(str(Path.home()), 'classify-commands.txt')\nCSV_HEADER = 'img,true_class,top1_class,top1_prob,top2_class,top2_prob,top3_class,top3_prob,top4_class,' \\\n 'top4_prob,top5_class,top5_prob,top1,top2,top3,top4,top5\\n'\nDARKNET_CMD_TEMPLATE = 'printf \\'{}\\' | ./darknet classifier predict cfg/imagenet1k.data cfg/darknet19.cfg ' \\\n 'darknet19.weights | sed \\'s/Enter Image Path: //\\' > {}'\nDARKNET_OUTPUT_PATH = os.path.join(str(Path.home()), 'darknet-output-%d.txt')\n\n\ndef main():\n remove_old_data()\n chunks = classify_images(\n img_dir=ARGS.images_dir,\n output_file=DARKNET_OUTPUT_PATH\n )\n write_classification_results_to_csv(chunks)\n log.info(f'Results saved under {ARGS.result_csv_path}')\n\n\ndef classify_images(img_dir, output_file):\n images = os.listdir(img_dir)\n num_images = len(images)\n log.info(f'Found {num_images} images')\n chunk_len = 100 if num_images > 100 * ARGS.cpu else num_images // ARGS.cpu + 1\n log.info(f'Chunk length = {chunk_len}.')\n images_chunks = [images[i:i + chunk_len] for i in range(0, num_images, chunk_len)]\n log.info(f'Divided images to classify into {len(images_chunks)} parts.')\n classify(chunks=images_chunks, images_dir=img_dir, output_file=output_file)\n return images_chunks\n\n\ndef score_predictions(name, top_cls, img_to_cls):\n true_cls = img_to_cls[name]\n return true_cls, str(int(true_cls in top_cls[:1])), str(int(true_cls in top_cls[:2])), \\\n str(int(true_cls in top_cls[:3])), str(int(true_cls in top_cls[:4])), str(int(true_cls in top_cls[:5]))\n\n\ndef write_classification_results_to_csv(chunks):\n if os.path.exists(ARGS.result_csv_path):\n old_results = ARGS.result_csv_path + '.old'\n log.warning(f'Found old csv with results, renaming to {old_results}')\n os.rename(ARGS.result_csv_path, old_results)\n\n top1_cls, top1_prob, top2_cls, top2_prob, top3_cls, top3_prob, top4_cls, top4_prob, top5_cls, top5_prob = \\\n extract_results(output_file=DARKNET_OUTPUT_PATH, num_chunks=len(chunks))\n\n df = pd.read_csv(ARGS.img_cls_mapping)\n log.info(f'Loaded img to class mapping data frame with {len(df.index)} rows.')\n img_to_cls = dict(zip(df['img'], df['class']))\n\n with open(ARGS.result_csv_path, \"a\") as csv:\n csv.write(CSV_HEADER)\n for i, chunk in enumerate(chunks):\n for j in range(len(chunk)):\n img = chunk[j]\n name = img.split('.')[0]\n true_cls, top1, top2, top3, top4, top5 = score_predictions(name, [top1_cls[i][j], top2_cls[i][j],\n top3_cls[i][j], top4_cls[i][j],\n top5_cls[i][j]], img_to_cls)\n\n csv_line = ','.join([name, true_cls, top1_cls[i][j], top1_prob[i][j], top2_cls[i][j], top2_prob[i][j],\n top3_cls[i][j], top3_prob[i][j], top4_cls[i][j], top4_prob[i][j], top5_cls[i][j],\n top5_prob[i][j], top1, top2, top3, top4, top5]) + '\\n'\n csv.write(csv_line)\n\n\ndef extract_results(output_file, num_chunks):\n results = [[], [], [], [], [], [], [], [], [], []]\n for i in range(num_chunks):\n chunk_results = extract_results_for_one_chunk(output_file % i)\n for j in range(10):\n results[j].append(chunk_results[j])\n return results\n\n\ndef extract_results_for_one_chunk(output_file):\n log.info(f'Extracting results for {output_file}')\n with open(output_file, \"r\") as darknet_output:\n darknet = darknet_output.readlines()\n probs = 
[line.split('%')[0].strip() for line in darknet]\n classes = [line.split(': ')[1][:-1] for line in darknet]\n assert len(probs) % 5 == 0\n assert len(classes) % 5 == 0\n return classes[0::5], probs[0::5], classes[1::5], probs[1::5], classes[2::5], probs[2::5], \\\n classes[3::5], probs[3::5], classes[4::5], probs[4::5]\n\n\ndef classify(chunks, images_dir, output_file):\n if os.path.exists(CLASSIFY_COMMANDS_PATH):\n os.remove(CLASSIFY_COMMANDS_PATH)\n\n with open(CLASSIFY_COMMANDS_PATH, \"a\") as commands:\n for i, chunk in enumerate(chunks):\n images_string = ''\n for img in chunk:\n images_string += os.path.join(images_dir, img) + '\\\\n'\n cmd = DARKNET_CMD_TEMPLATE.format(images_string, output_file % i)\n commands.write(f'cd {ARGS.darknet_path} && {cmd} && cd {CURRENT_DIR}\\n')\n\n parallel_cmd = f'parallel -j {ARGS.cpu} < {CLASSIFY_COMMANDS_PATH}'\n log.info('Starting classification...')\n os.system(parallel_cmd)\n log.info('Classification completed')\n\n\ndef remove_old_data():\n if os.path.exists(DARKNET_OUTPUT_PATH):\n os.remove(DARKNET_OUTPUT_PATH)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--images-dir', type=str, help='Directory with input images', required=True)\n parser.add_argument('--cpu', type=int, help='Number of CPUs to use', required=True)\n parser.add_argument('--result-csv-path', type=str, help='Output path to csv classification results', required=True)\n parser.add_argument('--darknet-path', type=str, help='Path to darknet classifier', required=True)\n parser.add_argument('--img-cls-mapping', type=str, help='Path to mapping between image names and labels',\n required=True)\n ARGS = parser.parse_args()\n log.info(ARGS)\n main()\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
stratosGithub/COCO-GAN
|
[
"727674a6ff20114cd432971f309a2d4115ef4b0d"
] |
[
"trainer.py"
] |
[
"import os\nimport time\nimport tensorflow as tf\nimport numpy as np\nfrom numpy import sin, cos\n\n\nNO_REDUCTION = tf.losses.Reduction.NONE\n\nclass Trainer():\n def __init__(self, sess, config, real_images, \n g_builder, d_builder, cp_builder, zp_builder, \n coord_handler, patch_handler):\n self.sess = sess\n self.config = config\n self.real_images = real_images\n self.g_builder = g_builder\n self.d_builder = d_builder\n self.cp_builder = cp_builder\n self.zp_builder = zp_builder\n self.coord_handler = coord_handler\n self.patch_handler = patch_handler\n\n # Vars for graph building\n self.batch_size = self.config[\"train_params\"][\"batch_size\"]\n self.z_dim = self.config[\"model_params\"][\"z_dim\"]\n self.spatial_dim = self.config[\"model_params\"][\"spatial_dim\"]\n self.micro_patch_size = self.config[\"data_params\"][\"micro_patch_size\"]\n self.macro_patch_size = self.config[\"data_params\"][\"macro_patch_size\"]\n\n self.ratio_macro_to_micro = self.config[\"data_params\"][\"ratio_macro_to_micro\"]\n self.ratio_full_to_micro = self.config[\"data_params\"][\"ratio_full_to_micro\"]\n self.num_micro_compose_macro = self.config[\"data_params\"][\"num_micro_compose_macro\"]\n\n # Vars for training loop\n self.exp_name = config[\"log_params\"][\"exp_name\"]\n self.epochs = float(self.config[\"train_params\"][\"epochs\"])\n self.num_batches = self.config[\"data_params\"][\"num_train_samples\"] // self.batch_size\n self.coordinate_system = self.config[\"data_params\"][\"coordinate_system\"]\n self.G_update_period = self.config[\"train_params\"][\"G_update_period\"]\n self.D_update_period = self.config[\"train_params\"][\"D_update_period\"]\n self.Q_update_period = self.config[\"train_params\"][\"Q_update_period\"]\n\n # Loss weights\n self.code_loss_w = self.config[\"loss_params\"][\"code_loss_w\"]\n self.coord_loss_w = self.config[\"loss_params\"][\"coord_loss_w\"]\n self.gp_lambda = self.config[\"loss_params\"][\"gp_lambda\"]\n\n # Extrapolation parameters handling\n self.train_extrap = self.config[\"train_params\"][\"train_extrap\"]\n if self.train_extrap:\n assert self.config[\"train_params\"][\"num_extrap_steps\"] is not None\n assert self.coordinate_system is not \"euclidean\", \\\n \"I didn't handle extrapolation in {} coordinate system!\".format(self.coordinate_system)\n self.num_extrap_steps = self.config[\"train_params\"][\"num_extrap_steps\"]\n else:\n self.num_extrap_steps = 0\n\n\n def _train_content_prediction_model(self):\n return (self.Q_update_period>0) and (self.config[\"train_params\"][\"qlr\"]>0)\n\n\n def sample_prior(self):\n return np.random.uniform(-1., 1., [self.batch_size, self.z_dim]).astype(np.float32)\n\n \n def _dup_z_for_macro(self, z):\n # Duplicate with nearest neighbor, different to `tf.tile`.\n # E.g., \n # tensor: [[1, 2], [3, 4]]\n # repeat: 3\n # output: [[1, 2], [1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]\n ch = z.shape[-1]\n repeat = self.num_micro_compose_macro\n extend = tf.expand_dims(z, 1)\n extend_dup = tf.tile(extend, [1, repeat, 1])\n return tf.reshape(extend_dup, [-1, ch])\n\n\n def build_graph(self):\n\n # Input nodes\n # Note: the input node name was wrong in the checkpoint \n self.micro_coord_fake = tf.placeholder(tf.float32, [None, self.spatial_dim], name='micro_coord_fake')\n self.macro_coord_fake = tf.placeholder(tf.float32, [None, self.spatial_dim], name='macro_coord_fake')\n self.micro_coord_real = tf.placeholder(tf.float32, [None, self.spatial_dim], name='micro_coord_real')\n self.macro_coord_real = tf.placeholder(tf.float32, [None, 
self.spatial_dim], name='macro_coord_real')\n\n # Reversing angle for cylindrical coordinate is complicated, directly pass values here\n self.y_angle_ratio = tf.placeholder(tf.float32, [None, 1], name='y_angle_ratio') \n self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')\n \n # Crop real micro for visualization\n if self.coordinate_system == \"euclidean\":\n self.real_micro = self.patch_handler.crop_micro_from_full_gpu(\n self.real_images, self.micro_coord_real[:, 0:1], self.micro_coord_real[:, 1:2])\n elif self.coordinate_system == \"cylindrical\":\n self.real_micro = self.patch_handler.crop_micro_from_full_gpu(\n self.real_images, self.micro_coord_real[:, 0:1], self.y_angle_ratio)\n\n # Real part\n self.real_macro = self.patch_handler.concat_micro_patches_gpu(\n self.real_micro, ratio_over_micro=self.ratio_macro_to_micro)\n (self.disc_real, disc_real_h) = self.d_builder(self.real_macro, self.macro_coord_real, is_training=True)\n self.c_real_pred = self.cp_builder(disc_real_h, is_training=True)\n self.z_real_pred = self.zp_builder(disc_real_h, is_training=True)\n\n # Fake part\n z_dup_macro = self._dup_z_for_macro(self.z)\n self.gen_micro = self.g_builder(z_dup_macro, self.micro_coord_fake, is_training=True)\n self.gen_macro = self.patch_handler.concat_micro_patches_gpu(\n self.gen_micro, ratio_over_micro=self.ratio_macro_to_micro)\n (self.disc_fake, disc_fake_h) = self.d_builder(self.gen_macro, self.macro_coord_fake, is_training=True)\n self.c_fake_pred = self.cp_builder(disc_fake_h, is_training=True)\n self.z_fake_pred = self.zp_builder(disc_fake_h, is_training=True)\n\n # Testing graph\n if self.config[\"log_params\"][\"merge_micro_patches_in_cpu\"]:\n self.gen_micro_test = self.g_builder(self.z, self.micro_coord_fake, is_training=False)\n else:\n (self.gen_micro_test, self.gen_full_test) = self.generate_full_image_gpu(self.z)\n\n # Patch-Guided Image Generation graph\n if self._train_content_prediction_model():\n (_, disc_real_h_rec) = self.d_builder(self.real_macro, None, is_training=False)\n estim_z = self.zp_builder(disc_real_h_rec, is_training=False)\n # I didn't especially handle this.\n # if self.config[\"log_params\"][\"merge_micro_patches_in_cpu\"]:\n (_, self.rec_full) = self.generate_full_image_gpu(self.z)\n\n print(\" [Build] Composing Loss Functions \")\n self._compose_losses()\n\n print(\" [Build] Creating Optimizers \")\n self._create_optimizers()\n\n\n def _calc_gradient_penalty(self):\n \"\"\" Gradient Penalty for patches D \"\"\"\n # This is borrowed from https://github.com/kodalinaveen3/DRAGAN/blob/master/DRAGAN.ipynb\n alpha = tf.random_uniform(shape=tf.shape(self.real_macro), minval=0.,maxval=1.)\n differences = self.gen_macro - self.real_macro # This is different from MAGAN\n interpolates = self.real_macro + (alpha * differences)\n disc_inter, _ = self.d_builder(interpolates, None, is_training=True)\n gradients = tf.gradients(disc_inter, [interpolates])[0]\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))\n gradient_penalty = tf.reduce_mean((slopes - 1.) 
** 2)\n return gradient_penalty, slopes\n\n\n def _compose_losses(self):\n\n # Content consistency loss\n self.code_loss = tf.reduce_mean(self.code_loss_w * tf.losses.absolute_difference(self.z, self.z_fake_pred))\n\n # Spatial consistency loss (reduce later)\n self.coord_mse_real = self.coord_loss_w * tf.losses.mean_squared_error(self.macro_coord_real, self.c_real_pred, reduction=NO_REDUCTION)\n self.coord_mse_fake = self.coord_loss_w * tf.losses.mean_squared_error(self.macro_coord_fake, self.c_fake_pred, reduction=NO_REDUCTION)\n\n # (For extrapolation training) Mask-out out-of-bound (OOB) coordinate loss since the gradients are useless\n if self.train_extrap:\n upper_bound = tf.ones([self.batch_size, self.spatial_dim], tf.float32) + 1e-4\n lower_bound = - upper_bound\n exceed_upper_bound = tf.greater(self.macro_coord_fake, upper_bound)\n exceed_lower_bound = tf.less(self.macro_coord_fake, lower_bound)\n\n oob_mask_sep = tf.math.logical_or(exceed_upper_bound, exceed_lower_bound)\n oob_mask_merge = tf.math.logical_or(oob_mask_sep[:, 0], oob_mask_sep[:, 1])\n for i in range(2, self.spatial_dim):\n oob_mask_merge = tf.math.logical_or(oob_mask_merge, oob_mask_sep[:, i])\n oob_mask = tf.tile(tf.expand_dims(oob_mask_merge, 1), [1, self.spatial_dim])\n self.coord_mse_fake = tf.where(oob_mask, tf.stop_gradient(self.coord_mse_fake), self.coord_mse_fake)\n\n self.coord_mse_real = tf.reduce_mean(self.coord_mse_real)\n self.coord_mse_fake = tf.reduce_mean(self.coord_mse_fake)\n self.coord_loss = self.coord_mse_real + self.coord_mse_fake\n\n # WGAN loss\n self.adv_real = - tf.reduce_mean(self.disc_real)\n self.adv_fake = tf.reduce_mean(self.disc_fake)\n self.d_adv_loss = self.adv_real + self.adv_fake\n self.g_adv_loss = - self.adv_fake\n\n # Gradient penalty loss of WGAN-GP\n gradient_penalty, self.gp_slopes = self._calc_gradient_penalty()\n self.gp_loss = self.config[\"loss_params\"][\"gp_lambda\"] * gradient_penalty\n\n # Total loss\n self.d_loss = self.d_adv_loss + self.gp_loss + self.coord_loss + self.code_loss\n self.g_loss = self.g_adv_loss + self.coord_loss + self.code_loss\n self.q_loss = self.g_adv_loss + self.code_loss\n\n # Wasserstein distance for visualization\n self.w_dist = - self.adv_real - self.adv_fake\n\n \n def _create_optimizers(self):\n\n t_vars = tf.trainable_variables()\n d_vars = [var for var in t_vars if 'D' in var.name]\n g_vars = [var for var in t_vars if 'G' in var.name]\n q_vars = [var for var in t_vars if 'Q' in var.name]\n \n # optimizers\n G_update_ops = tf.get_collection(self.g_builder.update_collection)\n D_update_ops = tf.get_collection(self.d_builder.update_collection)\n Q_update_ops = tf.get_collection(self.zp_builder.update_collection)\n GD_update_ops = tf.get_collection(self.cp_builder.update_collection)\n\n with tf.control_dependencies(G_update_ops + GD_update_ops):\n self.g_optim = tf.train.AdamOptimizer(\n self.config[\"train_params\"][\"glr\"], \n beta1=self.config[\"train_params\"][\"beta1\"], \n beta2=self.config[\"train_params\"][\"beta2\"], \n ).minimize(self.g_loss, var_list=g_vars)\n\n with tf.control_dependencies(D_update_ops + GD_update_ops):\n self.d_optim = tf.train.AdamOptimizer(\n self.config[\"train_params\"][\"dlr\"],\n beta1=self.config[\"train_params\"][\"beta1\"], \n beta2=self.config[\"train_params\"][\"beta2\"], \n ).minimize(self.d_loss, var_list=d_vars)\n\n if self._train_content_prediction_model():\n with tf.control_dependencies(Q_update_ops):\n self.q_optim = tf.train.AdamOptimizer(\n self.config[\"train_params\"][\"qlr\"],\n 
beta1=self.config[\"train_params\"][\"beta1\"], \n beta2=self.config[\"train_params\"][\"beta2\"], \n ).minimize(self.q_loss, var_list=q_vars)\n\n if self.train_extrap:\n with tf.variable_scope(\"extrap_optim\"):\n g_vars_partial = [\n var for var in g_vars if (\"g_resblock_0\" in var.name or \"g_resblock_1\" in var.name)] \n with tf.control_dependencies(G_update_ops + GD_update_ops):\n self.g_optim_extrap = tf.train.AdamOptimizer(\n self.config[\"train_params\"][\"glr\"], \n beta1=self.config[\"train_params\"][\"beta1\"], \n beta2=self.config[\"train_params\"][\"beta2\"], \n ).minimize(self.g_loss, var_list=g_vars_partial)\n \n with tf.control_dependencies(D_update_ops + GD_update_ops):\n self.d_optim_extrap = tf.train.AdamOptimizer(\n self.config[\"train_params\"][\"dlr\"], \n beta1=self.config[\"train_params\"][\"beta1\"], \n beta2=self.config[\"train_params\"][\"beta2\"], \n ).minimize(self.d_loss, var_list=d_vars)\n\n\n def rand_sample_full_test(self):\n if self.config[\"log_params\"][\"merge_micro_patches_in_cpu\"]:\n z = self.sample_prior()\n _, full_images = self.generate_full_image_cpu(z)\n else:\n full_images = self.sess.run(\n self.gen_full_test, feed_dict={self.z: self.sample_prior()})\n return full_images\n\n \n def generate_full_image_gpu(self, z):\n all_micro_patches = []\n all_micro_coord = []\n num_patches_x = self.ratio_full_to_micro[0] + self.num_extrap_steps*2\n num_patches_y = self.ratio_full_to_micro[1] + self.num_extrap_steps*2\n for yy in range(num_patches_y):\n for xx in range(num_patches_x):\n if self.coordinate_system == \"euclidean\":\n micro_coord_single = tf.constant([\n self.coord_handler.euclidean_coord_int_full_to_float_micro(xx, num_patches_x, extrap_steps=self.num_extrap_steps), \n self.coord_handler.euclidean_coord_int_full_to_float_micro(yy, num_patches_y, extrap_steps=self.num_extrap_steps),\n ])\n elif self.coordinate_system == \"cylindrical\":\n theta_ratio = self.coord_handler.hyperbolic_coord_int_full_to_float_micro(yy, num_patches_y)\n micro_coord_single = tf.constant([\n self.coord_handler.euclidean_coord_int_full_to_float_micro(xx, num_patches_x), \n self.coord_handler.hyperbolic_theta_to_euclidean(theta_ratio, proj_func=cos),\n self.coord_handler.hyperbolic_theta_to_euclidean(theta_ratio, proj_func=sin),\n ])\n micro_coord = tf.tile(tf.expand_dims(micro_coord_single, 0), [tf.shape(z)[0], 1])\n generated_patch = self.g_builder(z, micro_coord, is_training=False)\n all_micro_patches.append(generated_patch)\n all_micro_coord.append(micro_coord)\n\n num_patches = num_patches_x * num_patches_y\n all_micro_patches = tf.concat(all_micro_patches, 0)\n all_micro_patches_reord = self.patch_handler.reord_patches_gpu(all_micro_patches, self.batch_size, num_patches)\n full_image = self.patch_handler.concat_micro_patches_gpu(\n all_micro_patches_reord, \n ratio_over_micro=[num_patches_x, num_patches_y])\n\n return all_micro_patches, full_image\n\n\n def generate_full_image_cpu(self, z):\n all_micro_patches = []\n all_micro_coord = []\n num_patches_x = self.ratio_full_to_micro[0] + self.num_extrap_steps * 2\n num_patches_y = self.ratio_full_to_micro[1] + self.num_extrap_steps * 2\n for yy in range(num_patches_y):\n for xx in range(num_patches_x):\n if self.coordinate_system == \"euclidean\":\n micro_coord_single = np.array([\n self.coord_handler.euclidean_coord_int_full_to_float_micro(xx, num_patches_x, extrap_steps=self.num_extrap_steps),\n self.coord_handler.euclidean_coord_int_full_to_float_micro(yy, num_patches_y, extrap_steps=self.num_extrap_steps),\n 
])\n elif self.coordinate_system == \"cylindrical\":\n theta_ratio = self.coord_handler.hyperbolic_coord_int_full_to_float_micro(yy, num_patches_y)\n micro_coord_single = np.array([\n self.coord_handler.euclidean_coord_int_full_to_float_micro(xx, num_patches_x),\n self.coord_handler.hyperbolic_theta_to_euclidean(theta_ratio, proj_func=cos),\n self.coord_handler.hyperbolic_theta_to_euclidean(theta_ratio, proj_func=sin),\n ])\n micro_coord = np.tile(np.expand_dims(micro_coord_single, 0), [z.shape[0], 1])\n generated_patch = self.sess.run(\n self.gen_micro_test, feed_dict={self.z: z, self.micro_coord_fake: micro_coord}) # TODO\n all_micro_patches.append(generated_patch)\n all_micro_coord.append(micro_coord)\n\n num_patches = num_patches_x * num_patches_y\n all_micro_patches = np.concatenate(all_micro_patches, 0)\n all_micro_patches_reord = self.patch_handler.reord_patches_cpu(all_micro_patches, self.batch_size, num_patches)\n full_image = self.patch_handler.concat_micro_patches_cpu(\n all_micro_patches_reord, \n ratio_over_micro=[num_patches_x, num_patches_y])\n\n return all_micro_patches, full_image\n\n\n def train(self, logger, evaluator, global_step):\n start_time = time.time()\n g_loss, d_loss, q_loss = 0, 0, 0\n z_fixed = self.sample_prior()\n cur_epoch = int(global_step / self.num_batches)\n cur_iter = global_step - cur_epoch * self.num_batches\n\n while cur_epoch < self.epochs:\n while cur_iter < self.num_batches:\n\n # Create data\n z_iter = self.sample_prior()\n macro_coord, micro_coord, y_angle_ratio = self.coord_handler.sample_coord()\n feed_dict_iter = {\n self.micro_coord_real: micro_coord,\n self.macro_coord_real: macro_coord,\n self.micro_coord_fake: micro_coord,\n self.macro_coord_fake: macro_coord,\n self.y_angle_ratio: y_angle_ratio,\n self.z: z_iter,\n }\n feed_dict_fixed = {\n self.micro_coord_real: micro_coord,\n self.macro_coord_real: macro_coord,\n self.micro_coord_fake: micro_coord,\n self.macro_coord_fake: macro_coord,\n self.y_angle_ratio: y_angle_ratio,\n self.z: z_fixed,\n }\n \n # Optimize\n if (global_step % self.D_update_period) == 0:\n _, d_summary_str, d_loss = self.sess.run(\n [self.d_optim, logger.d_summaries, self.d_loss], \n feed_dict=feed_dict_iter)\n if (global_step % self.G_update_period) == 0:\n _, g_summary_str, g_loss = self.sess.run(\n [self.g_optim, logger.g_summaries, self.g_loss], \n feed_dict=feed_dict_iter)\n\n if self.train_extrap:\n macro_coord_extrap, micro_coord_extrap, _ = \\\n self.coord_handler.sample_coord(num_extrap_steps=self.num_extrap_steps)\n # Override logging inputs as well\n feed_dict_fixed[self.micro_coord_fake] = micro_coord_extrap\n feed_dict_fixed[self.macro_coord_fake] = macro_coord_extrap\n feed_dict_iter[self.micro_coord_fake] = micro_coord_extrap\n feed_dict_iter[self.macro_coord_fake] = macro_coord_extrap\n\n if (global_step % self.D_update_period) == 0:\n _, d_summary_str, d_loss = self.sess.run(\n [self.d_optim_extrap, logger.d_summaries, self.d_loss], \n feed_dict=feed_dict_iter)\n if (global_step % self.G_update_period) == 0:\n _, g_summary_str, g_loss = self.sess.run(\n [self.g_optim_extrap, logger.g_summaries, self.g_loss], \n feed_dict=feed_dict_iter)\n\n if self._train_content_prediction_model() and (global_step % self.Q_update_period) == 0:\n _, q_loss = self.sess.run(\n [self.q_optim, self.q_loss], \n feed_dict=feed_dict_iter)\n\n # Log\n time_elapsed = time.time() - start_time\n print(\"[{}] [Epoch: {}; {:4d}/{:4d}; global_step:{}] elapsed: {:.4f}, d: {:.4f}, g: {:.4f}, q: {:.4f}\".format(\n self.exp_name, 
cur_epoch, cur_iter, self.num_batches, global_step, time_elapsed, d_loss, g_loss, q_loss))\n logger.log_iter(self, evaluator, cur_epoch, cur_iter, global_step, g_summary_str, d_summary_str, \n z_iter, z_fixed, feed_dict_iter, feed_dict_fixed)\n\n cur_iter += 1\n global_step += 1\n \n cur_epoch += 1\n cur_iter = 0\n"
] |
[
[
"numpy.expand_dims",
"tensorflow.concat",
"tensorflow.control_dependencies",
"numpy.concatenate",
"tensorflow.losses.absolute_difference",
"tensorflow.train.AdamOptimizer",
"tensorflow.greater",
"tensorflow.get_collection",
"tensorflow.gradients",
"tensorflow.stop_gradient",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.tile",
"tensorflow.less",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.variable_scope",
"tensorflow.losses.mean_squared_error",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.math.logical_or",
"numpy.random.uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
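The COCO-GAN training code in the row above computes a WGAN-GP gradient penalty in _calc_gradient_penalty by interpolating between real and generated macro patches and penalising gradient norms that deviate from 1. Below is a minimal NumPy sketch of that penalty, assuming a toy linear critic D(x) = x @ w (so its input gradient is simply w); it is an illustration of the formula only, not the TensorFlow graph used in the repo.

import numpy as np

rng = np.random.default_rng(0)
batch, dim = 4, 8
real = rng.normal(size=(batch, dim))      # stand-in for real_macro
fake = rng.normal(size=(batch, dim))      # stand-in for gen_macro

alpha = rng.uniform(size=(batch, 1))      # same role as tf.random_uniform above
interpolates = real + alpha * (fake - real)

w = rng.normal(size=(dim,))               # toy critic D(x) = x @ w
grads = np.tile(w, (batch, 1))            # dD/dx = w for every interpolated sample
slopes = np.sqrt(np.sum(np.square(grads), axis=1))
gradient_penalty = np.mean((slopes - 1.0) ** 2)
print(gradient_penalty)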
lin-bo/RL_back2depot_VRP
|
[
"2a159d1df221ff314d98d79b8fde2b739a454ff7",
"2a159d1df221ff314d98d79b8fde2b739a454ff7"
] |
[
"solver/am_vrp_solver.py",
"solver/absolver.py"
] |
[
"import numpy as np\nfrom prob import VRPDGLDataset\nfrom dgl.dataloading import GraphDataLoader\nimport torch\nfrom attention_model.attention_utils.functions import load_routing_agent\nfrom solver.absolver import ABSolver\n\n\nclass amVRP:\n\n def __init__(self, size=20, method=\"greedy\"):\n \"\"\"\n args:\n size: the number of customers\n \"\"\"\n self.device = \"cpu\"\n if torch.cuda.is_available():\n self.device = \"cuda\"\n\n self.solver = load_routing_agent(size=size, name=\"vrp\")\n self.horizon = size * 2\n self._size = size\n assert method in [\"greedy\", \"sampling\"]\n self.method = method\n\n def solve(self, batch_data):\n if self.method == \"greedy\":\n batch_rep, iter_rep = 1, 1\n else:\n batch_rep, iter_rep = 1280, 1\n routes, costs = self.solver.sample_many(batch_data, batch_rep=batch_rep, iter_rep=iter_rep)\n routes = self._covertRoutes(routes)\n return routes, costs.detach().cpu().tolist()\n\n def _covertRoutes(self, batch_routes):\n batch_routes = batch_routes.cpu().detach().numpy() - 1\n batch_routes_list = []\n for routes in batch_routes:\n routes_list = []\n tour_list = []\n for i in routes:\n if i == -1 and len(tour_list) != 0:\n routes_list.append(tour_list)\n tour_list = []\n if i != -1:\n tour_list.append(i)\n if len(tour_list) != 0:\n routes_list.append(tour_list)\n batch_routes_list.append(routes_list)\n return batch_routes_list\n",
"#!/usr/bin/env python\n# coding: utf-8\n# Author: Bo Tang\n\"\"\"\nAbstract solver\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nimport numpy as np\n\nclass ABSolver(ABC):\n \"\"\"\n This is an abstract class for VRP solver\n\n Args:\n depot (int): coordinate of central depot\n loc (str): coordinates of customers\n demand (int): demands of customers\n \"\"\"\n\n def __init__(self, depot, loc, demand, seed=135):\n self.depot = depot\n self.loc = loc\n self.demand = demand\n # random seed\n np.random.seed(135)\n # graph size\n self.size = len(self.loc)\n\n @abstractmethod\n def solve(self):\n \"\"\"\n An abstract method to solve model\n \"\"\"\n raise NotImplementedError\n"
] |
[
[
"torch.cuda.is_available"
],
[
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
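The _covertRoutes helper in am_vrp_solver.py above turns a flat node sequence (indices shifted by -1 so the depot becomes -1) into a list of individual tours. The following pure-Python/NumPy sketch replays that splitting logic on a made-up route; the input values are illustrative only.

import numpy as np

flat_route = np.array([0, 3, 1, 0, 2, 4, 0]) - 1   # depot index 0 becomes -1
tours, current = [], []
for node in flat_route:
    if node == -1:                 # depot visit closes the current tour
        if current:
            tours.append(current)
            current = []
    else:
        current.append(int(node))
if current:                        # flush a trailing tour, as _covertRoutes does
    tours.append(current)
print(tours)                       # [[2, 0], [1, 3]]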
briandersn/minigo
|
[
"a3e1df9cc9802b224db6092257245e38e10aa746",
"a3e1df9cc9802b224db6092257245e38e10aa746"
] |
[
"tests/test_coords.py",
"tests/test_strategies.py"
] |
[
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy\n\nimport coords\nimport go\nfrom tests import test_utils\n\n\nclass TestCoords(test_utils.MiniGoUnitTest):\n def test_upperleft(self):\n self.assertEqual((0, 0), coords.from_sgf('aa'))\n self.assertEqual((0, 0), coords.from_flat(0))\n self.assertEqual((0, 0), coords.from_kgs('A9'))\n\n self.assertEqual('aa', coords.to_sgf((0, 0)))\n self.assertEqual(0, coords.to_flat((0, 0)))\n self.assertEqual('A9', coords.to_kgs((0, 0)))\n\n def test_topleft(self):\n self.assertEqual((0, 8), coords.from_sgf('ia'))\n self.assertEqual((0, 8), coords.from_flat(8))\n self.assertEqual((0, 8), coords.from_kgs('J9'))\n\n self.assertEqual('ia', coords.to_sgf((0, 8)))\n self.assertEqual(8, coords.to_flat((0, 8)))\n self.assertEqual('J9', coords.to_kgs((0, 8)))\n\n def test_pass(self):\n self.assertEqual(None, coords.from_sgf(''))\n self.assertEqual(None, coords.from_flat(81))\n self.assertEqual(None, coords.from_kgs('pass'))\n\n self.assertEqual('', coords.to_sgf(None))\n self.assertEqual(81, coords.to_flat(None))\n self.assertEqual('pass', coords.to_kgs(None))\n\n def test_parsing_9x9(self):\n self.assertEqual((0, 0), coords.from_sgf('aa'))\n self.assertEqual((2, 0), coords.from_sgf('ac'))\n self.assertEqual((0, 2), coords.from_sgf('ca'))\n self.assertEqual(None, coords.from_sgf(''))\n self.assertEqual('', coords.to_sgf(None))\n self.assertEqual('aa', coords.to_sgf(coords.from_sgf('aa')))\n self.assertEqual('sa', coords.to_sgf(coords.from_sgf('sa')))\n self.assertEqual((1, 17), coords.from_sgf(coords.to_sgf((1, 17))))\n self.assertEqual((8, 0), coords.from_kgs('A1'))\n self.assertEqual((0, 0), coords.from_kgs('A9'))\n self.assertEqual((7, 2), coords.from_kgs('C2'))\n self.assertEqual((7, 8), coords.from_kgs('J2'))\n\n self.assertEqual('J9', coords.to_kgs((0, 8)))\n self.assertEqual('A1', coords.to_kgs((8, 0)))\n\n def test_flatten(self):\n self.assertEqual(0, coords.to_flat((0, 0)))\n self.assertEqual(3, coords.to_flat((0, 3)))\n self.assertEqual(27, coords.to_flat((3, 0)))\n self.assertEqual((3, 0), coords.from_flat(27))\n self.assertEqual((1, 1), coords.from_flat(10))\n self.assertEqual((8, 8), coords.from_flat(80))\n self.assertEqual(10, coords.to_flat(coords.from_flat(10)))\n self.assertEqual((5, 4), coords.from_flat(coords.to_flat((5, 4))))\n\n def test_from_flat_ndindex_equivalence(self):\n ndindices = list(numpy.ndindex(go.N, go.N))\n flat_coords = list(range(go.N * go.N))\n self.assertEqual(ndindices, list(map(coords.from_flat, flat_coords)))\n",
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\nimport numpy as np\n\nfrom absl import flags\n\nimport coords\nimport go\nfrom tests import test_utils\nfrom strategies import MCTSPlayer, time_recommendation\n\nALMOST_DONE_BOARD = test_utils.load_board('''\n.XO.XO.OO\nX.XXOOOO.\nXXXXXOOOO\nXXXXXOOOO\n.XXXXOOO.\nXXXXXOOOO\n.XXXXOOO.\nXXXXXOOOO\nXXXXOOOOO\n''')\n\n# Tromp taylor means black can win if we hit the move limit.\nTT_FTW_BOARD = test_utils.load_board('''\n.XXOOOOOO\nX.XOO...O\n.XXOO...O\nX.XOO...O\n.XXOO..OO\nX.XOOOOOO\n.XXOOOOOO\nX.XXXXXXX\nXXXXXXXXX\n''')\n\nSEND_TWO_RETURN_ONE = go.Position(\n board=ALMOST_DONE_BOARD,\n n=70,\n komi=2.5,\n caps=(1, 4),\n ko=None,\n recent=(go.PlayerMove(go.BLACK, (0, 1)),\n go.PlayerMove(go.WHITE, (0, 8))),\n to_play=go.BLACK\n)\n\n\nclass DummyNet():\n def __init__(self, fake_priors=None, fake_value=0):\n if fake_priors is None:\n fake_priors = np.ones((go.N ** 2) + 1) / (go.N ** 2 + 1)\n self.fake_priors = fake_priors\n self.fake_value = fake_value\n\n def run(self, position):\n return self.fake_priors, self.fake_value\n\n def run_many(self, positions):\n if not positions:\n raise ValueError(\n \"No positions passed! (Tensorflow would have failed here.\")\n return [self.fake_priors] * len(positions), [self.fake_value] * len(positions)\n\n\ndef initialize_basic_player(position=None):\n player = MCTSPlayer(DummyNet())\n player.initialize_game(position)\n first_node = player.root.select_leaf()\n first_node.incorporate_results(\n *player.network.run(player.root.position), up_to=player.root)\n return player\n\n\ndef initialize_almost_done_player():\n probs = np.array([.001] * (go.N * go.N + 1))\n probs[2:5] = 0.2 # some legal moves along the top.\n probs[-1] = 0.2 # passing is also ok\n net = DummyNet(fake_priors=probs)\n player = MCTSPlayer(net)\n # root position is white to play with no history == white passed.\n player.initialize_game(SEND_TWO_RETURN_ONE)\n return player\n\n\nclass TestMCTSPlayer(test_utils.MiniGoUnitTest):\n def test_time_controls(self):\n secs_per_move = 5\n for time_limit in (10, 100, 1000):\n # in the worst case imaginable, let's say a game goes 1000 moves long\n move_numbers = range(0, 1000, 2)\n total_time_spent = sum(\n time_recommendation(move_num, secs_per_move,\n time_limit=time_limit)\n for move_num in move_numbers)\n # we should not exceed available game time\n self.assertLess(total_time_spent, time_limit)\n # but we should have used at least 95% of our time by the end.\n self.assertGreater(total_time_spent, time_limit * 0.95)\n\n def test_inject_noise(self):\n player = initialize_basic_player()\n sum_priors = np.sum(player.root.child_prior)\n # dummyNet should return normalized priors.\n self.assertAlmostEqual(1, sum_priors)\n self.assertTrue(np.all(player.root.child_U == player.root.child_U[0]))\n\n player.root.inject_noise()\n new_sum_priors = np.sum(player.root.child_prior)\n # priors should still be normalized after injecting noise\n 
self.assertAlmostEqual(sum_priors, new_sum_priors)\n\n # With dirichelet noise, majority of density should be in one node.\n max_p = np.max(player.root.child_prior)\n self.assertGreater(max_p, 4 / (go.N ** 2 + 1))\n\n def test_pick_moves(self):\n player = initialize_basic_player()\n root = player.root\n root.child_N[coords.to_flat((2, 0))] = 10\n root.child_N[coords.to_flat((1, 0))] = 5\n root.child_N[coords.to_flat((3, 0))] = 1\n\n root.position.n = go.N ** 2 # move 81, or 361, or... Endgame.\n\n # Assert we're picking deterministically\n self.assertTrue(root.position.n > player.temp_threshold)\n move = player.pick_move()\n self.assertEqual((2, 0), move)\n\n # But if we're in the early part of the game, pick randomly\n root.position.n = 3\n self.assertFalse(player.root.position.n > player.temp_threshold)\n\n with mock.patch('random.random', lambda: .5):\n move = player.pick_move()\n self.assertEqual((2, 0), move)\n\n with mock.patch('random.random', lambda: .99):\n move = player.pick_move()\n self.assertEqual(move, (3, 0))\n\n def test_dont_pass_if_losing(self):\n player = initialize_almost_done_player()\n\n # check -- white is losing.\n self.assertEqual(-0.5, player.root.position.score())\n\n for _ in range(20):\n player.tree_search()\n # uncomment to debug this test\n # print(player.root.describe())\n\n # Search should converge on D9 as only winning move.\n flattened = coords.to_flat(coords.from_kgs('D9'))\n best_move = np.argmax(player.root.child_N)\n self.assertEqual(flattened, best_move)\n # D9 should have a positive value\n self.assertGreater(player.root.children[flattened].Q, 0)\n self.assertGreaterEqual(player.root.N, 20)\n # passing should be ineffective.\n self.assertLess(player.root.child_Q[-1], 0)\n # no virtual losses should be pending\n self.assertNoPendingVirtualLosses(player.root)\n # uncomment to debug this test\n # print(player.root.describe())\n\n def test_parallel_tree_search(self):\n player = initialize_almost_done_player()\n # check -- white is losing.\n self.assertEqual(-0.5, player.root.position.score())\n # initialize the tree so that the root node has populated children.\n player.tree_search(parallel_readouts=1)\n # virtual losses should enable multiple searches to happen simultaneously\n # without throwing an error...\n for _ in range(5):\n player.tree_search(parallel_readouts=4)\n # uncomment to debug this test\n # print(player.root.describe())\n\n # Search should converge on D9 as only winning move.\n flattened = coords.to_flat(coords.from_kgs('D9'))\n best_move = np.argmax(player.root.child_N)\n self.assertEqual(flattened, best_move)\n # D9 should have a positive value\n self.assertGreater(player.root.children[flattened].Q, 0)\n self.assertGreaterEqual(player.root.N, 20)\n # passing should be ineffective.\n self.assertLess(player.root.child_Q[-1], 0)\n # no virtual losses should be pending\n self.assertNoPendingVirtualLosses(player.root)\n\n def test_ridiculously_parallel_tree_search(self):\n player = initialize_almost_done_player()\n # Test that an almost complete game\n # will tree search with # parallelism > # legal moves.\n for _ in range(10):\n player.tree_search(parallel_readouts=50)\n self.assertNoPendingVirtualLosses(player.root)\n\n def test_long_game_tree_search(self):\n player = MCTSPlayer(DummyNet())\n endgame = go.Position(\n board=TT_FTW_BOARD,\n n=flags.FLAGS.max_game_length - 2,\n komi=2.5,\n ko=None,\n recent=(go.PlayerMove(go.BLACK, (0, 1)),\n go.PlayerMove(go.WHITE, (0, 8))),\n to_play=go.BLACK\n )\n player.initialize_game(endgame)\n\n # 
Test that MCTS can deduce that B wins because of TT-scoring\n # triggered by move limit.\n for _ in range(10):\n player.tree_search(parallel_readouts=8)\n self.assertNoPendingVirtualLosses(player.root)\n self.assertGreater(player.root.Q, 0)\n\n def test_cold_start_parallel_tree_search(self):\n # Test that parallel tree search doesn't trip on an empty tree\n player = MCTSPlayer(DummyNet(fake_value=0.17))\n player.initialize_game()\n self.assertEqual(0, player.root.N)\n self.assertFalse(player.root.is_expanded)\n leaves = player.tree_search(parallel_readouts=4)\n self.assertEqual(4, len(leaves))\n self.assertEqual(player.root, leaves[0])\n\n self.assertNoPendingVirtualLosses(player.root)\n # Even though the root gets selected 4 times by tree search, its\n # final visit count should just be 1.\n self.assertEqual(1, player.root.N)\n # 0.085 = average(0, 0.17), since 0 is the prior on the root.\n self.assertAlmostEqual(0.085, player.root.Q)\n\n def test_tree_search_failsafe(self):\n # Test that the failsafe works correctly. It can trigger if the MCTS\n # repeatedly visits a finished game state.\n probs = np.array([.001] * (go.N * go.N + 1))\n probs[-1] = 1 # Make the dummy net always want to pass\n player = MCTSPlayer(DummyNet(fake_priors=probs))\n pass_position = go.Position().pass_move()\n player.initialize_game(pass_position)\n player.tree_search(parallel_readouts=8)\n self.assertNoPendingVirtualLosses(player.root)\n\n def test_only_check_game_end_once(self):\n # When presented with a situation where the last move was a pass,\n # and we have to decide whether to pass, it should be the first thing\n # we check, but not more than that.\n\n white_passed_pos = (go.Position()\n .play_move((3, 3)) # b plays\n .play_move((3, 4)) # w plays\n .play_move((4, 3)) # b plays\n .pass_move()) # w passes - if B passes too, B would lose by komi.\n\n player = initialize_basic_player(white_passed_pos)\n # explore a child - should be a pass move.\n player.tree_search()\n pass_move = go.N * go.N\n self.assertEqual(1, player.root.children[pass_move].N)\n self.assertEqual(1, player.root.child_N[pass_move])\n player.tree_search()\n # check that we didn't visit the pass node any more times.\n self.assertEqual(player.root.child_N[pass_move], 1)\n\n def test_extract_data_normal_end(self):\n player = initialize_basic_player()\n player.tree_search()\n player.play_move(None)\n player.tree_search()\n player.play_move(None)\n self.assertTrue(player.root.is_done())\n player.set_result(player.root.position.result(), was_resign=False)\n\n data = list(player.extract_data())\n self.assertEqual(2, len(data))\n position, _, result = data[0]\n # White wins by komi\n self.assertEqual(go.WHITE, result)\n self.assertEqual(\"W+{}\".format(player.root.position.komi),\n player.result_string)\n\n def test_extract_data_resign_end(self):\n player = initialize_basic_player()\n player.tree_search()\n player.play_move((0, 0))\n player.tree_search()\n player.play_move(None)\n player.tree_search()\n # Black is winning on the board\n self.assertEqual(go.BLACK, player.root.position.result())\n # But if Black resigns\n player.set_result(go.WHITE, was_resign=True)\n\n data = list(player.extract_data())\n position, _, result = data[0]\n # Result should say White is the winner\n self.assertEqual(go.WHITE, result)\n self.assertEqual(\"W+R\", player.result_string)\n"
] |
[
[
"numpy.ndindex"
],
[
"numpy.ones",
"numpy.all",
"numpy.max",
"numpy.argmax",
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
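test_flatten and test_pass above exercise coords.to_flat / coords.from_flat. The toy re-implementation below assumes row-major flattening on a 9x9 board with the pass move mapped to N*N; it mirrors the expectations encoded in the tests, not the actual coords module.

# Hypothetical re-implementation matching the assertions in test_flatten/test_pass.
N = 9

def to_flat(coord):
    if coord is None:              # "pass" maps to N*N (81 on a 9x9 board)
        return N * N
    row, col = coord
    return row * N + col

def from_flat(flat):
    if flat == N * N:
        return None
    return divmod(flat, N)         # (row, col) in row-major order

assert to_flat((3, 0)) == 27
assert from_flat(10) == (1, 1)
assert from_flat(to_flat((5, 4))) == (5, 4)
assert to_flat(None) == 81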
7CD/LambdaMART
|
[
"830f0d6824dbf538ff3eacd0c0d38381d6c73085",
"830f0d6824dbf538ff3eacd0c0d38381d6c73085"
] |
[
"src/pipelines/train.py",
"src/pipelines/data_split.py"
] |
[
"import argparse\nimport joblib\nimport os\nimport numpy as np\nfrom scipy.sparse import save_npz, load_npz\nimport yaml\n\nfrom src.ranking.model import LambdaMART\n\n\ndef train_model(config_path):\n config = yaml.safe_load(open(config_path))\n \n n_trees = config['train']['max_depth']\n max_depth = config['train']['max_depth']\n learning_rate = config['train']['learning_rate']\n \n X = load_npz(config['train']['X_path'])\n y = np.load(config['train']['y_path'])\n qid = np.load(config['train']['qid_path'])\n\n model = LambdaMART(n_trees, max_depth, learning_rate)\n model.fit(X, y, qid)\n\n model_name = config['model']['model_name']\n models_folder = config['model']['models_folder']\n\n joblib.dump(\n model,\n os.path.join(models_folder, model_name)\n )\n\n\nif __name__ == '__main__':\n args_parser = argparse.ArgumentParser()\n args_parser.add_argument('--config', dest='config', required=True)\n args = args_parser.parse_args()\n\n train_model(config_path=args.config)\n\n",
"import argparse\nimport os\nimport numpy as np\nfrom scipy.sparse import save_npz, load_npz\nimport yaml\n\nfrom src.data.split import train_val_split\n\n\ndef data_split(config_path):\n config = yaml.safe_load(open(config_path))\n \n processed_dataset_dir = config['data_load']['processed_dataset_dir']\n \n train_csr_path = os.path.join(processed_dataset_dir, 'train_csr.npz')\n train_y_path = os.path.join(processed_dataset_dir, 'train_y.npy')\n train_qid_path = os.path.join(processed_dataset_dir, 'train_qid.npy')\n \n train_csr = load_npz(train_csr_path)\n train_y = np.load(train_y_path)\n train_qid = np.load(train_qid_path)\n \n test_size = config['data_split']['test_size']\n random_state = config['data_split']['random_state']\n\n (X_train, y_train, qid_train), (X_val, y_val, qid_val) = train_val_split(train_csr, train_y, train_qid, \n test_size, random_state)\n \n \n save_npz(config['data_split']['train_csr_path'], X_train)\n np.save(config['data_split']['y_train_path'], y_train)\n np.save(config['data_split']['qid_train_path'], qid_train)\n \n save_npz(config['data_split']['X_val_path'], X_val)\n np.save(config['data_split']['y_val_path'], y_val)\n np.save(config['data_split']['qid_val_path'], qid_val)\n \n\nif __name__ == '__main__':\n args_parser = argparse.ArgumentParser()\n args_parser.add_argument('--config', dest='config', required=True)\n args = args_parser.parse_args()\n\n data_split(config_path=args.config)\n\n"
] |
[
[
"numpy.load",
"scipy.sparse.load_npz"
],
[
"numpy.load",
"scipy.sparse.save_npz",
"scipy.sparse.load_npz",
"numpy.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
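Both pipeline scripts above persist sparse feature matrices with scipy.sparse save_npz/load_npz and dense labels/query ids with np.save/np.load. The self-contained sketch below shows that round trip on toy data in a temporary directory (paths and shapes are made up for illustration).

import os
import tempfile

import numpy as np
from scipy.sparse import csr_matrix, save_npz, load_npz

tmp = tempfile.mkdtemp()
X = csr_matrix(np.eye(3))              # toy sparse feature matrix
y = np.array([1.0, 0.0, 2.0])          # toy relevance labels

save_npz(os.path.join(tmp, "train_csr.npz"), X)
np.save(os.path.join(tmp, "train_y.npy"), y)

X_back = load_npz(os.path.join(tmp, "train_csr.npz"))
y_back = np.load(os.path.join(tmp, "train_y.npy"))
print(X_back.shape, y_back)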
dSandley20/KomputeParticles
|
[
"099073c6db4b5345e80eaaebe97d97e0f8256849"
] |
[
"python/test/test_logistic_regression.py"
] |
[
"import pyshader as ps\nimport numpy as np\nimport kp\n\ndef test_logistic_regression():\n\n @ps.python2shader\n def compute_shader(\n index = (\"input\", \"GlobalInvocationId\", ps.ivec3),\n x_i = (\"buffer\", 0, ps.Array(ps.f32)),\n x_j = (\"buffer\", 1, ps.Array(ps.f32)),\n y = (\"buffer\", 2, ps.Array(ps.f32)),\n w_in = (\"buffer\", 3, ps.Array(ps.f32)),\n w_out_i = (\"buffer\", 4, ps.Array(ps.f32)),\n w_out_j = (\"buffer\", 5, ps.Array(ps.f32)),\n b_in = (\"buffer\", 6, ps.Array(ps.f32)),\n b_out = (\"buffer\", 7, ps.Array(ps.f32)),\n l_out = (\"buffer\", 8, ps.Array(ps.f32)),\n M = (\"buffer\", 9, ps.Array(ps.f32))):\n\n i = index.x\n\n m = M[0]\n\n w_curr = vec2(w_in[0], w_in[1])\n b_curr = b_in[0]\n\n x_curr = vec2(x_i[i], x_j[i])\n y_curr = y[i]\n\n z_dot = w_curr @ x_curr\n z = z_dot + b_curr\n y_hat = 1.0 / (1.0 + exp(-z))\n\n d_z = y_hat - y_curr\n d_w = (1.0 / m) * x_curr * d_z\n d_b = (1.0 / m) * d_z\n\n loss = -((y_curr * log(y_hat)) + ((1.0 + y_curr) * log(1.0 - y_hat)))\n\n w_out_i[i] = d_w.x\n w_out_j[i] = d_w.y\n b_out[i] = d_b\n l_out[i] = loss\n\n\n mgr = kp.Manager(0)\n\n # First we create input and ouput tensors for shader\n tensor_x_i = mgr.tensor(np.array([0.0, 1.0, 1.0, 1.0, 1.0]))\n tensor_x_j = mgr.tensor(np.array([0.0, 0.0, 0.0, 1.0, 1.0]))\n\n tensor_y = mgr.tensor(np.array([0.0, 0.0, 0.0, 1.0, 1.0]))\n\n tensor_w_in = mgr.tensor(np.array([0.001, 0.001]))\n tensor_w_out_i = mgr.tensor(np.array([0.0, 0.0, 0.0, 0.0, 0.0]))\n tensor_w_out_j = mgr.tensor(np.array([0.0, 0.0, 0.0, 0.0, 0.0]))\n\n tensor_b_in = mgr.tensor(np.array([0.0]))\n tensor_b_out = mgr.tensor(np.array([0.0, 0.0, 0.0, 0.0, 0.0]))\n\n tensor_l_out = mgr.tensor(np.array([0.0, 0.0, 0.0, 0.0, 0.0]))\n\n tensor_m = mgr.tensor(np.array([ tensor_y.size() ]))\n\n # We store them in an array for easier interaction\n params = [tensor_x_i, tensor_x_j, tensor_y, tensor_w_in, tensor_w_out_i,\n tensor_w_out_j, tensor_b_in, tensor_b_out, tensor_l_out, tensor_m]\n\n mgr.sequence().eval(kp.OpTensorSyncDevice(params))\n\n # Create a managed sequence\n sq = mgr.sequence()\n\n # Record operation to sync memory from local to GPU memory\n sq.record(kp.OpTensorSyncDevice([tensor_w_in, tensor_b_in]))\n\n # Record operation to execute GPU shader against all our parameters\n sq.record(kp.OpAlgoDispatch(mgr.algorithm(params, compute_shader.to_spirv())))\n\n # Record operation to sync memory from GPU to local memory\n sq.record(kp.OpTensorSyncLocal([tensor_w_out_i, tensor_w_out_j, tensor_b_out, tensor_l_out]))\n\n ITERATIONS = 100\n learning_rate = 0.1\n\n # Perform machine learning training and inference across all input X and Y\n for i_iter in range(ITERATIONS):\n\n # Execute an iteration of the algorithm\n sq.eval()\n\n # Calculate the parameters based on the respective derivatives calculated\n for j_iter in range(tensor_b_out.size()):\n tensor_w_in.data()[0] -= learning_rate * tensor_w_out_i.data()[j_iter]\n tensor_w_in.data()[1] -= learning_rate * tensor_w_out_j.data()[j_iter]\n tensor_b_in.data()[0] -= learning_rate * tensor_b_out.data()[j_iter]\n\n assert tensor_w_in.data()[0] < 0.01\n assert tensor_w_in.data()[0] > 0.0\n assert tensor_w_in.data()[1] > 1.5\n assert tensor_b_in.data()[0] < 0.7\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
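The Kompute shader above emits per-sample logistic-regression gradients (dz = y_hat - y, dw = x * dz / m, db = dz / m) that the host loop then applies with a fixed learning rate. The CPU-only NumPy sketch below runs the equivalent vectorised update on the same toy dataset as the test; it illustrates the math, not the GPU execution path.

import numpy as np

X = np.array([[0., 0.], [1., 0.], [1., 0.], [1., 1.], [1., 1.]])  # (x_i, x_j)
y = np.array([0., 0., 0., 1., 1.])
w = np.array([0.001, 0.001])
b = 0.0
m = len(y)
lr = 0.1

for _ in range(100):
    z = X @ w + b
    y_hat = 1.0 / (1.0 + np.exp(-z))
    dz = y_hat - y
    w -= lr * (X.T @ dz) / m      # summed per-sample dw, like the host loop above
    b -= lr * dz.sum() / m
print(w, b)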
HalcyonBravado/Pydpm
|
[
"17829ac21ecba754fd36a2ab3cea7186b84fca60"
] |
[
"pydpm/example/Sampler_Demo.py"
] |
[
"from pydpm._sampler import Basic_Sampler\r\n\r\nimport numpy as np\r\n\r\n\r\ndef debug_sampler_and_plot():\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n import seaborn as sns\r\n import scipy.stats as stats\r\n from collections import Counter\r\n\r\n sampler = Basic_Sampler('gpu')\r\n\r\n # gamma\r\n output = sampler.gamma(np.ones(1000)*4.5, 5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 100, 100), stats.gamma.pdf(np.linspace(0, 100, 100), 4.5, scale=5))\r\n plt.title('gamma(4.5, 5)')\r\n plt.show()\r\n\r\n # standard_gamma\r\n output = sampler.standard_gamma(np.ones(1000)*4.5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 20, 100), stats.gamma.pdf(np.linspace(0, 20, 100), 4.5))\r\n plt.title('standard_gamma(4.5)')\r\n plt.show()\r\n\r\n # dirichlet\r\n output = sampler.dirichlet(np.ones(1000)*4.5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n # plt.plot(np.linspace(0, 1, 100), stats.dirichlet.pdf(np.linspace(0, 1, 100), np.ones(100)*4.5))\r\n plt.title('dirichlet(4.5)')\r\n plt.show()\r\n\r\n # beta\r\n output = sampler.beta(np.ones(1000)*0.5, 0.5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 1, 100), stats.beta.pdf(np.linspace(0, 1, 100), 0.5, 0.5))\r\n plt.title('beta(0.5, 0.5)')\r\n plt.show()\r\n\r\n # beta(2, 5)\r\n output = sampler.beta(np.ones(1000)*2, 5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 1, 100), stats.beta.pdf(np.linspace(0, 1, 100), 2, 5))\r\n plt.title('beta(2, 5)')\r\n plt.show()\r\n\r\n # normal\r\n output = sampler.normal(np.ones(1000)*5, np.ones(1000)*2)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(-2, 13, 100), stats.norm.pdf(np.linspace(-2, 13, 100), 5, scale=2))\r\n plt.title('normal(5, 2)')\r\n plt.show()\r\n\r\n # standard_normal\r\n output = sampler.standard_normal(1000)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(-3, 3, 100), stats.norm.pdf(np.linspace(-3, 3, 100)))\r\n plt.title('standard_normal()')\r\n plt.show()\r\n\r\n # uniform\r\n output = sampler.uniform(np.ones(1000)*(-2), np.ones(1000)*5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(-3, 6, 100), stats.uniform.pdf(np.linspace(-3, 6, 100), -2, 7))\r\n plt.title('uniform(-2, 5)')\r\n plt.show()\r\n\r\n # standard_uniform\r\n output = sampler.standard_uniform(1000)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(-0.3, 1.3, 100), stats.uniform.pdf(np.linspace(-0.3, 1.3, 100)))\r\n plt.title('standard_uniform()')\r\n plt.show()\r\n\r\n # negative_binomial\r\n output = sampler.negative_binomial(np.ones(1000)*10, 0.5)\r\n plt.figure()\r\n plt.hist(output, bins=np.max(output)-np.min(output), density=True, range=(np.min(output)-0.5, np.max(output)-0.5))\r\n plt.scatter(np.arange(30), stats.nbinom._pmf(np.arange(30), 10, 0.5), c='orange', zorder=10)\r\n plt.title('negative_binomial(10, 0.5)')\r\n plt.show()\r\n\r\n # # multinomial\r\n # output = sampler.multinomial(5, [0.8, 0.2], 1000)\r\n # output = output[:, 0]\r\n # plt.figure()\r\n # plt.hist(output, bins=, density=True)\r\n # plt.title('multinomial(5, [0.8, 0.2])')\r\n # plt.show()\r\n\r\n a = np.array([np.array([[i] * 6 for i in range(6)]).reshape(-1), np.array(list(range(6)) * 6)]).T\r\n output = stats.multinomial(n=5, p=[0.8, 0.2]).pmf(a)\r\n sns.heatmap(output.reshape(6, 6), 
annot=True)\r\n plt.ylabel('number of the 1 kind(p=0.8)')\r\n plt.xlabel('number of the 2 kind(p=0.2)')\r\n plt.title('stats.multinomial(n=5, p=[0.8, 0.2])')\r\n plt.show()\r\n\r\n # poisson\r\n output = sampler.poisson(np.ones(1000)*10)\r\n plt.figure()\r\n plt.hist(output, bins=22, density=True, range=(-0.5, 21.5))\r\n plt.scatter(np.arange(20), stats.poisson.pmf(np.arange(20), 10), c='orange', zorder=10)\r\n plt.title('poisson(10)')\r\n plt.show()\r\n\r\n # cauchy\r\n output = sampler.cauchy(np.ones(1000)*1, 0.5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True, range=(-5, 7))\r\n plt.plot(np.linspace(-5, 7, 100), stats.cauchy.pdf(np.linspace(-5, 7, 100), 1, 0.5))\r\n plt.title('cauchy(1, 0.5)')\r\n plt.show()\r\n\r\n # standard_cauchy\r\n output = sampler.standard_cauchy(1000)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True, range=(-7, 7))\r\n plt.plot(np.linspace(-7, 7, 100), stats.cauchy.pdf(np.linspace(-7, 7, 100)))\r\n plt.title('standard_cauchy()')\r\n plt.show()\r\n\r\n # chisquare\r\n output = sampler.chisquare(np.ones(1000)*10)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 30, 100), stats.chi2.pdf(np.linspace(0, 30, 100), 10))\r\n plt.title('chisquare(10)')\r\n plt.show()\r\n\r\n # noncentral_chisquare\r\n output = sampler.noncentral_chisquare(np.ones(1000)*10, 5, 2)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n # nocentral_chi2 = scale^2 * (chi2 + 2*loc*chi + df*loc^2)\r\n # plt.plot(np.linspace(0, 150, 100), stats.chi2.pdf(np.linspace(0, 150, 100), 10, loc=5, scale=2))\r\n plt.title('noncentral_chisquare(10, loc=5, scale=2)')\r\n plt.show()\r\n\r\n # exponential\r\n lam = 0.5\r\n output = sampler.exponential(np.ones(1000)*lam)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0.01, 4, 100), stats.expon.pdf(np.linspace(0.01, 4, 100), scale=0.5))\r\n plt.title('exponential(0.5)')\r\n plt.show()\r\n\r\n # standard_exponential\r\n output = sampler.standard_exponential(1000)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0.01, 8, 100), stats.expon.pdf(np.linspace(0.01, 8, 100)))\r\n plt.title('standard_exponential()')\r\n plt.show()\r\n\r\n # f\r\n output = sampler.f(np.ones(1000)*10, 10)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 8, 100), stats.f.pdf(np.linspace(0, 8, 100), 10, 10))\r\n plt.title('f(10, 10)')\r\n plt.show()\r\n\r\n # geometric\r\n output = sampler.geometric(np.ones(1000)*0.1)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.scatter(np.arange(50), stats.geom.pmf(np.arange(50), p=0.1), c='orange', zorder=10)\r\n plt.title('geometric(0.1)')\r\n plt.show()\r\n\r\n # gumbel\r\n output = sampler.gumbel(np.ones(1000)*5, np.ones(1000)*2)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 20, 100), stats.gumbel_r.pdf(np.linspace(0, 20, 100)+0.01, 5, scale=2))\r\n plt.title('gumbel(5, 2)')\r\n plt.show()\r\n np.random.gumbel()\r\n\r\n # hypergeometric\r\n output = sampler.hypergeometric(np.ones(1000)*5, 10, 10)\r\n plt.figure()\r\n plt.hist(output, bins=np.max(output)-np.min(output), density=True, range=(np.min(output)-0.5, np.max(output)-0.5))\r\n plt.scatter(np.arange(10), stats.hypergeom(15, 5, 10).pmf(np.arange(10)), c='orange', zorder=10) # hypergeom(M, n, N), total, I, tiems\r\n plt.title('hypergeometric(5, 10, 10)with replacement, need2do')\r\n plt.show()\r\n\r\n # laplace\r\n output = 
sampler.laplace(np.ones(1000)*5, np.ones(1000)*2)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(-10, 20, 100), stats.laplace.pdf(np.linspace(-10, 20, 100), 5, scale=2))\r\n plt.title('laplace(5, 2)')\r\n plt.show()\r\n\r\n # logistic\r\n output = sampler.logistic(np.ones(1000)*5, np.ones(1000)*2)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(-10, 20, 100), stats.logistic.pdf(np.linspace(-10, 20, 100), 5, scale=2))\r\n plt.title('logistic(5, 2)')\r\n plt.show()\r\n\r\n # power\r\n output = sampler.power(np.ones(1000)*0.5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 1.5, 100), stats.powerlaw.pdf(np.linspace(0, 1.5, 100), 0.5))\r\n plt.title('power(0.5)')\r\n plt.show()\r\n\r\n # zipf\r\n output = sampler.zipf(np.ones(1000)*1.1)\r\n counter = Counter(output)\r\n filter = np.array([[key, counter[key]] for key in counter.keys() if key < 50])\r\n plt.figure()\r\n plt.scatter(filter[:, 0], filter[:, 1] / 1000)\r\n plt.plot(np.arange(1, 50), stats.zipf(1.1).pmf(np.arange(1, 50)))\r\n plt.title('zipf(1.1)')\r\n plt.show()\r\n\r\n # pareto\r\n output = sampler.pareto(np.ones(1000)*2, np.ones(1000)*5)\r\n plt.figure()\r\n plt.hist(output[output < 40], bins=20, density=True)\r\n # plt.plot(np.linspace(0, 15, 100), stats.pareto(a=2, b=5).pdf(np.linspace(0, 15, 100))) # param1 scale,\r\n plt.title('pareto(2, 5)')\r\n plt.show()\r\n\r\n # rayleigh\r\n output = sampler.rayleigh(np.ones(1000)*2.0)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 8, 100), stats.rayleigh(scale=2).pdf(np.linspace(0, 8, 100)))\r\n plt.title('rayleigh(2)')\r\n plt.show()\r\n\r\n # t\r\n output = sampler.t(np.ones(1000)*2.0)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True, range=(-6, 6))\r\n plt.plot(np.linspace(-6, 6, 100), stats.t(2).pdf(np.linspace(-6, 6, 100)))\r\n plt.title('t(2)')\r\n plt.show()\r\n\r\n # triangular\r\n output = sampler.triangular(np.ones(1000)*0.0, 0.3, 1)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 1, 100), stats.triang.pdf(np.linspace(0, 1, 100), 0.3))\r\n plt.title('triangular(0, 0.3, 1)')\r\n plt.show()\r\n\r\n # weibull\r\n output = sampler.weibull(np.ones(1000)*4.5, 5)\r\n plt.figure()\r\n plt.hist(output, bins=20, density=True)\r\n plt.plot(np.linspace(0, 10, 100), stats.weibull_min.pdf(np.linspace(0, 10, 100), 4.5, scale=5))\r\n plt.title('weibull(4.5, 5)')\r\n plt.show()\r\n\r\n\r\n# -----------------test the accuracy --------------------\r\nif __name__ ==\"__main__\":\r\n plot_all_distribution_example = True # plot all distribution samplers' example and compare with its pdf/pmf.\r\n\r\n if plot_all_distribution_example:\r\n debug_sampler_and_plot()\r\n\r\n # sampler = Basic_Sampler('gpu')\r\n # while (1):\r\n # a = sampler.gamma(0.01*np.ones([5, 3, 2, 1]), times=100)\r\n # if np.sum(np.isinf(a)):\r\n # print('OK')\r\n # print(a[np.where(np.isinf(a))])\r\n\r\n # a = sampler.binomial([[50, 50]], [[0.2, 1], [0.3, 1]], times=10)\r\n # a = sampler.multinomial(50, [0.5, 0.5], times=10)\r\n # a = sampler.gamma(1.0, times=100)\r\n # print(a)"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.random.gumbel",
"numpy.arange",
"numpy.min",
"numpy.ones",
"numpy.max",
"scipy.stats.multinomial",
"scipy.stats.hypergeom",
"scipy.stats.zipf",
"scipy.stats.rayleigh",
"scipy.stats.t",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
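Sampler_Demo.py above visually compares GPU-drawn samples against scipy.stats densities. As a quick CPU-only check of the same idea for the gamma(4.5, scale=5) case, the sketch below uses numpy.random as a stand-in for Basic_Sampler('gpu') and compares sample moments with the theoretical values instead of plotting.

import numpy as np

rng = np.random.default_rng(0)
samples = rng.gamma(shape=4.5, scale=5.0, size=100_000)

print("sample mean", samples.mean(), "theory", 4.5 * 5.0)        # 22.5
print("sample var ", samples.var(),  "theory", 4.5 * 5.0 ** 2)   # 112.5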
mufeili/deep_gcns_torch
|
[
"54c88d7a8923b5210797a5bf1a0d749448ba9f9f",
"54c88d7a8923b5210797a5bf1a0d749448ba9f9f",
"54c88d7a8923b5210797a5bf1a0d749448ba9f9f",
"54c88d7a8923b5210797a5bf1a0d749448ba9f9f"
] |
[
"examples/ogb/ogbn_proteins/main.py",
"examples/sem_seg_sparse/architecture.py",
"examples/ogb/ogbn_arxiv/model.py",
"eff_gcn_modules/rev/rev_layer.py"
] |
[
"import __init__\nimport torch\nimport torch.optim as optim\nimport statistics\nfrom dataset import OGBNDataset\nfrom model import DeeperGCN\nfrom args import ArgsInit\nimport time\nimport numpy as np\nfrom ogb.nodeproppred import Evaluator\nfrom utils.ckpt_util import save_ckpt\nfrom utils.data_util import intersection, process_indexes\nimport logging\n\n\ndef train(data, dataset, model, optimizer, criterion, device):\n\n loss_list = []\n model.train()\n sg_nodes, sg_edges, sg_edges_index, _ = data\n\n train_y = dataset.y[dataset.train_idx]\n idx_clusters = np.arange(len(sg_nodes))\n np.random.shuffle(idx_clusters)\n\n for idx in idx_clusters:\n\n x = dataset.x[sg_nodes[idx]].float().to(device)\n sg_nodes_idx = torch.LongTensor(sg_nodes[idx]).to(device)\n\n sg_edges_ = sg_edges[idx].to(device)\n sg_edges_attr = dataset.edge_attr[sg_edges_index[idx]].to(device)\n\n mapper = {node: idx for idx, node in enumerate(sg_nodes[idx])}\n\n inter_idx = intersection(sg_nodes[idx], dataset.train_idx.tolist())\n training_idx = [mapper[t_idx] for t_idx in inter_idx]\n\n optimizer.zero_grad()\n\n pred = model(x, sg_nodes_idx, sg_edges_, sg_edges_attr)\n\n target = train_y[inter_idx].to(device)\n\n loss = criterion(pred[training_idx].to(torch.float32), target.to(torch.float32))\n loss.backward()\n optimizer.step()\n loss_list.append(loss.item())\n\n return statistics.mean(loss_list)\n\n\[email protected]_grad()\ndef multi_evaluate(valid_data_list, dataset, model, evaluator, device):\n model.eval()\n target = dataset.y.detach().numpy()\n\n train_pre_ordered_list = []\n valid_pre_ordered_list = []\n test_pre_ordered_list = []\n\n test_idx = dataset.test_idx.tolist()\n train_idx = dataset.train_idx.tolist()\n valid_idx = dataset.valid_idx.tolist()\n\n for valid_data_item in valid_data_list:\n sg_nodes, sg_edges, sg_edges_index, _ = valid_data_item\n idx_clusters = np.arange(len(sg_nodes))\n\n test_predict = []\n test_target_idx = []\n\n train_predict = []\n valid_predict = []\n\n train_target_idx = []\n valid_target_idx = []\n\n for idx in idx_clusters:\n x = dataset.x[sg_nodes[idx]].float().to(device)\n sg_nodes_idx = torch.LongTensor(sg_nodes[idx]).to(device)\n\n mapper = {node: idx for idx, node in enumerate(sg_nodes[idx])}\n sg_edges_attr = dataset.edge_attr[sg_edges_index[idx]].to(device)\n\n inter_tr_idx = intersection(sg_nodes[idx], train_idx)\n inter_v_idx = intersection(sg_nodes[idx], valid_idx)\n\n train_target_idx += inter_tr_idx\n valid_target_idx += inter_v_idx\n\n tr_idx = [mapper[tr_idx] for tr_idx in inter_tr_idx]\n v_idx = [mapper[v_idx] for v_idx in inter_v_idx]\n\n pred = model(x, sg_nodes_idx, sg_edges[idx].to(device), sg_edges_attr).cpu().detach()\n\n train_predict.append(pred[tr_idx])\n valid_predict.append(pred[v_idx])\n\n inter_te_idx = intersection(sg_nodes[idx], test_idx)\n test_target_idx += inter_te_idx\n\n te_idx = [mapper[te_idx] for te_idx in inter_te_idx]\n test_predict.append(pred[te_idx])\n\n train_pre = torch.cat(train_predict, 0).numpy()\n valid_pre = torch.cat(valid_predict, 0).numpy()\n test_pre = torch.cat(test_predict, 0).numpy()\n\n train_pre_ordered = train_pre[process_indexes(train_target_idx)]\n valid_pre_ordered = valid_pre[process_indexes(valid_target_idx)]\n test_pre_ordered = test_pre[process_indexes(test_target_idx)]\n\n train_pre_ordered_list.append(train_pre_ordered)\n valid_pre_ordered_list.append(valid_pre_ordered)\n test_pre_ordered_list.append(test_pre_ordered)\n\n train_pre_final = torch.mean(torch.Tensor(train_pre_ordered_list), dim=0)\n 
valid_pre_final = torch.mean(torch.Tensor(valid_pre_ordered_list), dim=0)\n test_pre_final = torch.mean(torch.Tensor(test_pre_ordered_list), dim=0)\n\n eval_result = {}\n\n input_dict = {\"y_true\": target[train_idx], \"y_pred\": train_pre_final}\n eval_result[\"train\"] = evaluator.eval(input_dict)\n\n input_dict = {\"y_true\": target[valid_idx], \"y_pred\": valid_pre_final}\n eval_result[\"valid\"] = evaluator.eval(input_dict)\n\n input_dict = {\"y_true\": target[test_idx], \"y_pred\": test_pre_final}\n eval_result[\"test\"] = evaluator.eval(input_dict)\n\n return eval_result\n\n\ndef main():\n args = ArgsInit().save_exp()\n\n if args.use_gpu:\n device = torch.device(\"cuda:\" + str(args.device)) if torch.cuda.is_available() else torch.device(\"cpu\")\n else:\n device = torch.device(\"cpu\")\n\n logging.info('%s' % device)\n\n dataset = OGBNDataset(dataset_name=args.dataset)\n # extract initial node features\n nf_path = dataset.extract_node_features(args.aggr)\n\n args.num_tasks = dataset.num_tasks\n args.nf_path = nf_path\n\n logging.info('%s' % args)\n\n evaluator = Evaluator(args.dataset)\n criterion = torch.nn.BCEWithLogitsLoss()\n\n valid_data_list = []\n\n for i in range(args.num_evals):\n parts = dataset.random_partition_graph(dataset.total_no_of_nodes,\n cluster_number=args.valid_cluster_number)\n valid_data = dataset.generate_sub_graphs(parts,\n cluster_number=args.valid_cluster_number)\n valid_data_list.append(valid_data)\n\n sub_dir = 'random-train_{}-test_{}-num_evals_{}'.format(args.cluster_number,\n args.valid_cluster_number,\n args.num_evals)\n logging.info(sub_dir)\n\n model = DeeperGCN(args).to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n results = {'highest_valid': 0,\n 'final_train': 0,\n 'final_test': 0,\n 'highest_train': 0}\n\n start_time = time.time()\n\n for epoch in range(1, args.epochs + 1):\n # do random partition every epoch\n train_parts = dataset.random_partition_graph(dataset.total_no_of_nodes,\n cluster_number=args.cluster_number)\n data = dataset.generate_sub_graphs(train_parts, cluster_number=args.cluster_number)\n\n epoch_loss = train(data, dataset, model, optimizer, criterion, device)\n logging.info('Epoch {}, training loss {:.4f}'.format(epoch, epoch_loss))\n\n model.print_params(epoch=epoch)\n\n result = multi_evaluate(valid_data_list, dataset, model, evaluator, device)\n\n if epoch % 5 == 0:\n logging.info('%s' % result)\n\n train_result = result['train']['rocauc']\n valid_result = result['valid']['rocauc']\n test_result = result['test']['rocauc']\n\n if valid_result > results['highest_valid']:\n results['highest_valid'] = valid_result\n results['final_train'] = train_result\n results['final_test'] = test_result\n\n save_ckpt(model, optimizer, round(epoch_loss, 4),\n epoch,\n args.model_save_path, sub_dir,\n name_post='valid_best')\n\n if train_result > results['highest_train']:\n results['highest_train'] = train_result\n\n logging.info(\"%s\" % results)\n\n end_time = time.time()\n total_time = end_time - start_time\n logging.info('Total time: {}'.format(time.strftime('%H:%M:%S', time.gmtime(total_time))))\n\n\nif __name__ == \"__main__\":\n main()\n",
"import __init__\nimport torch\nfrom torch.nn import Linear as Lin\nfrom gcn_lib.sparse import MultiSeq, MLP, GraphConv, PlainDynBlock, ResDynBlock, DenseDynBlock, DilatedKnnGraph\nfrom utils.pyg_util import scatter_\nfrom torch_geometric.data import Data\n\n\nclass SparseDeepGCN(torch.nn.Module):\n def __init__(self, opt):\n super(SparseDeepGCN, self).__init__()\n channels = opt.n_filters\n k = opt.k\n act = opt.act\n norm = opt.norm\n bias = opt.bias\n epsilon = opt.epsilon\n stochastic = opt.stochastic\n conv = opt.conv\n c_growth = channels\n\n self.n_blocks = opt.n_blocks\n\n self.knn = DilatedKnnGraph(k, 1, stochastic, epsilon)\n self.head = GraphConv(opt.in_channels, channels, conv, act, norm, bias)\n\n if opt.block.lower() == 'res':\n self.backbone = MultiSeq(*[ResDynBlock(channels, k, 1+i, conv, act, norm, bias, stochastic=stochastic, epsilon=epsilon)\n for i in range(self.n_blocks-1)])\n fusion_dims = int(channels + c_growth * (self.n_blocks - 1))\n elif opt.block.lower() == 'dense':\n self.backbone = MultiSeq(*[DenseDynBlock(channels+c_growth*i, c_growth, k, 1+i,\n conv, act, norm, bias, stochastic=stochastic, epsilon=epsilon)\n for i in range(self.n_blocks-1)])\n fusion_dims = int(\n (channels + channels + c_growth * (self.n_blocks - 1)) * self.n_blocks // 2)\n else:\n # Use PlainGCN without skip connection and dilated convolution.\n stochastic = False\n self.backbone = MultiSeq(\n *[PlainDynBlock(channels, k, 1, conv, act, norm, bias, stochastic=stochastic, epsilon=epsilon)\n for i in range(self.n_blocks - 1)])\n fusion_dims = int(channels + c_growth * (self.n_blocks - 1))\n\n self.fusion_block = MLP([fusion_dims, 1024], act, norm, bias)\n self.prediction = MultiSeq(*[MLP([fusion_dims+1024, 512], act, norm, bias),\n MLP([512, 256], act, norm, bias, drop=opt.dropout),\n MLP([256, opt.n_classes], None, None, bias)])\n self.model_init()\n\n def model_init(self):\n for m in self.modules():\n if isinstance(m, Lin):\n torch.nn.init.kaiming_normal_(m.weight)\n m.weight.requires_grad = True\n if m.bias is not None:\n m.bias.data.zero_()\n m.bias.requires_grad = True\n\n def forward(self, data):\n corr, color, batch = data.pos, data.x, data.batch\n x = torch.cat((corr, color), dim=1)\n feats = [self.head(x, self.knn(x[:, 0:3], batch))]\n for i in range(self.n_blocks-1):\n feats.append(self.backbone[i](feats[-1], batch)[0])\n feats = torch.cat(feats, dim=1)\n\n fusion = scatter_('max', self.fusion_block(feats), batch)\n fusion = torch.repeat_interleave(fusion, repeats=feats.shape[0]//fusion.shape[0], dim=0)\n return self.prediction(torch.cat((fusion, feats), dim=1))\n\n\nif __name__ == \"__main__\":\n import random, numpy as np, argparse\n seed = 0\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n batch_size = 2\n N = 1024\n device = 'cuda'\n\n parser = argparse.ArgumentParser(description='PyTorch implementation of Deep GCN For semantic segmentation')\n parser.add_argument('--in_channels', default=9, type=int, help='input channels (default:9)')\n parser.add_argument('--n_classes', default=13, type=int, help='num of segmentation classes (default:13)')\n parser.add_argument('--k', default=20, type=int, help='neighbor num (default:16)')\n parser.add_argument('--block', default='res', type=str, help='graph backbone block type {plain, res, dense}')\n parser.add_argument('--conv', default='edge', type=str, help='graph conv layer {edge, mr}')\n 
parser.add_argument('--act', default='relu', type=str, help='activation layer {relu, prelu, leakyrelu}')\n parser.add_argument('--norm', default='batch', type=str, help='{batch, instance} normalization')\n parser.add_argument('--bias', default=True, type=bool, help='bias of conv layer True or False')\n parser.add_argument('--n_filters', default=64, type=int, help='number of channels of deep features')\n parser.add_argument('--n_blocks', default=7, type=int, help='number of basic blocks')\n parser.add_argument('--dropout', default=0.5, type=float, help='ratio of dropout')\n parser.add_argument('--epsilon', default=0.2, type=float, help='stochastic epsilon for gcn')\n parser.add_argument('--stochastic', default=False, type=bool, help='stochastic for gcn, True or False')\n args = parser.parse_args()\n\n pos = torch.rand((batch_size*N, 3), dtype=torch.float).to(device)\n x = torch.rand((batch_size*N, 6), dtype=torch.float).to(device)\n\n data = Data()\n data.pos = pos\n data.x = x\n data.batch = torch.arange(batch_size).unsqueeze(-1).expand(-1, N).contiguous().view(-1).contiguous()\n data = data.to(device)\n\n net = SparseDeepGCN(args).to(device)\n print(net)\n\n out = net(data)\n\n print('out logits shape', out.shape)\n import time \n st = time.time()\n runs = 1000\n\n with torch.no_grad():\n for i in range(runs):\n # print(i)\n out = net(data)\n torch.cuda.synchronize()\n print(time.time() - st)\n\n",
"import __init__\nimport torch\nfrom gcn_lib.sparse.torch_vertex import GENConv\nfrom gcn_lib.sparse.torch_nn import norm_layer\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint\nimport logging\n\n\nclass DeeperGCN(torch.nn.Module):\n def __init__(self, args):\n super(DeeperGCN, self).__init__()\n\n self.num_layers = args.num_layers\n self.dropout = args.dropout\n self.block = args.block\n\n self.checkpoint_grad = False\n\n in_channels = args.in_channels\n hidden_channels = args.hidden_channels\n num_tasks = args.num_tasks\n conv = args.conv\n aggr = args.gcn_aggr\n\n t = args.t\n self.learn_t = args.learn_t\n p = args.p\n self.learn_p = args.learn_p\n y = args.y\n self.learn_y = args.learn_y\n\n self.msg_norm = args.msg_norm\n learn_msg_scale = args.learn_msg_scale\n\n norm = args.norm\n mlp_layers = args.mlp_layers\n\n if aggr in ['softmax_sg', 'softmax', 'power'] and self.num_layers > 7:\n self.checkpoint_grad = True\n self.ckp_k = self.num_layers // 2\n\n print('The number of layers {}'.format(self.num_layers),\n 'Aggregation method {}'.format(aggr),\n 'block: {}'.format(self.block))\n\n if self.block == 'res+':\n print('LN/BN->ReLU->GraphConv->Res')\n elif self.block == 'res':\n print('GraphConv->LN/BN->ReLU->Res')\n elif self.block == 'dense':\n raise NotImplementedError('To be implemented')\n elif self.block == \"plain\":\n print('GraphConv->LN/BN->ReLU')\n else:\n raise Exception('Unknown block Type')\n\n self.gcns = torch.nn.ModuleList()\n self.norms = torch.nn.ModuleList()\n\n self.node_features_encoder = torch.nn.Linear(in_channels, hidden_channels)\n self.node_pred_linear = torch.nn.Linear(hidden_channels, num_tasks)\n\n for layer in range(self.num_layers):\n\n if conv == 'gen':\n gcn = GENConv(hidden_channels, hidden_channels,\n aggr=aggr,\n t=t, learn_t=self.learn_t,\n p=p, learn_p=self.learn_p,\n y=y, learn_y=self.learn_y,\n msg_norm=self.msg_norm, learn_msg_scale=learn_msg_scale,\n norm=norm, mlp_layers=mlp_layers)\n else:\n raise Exception('Unknown Conv Type')\n\n self.gcns.append(gcn)\n self.norms.append(norm_layer(norm, hidden_channels))\n\n def forward(self, x, edge_index):\n\n h = self.node_features_encoder(x)\n\n if self.block == 'res+':\n\n h = self.gcns[0](h, edge_index)\n\n if self.checkpoint_grad:\n\n for layer in range(1, self.num_layers):\n h1 = self.norms[layer - 1](h)\n h2 = F.relu(h1)\n h2 = F.dropout(h2, p=self.dropout, training=self.training)\n\n if layer % self.ckp_k != 0:\n res = checkpoint(self.gcns[layer], h2, edge_index)\n h = res + h\n else:\n h = self.gcns[layer](h2, edge_index) + h\n\n else:\n for layer in range(1, self.num_layers):\n h1 = self.norms[layer - 1](h)\n h2 = F.relu(h1)\n h2 = F.dropout(h2, p=self.dropout, training=self.training)\n h = self.gcns[layer](h2, edge_index) + h\n\n h = F.relu(self.norms[self.num_layers - 1](h))\n h = F.dropout(h, p=self.dropout, training=self.training)\n\n elif self.block == 'res':\n\n h = F.relu(self.norms[0](self.gcns[0](h, edge_index)))\n h = F.dropout(h, p=self.dropout, training=self.training)\n\n for layer in range(1, self.num_layers):\n h1 = self.gcns[layer](h, edge_index)\n h2 = self.norms[layer](h1)\n h = F.relu(h2) + h\n h = F.dropout(h, p=self.dropout, training=self.training)\n\n elif self.block == 'dense':\n raise NotImplementedError('To be implemented')\n\n elif self.block == 'plain':\n\n h = F.relu(self.norms[0](self.gcns[0](h, edge_index)))\n h = F.dropout(h, p=self.dropout, training=self.training)\n\n for layer in range(1, self.num_layers):\n h1 = 
self.gcns[layer](h, edge_index)\n h2 = self.norms[layer](h1)\n h = F.relu(h2)\n h = F.dropout(h, p=self.dropout, training=self.training)\n else:\n raise Exception('Unknown block Type')\n\n h = self.node_pred_linear(h)\n\n return torch.log_softmax(h, dim=-1)\n\n def print_params(self, epoch=None, final=False):\n\n if self.learn_t:\n ts = []\n for gcn in self.gcns:\n ts.append(gcn.t.item())\n if final:\n print('Final t {}'.format(ts))\n else:\n logging.info('Epoch {}, t {}'.format(epoch, ts))\n\n if self.learn_p:\n ps = []\n for gcn in self.gcns:\n ps.append(gcn.p.item())\n if final:\n print('Final p {}'.format(ps))\n else:\n logging.info('Epoch {}, p {}'.format(epoch, ps))\n\n if self.learn_y:\n ys = []\n for gcn in self.gcns:\n ys.append(gcn.sigmoid_y.item())\n if final:\n print('Final sigmoid(y) {}'.format(ys))\n else:\n logging.info('Epoch {}, sigmoid(y) {}'.format(epoch, ys))\n\n if self.msg_norm:\n ss = []\n for gcn in self.gcns:\n ss.append(gcn.msg_norm.msg_scale.item())\n if final:\n print('Final s {}'.format(ss))\n else:\n logging.info('Epoch {}, s {}'.format(epoch, ss))\n\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\ntry:\n from torch_geometric.nn import GCNConv, SAGEConv, GATConv\n from gcn_lib.sparse.torch_vertex import GENConv\n from gcn_lib.sparse.torch_nn import norm_layer\nexcept:\n print(\"An import exception occurred\")\n\n\nclass SharedDropout(nn.Module):\n def __init__(self):\n super(SharedDropout, self).__init__()\n self.mask = None\n\n def set_mask(self, mask):\n self.mask = mask\n\n def forward(self, x):\n if self.training:\n assert self.mask is not None\n out = x * self.mask\n return out\n else:\n return x\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, norm, in_channels):\n super(BasicBlock, self).__init__()\n self.norm = norm_layer(norm, in_channels)\n self.dropout = SharedDropout()\n\n def forward(self, x, edge_index, dropout_mask=None, edge_emb=None):\n # dropout_mask = kwargs.get('dropout_mask', None)\n # edge_emb = kwargs.get('edge_emb', None)\n out = self.norm(x)\n out = F.relu(out)\n\n if isinstance(self.dropout, SharedDropout):\n if dropout_mask is not None:\n self.dropout.set_mask(dropout_mask)\n out = self.dropout(out)\n\n if edge_emb is not None:\n out = self.gcn(out, edge_index, edge_emb)\n else:\n out = self.gcn(out, edge_index)\n\n return out\n\n\nclass GENBlock(BasicBlock):\n def __init__(self, in_channels, out_channels,\n aggr='max',\n t=1.0, learn_t=False,\n p=1.0, learn_p=False,\n y=0.0, learn_y=False,\n msg_norm=False,\n learn_msg_scale=False,\n encode_edge=False,\n edge_feat_dim=0,\n norm='layer', mlp_layers=1):\n super(GENBlock, self).__init__(norm, in_channels)\n\n self.gcn = GENConv(in_channels, out_channels,\n aggr=aggr,\n t=t, learn_t=learn_t,\n p=p, learn_p=learn_p,\n y=y, learn_y=learn_y,\n msg_norm=msg_norm,\n learn_msg_scale=learn_msg_scale,\n encode_edge=encode_edge,\n edge_feat_dim=edge_feat_dim,\n norm=norm,\n mlp_layers=mlp_layers)\n\n\nclass GCNBlock(BasicBlock):\n def __init__(self, in_channels, out_channels,\n norm='layer'):\n super(GCNBlock, self).__init__(norm, in_channels)\n\n self.gcn = GCNConv(in_channels, out_channels)\n\n\nclass SAGEBlock(BasicBlock):\n def __init__(self, in_channels, out_channels,\n norm='layer',\n dropout=0.0):\n super(SAGEBlock, self).__init__(norm, in_channels)\n\n self.gcn = SAGEConv(in_channels, out_channels)\n\n\nclass GATBlock(torch.nn.Module):\n def __init__(self, in_channels, out_channels,\n heads=1,\n norm='layer',\n att_dropout=0.0,\n dropout=0.0):\n super(GATBlock, self).__init__(norm, in_channels)\n\n self.gcn = GATConv(in_channels, out_channels,\n heads=heads,\n concat=False,\n dropout=att_dropout,\n add_self_loops=False)\n"
] |
[
[
"torch.LongTensor",
"torch.Tensor",
"torch.cat",
"numpy.random.shuffle",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
],
[
"torch.cuda.synchronize",
"torch.cuda.manual_seed",
"torch.cat",
"torch.manual_seed",
"torch.repeat_interleave",
"torch.no_grad",
"torch.rand",
"torch.cuda.manual_seed_all",
"torch.arange",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.functional.dropout",
"torch.nn.ModuleList",
"torch.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.utils.checkpoint.checkpoint"
],
[
"torch.nn.functional.relu"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
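Read alongside the DeeperGCN source in the row above, the following is a minimal sketch of its 'res+' residual update with gradient checkpointing (torch.utils.checkpoint.checkpoint from the API list). torch.nn.Linear stands in for GENConv so the snippet runs without torch_geometric, edge_index is omitted, and the sizes (hidden=16, 8 layers, ckp_k=4, dropout 0.5) are arbitrary assumptions, not values from the repo.

import torch
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

hidden, num_layers, ckp_k, drop = 16, 8, 4, 0.5
norms = torch.nn.ModuleList([torch.nn.LayerNorm(hidden) for _ in range(num_layers)])
gcns = torch.nn.ModuleList([torch.nn.Linear(hidden, hidden) for _ in range(num_layers)])  # stand-ins for GENConv

h = gcns[0](torch.rand(32, hidden))                      # first conv, applied before any norm/act
for layer in range(1, num_layers):
    h2 = F.dropout(F.relu(norms[layer - 1](h)), p=drop, training=True)
    if layer % ckp_k != 0:
        h = checkpoint(gcns[layer], h2) + h              # recompute this conv during backward to save memory
    else:
        h = gcns[layer](h2) + h                          # keep every ckp_k-th activation in memory
h = F.dropout(F.relu(norms[num_layers - 1](h)), p=drop, training=True)
h.sum().backward()                                       # gradients flow through the checkpointed layers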
ersilia-os/cidrz-e2e-linkage
|
[
"840581cdb90617f3ceb1be898992f0a8df71f9e3"
] |
[
"e2elink/steps/score/score.py"
] |
[
"import os\nimport json\nimport numpy as np\n\nfrom ... import logger\nfrom .train.train import ModelTrainer\nfrom .ensemble.ensemble import ModelEnsembler\nfrom ..setup.setup import Session\nfrom ..compare.compare import Comparison\n\n\nclass Score(object):\n def __init__(self, score=None, meta=None):\n self.score = score\n self.meta = meta\n self.path = os.path.join(Session().get_output_path(), \"score\")\n self.score_path = os.path.join(self.path, \"score.npy\")\n self.meta_path = os.path.join(self.path, \"meta.json\")\n\n def save(self):\n logger.debug(\"Scores saved to {0}\".format(self.score_path))\n with open(self.score_path, \"wb\") as f:\n np.save(f, self.score, allow_pickle=False)\n logger.debug(\"Metadata saved to {0}\".format(self.meta_path))\n with open(self.meta_path, \"w\") as f:\n json.dump(self.meta, f, indent=4)\n\n def load(self):\n with open(self.score_path, \"rb\") as f:\n score = np.load(f)\n logger.debug(\"Loading scores from {0}\".format(self.score_path))\n with open(self.meta_path, \"r\") as f:\n meta = json.load(f)\n logger.debug(\"Loading metadata from {0}\".format(self.meta_path))\n return Score(score, meta)\n\n\nclass _Scorer(object):\n def __init__(self, ensembler):\n self.ensembler = ensembler\n\n def _score(self, C):\n P = []\n W = []\n CV = []\n T = []\n for item in self.ensembler.items():\n tag = item[\"tag\"]\n mdl = item[\"predictor\"]\n w = item[\"weight\"]\n cv = item[\"cv_results\"]\n P += [mdl.predict(C)]\n W += [w]\n CV += [cv]\n T += [tag]\n P = np.array(P).T\n sc = np.average(P, axis=1, weights=W)\n meta = {\"tags\": T, \"cv_results\": CV, \"weights\": W}\n return sc, meta\n\n def score(self, C):\n sc, meta = self._score(C)\n return sc, meta\n\n\nclass Scorer(object):\n def __init__(self):\n self.C = Comparison().load().C\n self._fit_if_available()\n self.ensembler = ModelEnsembler()\n self.scorer = _Scorer(self.ensembler)\n\n def _fit_if_available(self):\n mdl = ModelTrainer().fit()\n if mdl is not None:\n mdl.save()\n\n def score(self):\n sc, meta = self.scorer.score(self.C)\n return Score(sc, meta)\n"
] |
[
[
"numpy.load",
"numpy.array",
"numpy.average",
"numpy.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
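For reference, a minimal sketch of the weighted ensemble scoring performed in _Scorer._score above (numpy.array and numpy.average from the API list): each predictor contributes one vector of scores, and np.average combines them column-wise using the ensemble weights. The scores and weights below are made-up values, not output of the pipeline.

import numpy as np

P = [np.array([0.9, 0.2, 0.4]),    # scores from a first (hypothetical) predictor
     np.array([0.8, 0.1, 0.6])]    # scores from a second (hypothetical) predictor
W = [0.7, 0.3]                     # ensemble weights

P = np.array(P).T                  # shape (n_pairs, n_predictors), as in _score
sc = np.average(P, axis=1, weights=W)
print(sc)                          # -> approximately [0.87 0.17 0.46]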
bpaniagua/MFSDA_Python
|
[
"d7e439fe670d5e2731c9ec722919a74f67b01e30"
] |
[
"MFSDA/MFSDA_run.py"
] |
[
"#!/usr/bin/env python-real\n# -*- coding: utf-8 -*-\n\"\"\"\nRun script: multivariate functional shape data analysis (MFSDA).\n\nAuthor: Chao Huang ([email protected])\nLast update: 2017-08-14\n\"\"\"\n\nimport sys,os\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),os.path.join('Resources','Libraries')))\nimport numpy as np\nfrom scipy import stats\nfrom statsmodels.sandbox.stats.multicomp import fdrcorrection0\nfrom stat_read_x import read_x\nfrom stat_lpks import lpks\nfrom stat_sif import sif\nfrom stat_wald_ht import wald_ht\nfrom stat_bstrp_pvalue import bstrp_pvalue\n\nimport MFSDA_stat as mfsda\nimport timeit\nimport vtk\nimport argparse\nimport os\nimport json\n\n\"\"\"installed all the libraries above\"\"\"\n\ndef main():\n parser = argparse.ArgumentParser(description='Multivariate Functional Shape Data Analysis (MFSDA)')\n parser.add_argument('--shapeData', type=str, help='Text file list with vtk filenames, 1 file per line', required=True)\n parser.add_argument('--coordData', type=str, help='filename, .vtk shape template', required=True)\n parser.add_argument('--outputDir', help='output directory', default='./output')\n\n args = parser.parse_args()\n\n start_all = timeit.default_timer()\n run_script(args)\n stop_all = timeit.default_timer()\n delta_time_all = str(stop_all - start_all)\n print(\"The total elapsed time is \" + delta_time_all)\n\ndef run_script(args):\n \"\"\"\n Run the commandline script for MFSDA.\n \"\"\"\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 1. load dataset \"\"\"\n\n print(\"loading data ......\")\n print(\"+++++++Read the surface shape data+++++++\") \n\n fh = open(args.shapeData, 'rU') \n\n y_design = []\n nshape = 0\n numpoints = -1\n\n header = fh.readline()\n toks = header.split(sep=',')\n covs_tmp = []\n\n for line in fh.readlines():\n toks = line.strip().split(sep=',')\n\n # Read VTK file\n vtkfilename = toks[0].rstrip()\n print(\"Reading {}\".format(vtkfilename))\n reader = vtk.vtkPolyDataReader()\n reader.SetFileName(vtkfilename)\n reader.Update()\n shapedata = reader.GetOutput()\n shapedatapoints = shapedata.GetPoints()\n\n y_design.append([])\n\n if numpoints == -1:\n numpoints = shapedatapoints.GetNumberOfPoints()\n\n if numpoints != shapedatapoints.GetNumberOfPoints():\n print(\"WARNING! The number of points is not the same for the shape:\", vtkfilename)\n\n for i in range(shapedatapoints.GetNumberOfPoints()):\n p = shapedatapoints.GetPoint(i)\n y_design[nshape].append(p)\n\n nshape += 1\n\n # Build covariate matrix\n covs_tmp.append(toks[1:])\n\n y_design = np.array(y_design)\n y_design.reshape(nshape, numpoints, 3)\n\n y_design = np.array(y_design)\n y_design.reshape(nshape, numpoints, 3) \n print(\"The dimension of shape matrix is \" + str(y_design.shape))\n\n print(\"+++++++Read the sphere coordinate data+++++++\")\n print(\"Reading\", args.coordData) \n reader = vtk.vtkPolyDataReader()\n reader.SetFileName(args.coordData)\n reader.Update()\n coordData = reader.GetOutput()\n shapedatapoints = coordData.GetPoints()\n\n if numpoints != shapedatapoints.GetNumberOfPoints():\n print(\"WARNING! 
The template does not have the same number of points as the shapes\")\n\n coord_mat = []\n for i in range(shapedatapoints.GetNumberOfPoints()):\n p = shapedatapoints.GetPoint(i)\n coord_mat.append(p)\n\n coord_mat = np.array(coord_mat) \n\n # Set up design matrix\n design_data = np.array(covs_tmp,dtype=float)\n\n # read the covariate type\n var_type = getCovariateType(design_data)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing\"\"\"\n\n gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step3. Save all the results\"\"\"\n\n if not os.path.exists(args.outputDir):\n os.makedirs(args.outputDir)\n\n pvalues = {}\n pvalues['Gpvals'] = gpvals.tolist()\n pvalues['clu_pvals'] = clu_pvals.tolist()\n pvalues['Lpvals_fdr'] = lpvals_fdr.tolist()\n\n with open(os.path.join(args.outputDir,'pvalues.json'), 'w') as outfile:\n json.dump(pvalues, outfile)\n\n efit = {}\n efit['efitBetas'] = efit_beta.tolist()\n efit['efitYdesign'] = efity_design.tolist()\n efit['efitEtas'] = efit_eta.tolist()\n\n with open(os.path.join(args.outputDir,'efit.json'), 'w') as outfile:\n json.dump(efit, outfile)\n\n\ndef getCovariateType(design_data):\n\n (row,column)=design_data.shape\n cov_types=[]\n for c in range(column):\n cov_col=design_data[:,c]\n cov_type = 0. #int\n for i in range(len(cov_col)):\n if int(cov_col[i])!=cov_col[i]:\n cov_type = 1. #double\n break\n cov_types.append(cov_type)\n\n cov_types = np.array(cov_types)\n return cov_types\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
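Similarly, a minimal sketch of the covariate-type check implemented by getCovariateType in MFSDA_run.py above: a column is labelled 1.0 (continuous) as soon as any entry is non-integer, and 0.0 (discrete) otherwise. The design-matrix values below are made up for illustration.

import numpy as np

design_data = np.array([[1.0, 23.5],
                        [0.0, 31.0],
                        [1.0, 27.25]])   # made-up covariates: group label, age

cov_types = np.array([
    1.0 if any(v != int(v) for v in design_data[:, c]) else 0.0
    for c in range(design_data.shape[1])
])
print(cov_types)                         # -> [0. 1.]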
jthorn22/tensorflow
|
[
"bdd76e2f04b17512d5c64a294975e7feb1231fab"
] |
[
"tensorflow/python/keras/engine/base_layer.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Contains the base Layer class, from which all layers inherit.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport inspect # Necessary supplement to tf_inspect to deal with variadic args.\nimport itertools\n\nimport numpy as np\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import tf_utils\n# A module that only depends on `keras.layers` import these from here.\nfrom tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import\nfrom tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.training.checkpointable import layer_utils as checkpointable_layer_utils\nfrom tensorflow.python.util import function_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.tools.docs import doc_controls\n\n\n@keras_export('keras.layers.Layer')\nclass Layer(checkpointable.CheckpointableBase):\n \"\"\"Base layer class.\n\n This is the class from which all layers inherit.\n\n A layer is a class implementing common neural networks operations, such\n as convolution, batch norm, etc. These operations require managing weights,\n losses, updates, and inter-layer connectivity.\n\n Users will just instantiate a layer and then treat it as a callable.\n\n We recommend that descendants of `Layer` implement the following methods:\n\n * `__init__()`: Save configuration in member variables\n * `build()`: Called once from `__call__`, when we know the shapes of inputs\n and `dtype`. 
Should have the calls to `add_weight()`, and then\n call the super's `build()` (which sets `self.built = True`, which is\n nice in case the user wants to call `build()` manually before the\n first `__call__`).\n * `call()`: Called in `__call__` after making sure `build()` has been called\n once. Should actually perform the logic of applying the layer to the\n input tensors (which should be passed in as the first argument).\n\n Arguments:\n trainable: Boolean, whether the layer's variables should be trainable.\n name: String name of the layer.\n dtype: Default dtype of the layer's weights (default of `None` means use the\n type of the first input).\n dynamic: Set this to `True` if your layer should only be run eagerly, and\n should not be used to generate a static computation graph.\n This would be the case for a Tree-RNN or a recursive network,\n for example, or generally for any layer that manipulates tensors\n using Python control flow. If `False`, we assume that the layer can\n safely be used to generate a static computation graph.\n\n Read-only properties:\n name: The name of the layer (string).\n dtype: Default dtype of the layer's weights (default of `None` means use the\n type of the first input).\n updates: List of update ops of this layer.\n losses: List of losses added by this layer.\n trainable_weights: List of variables to be included in backprop.\n non_trainable_weights: List of variables that should not be\n included in backprop.\n weights: The concatenation of the lists trainable_weights and\n non_trainable_weights (in this order).\n\n Mutable properties:\n trainable: Whether the layer should be trained (boolean).\n input_spec: Optional (list of) `InputSpec` object(s) specifying the\n constraints on inputs that can be accepted by the layer.\n \"\"\"\n\n @checkpointable.no_automatic_dependency_tracking\n def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,\n **kwargs):\n # These properties should be set by the user via keyword arguments.\n # note that 'dtype', 'input_shape' and 'batch_input_shape'\n # are only applicable to input layers: do not pass these keywords\n # to non-input layers.\n allowed_kwargs = {\n 'input_shape',\n 'batch_input_shape',\n 'batch_size',\n 'weights',\n 'activity_regularizer',\n }\n # Validate optional keyword arguments.\n for kwarg in kwargs:\n if kwarg not in allowed_kwargs:\n raise TypeError('Keyword argument not understood:', kwarg)\n\n # Mutable properties\n # Indicates whether the layer's weights are updated during training\n # and whether the layer's updates are run during training\n self.trainable = trainable\n # A stateful layer is a layer whose updates are run during inference too,\n # for instance stateful RNNs.\n self.stateful = False\n # Indicates whether `build` needs to be called upon layer call, to create\n # the layer's weights.\n self.built = False\n # Provides information about which inputs are compatible with the layer.\n self.input_spec = None\n self.supports_masking = False\n\n self._init_set_name(name)\n self._activity_regularizer = kwargs.pop('activity_regularizer', None)\n if not hasattr(self, '_trainable_weights'):\n self._trainable_weights = []\n if not hasattr(self, '_non_trainable_weights'):\n self._non_trainable_weights = []\n self._updates = []\n # A list of zero-argument lambdas which return Tensors, used for variable\n # regularizers.\n self._callable_losses = []\n # A list of symbolic Tensors containing activity regularizers and losses\n # manually added through `add_loss` in graph-building 
mode.\n self._losses = []\n # A list of loss values containing activity regularizers and losses\n # manually added through `add_loss` during eager execution. It is cleared\n # after every batch.\n # Because we plan on eventually allowing a same model instance to be trained\n # in eager mode or graph mode alternatively, we need to keep track of\n # eager losses and symbolic losses via separate attributes.\n self._eager_losses = []\n # A list of metric instances corresponding to the symbolic metric tensors\n # added using the `add_metric` API.\n self._metrics = []\n # TODO(psv): Remove this property.\n # A dictionary that maps metric names to metric result tensors. The results\n # are the running averages of metric values over an epoch.\n self._metrics_tensors = {}\n self._dtype = None if dtype is None else dtypes.as_dtype(dtype).name\n self._call_fn_args = function_utils.fn_args(self.call)\n self._compute_previous_mask = ('mask' in self._call_fn_args or\n hasattr(self, 'compute_mask'))\n self._call_convention = (base_layer_utils\n .CallConvention.EXPLICIT_INPUTS_ARGUMENT)\n if not hasattr(self, '_layers'):\n self._layers = [] # Dependencies tracked via attribute assignment.\n\n # These lists will be filled via successive calls\n # to self._add_inbound_node().\n self._inbound_nodes = []\n self._outbound_nodes = []\n\n call_argspec = tf_inspect.getfullargspec(self.call)\n if 'training' in call_argspec.args:\n self._expects_training_arg = True\n else:\n self._expects_training_arg = False\n\n # Whether the `call` method can be used to build a TF graph without issues.\n self._dynamic = dynamic\n\n # Manage input shape information if passed.\n if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:\n # In this case we will later create an input layer\n # to insert before the current layer\n if 'batch_input_shape' in kwargs:\n batch_input_shape = tuple(kwargs['batch_input_shape'])\n elif 'input_shape' in kwargs:\n if 'batch_size' in kwargs:\n batch_size = kwargs['batch_size']\n else:\n batch_size = None\n batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])\n self._batch_input_shape = batch_input_shape\n\n # Manage initial weight values if passed.\n if 'weights' in kwargs:\n self._initial_weights = kwargs['weights']\n else:\n self._initial_weights = None\n\n def build(self, input_shape):\n \"\"\"Creates the variables of the layer (optional, for subclass implementers).\n\n This is a method that implementers of subclasses of `Layer` or `Model`\n can override if they need a state-creation step in-between\n layer instantiation and layer call.\n\n This is typically used to create the weights of `Layer` subclasses.\n\n Arguments:\n input_shape: Instance of `TensorShape`, or list of instances of\n `TensorShape` if the layer expects a list of inputs\n (one instance per input).\n \"\"\"\n self.built = True\n\n @doc_controls.for_subclass_implementers\n def call(self, inputs, **kwargs): # pylint: disable=unused-argument\n \"\"\"This is where the layer's logic lives.\n\n Arguments:\n inputs: Input tensor, or list/tuple of input tensors.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A tensor or list/tuple of tensors.\n \"\"\"\n return inputs\n\n @doc_controls.for_subclass_implementers\n def add_weight(self,\n name,\n shape,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=None,\n constraint=None,\n partitioner=None,\n use_resource=None,\n synchronization=tf_variables.VariableSynchronization.AUTO,\n aggregation=tf_variables.VariableAggregation.NONE,\n **kwargs):\n 
\"\"\"Adds a new variable to the layer, or gets an existing one; returns it.\n\n Arguments:\n name: variable name.\n shape: variable shape.\n dtype: The type of the variable. Defaults to `self.dtype` or `float32`.\n initializer: initializer instance (callable).\n regularizer: regularizer instance (callable).\n trainable: whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases)\n or \"non_trainable_variables\" (e.g. BatchNorm mean, stddev).\n Note, if the current variable scope is marked as non-trainable\n then this parameter is ignored and any added variables are also\n marked as non-trainable. `trainable` defaults to `True` unless\n `synchronization` is set to `ON_READ`.\n constraint: constraint instance (callable).\n partitioner: Partitioner to be passed to the `Checkpointable` API.\n use_resource: Whether to use `ResourceVariable`.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize. If `synchronization` is set to `ON_READ`,\n `trainable` must not be set to `True`.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n **kwargs: Additional keyword arguments. Accepted values are `getter` and\n `collections`.\n\n Returns:\n The created variable. Usually either a `Variable` or `ResourceVariable`\n instance. If `partitioner` is not `None`, a `PartitionedVariable`\n instance is returned.\n\n Raises:\n RuntimeError: If called with partioned variable regularization and\n eager execution is enabled.\n ValueError: When giving unsupported dtype and no initializer or when\n trainable has been set to True with synchronization set as `ON_READ`.\n \"\"\"\n # Validate optional keyword arguments.\n for kwarg in kwargs:\n if kwarg not in ['getter', 'collections']:\n raise TypeError('Unknown keyword argument:', kwarg)\n getter = kwargs.pop('getter', None)\n collections = kwargs.pop('collections', None)\n\n if dtype is None:\n dtype = self.dtype or backend.floatx()\n dtype = dtypes.as_dtype(dtype)\n initializer = initializers.get(initializer)\n regularizer = regularizers.get(regularizer)\n constraint = constraints.get(constraint)\n\n if synchronization == tf_variables.VariableSynchronization.ON_READ:\n if trainable:\n raise ValueError(\n 'Synchronization value can be set to '\n 'VariableSynchronization.ON_READ only for non-trainable variables. 
'\n 'You have specified trainable=True and '\n 'synchronization=VariableSynchronization.ON_READ.')\n else:\n # Set trainable to be false when variable is to be synced on read.\n trainable = False\n elif trainable is None:\n trainable = True\n\n # Initialize variable when no initializer provided\n if initializer is None:\n # If dtype is DT_FLOAT, provide a uniform unit scaling initializer\n if dtype.is_floating:\n initializer = initializers.glorot_uniform()\n # If dtype is DT_INT/DT_UINT, provide a default value `zero`\n # If dtype is DT_BOOL, provide a default value `FALSE`\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:\n initializer = initializers.zeros()\n # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?\n else:\n raise ValueError('An initializer for variable %s of type %s is required'\n ' for layer %s' % (name, dtype.base_dtype, self.name))\n\n variable = self._add_variable_with_custom_getter(\n name=name,\n shape=shape,\n # TODO(allenl): a `make_variable` equivalent should be added as a\n # `Checkpointable` method.\n getter=getter or base_layer_utils.make_variable,\n # Manage errors in Layer rather than Checkpointable.\n overwrite=True,\n initializer=initializer,\n dtype=dtype,\n constraint=constraint,\n trainable=trainable and self.trainable,\n partitioner=partitioner,\n use_resource=use_resource,\n collections=collections,\n synchronization=synchronization,\n aggregation=aggregation)\n backend.track_variable(variable)\n\n if regularizer is not None:\n # TODO(fchollet): in the future, this should be handled at the\n # level of variable creation, and weight regularization losses\n # should be variable attributes.\n self._handle_weight_regularization(name, variable, regularizer)\n\n if trainable:\n self._trainable_weights.append(variable)\n else:\n self._non_trainable_weights.append(variable)\n return variable\n\n def get_config(self):\n \"\"\"Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable)\n containing the configuration of a layer.\n The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n The config of a layer does not include connectivity\n information, nor the layer class name. These are handled\n by `Network` (one layer of abstraction above).\n\n Returns:\n Python dictionary.\n \"\"\"\n config = {'name': self.name, 'trainable': self.trainable}\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n if hasattr(self, 'dtype'):\n config['dtype'] = self.dtype\n return config\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates a layer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same layer from the config\n dictionary. 
It does not handle layer connectivity\n (handled by Network), nor weights (handled by `set_weights`).\n\n Arguments:\n config: A Python dictionary, typically the\n output of get_config.\n\n Returns:\n A layer instance.\n \"\"\"\n return cls(**config)\n\n def compute_output_shape(self, input_shape):\n \"\"\"Computes the output shape of the layer.\n\n Assumes that the layer will be built\n to match that input shape provided.\n\n Arguments:\n input_shape: Shape tuple (tuple of integers)\n or list of shape tuples (one per output tensor of the layer).\n Shape tuples can include None for free dimensions,\n instead of an integer.\n\n Returns:\n An input shape tuple.\n \"\"\"\n if context.executing_eagerly():\n # In this case we build the model first in order to do shape inference.\n # This is acceptable because the framework only calls\n # `compute_output_shape` on shape values that the layer would later be\n # built for. It would however cause issues in case a user attempts to\n # use `compute_output_shape` manually (these users will have to\n # implement `compute_output_shape` themselves).\n self.build(input_shape)\n with context.graph_mode():\n graph = func_graph.FuncGraph('graph')\n with graph.as_default():\n if isinstance(input_shape, list):\n inputs = [base_layer_utils.generate_placeholders_from_shape(shape)\n for shape in input_shape]\n else:\n inputs = base_layer_utils.generate_placeholders_from_shape(\n input_shape)\n\n try:\n if self._expects_training_arg:\n outputs = self(inputs, training=False)\n else:\n outputs = self(inputs)\n except TypeError:\n raise NotImplementedError('We could not automatically infer '\n 'the static shape of the layer\\'s output.'\n ' Please implement the '\n '`compute_output_shape` method on your '\n 'layer (%s).' % self.__class__.__name__)\n if isinstance(outputs, list):\n return [output.shape for output in outputs]\n else:\n return outputs.shape\n raise NotImplementedError\n\n def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n \"\"\"Computes an output mask tensor.\n\n Arguments:\n inputs: Tensor or list of tensors.\n mask: Tensor or list of tensors.\n\n Returns:\n None or a tensor (or list of tensors,\n one per output tensor of the layer).\n \"\"\"\n if not self.supports_masking:\n if mask is not None:\n if isinstance(mask, list):\n if any(m is not None for m in mask):\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n else:\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n # masking not explicitly supported: return None as mask\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask\n\n def __call__(self, inputs, *args, **kwargs):\n \"\"\"Wraps `call`, applying pre- and post-processing steps.\n\n Arguments:\n inputs: input tensor(s).\n *args: additional positional arguments to be passed to `self.call`.\n **kwargs: additional keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n\n Note:\n - The following optional keyword arguments are reserved for specific uses:\n * `training`: Boolean scalar tensor of Python boolean indicating\n whether the `call` is meant for training or inference.\n * `mask`: Boolean input mask.\n - If the layer's `call` method takes a `mask` argument (as some Keras\n layers do), its default value will be set to the mask generated\n for `inputs` by the previous layer (if `input` did come 
from\n a layer that generated a corresponding mask, i.e. if it came from\n a Keras layer with masking support.\n\n Raises:\n ValueError: if the layer's `call` method returns None (an invalid value).\n \"\"\"\n input_list = nest.flatten(inputs)\n\n if context.executing_eagerly():\n # Accept NumPy inputs by converting to Tensors when executing eagerly.\n if all(isinstance(x, (np.ndarray, float, int)) for x in input_list):\n inputs = nest.map_structure(ops.convert_to_tensor, inputs)\n input_list = nest.flatten(inputs)\n\n # We will attempt to build a TF graph if & only if all inputs are symbolic.\n # This is always the case in graph mode. It can also be the case in eager\n # mode when all inputs can be traced back to `keras.Input()` (when building\n # models using the functional API).\n build_graph = tf_utils.are_all_symbolic_tensors(input_list)\n\n # Handle Keras mask propagation from previous layer to current layer.\n previous_mask = None\n if build_graph and (not hasattr(self, '_compute_previous_mask') or\n self._compute_previous_mask):\n previous_mask = base_layer_utils.collect_previous_mask(inputs)\n if not hasattr(self, '_call_fn_args'):\n self._call_fn_args = function_utils.fn_args(self.call)\n if ('mask' in self._call_fn_args and 'mask' not in kwargs and\n not generic_utils.is_all_none(previous_mask)):\n # The previous layer generated a mask, and mask was not explicitly pass\n # to __call__, hence we set previous_mask as the default value.\n kwargs['mask'] = previous_mask\n\n with ops.name_scope(self._name_scope()):\n if not self.built:\n # Build layer if applicable (if the `build` method has been overridden).\n self._maybe_build(inputs)\n # We must set self.built since user defined build functions are not\n # constrained to set self.built.\n self.built = True\n\n # Check input assumptions set after layer building, e.g. input shape.\n if build_graph:\n # Symbolic execution on symbolic tensors. We will attempt to build\n # the corresponding TF subgraph inside `backend.get_graph()`\n input_spec.assert_input_compatibility(\n self.input_spec, inputs, self.name)\n graph = backend.get_graph()\n with graph.as_default():\n if not self.dynamic:\n try:\n outputs = self.call(inputs, *args, **kwargs)\n except TypeError as e:\n messages = ['`tf.Tensor` as a Python `bool` is not allowed',\n 'Tensor objects are only iterable when eager']\n for msg in messages:\n if msg in str(e):\n raise TypeError('You are attempting to use Python control '\n 'flow in a layer that was not declared to be '\n 'dynamic. 
Pass `dynamic=True` to the class '\n 'constructor.\\nEncountered error:\\n\"\"\"\\n' +\n str(e) + '\\n\"\"\"')\n raise e\n else:\n # We will use static shape inference to return symbolic tensors\n # matching the specifications of the layer outputs.\n # Since `self.dynamic` is True, we will never attempt to\n # run the underlying TF graph (which is disconnected).\n # TODO(fchollet): consider py_func as an alternative, which\n # would enable us to run the underlying graph if needed.\n outputs = self._symbolic_call(inputs)\n\n if outputs is None:\n raise ValueError('A layer\\'s `call` method should return a '\n 'Tensor or a list of Tensors, not None '\n '(layer: ' + self.name + ').')\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, previous_mask)\n if base_layer_utils.have_all_keras_metadata(inputs):\n inputs, outputs = self._set_connectivity_metadata_(\n inputs, outputs, args, kwargs)\n if hasattr(self, '_set_inputs') and not self.inputs:\n # Subclassed network: explicitly set metadata normally set by\n # a call to self._set_inputs().\n # TODO(b/120997007): This should be done in Eager as well, but\n # causes garbage collection issues because of the placeholders\n # created on the default Keras graph.\n self._set_inputs(inputs, outputs)\n else:\n # Eager execution on data tensors.\n outputs = self.call(inputs, *args, **kwargs)\n self._handle_activity_regularization(inputs, outputs)\n return outputs\n\n if not context.executing_eagerly():\n # Optionally load weight values specified at layer instantiation.\n # TODO(fchollet): consider enabling this with eager execution too.\n if (hasattr(self, '_initial_weights') and\n self._initial_weights is not None):\n self.set_weights(self._initial_weights)\n del self._initial_weights\n return outputs\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def name(self):\n return self._name\n\n @property\n def dynamic(self):\n return self._dynamic\n\n @property\n def activity_regularizer(self):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n return self._activity_regularizer\n\n @activity_regularizer.setter\n def activity_regularizer(self, regularizer):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n self._activity_regularizer = regularizer\n\n @property\n def trainable_weights(self):\n if self.trainable:\n nested = self._gather_children_attribute('trainable_weights')\n return self._trainable_weights + nested\n else:\n return []\n\n @property\n def non_trainable_weights(self):\n if self.trainable:\n nested = self._gather_children_attribute('non_trainable_weights')\n return self._non_trainable_weights + nested\n else:\n nested = self._gather_children_attribute('weights')\n return self._trainable_weights + self._non_trainable_weights + nested\n\n @property\n def weights(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.trainable_weights + self.non_trainable_weights\n\n @property\n def updates(self):\n if not self.trainable and not self.stateful:\n return []\n return self._updates + self._gather_children_attribute('updates')\n\n @property\n def losses(self):\n \"\"\"Losses which are associated with this `Layer`.\n\n Variable regularization tensors are created when this property is accessed,\n so it is eager safe: accessing `losses` under a `tf.GradientTape` will\n propagate gradients back to the corresponding variables.\n\n Returns:\n A list of tensors.\n \"\"\"\n 
collected_losses = []\n if context.executing_eagerly():\n collected_losses.extend(self._eager_losses)\n else:\n collected_losses.extend(self._losses)\n for regularizer in self._callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n collected_losses.append(loss_tensor)\n return collected_losses + self._gather_children_attribute('losses')\n\n @doc_controls.for_subclass_implementers\n def add_loss(self, losses, inputs=None):\n \"\"\"Add loss tensor(s), potentially dependent on layer inputs.\n\n Some losses (for instance, activity regularization losses) may be dependent\n on the inputs passed when calling a layer. Hence, when reusing the same\n layer on different inputs `a` and `b`, some entries in `layer.losses` may\n be dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n The `get_losses_for` method allows to retrieve the losses relevant to a\n specific set of inputs.\n\n Note that `add_loss` is not supported when executing eagerly. Instead,\n variable regularizers may be added through `add_variable`. Activity\n regularization is not supported directly (but such losses may be returned\n from `Layer.call()`).\n\n Arguments:\n losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses\n may also be zero-argument callables which create a loss tensor.\n inputs: Ignored when executing eagerly. If anything other than None is\n passed, it signals the losses are conditional on some of the layer's\n inputs, and thus they should only be run where these inputs are\n available. This is the case for activity regularization losses, for\n instance. If `None` is passed, the losses are assumed\n to be unconditional, and will apply across all dataflows of the layer\n (e.g. weight regularization losses).\n \"\"\"\n losses = generic_utils.to_list(losses)\n\n def _tag_unconditional(loss):\n if callable(loss):\n loss = loss()\n if loss is None:\n return None # Will be filtered out when computing the .losses property\n if not tensor_util.is_tensor(loss):\n loss = ops.convert_to_tensor(loss, dtype=backend.floatx())\n loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access\n return loss\n\n for loss in losses:\n if callable(loss):\n self._callable_losses.append(\n functools.partial(_tag_unconditional, loss))\n else:\n if context.executing_eagerly():\n self._eager_losses.append(_tag_unconditional(loss))\n else:\n self._losses.append(_tag_unconditional(loss))\n\n @doc_controls.for_subclass_implementers\n def add_metric(self, value, aggregation=None, name=None):\n \"\"\"Adds metric tensor to the layer.\n\n Args:\n value: Metric tensor.\n aggregation: Sample-wise metric reduction function. If `aggregation=None`,\n it indicates that the metric tensor provided has been aggregated\n already. eg, `model.add_metric(BinaryAccuracy(name='acc')(y_true,\n y_pred))`. If aggregation='mean', the given metric tensor will be\n sample-wise reduced using `mean` function. eg, `model.add_metric(\n tf.reduce_mean(outputs), name='output_mean', aggregation='mean')`.\n name: String metric name.\n\n Raises:\n ValueError: If `aggregation` is anything other than None or `mean`.\n \"\"\"\n if aggregation is not None and aggregation != 'mean':\n raise ValueError(\n 'We currently support only `mean` sample-wise metric aggregation. 
'\n 'You provided aggregation=`%s`' % aggregation)\n\n if tf_utils.is_symbolic_tensor(value):\n self._symbolic_add_metric(value, aggregation, name)\n else:\n self._eager_add_metric(value, aggregation, name)\n\n @doc_controls.for_subclass_implementers\n def add_update(self, updates, inputs=None):\n \"\"\"Add update op(s), potentially dependent on layer inputs.\n\n Weight updates (for instance, the updates of the moving mean and variance\n in a BatchNormalization layer) may be dependent on the inputs passed\n when calling a layer. Hence, when reusing the same layer on\n different inputs `a` and `b`, some entries in `layer.updates` may be\n dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n The `get_updates_for` method allows to retrieve the updates relevant to a\n specific set of inputs.\n\n This call is ignored when eager execution is enabled (in that case, variable\n updates are run on the fly and thus do not need to be tracked for later\n execution).\n\n Arguments:\n updates: Update op, or list/tuple of update ops.\n inputs: If anything other than None is passed, it signals the updates\n are conditional on some of the layer's inputs,\n and thus they should only be run where these inputs are available.\n This is the case for BatchNormalization updates, for instance.\n If None, the updates will be taken into account unconditionally,\n and you are responsible for making sure that any dependency they might\n have is available at runtime.\n A step counter might fall into this category.\n \"\"\"\n if context.executing_eagerly():\n return # Updates already applied when in eager mode.\n\n def process_update(x):\n if isinstance(x, ops.Operation):\n return x\n elif hasattr(x, 'op'):\n return x.op\n else:\n return ops.convert_to_tensor(x)\n\n updates = generic_utils.to_list(updates)\n updates = [process_update(x) for x in updates]\n self._updates += updates\n if inputs is None:\n for u in updates:\n u._unconditional_update = True # pylint: disable=protected-access\n else:\n for u in updates:\n u._unconditional_update = False # pylint: disable=protected-access\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the layer, from Numpy arrays.\n\n Arguments:\n weights: a list of Numpy arrays. The number\n of arrays and their shape must match\n number of the dimensions of the weights\n of the layer (i.e. it should match the\n output of `get_weights`).\n\n Raises:\n ValueError: If the provided weights list does not match the\n layer's specifications.\n \"\"\"\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('You called `set_weights(weights)` on layer \"' +\n self.name + '\" with a weight list of length ' +\n str(len(weights)) + ', but the layer was expecting ' +\n str(len(params)) + ' weights. 
Provided weights: ' +\n str(weights)[:50] + '...')\n if not params:\n return\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Layer weight shape ' + str(pv.shape) +\n ' not compatible with '\n 'provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n\n def get_weights(self):\n \"\"\"Returns the current weights of the layer.\n\n Returns:\n Weights values as a list of numpy arrays.\n \"\"\"\n params = self.weights\n return backend.batch_get_value(params)\n\n def get_updates_for(self, inputs):\n \"\"\"Retrieves updates relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of update ops of the layer that depend on `inputs`.\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n # Updates disabled if layer is not trainable and not explicitly stateful.\n if not self.trainable and not self.stateful:\n return []\n\n if inputs is None:\n # Requesting unconditional updates.\n return [x for x in self.updates if x._unconditional_update] # pylint: disable=protected-access\n\n # Requesting input-conditional updates.\n inputs = nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, self.updates)\n updates = []\n for update in self.updates:\n if update in reachable:\n updates.append(update)\n return updates\n\n def get_losses_for(self, inputs):\n \"\"\"Retrieves losses relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of loss tensors of the layer that depend on `inputs`.\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n if inputs is None:\n # Requesting unconditional losses.\n return [x for x in self.losses if x._unconditional_loss] # pylint: disable=protected-access\n\n # Requesting input-conditional losses.\n inputs = nest.flatten(inputs)\n # Retrieve the set of tensors in the TF graph that depend on `inputs`.\n # The losses we want to return will be part of this set.\n # To avoid unnecessary work, we stop the search in case all of\n # `self.losses` have been retrieved.\n reachable = tf_utils.get_reachable_from_inputs(inputs, self.losses)\n losses = []\n for loss in self.losses:\n if loss in reachable:\n losses.append(loss)\n return losses\n\n def get_input_mask_at(self, node_index):\n \"\"\"Retrieves the input mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple inputs).\n \"\"\"\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n def get_output_mask_at(self, node_index):\n \"\"\"Retrieves the output mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. 
`node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple outputs).\n \"\"\"\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n @property\n def input_mask(self):\n \"\"\"Retrieves the input mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Input mask tensor (potentially None) or list of input\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n @property\n def output_mask(self):\n \"\"\"Retrieves the output mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output mask tensor (potentially None) or list of output\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n def get_input_shape_at(self, node_index):\n \"\"\"Retrieves the input shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_shapes',\n 'input shape')\n\n def get_output_shape_at(self, node_index):\n \"\"\"Retrieves the output shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_shapes',\n 'output shape')\n\n def get_input_at(self, node_index):\n \"\"\"Retrieves the input tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_tensors',\n 'input')\n\n def get_output_at(self, node_index):\n \"\"\"Retrieves the output tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. 
`node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_tensors',\n 'output')\n\n @property\n def input(self):\n \"\"\"Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Input tensor or list of input tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n\n Raises:\n RuntimeError: If called in Eager mode.\n AttributeError: If no inbound nodes are found.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name +\n ' is not connected, no input to return.')\n return self._get_node_attribute_at_index(0, 'input_tensors', 'input')\n\n @property\n def output(self):\n \"\"\"Retrieves the output tensor(s) of a layer.\n\n Only applicable if the layer has exactly one output,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output tensor or list of output tensors.\n\n Raises:\n AttributeError: if the layer is connected to more than one incoming\n layers.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')\n return self._get_node_attribute_at_index(0, 'output_tensors', 'output')\n\n @property\n def input_shape(self):\n \"\"\"Retrieves the input shape(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer, or if all inputs\n have the same shape.\n\n Returns:\n Input shape, as an integer shape tuple\n (or list of shape tuples, one tuple per input tensor).\n\n Raises:\n AttributeError: if the layer has no defined input_shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined input shape.')\n all_input_shapes = set(\n [str(node.input_shapes) for node in self._inbound_nodes])\n if len(all_input_shapes) == 1:\n return self._inbound_nodes[0].input_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) +\n ' has multiple inbound nodes, '\n 'with different input shapes. Hence '\n 'the notion of \"input shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_input_shape_at(node_index)` '\n 'instead.')\n\n def count_params(self):\n \"\"\"Count the total number of scalars composing the weights.\n\n Returns:\n An integer count.\n\n Raises:\n ValueError: if the layer isn't yet built\n (in which case its weights aren't yet defined).\n \"\"\"\n if not self.built:\n if self.__class__.__name__ == 'Sequential':\n self.build() # pylint: disable=no-value-for-parameter\n else:\n raise ValueError('You tried to call `count_params` on ' + self.name +\n ', but the layer isn\\'t built. 
'\n 'You can build it manually via: `' + self.name +\n '.build(batch_input_shape)`.')\n return int(sum(np.prod(w.shape.as_list()) for w in self.weights))\n\n @property\n def output_shape(self):\n \"\"\"Retrieves the output shape(s) of a layer.\n\n Only applicable if the layer has one output,\n or if all outputs have the same shape.\n\n Returns:\n Output shape, as an integer shape tuple\n (or list of shape tuples, one tuple per output tensor).\n\n Raises:\n AttributeError: if the layer has no defined output shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n return self._inbound_nodes[0].output_shapes\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)\n\n @property\n @doc_controls.do_not_doc_inheritable\n def inbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._inbound_nodes\n\n @property\n @doc_controls.do_not_doc_inheritable\n def outbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._outbound_nodes\n\n ##############################################################################\n # Methods & attributes below are public aliases of other methods. #\n ##############################################################################\n\n def apply(self, inputs, *args, **kwargs):\n \"\"\"Apply the layer on a input.\n\n This is an alias of `self.__call__`.\n\n Arguments:\n inputs: Input tensor(s).\n *args: additional positional arguments to be passed to `self.call`.\n **kwargs: additional keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n \"\"\"\n return self.__call__(inputs, *args, **kwargs)\n\n @doc_controls.for_subclass_implementers\n def add_variable(self, *args, **kwargs):\n \"\"\"Alias for `add_weight`.\"\"\"\n return self.add_weight(*args, **kwargs)\n\n @property\n def variables(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Alias of `self.weights`.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.weights\n\n @property\n def trainable_variables(self):\n return self.trainable_weights\n\n @property\n def non_trainable_variables(self):\n return self.non_trainable_weights\n\n ##############################################################################\n # Methods & attributes below are all private and only used by the framework. #\n ##############################################################################\n\n def _name_scope(self):\n return self.name\n\n def _init_set_name(self, name, zero_based=True):\n if not name:\n self._name = base_layer_utils.unique_layer_name(\n generic_utils.to_snake_case(self.__class__.__name__),\n zero_based=zero_based)\n else:\n self._name = name\n\n def _get_existing_metric(self, name=None):\n match = [m for m in self._metrics if m.name == name]\n if not match:\n return\n if len(match) > 1:\n raise ValueError(\n 'Please provide different names for the metrics you have added. 
'\n 'We found {} metrics with the name: \"{}\"'.format(len(match), name))\n return match[0]\n\n def _eager_add_metric(self, value, aggregation=None, name=None):\n # If the given metric is available in `metrics` list we just update state\n # on it, otherwise we create a new metric instance and\n # add it to the `metrics` list.\n match = self._get_existing_metric(name)\n if match:\n match(value) # Update the metric state.\n return\n else:\n if aggregation is None:\n raise ValueError('We do not support adding an aggregated metric tensor '\n 'in `call` in eager execution.')\n metric_obj, _ = base_layer_utils.create_mean_metric(value, name)\n self._metrics.append(metric_obj)\n\n def _symbolic_add_metric(self, value, aggregation=None, name=None):\n if aggregation is None:\n # Iterate over the metrics and check if the given metric exists already.\n # This can happen when a metric instance is created in subclassed model\n # layer `__init__` and we have tracked that instance already in\n # model.__setattr__.\n match = self._get_existing_metric(name)\n if match:\n result_tensor = value\n if match.name not in self._metrics_tensors:\n self._metrics_tensors[match.name] = result_tensor\n return\n else:\n raise ValueError(\n 'We currently do not support reusing a metric instance.')\n else:\n # We track the instance using the metadata on the result tensor.\n result_tensor = value\n metric_obj = result_tensor._metric_obj\n else:\n # If a non-aggregated tensor is given as input (ie. `aggregation` is\n # explicitly set to `mean`), we wrap the tensor in `Mean` metric.\n metric_obj, result_tensor = base_layer_utils.create_mean_metric(\n value, name)\n self._metrics.append(metric_obj)\n self._metrics_tensors[metric_obj.name] = result_tensor\n\n def _handle_weight_regularization(self, name, variable, regularizer):\n \"\"\"Create lambdas which compute regularization losses.\"\"\"\n\n def _loss_for_variable(v):\n \"\"\"Creates a regularization loss `Tensor` for variable `v`.\"\"\"\n with ops.name_scope(name + '/Regularizer'):\n regularization = regularizer(v)\n return regularization\n\n if isinstance(variable, tf_variables.PartitionedVariable):\n for v in variable:\n self.add_loss(functools.partial(_loss_for_variable, v))\n else:\n self.add_loss(functools.partial(_loss_for_variable, variable))\n\n def _handle_activity_regularization(self, inputs, outputs):\n # Apply activity regularization.\n # Note that it should be applied every time the layer creates a new\n # output, since it is output-specific.\n if self._activity_regularizer:\n output_list = nest.flatten(outputs)\n with ops.name_scope('ActivityRegularizer'):\n for output in output_list:\n activity_loss = self._activity_regularizer(output)\n batch_size = math_ops.cast(\n array_ops.shape(output)[0], activity_loss.dtype)\n # Make activity regularization strength batch-agnostic.\n mean_activity_loss = activity_loss / batch_size\n self.add_loss(mean_activity_loss, inputs=inputs)\n\n def _set_mask_metadata(self, inputs, outputs, previous_mask):\n # In some cases the mask of the outputs has already been computed by\n # inner layers and does not need to be recomputed by this layer.\n mask_already_computed = all(\n hasattr(x, '_keras_mask') for x in generic_utils.to_list(outputs))\n if hasattr(self, 'compute_mask') and not mask_already_computed:\n output_mask = self.compute_mask(inputs, previous_mask)\n else:\n output_mask = None\n if isinstance(outputs, (list, tuple)):\n if output_mask is None:\n output_mask = [None for _ in range(len(outputs))]\n for x, m in 
zip(outputs, output_mask):\n try:\n x._keras_mask = m # pylint: disable=protected-access\n except AttributeError:\n pass # C type such as dict. Masking not supported in this case.\n else:\n try:\n outputs._keras_mask = output_mask # pylint: disable=protected-access\n except AttributeError:\n pass # C type such as dict. Masking not supported in this case.\n\n def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs):\n call_convention = getattr(\n self, '_call_convention',\n base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)\n if args:\n if call_convention == (base_layer_utils\n .CallConvention.EXPLICIT_INPUTS_ARGUMENT):\n raise TypeError(\n 'This layer (\"{}\") takes an `inputs` argument in `call()`, '\n 'and only the `inputs` argument may be specified as a positional '\n 'argument. Pass everything else as a keyword argument '\n '(those arguments will not be tracked '\n 'as inputs to the layer).'.format(self.name))\n elif call_convention == (base_layer_utils\n .CallConvention.SINGLE_POSITIONAL_ARGUMENT):\n raise TypeError(\n 'This layer (\"{}\") takes a single positional argument in `call()`,'\n ' which is by convention the `inputs` argument, '\n 'and only this argument may be specified as a positional argument. '\n 'Pass everything else as a keyword argument '\n '(those arguments will not be tracked '\n 'as inputs to the layer).'.format(self.name))\n\n # If the layer returns tensors from its inputs, unmodified,\n # we copy them to avoid loss of tensor metadata.\n output_ls = nest.flatten(outputs)\n inputs_ls = nest.flatten(inputs)\n output_ls_copy = []\n for x in output_ls:\n if x in inputs_ls:\n with ops.name_scope(self.name):\n x = array_ops.identity(x)\n output_ls_copy.append(x)\n outputs = nest.pack_sequence_as(outputs, output_ls_copy)\n\n inputs, kwargs = self._inputs_from_call_args(\n call_args=(inputs,) + args, call_kwargs=kwargs)\n # Add an inbound node to the layer, so it can keep track of this call.\n # This updates the layer history of the output tensor(s).\n kwargs.pop('mask', None) # `mask` should not be serialized.\n self._add_inbound_node(\n input_tensors=inputs, output_tensors=outputs, arguments=kwargs)\n return inputs, outputs\n\n def _inputs_from_call_args(self, call_args, call_kwargs):\n \"\"\"Get Layer inputs from __call__ *args and **kwargs.\n\n Args:\n call_args: The positional arguments passed to __call__.\n call_kwargs: The keyword argument dict passed to __call__.\n\n Returns:\n A tuple of (inputs, non_input_kwargs). These may be the same objects as\n were passed in (call_args and call_kwargs).\n \"\"\"\n call_convention = getattr(\n self, '_call_convention',\n base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)\n if (call_convention in (\n base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT,\n base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT)):\n assert len(call_args) == 1 # TypeError raised earlier in __call__.\n return call_args[0], call_kwargs\n else:\n call_arg_spec = tf_inspect.getfullargspec(self.call)\n # There is no explicit \"inputs\" argument expected or provided to\n # call(). Arguments which have default values are considered non-inputs,\n # and arguments without are considered inputs.\n if call_arg_spec.defaults:\n if call_arg_spec.varargs is not None:\n raise TypeError(\n 'Layers may not accept both positional arguments and '\n 'arguments with default values (unable to determine which '\n 'are inputs to the layer). 
'\n 'Issue occurred with layer \"%s\"' % (self.name))\n keyword_arg_names = set(\n call_arg_spec.args[-len(call_arg_spec.defaults):])\n else:\n keyword_arg_names = set()\n # Training is never an input argument name, to allow signatures like\n # call(x, training).\n keyword_arg_names.add('training')\n _, unwrapped_call = tf_decorator.unwrap(self.call)\n bound_args = inspect.getcallargs(\n unwrapped_call, *call_args, **call_kwargs)\n if call_arg_spec.varkw is not None:\n var_kwargs = bound_args.pop(call_arg_spec.varkw)\n bound_args.update(var_kwargs)\n keyword_arg_names = keyword_arg_names.union(var_kwargs.keys())\n all_args = call_arg_spec.args\n if all_args and bound_args[all_args[0]] is self:\n # Ignore the 'self' argument of methods\n bound_args.pop(call_arg_spec.args[0])\n all_args = all_args[1:]\n non_input_arg_values = {}\n input_arg_values = []\n remaining_args_are_keyword = False\n for argument_name in all_args:\n if argument_name in keyword_arg_names:\n remaining_args_are_keyword = True\n else:\n if remaining_args_are_keyword:\n raise TypeError(\n 'Found a positional argument in a layer call after a non-input '\n 'argument. All arguments after \"training\" must be keyword '\n 'arguments, and are not tracked as inputs to the layer. '\n 'Issue occurred with layer \"%s\"' % (self.name))\n if remaining_args_are_keyword:\n non_input_arg_values[argument_name] = bound_args[argument_name]\n else:\n input_arg_values.append(bound_args[argument_name])\n if call_arg_spec.varargs is not None:\n input_arg_values.extend(bound_args[call_arg_spec.varargs])\n return input_arg_values, non_input_arg_values\n\n def _add_inbound_node(self,\n input_tensors,\n output_tensors,\n arguments=None):\n \"\"\"Internal method to create an inbound node for the layer.\n\n Arguments:\n input_tensors: list of input tensors.\n output_tensors: list of output tensors.\n arguments: dictionary of keyword arguments that were passed to the\n `call` method of the layer at the call that created the node.\n \"\"\"\n inbound_layers = nest.map_structure(lambda t: t._keras_history[0],\n input_tensors)\n node_indices = nest.map_structure(lambda t: t._keras_history[1],\n input_tensors)\n tensor_indices = nest.map_structure(lambda t: t._keras_history[2],\n input_tensors)\n\n # Create node, add it to inbound nodes.\n Node(\n self,\n inbound_layers=inbound_layers,\n node_indices=node_indices,\n tensor_indices=tensor_indices,\n input_tensors=input_tensors,\n output_tensors=output_tensors,\n arguments=arguments)\n\n # Update tensor history metadata.\n # The metadata attribute consists of\n # 1) a layer instance\n # 2) a node index for the layer\n # 3) a tensor index for the node.\n # The allows layer reuse (multiple nodes per layer) and multi-output\n # or multi-input layers (e.g. a layer can return multiple tensors,\n # and each can be sent to a different layer).\n for i, tensor in enumerate(nest.flatten(output_tensors)):\n tensor._keras_history = (self, len(self._inbound_nodes) - 1, i) # pylint: disable=protected-access\n\n def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n \"\"\"Private utility to retrieves an attribute (e.g. 
inputs) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n Arguments:\n node_index: Integer index of the node from which\n to retrieve the attribute.\n attr: Exact node attribute name.\n attr_name: Human-readable attribute name, for error messages.\n\n Returns:\n The layer's attribute `attr` at the node of index `node_index`.\n\n Raises:\n RuntimeError: If the layer has no inbound nodes, or if called in Eager\n mode.\n ValueError: If the index provided does not match any node.\n \"\"\"\n if not self._inbound_nodes:\n raise RuntimeError('The layer has never been called '\n 'and thus has no defined ' + attr_name + '.')\n if not len(self._inbound_nodes) > node_index:\n raise ValueError('Asked to get ' + attr_name + ' at node ' +\n str(node_index) + ', but the layer has only ' +\n str(len(self._inbound_nodes)) + ' inbound nodes.')\n values = getattr(self._inbound_nodes[node_index], attr)\n if isinstance(values, list) and len(values) == 1:\n return values[0]\n else:\n return values\n\n def _maybe_build(self, inputs):\n # Check input assumptions set before layer building, e.g. input rank.\n input_spec.assert_input_compatibility(\n self.input_spec, inputs, self.name)\n input_list = nest.flatten(inputs)\n if input_list and self._dtype is None:\n try:\n self._dtype = input_list[0].dtype.base_dtype.name\n except AttributeError:\n pass\n input_shapes = None\n if all(hasattr(x, 'shape') for x in input_list):\n input_shapes = nest.map_structure(lambda x: x.shape, inputs)\n # Only call `build` if the user has manually overridden the build method.\n if not hasattr(self.build, '_is_default'):\n self.build(input_shapes)\n\n def _symbolic_call(self, inputs):\n input_shapes = nest.map_structure(lambda x: x.shape, inputs)\n output_shapes = self.compute_output_shape(input_shapes)\n return nest.map_structure(\n lambda shape: backend.placeholder(shape, dtype=self.dtype),\n output_shapes)\n\n def __setattr__(self, name, value):\n if (not getattr(self, '_setattr_tracking', True) or\n getattr(self, '_is_graph_network', False)):\n super(Layer, self).__setattr__(name, value)\n return\n\n # Append value to self._layers if relevant\n if (isinstance(value, Layer) or\n checkpointable_layer_utils.has_weights(value)):\n # Initialize `_layers` here in case `__init__` has not yet been called.\n if not hasattr(self, '_layers'):\n self._layers = []\n # We need to check object identity to avoid de-duplicating empty\n # container types which compare equal.\n if not any((layer is value for layer in self._layers)):\n self._layers.append(value)\n if hasattr(value, '_use_resource_variables'):\n # Legacy layers (V1 tf.layers) must always use\n # resource variables.\n value._use_resource_variables = True\n\n # Append value to list of trainable / non-trainable weights if relevant\n if isinstance(value, tf_variables.Variable):\n # Users may add extra weights/variables\n # simply by assigning them to attributes (invalid for graph networks)\n if not hasattr(self, '_trainable_weights'):\n self._trainable_weights = []\n if not hasattr(self, '_non_trainable_weights'):\n self._non_trainable_weights = []\n if value not in self._trainable_weights + self._non_trainable_weights:\n if value.trainable:\n self._trainable_weights.append(value)\n else:\n self._non_trainable_weights.append(value)\n super(Layer, self).__setattr__(name, value)\n\n def _gather_children_attribute(self, attribute):\n assert attribute in {'weights', 'trainable_weights',\n 
'non_trainable_weights', 'updates', 'losses'}\n if hasattr(self, '_layers'):\n return list(itertools.chain.from_iterable(\n getattr(layer, attribute) for layer in self._layers))\n return []\n\n # This is a hack so that the is_layer (within\n # training/checkpointable/layer_utils.py) check doesn't get the weights attr.\n # TODO(b/110718070): Remove when fixed.\n def _is_layer(self):\n return True\n\n\nclass Node(object):\n \"\"\"A `Node` describes the connectivity between two layers.\n\n Each time a layer is connected to some new input,\n a node is added to `layer._inbound_nodes`.\n Each time the output of a layer is used by another layer,\n a node is added to `layer._outbound_nodes`.\n\n Arguments:\n outbound_layer: the layer that takes\n `input_tensors` and turns them into `output_tensors`\n (the node gets created when the `call`\n method of the layer was called).\n inbound_layers: a list of layers, the same length as `input_tensors`,\n the layers from where `input_tensors` originate.\n node_indices: a list of integers, the same length as `inbound_layers`.\n `node_indices[i]` is the origin node of `input_tensors[i]`\n (necessary since each inbound layer might have several nodes,\n e.g. if the layer is being shared with a different data stream).\n tensor_indices: a list of integers,\n the same length as `inbound_layers`.\n `tensor_indices[i]` is the index of `input_tensors[i]` within the\n output of the inbound layer\n (necessary since each inbound layer might\n have multiple tensor outputs, with each one being\n independently manipulable).\n input_tensors: list of input tensors.\n output_tensors: list of output tensors.\n arguments: dictionary of keyword arguments that were passed to the\n `call` method of the layer at the call that created the node.\n\n `node_indices` and `tensor_indices` are basically fine-grained coordinates\n describing the origin of the `input_tensors`.\n\n A node from layer A to layer B is added to:\n - A._outbound_nodes\n - B._inbound_nodes\n \"\"\"\n\n def __init__(self,\n outbound_layer,\n inbound_layers,\n node_indices,\n tensor_indices,\n input_tensors,\n output_tensors,\n arguments=None):\n # Layer instance (NOT a sequence)\n if isinstance(outbound_layer, (list, tuple, dict)):\n raise ValueError('`outbound_layer` should be a layer instance, '\n 'not a list, tuple, or, dict.')\n\n # this is the layer that takes a nested structure of input tensors\n # and turns them into a nested structure of output tensors.\n # the current node will be added to\n # the inbound_nodes of outbound_layer.\n self.outbound_layer = outbound_layer\n\n # The following 3 properties describe where\n # the input tensors come from: which layers,\n # and for each layer, which node and which\n # tensor output of each node.\n\n # Nested structure of layer instances.\n self.inbound_layers = inbound_layers\n # Nested structure of integers, 1:1 mapping with inbound_layers.\n self.node_indices = node_indices\n # Nested of integers, 1:1 mapping with inbound_layers.\n self.tensor_indices = tensor_indices\n\n # Following 2 properties:\n # tensor inputs and outputs of outbound_layer.\n\n # Nested structure of tensors. 
1:1 mapping with inbound_layers.\n self.input_tensors = input_tensors\n # Nested structure of tensors, created by outbound_layer.call().\n self.output_tensors = output_tensors\n\n # Following 2 properties: input and output shapes.\n\n # Nested structure of shape tuples, shapes of input_tensors.\n self.input_shapes = nest.map_structure(backend.int_shape, input_tensors)\n # Nested structure of shape tuples, shapes of output_tensors.\n self.output_shapes = nest.map_structure(backend.int_shape, output_tensors)\n\n # Optional keyword arguments to layer's `call`.\n self.arguments = arguments\n\n # Add nodes to all layers involved.\n for layer in nest.flatten(inbound_layers):\n if layer is not None:\n # For compatibility with external Keras, we use the deprecated\n # accessor here.\n layer.outbound_nodes.append(self)\n # For compatibility with external Keras, we use the deprecated\n # accessor here.\n outbound_layer.inbound_nodes.append(self)\n\n def iterate_inbound(self):\n \"\"\"Returns a list of tuples representing the inbound data.\n\n Returns:\n List of tuples like: (inbound_layer, node_index, tensor_index, tensor).\n \"\"\"\n return zip(\n nest.flatten(self.inbound_layers), nest.flatten(self.node_indices),\n nest.flatten(self.tensor_indices), nest.flatten(self.input_tensors))\n\n def get_config(self):\n inbound_names = nest.map_structure(\n lambda layer: layer.name if layer else None, self.inbound_layers)\n return {\n 'outbound_layer': self.outbound_layer.name,\n 'inbound_layers': inbound_names,\n 'node_indices': self.node_indices,\n 'tensor_indices': self.tensor_indices\n }\n\n\ndef default(method):\n \"\"\"Decorates a method to detect overrides in subclasses.\"\"\"\n method._is_default = True\n return method\n\n\n# Avoid breaking users who directly import this symbol from this file.\n# TODO(fchollet): remove this.\nInputSpec = input_spec.InputSpec # pylint:disable=invalid-name\n"
] |
[
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.keras.backend.batch_get_value",
"tensorflow.python.keras.utils.generic_utils.to_list",
"tensorflow.python.keras.backend.placeholder",
"tensorflow.python.keras.regularizers.get",
"tensorflow.python.keras.initializers.glorot_uniform",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.keras.engine.base_layer_utils.generate_placeholders_from_shape",
"tensorflow.python.keras.initializers.zeros",
"tensorflow.python.training.checkpointable.layer_utils.has_weights",
"tensorflow.python.keras.backend.track_variable",
"tensorflow.python.keras.engine.base_layer_utils.create_mean_metric",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.engine.input_spec.assert_input_compatibility",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.tf_decorator.unwrap",
"tensorflow.python.keras.constraints.get",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.keras.utils.tf_utils.get_reachable_from_inputs",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.keras.engine.base_layer_utils.have_all_keras_metadata",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.util.function_utils.fn_args",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.python.keras.engine.base_layer_utils.collect_previous_mask",
"tensorflow.python.keras.utils.generic_utils.to_snake_case",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.keras.utils.tf_utils.are_all_symbolic_tensors",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.keras.utils.generic_utils.is_all_none",
"tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.util.nest.flatten"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dnabanita7/PySyft
|
[
"ce2510e65f5bad382e88806bcde30fa38c3c76c4",
"ce2510e65f5bad382e88806bcde30fa38c3c76c4",
"6477f64b63dc285059c3766deab3993653cead2e",
"6477f64b63dc285059c3766deab3993653cead2e"
] |
[
"src/syft/lib/torch/module.py",
"src/syft/lib/pandas/__init__.py",
"tests/syft/core/store/storable_object_test.py",
"tests/syft/core/node/common/action/function_or_constructor_action_test.py"
] |
[
"# stdlib\nimport ast\nfrom collections import OrderedDict\nimport copy\nimport os\nfrom pathlib import Path\nimport sys\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\n# third party\nimport torch\n\n# syft absolute\nimport syft as sy\n\n# syft relative\nfrom ...generate_wrapper import GenerateWrapper\n\n# from ...core.pointer.pointer import Pointer\nfrom ...lib.util import full_name_with_qualname\nfrom ...logger import critical\nfrom ...logger import info\nfrom ...logger import traceback_and_raise\nfrom ...proto.lib.torch.module_pb2 import Module as Module_PB\nfrom ..python.collections import OrderedDict as SyOrderedDict\n\n# from ...core.node.common.service.auth import AuthorizationException\n\n\ndef repr_to_kwargs(repr_str: str) -> Tuple[List[Any], Dict[Any, Any]]:\n # for example: repr_str = Conv2d(...).extra_repr()\n # produces: > str(\"1, 32, kernel_size=(3, 3), stride=(1, 1)\")\n # then we just have to split it into args and kwargs\n # using ast.literal_eval we can use python to give us the real primitive types\n # from the strings in a safe way\n # str(\"1 \") becomes int(1)\n # str(\"(1, 2) \") becomes tuple(1, 2)\n args: List[Any] = []\n kwargs: Dict[Any, Any] = {}\n parts = repr_str.split(\",\")\n\n # tuples are split by commas as well, so we will keep a tab on open parentheses\n # then concat with \",\" until we find a close parentheses\n # TODO: make work nested with a count and add tests\n para_open = False\n buffer = \"\"\n for part in parts:\n try:\n if \"(\" in part:\n para_open = True\n buffer = \"\"\n if para_open is True:\n buffer += part + \",\"\n if \")\" in part:\n # remove trailing ,\n part = buffer[:-1]\n buffer = \"\"\n para_open = False\n else:\n continue\n\n string = part.strip()\n if \"=\" not in string:\n # its an arg\n arg = ast.literal_eval(string)\n args.append(arg)\n else:\n # its a kwarg\n kv = string.split(\"=\")\n key = str(kv[0])\n string = kv[1].strip()\n value = ast.literal_eval(string)\n kwargs[key.strip()] = value\n except Exception as e:\n info(f\"ast.literal_eval failed to parse part: {string}. {e}\")\n\n return (args, kwargs)\n\n\nclass Module:\n \"\"\"\n This is our equivalent of torch.nn.Module and aims to have the same external\n interface. We will need to support both torch Modules and Module Pointers.\n \"\"\"\n\n def __init__(self, torch_ref: Any) -> None:\n self.setup(torch_ref=torch_ref)\n\n def setup(self, torch_ref: Any) -> None:\n # the remote torch means the model is remote\n self.remote_model: Optional[\"Module\"] = None\n self.local_model: Optional[\"Module\"] = None\n self.duet = None\n if \"syft\" in full_name_with_qualname(klass=type(torch_ref)):\n info(\"> Creating remote model\")\n self.is_local = False\n else:\n # otherwise we have a local model\n info(\"> Creating local model\")\n self.is_local = True\n\n self.torch_ref = torch_ref\n self.training = False\n self._modules: OrderedDict[str, Module] = OrderedDict()\n real_module = torch_ref.nn.Module()\n self.__dict__[\"real_module\"] = real_module # bypass getattr/setattr\n # if issubclass(type(real_module), Pointer):\n # try:\n # # TODO: this needs fixing but should be on by default for now\n # # https://github.com/OpenMined/PySyft/issues/5242\n # real_module.searchable = True\n # except AuthorizationException as e:\n # print(f\"Cant make real_module searchable. 
{e}\")\n\n def __setattr__(self, name: str, value: Union[Any, \"Module\"]) -> None:\n # this is how we catch the modules being set during subclass init\n # bug where torch.nn.modules isn't the full name on some imports\n # TODO: fix this properly\n if \"torch.nn\" in full_name_with_qualname(klass=type(value)):\n modules = self.__dict__.get(\"_modules\")\n if modules is not None:\n modules[name] = value\n\n # attach all the sub modules to a real module so that we can have a\n # remote module pointer that acts like a real model\n real_module: Optional[OrderedDict] = self.__dict__.get(\"real_module\")\n if real_module is not None:\n real_module.add_module(name, value) # type: ignore\n else:\n object.__setattr__(self, name, value)\n\n def __getattr__(self, name: str) -> Union[Any, \"Module\"]:\n modules: Optional[OrderedDict] = self.__dict__.get(\"_modules\")\n if modules is not None:\n if name in modules:\n return modules[name]\n\n return object.__getattribute__(self, name)\n\n def train(self, mode: bool = True) -> \"Module\":\n self.training = mode\n for _, module in self.modules.items():\n module.train(mode)\n return self\n\n def eval(self) -> \"Module\":\n return self.train(False)\n\n def __call__(\n self, *args: Union[List[Any], Tuple[Any, ...]], **kwargs: Dict[Any, Any]\n ) -> Any:\n return self.forward(*args, **kwargs)\n\n @property\n def modules(self) -> OrderedDict:\n modules = self.__dict__.get(\"_modules\")\n if modules is not None:\n return modules\n return OrderedDict()\n\n # local list of remote ListPointers of TensorPointers\n def parameters(self, recurse: bool = True) -> Optional[List[Any]]:\n params_list: Optional[List[Any]] = None\n\n if self.is_local is True:\n # we are local so use normal torch params\n params_list = []\n for _, module in self.modules.items():\n params = module.parameters(recurse)\n if params_list is None:\n # only on remote create a remote list so we can concat the param list\n # pointers without having to actually get them\n self.duet = params.client\n params_list = self.duet.syft.lib.python.List() # type: ignore\n # either way lets concat them until we have a big list of parameters\n params_list += params\n return params_list\n\n def cuda(self, device: Any) -> \"Module\":\n for _, module in self.modules.items():\n module.cuda(device)\n return self\n\n def cpu(self) -> \"Module\":\n for _, module in self.modules.items():\n module.cpu()\n return self\n\n def load_state_dict(self, input: Union[str, os.PathLike, Dict[str, Any]]) -> None:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return None\n\n state_dict = {}\n if isinstance(input, (str, os.PathLike)):\n with open(Path(input), \"rb\") as f:\n state_dict = torch.load(f)\n else:\n state_dict = dict(input)\n\n if not issubclass(type(state_dict), dict):\n traceback_and_raise(\n f\" Invalid input: {type(input)}. 
\"\n + \"Try inputting a state_dict or .pth file.\"\n )\n\n info(\"> Loading model weights\")\n layers: Dict[str, Any] = {}\n for save_key, values in state_dict.items():\n parts = save_key.split(\".\")\n if len(parts) < 2:\n info(f\" state dict key is too short: {save_key}\")\n continue\n layer = parts[0]\n attr = parts[1]\n if layer not in layers:\n layers[layer] = {}\n layers[layer][attr] = values\n\n for layer, sd in layers.items():\n local_layer = getattr(self, layer, None)\n if local_layer is not None and hasattr(local_layer, \"load_state_dict\"):\n d = local_layer.load_state_dict(sd)\n info(f\" {layer} state dict loaded with: {d}\")\n else:\n info(f\" Model doesnt have layer {layer}\")\n\n info(\"> Finished loading weights\")\n return None\n\n def state_dict(self) -> Optional[Dict[str, Any]]:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return None\n\n info(\"> Saving model weights\")\n model_state_dict = OrderedDict()\n for name, module in self.modules.items():\n if hasattr(module, \"state_dict\"):\n for k, v in module.state_dict().items():\n save_key = f\"{name}.{k}\"\n model_state_dict[save_key] = v\n\n info(\"> Finished saving weights\")\n return model_state_dict\n\n def save(self, path: Union[str, bytes, os.PathLike]) -> None:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return\n\n state_dict = self.state_dict()\n torch.save(state_dict, path)\n\n def load(self, path: Union[str, os.PathLike]) -> None:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return\n\n self.load_state_dict(input=path)\n\n def send(self, client: Any, send_parameters: bool = True) -> Any:\n if not self.is_local:\n info(\"> This model is remote so try calling .get()\")\n return\n\n info(\"> Sending local model\")\n\n remote_model = copy.copy(self)\n remote_model.setup(torch_ref=client.torch)\n remote_model.duet = client\n\n for name, module in self.modules.items():\n fqn = full_name_with_qualname(klass=type(module))\n klass = client.lib_ast.query(fqn, obj_type=type(module))\n module_repr = module.extra_repr()\n args, kwargs = repr_to_kwargs(repr_str=module_repr)\n remote_module_ptr = klass(*args, **kwargs)\n remote_model.__setattr__(name, remote_module_ptr)\n\n # if the remote module has state_dict lets get it\n if (\n send_parameters\n and hasattr(module, \"state_dict\")\n and hasattr(remote_module_ptr, \"load_state_dict\")\n ):\n local_state_ord_dict = module.state_dict()\n # cast to dict because OrderedDict is not supported\n\n # get a blocking copy of the state_dict\n info(f\" Sending local layer: {name}\")\n # cant import Dict / PrimitiveFactory due to circular imports\n remote_state_dict_ptr = client.syft.lib.python.Dict(\n dict(local_state_ord_dict)\n )\n # iterate through the key, values\n # weights and biases should be in there\n remote_module_ptr.load_state_dict(remote_state_dict_ptr)\n\n info(\"\\n> Finished sending local model <\\n\\n\")\n self.remote_model = remote_model\n return self.remote_model\n\n def get(\n self,\n request_block: bool = False,\n timeout_secs: int = 20,\n reason: str = \"\",\n delete_obj: bool = False,\n ) -> Optional[\"Module\"]:\n\n if self.is_local:\n info(\"> This model is local. 
Maybe you meant to call .send()?\")\n return None\n\n info(\"> Downloading remote model\")\n\n local_model = copy.copy(self)\n local_model.setup(torch_ref=torch)\n local_model.duet = self.duet\n\n for layer_name, module in self.modules.items():\n module_parts = module.path_and_name.split(\".\")\n klass_name = module_parts.pop()\n klass = getattr(sys.modules[\".\".join(module_parts)], klass_name)\n repr_ptr = module.extra_repr()\n\n module_repr = repr_ptr.get(\n request_block=request_block,\n reason=reason,\n timeout_secs=timeout_secs,\n )\n\n if module_repr is None:\n info(f\" Request for {reason} extra_repr failed, skipping layer\")\n continue\n\n args, kwargs = repr_to_kwargs(repr_str=module_repr.upcast())\n local_module = klass(*args, **kwargs)\n\n # the local real module has been set on the sy module\n local_model.__setattr__(layer_name, local_module)\n\n try:\n # if the remote module has state_dict lets get it\n if hasattr(module, \"state_dict\") and hasattr(\n local_module, \"load_state_dict\"\n ):\n info(\"loading remote state dict\")\n sd_ptr = module.state_dict()\n # get a blocking copy of the state_dict\n info(f\" Downloading remote layer: {layer_name}\")\n state_dict = sd_ptr.get(\n request_block=request_block,\n reason=reason,\n timeout_secs=timeout_secs,\n delete_obj=delete_obj,\n )\n # We have to recreate the OrderedDict for load_state_dict to work\n ordered_state_dict = OrderedDict()\n for elem, item in state_dict.items():\n ordered_state_dict[str(elem)] = item\n # iterate through the key, values\n # weights and biases should be in there\n if state_dict is not None:\n # TODO: support torch.nn.modules.module._IncompatibleKeys\n local_module.load_state_dict(ordered_state_dict)\n else:\n info(\n f\" Failed to get {layer_name} state_dict, skipping layer.\"\n )\n\n except Exception as e:\n critical(f\" Failed to download remote state for {layer_name}.\")\n traceback_and_raise(e)\n\n info(\"\\n> Finished downloading remote model <\\n\\n\")\n self.local_model = local_model\n return self.local_model\n\n # zero them so we know they are copied\n def zero_layers(self) -> None:\n for m in self.modules.values():\n if hasattr(m, \"weight\"):\n m.weight.requires_grad_(False).zero_()\n if hasattr(m, \"bias\"):\n m.bias.requires_grad_(False).zero_()\n\n # easy way to check the weights have changed\n def debug_sum_layers(self) -> None:\n info(\"> Summing layers for debugging: \")\n for n, m in self.modules.items():\n if hasattr(m, \"state_dict\"):\n if self.is_local:\n state_dict = m.state_dict()\n else:\n state_dict = m.state_dict().get()\n\n for k, v in state_dict.items():\n if hasattr(v, \"sum\"):\n s = v.sum().item()\n info(f\" Layer {n} sum({k}): {s}\")\n\n\ndef object2proto(obj: torch.nn.Module, is_child: bool = False) -> Module_PB:\n proto = Module_PB()\n if \"torch.nn.\" in type(obj).__module__:\n proto.module_type = type(obj).__name__\n else:\n proto.module_type = f\"_USER_DEFINED_MODULE_{type(obj).__name__}\"\n\n proto.module_repr = obj.extra_repr()\n\n if not is_child:\n proto.state_dict.CopyFrom(sy.serialize(SyOrderedDict(obj.state_dict())))\n\n for n, m in obj.named_children():\n child_proto = object2proto(m, is_child=True)\n child_proto.module_name = n\n proto.children.append(child_proto)\n\n return proto\n\n\ndef proto2object(proto: Module_PB) -> torch.nn.Module:\n is_userdefined = proto.module_type.startswith(\"_USER_DEFINED_MODULE_\")\n\n if is_userdefined:\n obj_type = type(\n proto.module_type.replace(\"_USER_DEFINED_MODULE_\", \"\"),\n (torch.nn.Module,),\n {},\n )\n 
else:\n obj_type = getattr(torch.nn, proto.module_type)\n\n args, kwargs = repr_to_kwargs(repr_str=proto.module_repr)\n obj = obj_type(*args, **kwargs)\n\n for child_proto in proto.children:\n obj.add_module(child_proto.module_name, sy.deserialize(child_proto))\n\n if proto.state_dict.ByteSize() > 0:\n obj.load_state_dict(sy.deserialize(proto.state_dict))\n\n return obj\n\n\nGenerateWrapper(\n wrapped_type=torch.nn.Module,\n import_path=\"torch.nn.Module\",\n protobuf_scheme=Module_PB,\n type_object2proto=object2proto,\n type_proto2object=proto2object,\n)\n",
"# stdlib\nimport functools\nfrom typing import Any as TypeAny\nfrom typing import List as TypeList\nfrom typing import Tuple as TypeTuple\n\n# third party\nfrom packaging import version\nimport pandas as pd\n\n# syft relative\nfrom . import frame # noqa: 401\nfrom . import series # noqa: 401\nfrom ...ast import add_classes\nfrom ...ast import add_methods\nfrom ...ast import add_modules\nfrom ...ast.globals import Globals\nfrom ..misc.union import UnionGenerator\nfrom ..util import generic_update_ast\n\nLIB_NAME = \"pandas\"\nPACKAGE_SUPPORT = {\"lib\": LIB_NAME}\n\nLIB_VERSION = version.parse(pd.__version__.split(\"+\")[0])\n\n\ndef create_ast(client: TypeAny = None) -> Globals:\n ast = Globals(client)\n\n modules: TypeList[TypeTuple[str, TypeAny]] = [(\"pandas\", pd)]\n\n classes: TypeList[TypeTuple[str, str, TypeAny]] = [\n (\"pandas.DataFrame\", \"pandas.DataFrame\", pd.DataFrame),\n (\"pandas.Series\", \"pandas.Series\", pd.Series),\n ]\n\n methods: TypeList[TypeTuple[str, str]] = [\n (\"pandas.read_csv\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__getitem__\", \"pandas.Series\"),\n (\"pandas.DataFrame.__setitem__\", \"pandas.Series\"),\n (\"pandas.DataFrame.__len__\", \"syft.lib.python.Int\"),\n (\"pandas.DataFrame.__abs__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__add__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__and__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__eq__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__floordiv__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__ge__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__gt__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__iadd__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__iand__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__ifloordiv__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__imod__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__imul__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__ipow__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__isub__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__le__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__lt__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__mod__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__mul__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__ne__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__neg__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__pos__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__pow__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__rfloordiv__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__rmod__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__rmul__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__round__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__rpow__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__rsub__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__rtruediv__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__sub__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__truediv__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.dropna\", \"pandas.DataFrame\"),\n (\"pandas.Series.__getitem__\", \"pandas.Series\"),\n (\"pandas.Series.__setitem__\", \"pandas.Series\"),\n (\"pandas.Series.__len__\", \"syft.lib.python.Int\"),\n (\"pandas.Series.__abs__\", \"pandas.Series\"),\n (\"pandas.Series.__add__\", \"pandas.Series\"),\n (\"pandas.Series.__and__\", \"pandas.Series\"),\n (\"pandas.Series.__divmod__\", \"pandas.Series\"),\n (\"pandas.Series.__eq__\", \"pandas.Series\"),\n (\"pandas.Series.__floordiv__\", \"pandas.Series\"),\n (\"pandas.Series.__ge__\", 
\"pandas.Series\"),\n (\"pandas.Series.__gt__\", \"pandas.Series\"),\n (\"pandas.Series.__iadd__\", \"pandas.Series\"),\n (\"pandas.Series.__iand__\", \"pandas.Series\"),\n (\"pandas.Series.__ifloordiv__\", \"pandas.Series\"),\n (\"pandas.Series.__imod__\", \"pandas.Series\"),\n (\"pandas.Series.__imul__\", \"pandas.Series\"),\n (\"pandas.Series.__ipow__\", \"pandas.Series\"),\n (\"pandas.Series.__isub__\", \"pandas.Series\"),\n (\"pandas.Series.__le__\", \"pandas.Series\"),\n (\"pandas.Series.__lt__\", \"pandas.Series\"),\n (\"pandas.Series.__mod__\", \"pandas.Series\"),\n (\"pandas.Series.__mul__\", \"pandas.Series\"),\n (\"pandas.Series.__ne__\", \"pandas.Series\"),\n (\"pandas.Series.__neg__\", \"pandas.Series\"),\n (\"pandas.Series.__pos__\", \"pandas.Series\"),\n (\"pandas.Series.__pow__\", \"pandas.Series\"),\n (\"pandas.Series.__rdivmod__\", \"pandas.Series\"),\n (\"pandas.Series.__rfloordiv__\", \"pandas.Series\"),\n (\"pandas.Series.__rmod__\", \"pandas.Series\"),\n (\"pandas.Series.__rmul__\", \"pandas.Series\"),\n (\"pandas.Series.__round__\", \"pandas.Series\"),\n (\"pandas.Series.__rpow__\", \"pandas.Series\"),\n (\"pandas.Series.__rsub__\", \"pandas.Series\"),\n (\"pandas.Series.__rtruediv__\", \"pandas.Series\"),\n (\"pandas.Series.__sub__\", \"pandas.Series\"),\n (\"pandas.Series.__truediv__\", \"pandas.Series\"),\n (\"pandas.Series.add\", \"pandas.Series\"),\n (\"pandas.Series.sub\", \"pandas.Series\"),\n (\"pandas.Series.mul\", \"pandas.Series\"),\n (\"pandas.Series.div\", \"pandas.Series\"),\n (\"pandas.Series.truediv\", \"pandas.Series\"),\n (\"pandas.Series.floordiv\", \"pandas.Series\"),\n (\"pandas.Series.mod\", \"pandas.Series\"),\n (\"pandas.Series.pow\", \"pandas.Series\"),\n (\"pandas.Series.radd\", \"pandas.Series\"),\n (\"pandas.Series.rsub\", \"pandas.Series\"),\n (\"pandas.Series.rmul\", \"pandas.Series\"),\n (\"pandas.Series.rdiv\", \"pandas.Series\"),\n (\"pandas.Series.rtruediv\", \"pandas.Series\"),\n (\"pandas.Series.rfloordiv\", \"pandas.Series\"),\n (\"pandas.Series.rmod\", \"pandas.Series\"),\n (\"pandas.Series.rpow\", \"pandas.Series\"),\n (\"pandas.Series.lt\", \"pandas.Series\"),\n (\"pandas.Series.gt\", \"pandas.Series\"),\n (\"pandas.Series.le\", \"pandas.Series\"),\n (\"pandas.Series.ge\", \"pandas.Series\"),\n (\"pandas.Series.ne\", \"pandas.Series\"),\n (\"pandas.Series.eq\", \"pandas.Series\"),\n (\"pandas.Series.argsort\", \"pandas.Series\"),\n (\"pandas.Series.round\", \"pandas.Series\"),\n (\"pandas.Series.head\", \"pandas.Series\"),\n (\"pandas.Series.tail\", \"pandas.Series\"),\n (\"pandas.Series.any\", \"syft.lib.python.Bool\"),\n (\"pandas.Series.shape\", \"syft.lib.python.Tuple\"),\n (\"pandas.Series.all\", \"syft.lib.python.Bool\"),\n (\"pandas.Series.argmax\", \"syft.lib.python.Int\"),\n (\"pandas.Series.nbytes\", \"syft.lib.python.Int\"),\n (\"pandas.Series.mean\", \"syft.lib.python.Float\"),\n (\"pandas.Series.ndim\", \"syft.lib.python.Int\"),\n (\"pandas.Series.size\", \"syft.lib.python.Int\"),\n (\"pandas.Series.hasnans\", \"syft.lib.python.Bool\"),\n (\"pandas.Series.empty\", \"syft.lib.python.Bool\"),\n (\"pandas.Series.T\", \"pandas.Series\"),\n (\"pandas.Series.dropna\", \"pandas.Series\"),\n (\"pandas.Series.to_frame\", \"pandas.DataFrame\"),\n (\"pandas.Series.to_list\", \"syft.lib.python.List\"),\n (\n \"pandas.Series.sum\",\n UnionGenerator[\"syft.lib.python.Float\", \"syft.lib.python.Int\"],\n ),\n (\"pandas.Series.median\", \"syft.lib.python.Float\"),\n (\n \"pandas.Series.max\",\n UnionGenerator[\n 
\"syft.lib.python.Bool\", \"syft.lib.python.Float\", \"syft.lib.python.Int\"\n ],\n ),\n (\n \"pandas.Series.min\",\n UnionGenerator[\n \"syft.lib.python.Bool\", \"syft.lib.python.Float\", \"syft.lib.python.Int\"\n ],\n ),\n ]\n\n if LIB_VERSION > version.parse(\"1.2.0\"):\n methods += [\n (\"pandas.DataFrame.__divmod__\", \"pandas.DataFrame\"),\n (\"pandas.DataFrame.__rdivmod__\", \"pandas.DataFrame\"),\n ]\n\n add_modules(ast, modules)\n add_classes(ast, classes)\n add_methods(ast, methods)\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n # TODO: Pandas can't have tags and description because they break the dict\n # klass.create_storable_object_attr_convenience_methods()\n\n return ast\n\n\n# we cant create Unions that refer to the package itself until the create_ast\n# has completed first so we can call again into post_update_ast to finish these\n# TODO: add support for self referential unions using some kind of post update\n# Issue: https://github.com/OpenMined/PySyft/issues/5323\ndef post_create_ast(ast: Globals) -> Globals:\n self_referencing_methods = [\n (\"pandas.Series.loc\", UnionGenerator[\"pandas.DataFrame\", \"pandas.Series\"]),\n (\"pandas.Series.iloc\", UnionGenerator[\"pandas.DataFrame\", \"pandas.Series\"]),\n (\"pandas.DataFrame.loc\", UnionGenerator[\"pandas.DataFrame\", \"pandas.Series\"]),\n (\"pandas.DataFrame.iloc\", UnionGenerator[\"pandas.DataFrame\", \"pandas.Series\"]),\n ]\n\n add_methods(ast, self_referencing_methods)\n\n return ast\n\n\nupdate_ast = functools.partial(generic_update_ast, LIB_NAME, create_ast)\n# post_update_ast = functools.partial(generic_update_ast, LIB_NAME, post_create_ast)\n",
"# third party\nimport torch as th\n\n# syft absolute\nimport syft as sy\nfrom syft import serialize\nfrom syft.core.common import UID\nfrom syft.core.store.storeable_object import StorableObject\n\n\ndef test_create_storable_obj() -> None:\n id = UID()\n data = UID()\n description = \"This is a dummy test\"\n tags = [\"dummy\", \"test\"]\n StorableObject(id=id, data=data, description=description, tags=tags)\n\n\ndef test_serde_storable_obj() -> None:\n id = UID()\n data = th.Tensor([1, 2, 3, 4])\n description = \"This is a dummy test\"\n tags = [\"dummy\", \"test\"]\n obj = StorableObject(id=id, data=data, description=description, tags=tags)\n\n blob = sy.serialize(obj=obj)\n\n sy.deserialize(blob=blob)\n\n\ndef test_serde_storable_obj_2() -> None:\n id = UID()\n data = th.Tensor([1, 2, 3, 4])\n description = \"This is a dummy test\"\n tags = [\"dummy\", \"test\"]\n obj = StorableObject(id=id, data=data, description=description, tags=tags)\n blob = serialize(obj)\n ds_obj = sy.deserialize(blob=blob)\n assert obj.id == ds_obj.id\n assert (obj.data == ds_obj.data).all()\n assert obj.description == ds_obj.description\n assert obj.tags == ds_obj.tags\n\n\n# def test_serde_storable_obj_with_wrapped_class() -> None:\n# \"\"\"Ensure that storable object serialization works wrapping non-syft classes (like np.ndarray)\"\"\"\n#\n# id = UID()\n# data = np.array([1, 2, 3, 4])\n# description = \"This is a dummy test\"\n# tags = [\"dummy\", \"test\"]\n# obj = StorableObject(id=id, data=data, description=description, tags=tags)\n#\n# blob = sy.serialize(obj=obj)\n#\n# sy.deserialize(blob=blob)\n",
"# third party\nimport torch as th\n\n# syft absolute\nimport syft as sy\nfrom syft.core.common.uid import UID\nfrom syft.core.node.common.action.function_or_constructor_action import (\n RunFunctionOrConstructorAction,\n)\n\n\n# TODO test execution\n# TODO test permissions\ndef test_run_function_or_constructor_action_serde(\n root_client: sy.VirtualMachineClient,\n) -> None:\n args = (\n th.tensor([1, 2, 3]).send(root_client),\n th.tensor([4, 5, 5]).send(root_client),\n )\n\n msg = RunFunctionOrConstructorAction(\n path=\"torch.Tensor.add\",\n args=args,\n kwargs={},\n id_at_location=UID(),\n address=root_client.address,\n msg_id=UID(),\n )\n\n blob = sy.serialize(msg)\n\n msg2 = sy.deserialize(blob=blob)\n\n assert msg2.path == msg.path\n # FIXME this cannot be checked before we fix the Pointer serde problem (see _proto2object in Pointer)\n # assert msg2.args == msg.args\n assert msg2.kwargs == msg.kwargs\n assert msg2.address == msg.address\n assert msg2.id == msg.id\n assert msg2.id_at_location == msg.id_at_location\n"
] |
[
[
"torch.load",
"torch.save"
],
[
"pandas.__version__.split"
],
[
"torch.Tensor"
],
[
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pjkundert/wikienergy
|
[
"ac3a13780bccb001c81d6f8ee27d3f5706cfa77e",
"ac3a13780bccb001c81d6f8ee27d3f5706cfa77e",
"ac3a13780bccb001c81d6f8ee27d3f5706cfa77e"
] |
[
"proto/pylearn2/make_fake_ac_dataset.py",
"disaggregator/build/pandas/pandas/tseries/converter.py",
"disaggregator/build/pandas/pandas/core/series.py"
] |
[
"import pylearn2\nimport pylearn2.datasets as ds\nimport numpy as np\nfrom hmmlearn import hmm\nimport os\nimport pickle\n\ndef build_dataset(model,num_samples,sample_length,label_index,num_classes):\n all_data = []\n all_labels = []\n for i in range(num_samples):\n data,labels = model.sample(sample_length)\n data = data.T[0]\n labels = labels.T\n all_data.append(data)\n label_one_hot = np.zeros(num_classes)\n label_one_hot[labels[label_index]] = 1\n all_labels.append(label_one_hot)\n return zip(np.array(all_data), np.array(all_labels).astype(int))\n \ndef get_train_valid_test(dataset,n_train,n_valid,n_test):\n dataset_copy = dataset[:]\n np.random.shuffle(dataset_copy)\n assert(len(dataset_copy) >= n_train + n_valid + n_test)\n train = dataset_copy[:n_train]\n valid = dataset_copy[n_train:n_train+n_valid]\n test = dataset_copy[n_train + n_valid:n_train+n_valid+n_test]\n return train, valid, test\n\ndef convert_to_pylearn_ds(train,valid,test):\n train_X, train_y = map(np.array,zip(*train))\n valid_X, valid_y = map(np.array,zip(*valid))\n test_X, test_y = map(np.array,zip(*test))\n\n # convert to pylearn_dataset\n return ds.DenseDesignMatrix(X=train_X,y=train_y),\\\n ds.DenseDesignMatrix(X=valid_X,y=valid_y),\\\n ds.DenseDesignMatrix(X=test_X,y=test_y)\n\ndef fake_hmm_appliance(pi,a,mean,cov):\n model=hmm.GaussianHMM(pi.size, \"full\", pi,a)\n model.means_ = mean\n model.covars_ = cov\n return model\n\ndef export_datasets(path,datasets, names):\n for name,dataset in zip(names,datasets):\n with open(os.path.join(path,name + '.pkl'), 'w') as f:\n pickle.dump(dataset,f)\n \nif __name__ == \"__main__\":\n # create a fake A/C\n pi=np.array([0.1,0.9])\n a=np.array([[0.95,0.05],[0.05,0.95]])\n mean=np.array([[0],[1500]])\n cov=np.array([[[ 1.]],[[ 10]]])\n model = fake_hmm_appliance(pi,a,mean,cov)\n\n # randomly sample one day of data and format it as a pylearn2 dataset\n dataset = build_dataset(model,10000,10,7,2)\n train, valid, test = get_train_valid_test(dataset,5000,2500,2500)\n train, valid, test = convert_to_pylearn_ds(train,valid,test)\n\n # export datasets\n export_datasets(\"/home/pngo/data\",[train,valid,test],[\"train_fake_ac_day\",\"valid_fake_ac_day\",\"test_fake_ac_day\"])\n",
"from datetime import datetime, timedelta\nimport datetime as pydt\nimport numpy as np\n\nfrom dateutil.relativedelta import relativedelta\n\nimport matplotlib.units as units\nimport matplotlib.dates as dates\n\nfrom matplotlib.ticker import Formatter, AutoLocator, Locator\nfrom matplotlib.transforms import nonsingular\n\nfrom pandas.compat import lrange\nimport pandas.compat as compat\nimport pandas.lib as lib\nimport pandas.core.common as com\nfrom pandas.core.index import Index\n\nfrom pandas.core.series import Series\nfrom pandas.tseries.index import date_range\nimport pandas.tseries.tools as tools\nimport pandas.tseries.frequencies as frequencies\nfrom pandas.tseries.frequencies import FreqGroup\nfrom pandas.tseries.period import Period, PeriodIndex\n\n\ndef register():\n units.registry[lib.Timestamp] = DatetimeConverter()\n units.registry[Period] = PeriodConverter()\n units.registry[pydt.datetime] = DatetimeConverter()\n units.registry[pydt.date] = DatetimeConverter()\n units.registry[pydt.time] = TimeConverter()\n units.registry[np.datetime64] = DatetimeConverter()\n\n\ndef _to_ordinalf(tm):\n tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +\n float(tm.microsecond / 1e6))\n return tot_sec\n\n\ndef time2num(d):\n if isinstance(d, compat.string_types):\n parsed = tools.to_datetime(d)\n if not isinstance(parsed, datetime):\n raise ValueError('Could not parse time %s' % d)\n return _to_ordinalf(parsed.time())\n if isinstance(d, pydt.time):\n return _to_ordinalf(d)\n return d\n\n\nclass TimeConverter(units.ConversionInterface):\n\n @staticmethod\n def convert(value, unit, axis):\n valid_types = (str, pydt.time)\n if (isinstance(value, valid_types) or com.is_integer(value) or\n com.is_float(value)):\n return time2num(value)\n if isinstance(value, Index):\n return value.map(time2num)\n if isinstance(value, (list, tuple, np.ndarray, Index)):\n return [time2num(x) for x in value]\n return value\n\n @staticmethod\n def axisinfo(unit, axis):\n if unit != 'time':\n return None\n\n majloc = AutoLocator()\n majfmt = TimeFormatter(majloc)\n return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')\n\n @staticmethod\n def default_units(x, axis):\n return 'time'\n\n\n### time formatter\nclass TimeFormatter(Formatter):\n\n def __init__(self, locs):\n self.locs = locs\n\n def __call__(self, x, pos=0):\n fmt = '%H:%M:%S'\n s = int(x)\n ms = int((x - s) * 1e3)\n us = int((x - s) * 1e6 - ms)\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n _, h = divmod(h, 24)\n if us != 0:\n fmt += '.%6f'\n elif ms != 0:\n fmt += '.%3f'\n\n return pydt.time(h, m, s, us).strftime(fmt)\n\n\n### Period Conversion\n\n\nclass PeriodConverter(dates.DateConverter):\n\n @staticmethod\n def convert(values, units, axis):\n if not hasattr(axis, 'freq'):\n raise TypeError('Axis must have `freq` set to convert to Periods')\n valid_types = (str, datetime, Period, pydt.date, pydt.time)\n if (isinstance(values, valid_types) or com.is_integer(values) or\n com.is_float(values)):\n return get_datevalue(values, axis.freq)\n if isinstance(values, PeriodIndex):\n return values.asfreq(axis.freq).values\n if isinstance(values, Index):\n return values.map(lambda x: get_datevalue(x, axis.freq))\n if com.is_period_arraylike(values):\n return PeriodIndex(values, freq=axis.freq).values\n if isinstance(values, (list, tuple, np.ndarray, Index)):\n return [get_datevalue(x, axis.freq) for x in values]\n return values\n\n\ndef get_datevalue(date, freq):\n if isinstance(date, Period):\n return date.asfreq(freq).ordinal\n elif 
isinstance(date, (str, datetime, pydt.date, pydt.time)):\n return Period(date, freq).ordinal\n elif (com.is_integer(date) or com.is_float(date) or\n (isinstance(date, (np.ndarray, Index)) and (date.size == 1))):\n return date\n elif date is None:\n return None\n raise ValueError(\"Unrecognizable date '%s'\" % date)\n\nHOURS_PER_DAY = 24.\nMINUTES_PER_DAY = 60. * HOURS_PER_DAY\nSECONDS_PER_DAY = 60. * MINUTES_PER_DAY\nMUSECONDS_PER_DAY = 1e6 * SECONDS_PER_DAY\n\n\ndef _dt_to_float_ordinal(dt):\n \"\"\"\n Convert :mod:`datetime` to the Gregorian date as UTC float days,\n preserving hours, minutes, seconds and microseconds. Return value\n is a :func:`float`.\n \"\"\"\n if isinstance(dt, (np.ndarray, Index, Series)) and com.is_datetime64_ns_dtype(dt):\n base = dates.epoch2num(dt.asi8 / 1.0E9)\n else:\n base = dates.date2num(dt)\n return base\n\n\n### Datetime Conversion\nclass DatetimeConverter(dates.DateConverter):\n\n @staticmethod\n def convert(values, unit, axis):\n def try_parse(values):\n try:\n return _dt_to_float_ordinal(tools.to_datetime(values))\n except Exception:\n return values\n\n if isinstance(values, (datetime, pydt.date)):\n return _dt_to_float_ordinal(values)\n elif isinstance(values, np.datetime64):\n return _dt_to_float_ordinal(lib.Timestamp(values))\n elif isinstance(values, pydt.time):\n return dates.date2num(values)\n elif (com.is_integer(values) or com.is_float(values)):\n return values\n elif isinstance(values, compat.string_types):\n return try_parse(values)\n elif isinstance(values, (list, tuple, np.ndarray, Index)):\n if isinstance(values, Index):\n values = values.values\n if not isinstance(values, np.ndarray):\n values = com._asarray_tuplesafe(values)\n\n if com.is_integer_dtype(values) or com.is_float_dtype(values):\n return values\n\n try:\n values = tools.to_datetime(values)\n if isinstance(values, Index):\n values = values.map(_dt_to_float_ordinal)\n else:\n values = [_dt_to_float_ordinal(x) for x in values]\n except Exception:\n pass\n\n return values\n\n @staticmethod\n def axisinfo(unit, axis):\n \"\"\"\n Return the :class:`~matplotlib.units.AxisInfo` for *unit*.\n\n *unit* is a tzinfo instance or None.\n The *axis* argument is required but not used.\n \"\"\"\n tz = unit\n\n majloc = PandasAutoDateLocator(tz=tz)\n majfmt = PandasAutoDateFormatter(majloc, tz=tz)\n datemin = pydt.date(2000, 1, 1)\n datemax = pydt.date(2010, 1, 1)\n\n return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',\n default_limits=(datemin, datemax))\n\n\nclass PandasAutoDateFormatter(dates.AutoDateFormatter):\n\n def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):\n dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)\n # matplotlib.dates._UTC has no _utcoffset called by pandas\n if self._tz is dates.UTC:\n self._tz._utcoffset = self._tz.utcoffset(None)\n self.scaled = {\n 365.0: '%Y',\n 30.: '%b %Y',\n 1.0: '%b %d %Y',\n 1. / 24.: '%H:%M:%S',\n 1. / 24. / 3600. 
/ 1000.: '%H:%M:%S.%f'\n }\n\n def _get_fmt(self, x):\n\n scale = float(self._locator._get_unit())\n\n fmt = self.defaultfmt\n\n for k in sorted(self.scaled):\n if k >= scale:\n fmt = self.scaled[k]\n break\n\n return fmt\n\n def __call__(self, x, pos=0):\n fmt = self._get_fmt(x)\n self._formatter = dates.DateFormatter(fmt, self._tz)\n return self._formatter(x, pos)\n\n\nclass PandasAutoDateLocator(dates.AutoDateLocator):\n\n def get_locator(self, dmin, dmax):\n 'Pick the best locator based on a distance.'\n delta = relativedelta(dmax, dmin)\n\n num_days = ((delta.years * 12.0) + delta.months * 31.0) + delta.days\n num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds\n tot_sec = num_days * 86400. + num_sec\n\n if abs(tot_sec) < self.minticks:\n self._freq = -1\n locator = MilliSecondLocator(self.tz)\n locator.set_axis(self.axis)\n\n locator.set_view_interval(*self.axis.get_view_interval())\n locator.set_data_interval(*self.axis.get_data_interval())\n return locator\n\n return dates.AutoDateLocator.get_locator(self, dmin, dmax)\n\n def _get_unit(self):\n return MilliSecondLocator.get_unit_generic(self._freq)\n\n\nclass MilliSecondLocator(dates.DateLocator):\n\n UNIT = 1. / (24 * 3600 * 1000)\n\n def __init__(self, tz):\n dates.DateLocator.__init__(self, tz)\n self._interval = 1.\n\n def _get_unit(self):\n return self.get_unit_generic(-1)\n\n @staticmethod\n def get_unit_generic(freq):\n unit = dates.RRuleLocator.get_unit_generic(freq)\n if unit < 0:\n return MilliSecondLocator.UNIT\n return unit\n\n def __call__(self):\n # if no data have been set, this will tank with a ValueError\n try:\n dmin, dmax = self.viewlim_to_dt()\n except ValueError:\n return []\n\n if dmin > dmax:\n dmax, dmin = dmin, dmax\n delta = relativedelta(dmax, dmin)\n\n # We need to cap at the endpoints of valid datetime\n try:\n start = dmin - delta\n except ValueError:\n start = _from_ordinal(1.0)\n\n try:\n stop = dmax + delta\n except ValueError:\n # The magic number!\n stop = _from_ordinal(3652059.9999999)\n\n nmax, nmin = dates.date2num((dmax, dmin))\n\n num = (nmax - nmin) * 86400 * 1000\n max_millis_ticks = 6\n for interval in [1, 10, 50, 100, 200, 500]:\n if num <= interval * (max_millis_ticks - 1):\n self._interval = interval\n break\n else:\n # We went through the whole loop without breaking, default to 1\n self._interval = 1000.\n\n estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())\n\n if estimate > self.MAXTICKS * 2:\n raise RuntimeError(('MillisecondLocator estimated to generate %d '\n 'ticks from %s to %s: exceeds Locator.MAXTICKS'\n '* 2 (%d) ') %\n (estimate, dmin, dmax, self.MAXTICKS * 2))\n\n freq = '%dL' % self._get_interval()\n tz = self.tz.tzname(None)\n st = _from_ordinal(dates.date2num(dmin)) # strip tz\n ed = _from_ordinal(dates.date2num(dmax))\n all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject\n\n try:\n if len(all_dates) > 0:\n locs = self.raise_if_exceeds(dates.date2num(all_dates))\n return locs\n except Exception as e: # pragma: no cover\n pass\n\n lims = dates.date2num([dmin, dmax])\n return lims\n\n def _get_interval(self):\n return self._interval\n\n def autoscale(self):\n \"\"\"\n Set the view limits to include the data range.\n \"\"\"\n dmin, dmax = self.datalim_to_dt()\n if dmin > dmax:\n dmax, dmin = dmin, dmax\n\n delta = relativedelta(dmax, dmin)\n\n # We need to cap at the endpoints of valid datetime\n try:\n start = dmin - delta\n except ValueError:\n start = _from_ordinal(1.0)\n\n try:\n stop = dmax + delta\n except 
ValueError:\n # The magic number!\n stop = _from_ordinal(3652059.9999999)\n\n dmin, dmax = self.datalim_to_dt()\n\n vmin = dates.date2num(dmin)\n vmax = dates.date2num(dmax)\n\n return self.nonsingular(vmin, vmax)\n\n\ndef _from_ordinal(x, tz=None):\n ix = int(x)\n dt = datetime.fromordinal(ix)\n remainder = float(x) - ix\n hour, remainder = divmod(24 * remainder, 1)\n minute, remainder = divmod(60 * remainder, 1)\n second, remainder = divmod(60 * remainder, 1)\n microsecond = int(1e6 * remainder)\n if microsecond < 10:\n microsecond = 0 # compensate for rounding errors\n dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),\n int(second), microsecond)\n if tz is not None:\n dt = dt.astimezone(tz)\n\n if microsecond > 999990: # compensate for rounding errors\n dt += timedelta(microseconds=1e6 - microsecond)\n\n return dt\n\n### Fixed frequency dynamic tick locators and formatters\n\n##### -------------------------------------------------------------------------\n#---- --- Locators ---\n##### -------------------------------------------------------------------------\n\n\ndef _get_default_annual_spacing(nyears):\n \"\"\"\n Returns a default spacing between consecutive ticks for annual data.\n \"\"\"\n if nyears < 11:\n (min_spacing, maj_spacing) = (1, 1)\n elif nyears < 20:\n (min_spacing, maj_spacing) = (1, 2)\n elif nyears < 50:\n (min_spacing, maj_spacing) = (1, 5)\n elif nyears < 100:\n (min_spacing, maj_spacing) = (5, 10)\n elif nyears < 200:\n (min_spacing, maj_spacing) = (5, 25)\n elif nyears < 600:\n (min_spacing, maj_spacing) = (10, 50)\n else:\n factor = nyears // 1000 + 1\n (min_spacing, maj_spacing) = (factor * 20, factor * 100)\n return (min_spacing, maj_spacing)\n\n\ndef period_break(dates, period):\n \"\"\"\n Returns the indices where the given period changes.\n\n Parameters\n ----------\n dates : PeriodIndex\n Array of intervals to monitor.\n period : string\n Name of the period to monitor.\n \"\"\"\n current = getattr(dates, period)\n previous = getattr(dates - 1, period)\n return (current - previous).nonzero()[0]\n\n\ndef has_level_label(label_flags, vmin):\n \"\"\"\n Returns true if the ``label_flags`` indicate there is at least one label\n for this level.\n\n if the minimum view limit is not an exact integer, then the first tick\n label won't be shown, so we must adjust for that.\n \"\"\"\n if label_flags.size == 0 or (label_flags.size == 1 and\n label_flags[0] == 0 and\n vmin % 1 > 0.0):\n return False\n else:\n return True\n\n\ndef _daily_finder(vmin, vmax, freq):\n periodsperday = -1\n\n if freq >= FreqGroup.FR_HR:\n if freq == FreqGroup.FR_NS:\n periodsperday = 24 * 60 * 60 * 1000000000\n elif freq == FreqGroup.FR_US:\n periodsperday = 24 * 60 * 60 * 1000000\n elif freq == FreqGroup.FR_MS:\n periodsperday = 24 * 60 * 60 * 1000\n elif freq == FreqGroup.FR_SEC:\n periodsperday = 24 * 60 * 60\n elif freq == FreqGroup.FR_MIN:\n periodsperday = 24 * 60\n elif freq == FreqGroup.FR_HR:\n periodsperday = 24\n else: # pragma: no cover\n raise ValueError(\"unexpected frequency: %s\" % freq)\n periodsperyear = 365 * periodsperday\n periodspermonth = 28 * periodsperday\n\n elif freq == FreqGroup.FR_BUS:\n periodsperyear = 261\n periodspermonth = 19\n elif freq == FreqGroup.FR_DAY:\n periodsperyear = 365\n periodspermonth = 28\n elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK:\n periodsperyear = 52\n periodspermonth = 3\n else: # pragma: no cover\n raise ValueError(\"unexpected frequency\")\n\n # save this for later usage\n vmin_orig = vmin\n\n (vmin, vmax) = 
(Period(ordinal=int(vmin), freq=freq),\n Period(ordinal=int(vmax), freq=freq))\n span = vmax.ordinal - vmin.ordinal + 1\n dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)\n # Initialize the output\n info = np.zeros(span,\n dtype=[('val', np.int64), ('maj', bool),\n ('min', bool), ('fmt', '|S20')])\n info['val'][:] = dates_.values\n info['fmt'][:] = ''\n info['maj'][[0, -1]] = True\n # .. and set some shortcuts\n info_maj = info['maj']\n info_min = info['min']\n info_fmt = info['fmt']\n\n def first_label(label_flags):\n if (label_flags[0] == 0) and (label_flags.size > 1) and \\\n ((vmin_orig % 1) > 0.0):\n return label_flags[1]\n else:\n return label_flags[0]\n\n # Case 1. Less than a month\n if span <= periodspermonth:\n day_start = period_break(dates_, 'day')\n month_start = period_break(dates_, 'month')\n\n def _hour_finder(label_interval, force_year_start):\n _hour = dates_.hour\n _prev_hour = (dates_ - 1).hour\n hour_start = (_hour - _prev_hour) != 0\n info_maj[day_start] = True\n info_min[hour_start & (_hour % label_interval == 0)] = True\n year_start = period_break(dates_, 'year')\n info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'\n info_fmt[day_start] = '%H:%M\\n%d-%b'\n info_fmt[year_start] = '%H:%M\\n%d-%b\\n%Y'\n if force_year_start and not has_level_label(year_start, vmin_orig):\n info_fmt[first_label(day_start)] = '%H:%M\\n%d-%b\\n%Y'\n\n def _minute_finder(label_interval):\n hour_start = period_break(dates_, 'hour')\n _minute = dates_.minute\n _prev_minute = (dates_ - 1).minute\n minute_start = (_minute - _prev_minute) != 0\n info_maj[hour_start] = True\n info_min[minute_start & (_minute % label_interval == 0)] = True\n year_start = period_break(dates_, 'year')\n info_fmt = info['fmt']\n info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'\n info_fmt[day_start] = '%H:%M\\n%d-%b'\n info_fmt[year_start] = '%H:%M\\n%d-%b\\n%Y'\n\n def _second_finder(label_interval):\n minute_start = period_break(dates_, 'minute')\n _second = dates_.second\n _prev_second = (dates_ - 1).second\n second_start = (_second - _prev_second) != 0\n info['maj'][minute_start] = True\n info['min'][second_start & (_second % label_interval == 0)] = True\n year_start = period_break(dates_, 'year')\n info_fmt = info['fmt']\n info_fmt[second_start & (_second %\n label_interval == 0)] = '%H:%M:%S'\n info_fmt[day_start] = '%H:%M:%S\\n%d-%b'\n info_fmt[year_start] = '%H:%M:%S\\n%d-%b\\n%Y'\n\n if span < periodsperday / 12000.0:\n _second_finder(1)\n elif span < periodsperday / 6000.0:\n _second_finder(2)\n elif span < periodsperday / 2400.0:\n _second_finder(5)\n elif span < periodsperday / 1200.0:\n _second_finder(10)\n elif span < periodsperday / 800.0:\n _second_finder(15)\n elif span < periodsperday / 400.0:\n _second_finder(30)\n elif span < periodsperday / 150.0:\n _minute_finder(1)\n elif span < periodsperday / 70.0:\n _minute_finder(2)\n elif span < periodsperday / 24.0:\n _minute_finder(5)\n elif span < periodsperday / 12.0:\n _minute_finder(15)\n elif span < periodsperday / 6.0:\n _minute_finder(30)\n elif span < periodsperday / 2.5:\n _hour_finder(1, False)\n elif span < periodsperday / 1.5:\n _hour_finder(2, False)\n elif span < periodsperday * 1.25:\n _hour_finder(3, False)\n elif span < periodsperday * 2.5:\n _hour_finder(6, True)\n elif span < periodsperday * 4:\n _hour_finder(12, True)\n else:\n info_maj[month_start] = True\n info_min[day_start] = True\n year_start = period_break(dates_, 'year')\n info_fmt = info['fmt']\n info_fmt[day_start] = '%d'\n 
info_fmt[month_start] = '%d\\n%b'\n info_fmt[year_start] = '%d\\n%b\\n%Y'\n if not has_level_label(year_start, vmin_orig):\n if not has_level_label(month_start, vmin_orig):\n info_fmt[first_label(day_start)] = '%d\\n%b\\n%Y'\n else:\n info_fmt[first_label(month_start)] = '%d\\n%b\\n%Y'\n\n # Case 2. Less than three months\n elif span <= periodsperyear // 4:\n month_start = period_break(dates_, 'month')\n info_maj[month_start] = True\n if freq < FreqGroup.FR_HR:\n info['min'] = True\n else:\n day_start = period_break(dates_, 'day')\n info['min'][day_start] = True\n week_start = period_break(dates_, 'week')\n year_start = period_break(dates_, 'year')\n info_fmt[week_start] = '%d'\n info_fmt[month_start] = '\\n\\n%b'\n info_fmt[year_start] = '\\n\\n%b\\n%Y'\n if not has_level_label(year_start, vmin_orig):\n if not has_level_label(month_start, vmin_orig):\n info_fmt[first_label(week_start)] = '\\n\\n%b\\n%Y'\n else:\n info_fmt[first_label(month_start)] = '\\n\\n%b\\n%Y'\n # Case 3. Less than 14 months ...............\n elif span <= 1.15 * periodsperyear:\n year_start = period_break(dates_, 'year')\n month_start = period_break(dates_, 'month')\n week_start = period_break(dates_, 'week')\n info_maj[month_start] = True\n info_min[week_start] = True\n info_min[year_start] = False\n info_min[month_start] = False\n info_fmt[month_start] = '%b'\n info_fmt[year_start] = '%b\\n%Y'\n if not has_level_label(year_start, vmin_orig):\n info_fmt[first_label(month_start)] = '%b\\n%Y'\n # Case 4. Less than 2.5 years ...............\n elif span <= 2.5 * periodsperyear:\n year_start = period_break(dates_, 'year')\n quarter_start = period_break(dates_, 'quarter')\n month_start = period_break(dates_, 'month')\n info_maj[quarter_start] = True\n info_min[month_start] = True\n info_fmt[quarter_start] = '%b'\n info_fmt[year_start] = '%b\\n%Y'\n # Case 4. Less than 4 years .................\n elif span <= 4 * periodsperyear:\n year_start = period_break(dates_, 'year')\n month_start = period_break(dates_, 'month')\n info_maj[year_start] = True\n info_min[month_start] = True\n info_min[year_start] = False\n\n month_break = dates_[month_start].month\n jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]\n info_fmt[jan_or_jul] = '%b'\n info_fmt[year_start] = '%b\\n%Y'\n # Case 5. Less than 11 years ................\n elif span <= 11 * periodsperyear:\n year_start = period_break(dates_, 'year')\n quarter_start = period_break(dates_, 'quarter')\n info_maj[year_start] = True\n info_min[quarter_start] = True\n info_min[year_start] = False\n info_fmt[year_start] = '%Y'\n # Case 6. 
More than 12 years ................\n else:\n year_start = period_break(dates_, 'year')\n year_break = dates_[year_start].year\n nyears = span / periodsperyear\n (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)\n major_idx = year_start[(year_break % maj_anndef == 0)]\n info_maj[major_idx] = True\n minor_idx = year_start[(year_break % min_anndef == 0)]\n info_min[minor_idx] = True\n info_fmt[major_idx] = '%Y'\n #............................................\n\n return info\n\n\ndef _monthly_finder(vmin, vmax, freq):\n periodsperyear = 12\n\n vmin_orig = vmin\n (vmin, vmax) = (int(vmin), int(vmax))\n span = vmax - vmin + 1\n #..............\n # Initialize the output\n info = np.zeros(span,\n dtype=[('val', int), ('maj', bool), ('min', bool),\n ('fmt', '|S8')])\n info['val'] = np.arange(vmin, vmax + 1)\n dates_ = info['val']\n info['fmt'] = ''\n year_start = (dates_ % 12 == 0).nonzero()[0]\n info_maj = info['maj']\n info_fmt = info['fmt']\n #..............\n if span <= 1.15 * periodsperyear:\n info_maj[year_start] = True\n info['min'] = True\n\n info_fmt[:] = '%b'\n info_fmt[year_start] = '%b\\n%Y'\n\n if not has_level_label(year_start, vmin_orig):\n if dates_.size > 1:\n idx = 1\n else:\n idx = 0\n info_fmt[idx] = '%b\\n%Y'\n #..............\n elif span <= 2.5 * periodsperyear:\n quarter_start = (dates_ % 3 == 0).nonzero()\n info_maj[year_start] = True\n # TODO: Check the following : is it really info['fmt'] ?\n info['fmt'][quarter_start] = True\n info['min'] = True\n\n info_fmt[quarter_start] = '%b'\n info_fmt[year_start] = '%b\\n%Y'\n #..............\n elif span <= 4 * periodsperyear:\n info_maj[year_start] = True\n info['min'] = True\n\n jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)\n info_fmt[jan_or_jul] = '%b'\n info_fmt[year_start] = '%b\\n%Y'\n #..............\n elif span <= 11 * periodsperyear:\n quarter_start = (dates_ % 3 == 0).nonzero()\n info_maj[year_start] = True\n info['min'][quarter_start] = True\n\n info_fmt[year_start] = '%Y'\n #..................\n else:\n nyears = span / periodsperyear\n (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)\n years = dates_[year_start] // 12 + 1\n major_idx = year_start[(years % maj_anndef == 0)]\n info_maj[major_idx] = True\n info['min'][year_start[(years % min_anndef == 0)]] = True\n\n info_fmt[major_idx] = '%Y'\n #..............\n return info\n\n\ndef _quarterly_finder(vmin, vmax, freq):\n periodsperyear = 4\n vmin_orig = vmin\n (vmin, vmax) = (int(vmin), int(vmax))\n span = vmax - vmin + 1\n #............................................\n info = np.zeros(span,\n dtype=[('val', int), ('maj', bool), ('min', bool),\n ('fmt', '|S8')])\n info['val'] = np.arange(vmin, vmax + 1)\n info['fmt'] = ''\n dates_ = info['val']\n info_maj = info['maj']\n info_fmt = info['fmt']\n year_start = (dates_ % 4 == 0).nonzero()[0]\n #..............\n if span <= 3.5 * periodsperyear:\n info_maj[year_start] = True\n info['min'] = True\n\n info_fmt[:] = 'Q%q'\n info_fmt[year_start] = 'Q%q\\n%F'\n if not has_level_label(year_start, vmin_orig):\n if dates_.size > 1:\n idx = 1\n else:\n idx = 0\n info_fmt[idx] = 'Q%q\\n%F'\n #..............\n elif span <= 11 * periodsperyear:\n info_maj[year_start] = True\n info['min'] = True\n info_fmt[year_start] = '%F'\n #..............\n else:\n years = dates_[year_start] // 4 + 1\n nyears = span / periodsperyear\n (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)\n major_idx = year_start[(years % maj_anndef == 0)]\n info_maj[major_idx] = True\n info['min'][year_start[(years % 
min_anndef == 0)]] = True\n info_fmt[major_idx] = '%F'\n #..............\n return info\n\n\ndef _annual_finder(vmin, vmax, freq):\n (vmin, vmax) = (int(vmin), int(vmax + 1))\n span = vmax - vmin + 1\n #..............\n info = np.zeros(span,\n dtype=[('val', int), ('maj', bool), ('min', bool),\n ('fmt', '|S8')])\n info['val'] = np.arange(vmin, vmax + 1)\n info['fmt'] = ''\n dates_ = info['val']\n #..............\n (min_anndef, maj_anndef) = _get_default_annual_spacing(span)\n major_idx = dates_ % maj_anndef == 0\n info['maj'][major_idx] = True\n info['min'][(dates_ % min_anndef == 0)] = True\n info['fmt'][major_idx] = '%Y'\n #..............\n return info\n\n\ndef get_finder(freq):\n if isinstance(freq, compat.string_types):\n freq = frequencies.get_freq(freq)\n fgroup = frequencies.get_freq_group(freq)\n\n if fgroup == FreqGroup.FR_ANN:\n return _annual_finder\n elif fgroup == FreqGroup.FR_QTR:\n return _quarterly_finder\n elif freq == FreqGroup.FR_MTH:\n return _monthly_finder\n elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):\n return _daily_finder\n else: # pragma: no cover\n errmsg = \"Unsupported frequency: %s\" % (freq)\n raise NotImplementedError(errmsg)\n\n\nclass TimeSeries_DateLocator(Locator):\n \"\"\"\n Locates the ticks along an axis controlled by a :class:`Series`.\n\n Parameters\n ----------\n freq : {var}\n Valid frequency specifier.\n minor_locator : {False, True}, optional\n Whether the locator is for minor ticks (True) or not.\n dynamic_mode : {True, False}, optional\n Whether the locator should work in dynamic mode.\n base : {int}, optional\n quarter : {int}, optional\n month : {int}, optional\n day : {int}, optional\n \"\"\"\n\n def __init__(self, freq, minor_locator=False, dynamic_mode=True,\n base=1, quarter=1, month=1, day=1, plot_obj=None):\n if isinstance(freq, compat.string_types):\n freq = frequencies.get_freq(freq)\n self.freq = freq\n self.base = base\n (self.quarter, self.month, self.day) = (quarter, month, day)\n self.isminor = minor_locator\n self.isdynamic = dynamic_mode\n self.offset = 0\n self.plot_obj = plot_obj\n self.finder = get_finder(freq)\n\n def _get_default_locs(self, vmin, vmax):\n \"Returns the default locations of ticks.\"\n\n if self.plot_obj.date_axis_info is None:\n self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)\n\n locator = self.plot_obj.date_axis_info\n\n if self.isminor:\n return np.compress(locator['min'], locator['val'])\n return np.compress(locator['maj'], locator['val'])\n\n def __call__(self):\n 'Return the locations of the ticks.'\n # axis calls Locator.set_axis inside set_m<xxxx>_formatter\n vi = tuple(self.axis.get_view_interval())\n if vi != self.plot_obj.view_interval:\n self.plot_obj.date_axis_info = None\n self.plot_obj.view_interval = vi\n vmin, vmax = vi\n if vmax < vmin:\n vmin, vmax = vmax, vmin\n if self.isdynamic:\n locs = self._get_default_locs(vmin, vmax)\n else: # pragma: no cover\n base = self.base\n (d, m) = divmod(vmin, base)\n vmin = (d + 1) * base\n locs = lrange(vmin, vmax + 1, base)\n return locs\n\n def autoscale(self):\n \"\"\"\n Sets the view limits to the nearest multiples of base that contain the\n data.\n \"\"\"\n # requires matplotlib >= 0.98.0\n (vmin, vmax) = self.axis.get_data_interval()\n\n locs = self._get_default_locs(vmin, vmax)\n (vmin, vmax) = locs[[0, -1]]\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n return nonsingular(vmin, vmax)\n\n#####-------------------------------------------------------------------------\n#---- --- Formatter 
---\n#####-------------------------------------------------------------------------\n\n\nclass TimeSeries_DateFormatter(Formatter):\n \"\"\"\n Formats the ticks along an axis controlled by a :class:`PeriodIndex`.\n\n Parameters\n ----------\n freq : {int, string}\n Valid frequency specifier.\n minor_locator : {False, True}\n Whether the current formatter should apply to minor ticks (True) or\n major ticks (False).\n dynamic_mode : {True, False}\n Whether the formatter works in dynamic mode or not.\n \"\"\"\n\n def __init__(self, freq, minor_locator=False, dynamic_mode=True,\n plot_obj=None):\n if isinstance(freq, compat.string_types):\n freq = frequencies.get_freq(freq)\n self.format = None\n self.freq = freq\n self.locs = []\n self.formatdict = None\n self.isminor = minor_locator\n self.isdynamic = dynamic_mode\n self.offset = 0\n self.plot_obj = plot_obj\n self.finder = get_finder(freq)\n\n def _set_default_format(self, vmin, vmax):\n \"Returns the default ticks spacing.\"\n\n if self.plot_obj.date_axis_info is None:\n self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)\n info = self.plot_obj.date_axis_info\n\n if self.isminor:\n format = np.compress(info['min'] & np.logical_not(info['maj']),\n info)\n else:\n format = np.compress(info['maj'], info)\n self.formatdict = dict([(x, f) for (x, _, _, f) in format])\n return self.formatdict\n\n def set_locs(self, locs):\n 'Sets the locations of the ticks'\n # don't actually use the locs. This is just needed to work with\n # matplotlib. Force to use vmin, vmax\n self.locs = locs\n\n (vmin, vmax) = vi = tuple(self.axis.get_view_interval())\n if vi != self.plot_obj.view_interval:\n self.plot_obj.date_axis_info = None\n self.plot_obj.view_interval = vi\n if vmax < vmin:\n (vmin, vmax) = (vmax, vmin)\n self._set_default_format(vmin, vmax)\n\n def __call__(self, x, pos=0):\n if self.formatdict is None:\n return ''\n else:\n fmt = self.formatdict.pop(x, '')\n return Period(ordinal=int(x), freq=self.freq).strftime(fmt)\n",
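# Illustrative sketch only (not part of the dataset entries above or below): it
# restates the threshold table of _get_default_annual_spacing from the converter
# code above, to show how the (minor, major) annual tick spacing grows with the
# number of years spanned by the axis. The helper name annual_spacing is
# hypothetical and chosen just for this example.

def annual_spacing(nyears):
    # (minor, major) spacing in years, copied from the thresholds above
    if nyears < 11:
        return (1, 1)
    elif nyears < 20:
        return (1, 2)
    elif nyears < 50:
        return (1, 5)
    elif nyears < 100:
        return (5, 10)
    elif nyears < 200:
        return (5, 25)
    elif nyears < 600:
        return (10, 50)
    factor = nyears // 1000 + 1
    return (factor * 20, factor * 100)

for n in (5, 30, 150, 2500):
    print(n, annual_spacing(n))
# expected: 5 (1, 1), 30 (1, 5), 150 (5, 25), 2500 (60, 300)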
"\"\"\"\nData structure for 1-dimensional cross-sectional and time series data\n\"\"\"\nfrom __future__ import division\n\n# pylint: disable=E1101,E1103\n# pylint: disable=W0703,W0622,W0613,W0201\n\nimport types\nimport warnings\n\nfrom numpy import nan, ndarray\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas.core.common import (isnull, notnull, _is_bool_indexer,\n _default_index, _maybe_upcast,\n _asarray_tuplesafe, _infer_dtype_from_scalar,\n is_list_like, _values_from_object,\n _possibly_cast_to_datetime, _possibly_castable,\n _possibly_convert_platform, _try_sort,\n ABCSparseArray, _maybe_match_name, _coerce_to_dtype,\n _ensure_object, SettingWithCopyError,\n _maybe_box_datetimelike, ABCDataFrame)\nfrom pandas.core.index import (Index, MultiIndex, InvalidIndexError,\n _ensure_index)\nfrom pandas.core.indexing import _check_bool_indexer, _maybe_convert_indices\nfrom pandas.core import generic, base\nfrom pandas.core.internals import SingleBlockManager\nfrom pandas.core.categorical import Categorical\nfrom pandas.tseries.index import DatetimeIndex\nfrom pandas.tseries.tdi import TimedeltaIndex\nfrom pandas.tseries.period import PeriodIndex, Period\nfrom pandas import compat\nfrom pandas.util.terminal import get_terminal_size\nfrom pandas.compat import zip, u, OrderedDict\n\nimport pandas.core.ops as ops\nfrom pandas.core.algorithms import select_n\n\nimport pandas.core.common as com\nimport pandas.core.datetools as datetools\nimport pandas.core.format as fmt\nimport pandas.core.nanops as nanops\nfrom pandas.util.decorators import Appender, cache_readonly\n\nimport pandas.lib as lib\nimport pandas.tslib as tslib\nimport pandas.index as _index\n\nfrom numpy import percentile as _quantile\nfrom pandas.core.config import get_option\n\n__all__ = ['Series']\n\n\n_shared_doc_kwargs = dict(\n axes='index',\n klass='Series',\n axes_single_arg=\"{0,'index'}\",\n inplace=\"\"\"inplace : boolean, default False\n If True, performs operation inplace and returns None.\"\"\"\n)\n\n\ndef _coerce_method(converter):\n \"\"\" install the scalar coercion methods \"\"\"\n\n def wrapper(self):\n if len(self) == 1:\n return converter(self.iloc[0])\n raise TypeError(\n \"cannot convert the series to {0}\".format(str(converter)))\n return wrapper\n\n\n#----------------------------------------------------------------------\n# Series class\n\n\nclass Series(base.IndexOpsMixin, generic.NDFrame):\n\n \"\"\"\n One-dimensional ndarray with axis labels (including time series).\n\n Labels need not be unique but must be any hashable type. The object\n supports both integer- and label-based indexing and provides a host of\n methods for performing operations involving the index. Statistical\n methods from ndarray have been overridden to automatically exclude\n missing data (currently represented as NaN)\n\n Operations between Series (+, -, /, *, **) align values based on their\n associated index values-- they need not be the same length. The result\n index will be the sorted union of the two indexes.\n\n Parameters\n ----------\n data : array-like, dict, or scalar value\n Contains data stored in Series\n index : array-like or Index (1d)\n Values must be unique and hashable, same length as data. Index\n object (or other iterable of same length as data) Will default to\n np.arange(len(data)) if not provided. 
If both a dict and index\n sequence are used, the index will override the keys found in the\n dict.\n dtype : numpy.dtype or None\n If None, dtype will be inferred\n copy : boolean, default False\n Copy input data\n \"\"\"\n _metadata = ['name']\n _allow_index_ops = True\n\n def __init__(self, data=None, index=None, dtype=None, name=None,\n copy=False, fastpath=False):\n\n # we are called internally, so short-circuit\n if fastpath:\n\n # data is an ndarray, index is defined\n if not isinstance(data, SingleBlockManager):\n data = SingleBlockManager(data, index, fastpath=True)\n if copy:\n data = data.copy()\n if index is None:\n index = data.index\n\n else:\n\n if index is not None:\n index = _ensure_index(index)\n\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, MultiIndex):\n raise NotImplementedError\n elif isinstance(data, Index):\n # need to copy to avoid aliasing issues\n if name is None:\n name = data.name\n\n data = data._to_embed(keep_tz=True)\n copy = True\n elif isinstance(data, np.ndarray):\n pass\n elif isinstance(data, Series):\n if name is None:\n name = data.name\n if index is None:\n index = data.index\n else:\n data = data.reindex(index, copy=copy)\n data = data._data\n elif isinstance(data, dict):\n if index is None:\n if isinstance(data, OrderedDict):\n index = Index(data)\n else:\n index = Index(_try_sort(data))\n try:\n if isinstance(index, DatetimeIndex):\n # coerce back to datetime objects for lookup\n data = lib.fast_multiget(data, index.astype('O'),\n default=np.nan)\n elif isinstance(index, PeriodIndex):\n data = [data.get(i, nan) for i in index]\n else:\n data = lib.fast_multiget(data, index.values,\n default=np.nan)\n except TypeError:\n data = [data.get(i, nan) for i in index]\n\n elif isinstance(data, SingleBlockManager):\n if index is None:\n index = data.index\n else:\n data = data.reindex(index, copy=copy)\n elif isinstance(data, Categorical):\n if dtype is not None:\n raise ValueError(\"cannot specify a dtype with a Categorical\")\n if name is None:\n name = data.name\n elif (isinstance(data, types.GeneratorType) or\n (compat.PY3 and isinstance(data, map))):\n data = list(data)\n elif isinstance(data, (set, frozenset)):\n raise TypeError(\"{0!r} type is unordered\"\n \"\".format(data.__class__.__name__))\n else:\n\n # handle sparse passed here (and force conversion)\n if isinstance(data, ABCSparseArray):\n data = data.to_dense()\n\n if index is None:\n if not is_list_like(data):\n data = [data]\n index = _default_index(len(data))\n\n # create/copy the manager\n if isinstance(data, SingleBlockManager):\n if dtype is not None:\n data = data.astype(dtype=dtype, raise_on_error=False)\n elif copy:\n data = data.copy()\n else:\n data = _sanitize_array(data, index, dtype, copy,\n raise_cast_failure=True)\n\n data = SingleBlockManager(data, index, fastpath=True)\n\n generic.NDFrame.__init__(self, data, fastpath=True)\n\n object.__setattr__(self, 'name', name)\n self._set_axis(0, index, fastpath=True)\n\n @classmethod\n def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,\n fastpath=False):\n # return a sparse series here\n if isinstance(arr, ABCSparseArray):\n from pandas.sparse.series import SparseSeries\n cls = SparseSeries\n\n return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath)\n\n @property\n def _constructor(self):\n return Series\n\n # types\n @property\n def _can_hold_na(self):\n return self._data._can_hold_na\n\n @property\n def 
is_time_series(self):\n return self._subtyp in ['time_series', 'sparse_time_series']\n\n _index = None\n\n def _set_axis(self, axis, labels, fastpath=False):\n \"\"\" override generic, we want to set the _typ here \"\"\"\n\n if not fastpath:\n labels = _ensure_index(labels)\n\n is_all_dates = labels.is_all_dates\n if is_all_dates:\n if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n labels = DatetimeIndex(labels)\n\n # need to set here becuase we changed the index\n if fastpath:\n self._data.set_axis(axis, labels)\n self._set_subtyp(is_all_dates)\n\n object.__setattr__(self, '_index', labels)\n if not fastpath:\n self._data.set_axis(axis, labels)\n\n def _set_subtyp(self, is_all_dates):\n if is_all_dates:\n object.__setattr__(self, '_subtyp', 'time_series')\n else:\n object.__setattr__(self, '_subtyp', 'series')\n\n def _update_inplace(self, result, **kwargs):\n # we want to call the generic version and not the IndexOpsMixin\n return generic.NDFrame._update_inplace(self, result, **kwargs)\n\n # ndarray compatibility\n @property\n def dtype(self):\n \"\"\" return the dtype object of the underlying data \"\"\"\n return self._data.dtype\n\n @property\n def dtypes(self):\n \"\"\" return the dtype object of the underlying data \"\"\"\n return self._data.dtype\n\n @property\n def ftype(self):\n \"\"\" return if the data is sparse|dense \"\"\"\n return self._data.ftype\n\n @property\n def ftypes(self):\n \"\"\" return if the data is sparse|dense \"\"\"\n return self._data.ftype\n\n @property\n def values(self):\n \"\"\"\n Return Series as ndarray\n\n Returns\n -------\n arr : numpy.ndarray\n \"\"\"\n return self._data.values\n\n def get_values(self):\n \"\"\" same as values (but handles sparseness conversions); is a view \"\"\"\n return self._data.get_values()\n\n\n # ops\n def ravel(self, order='C'):\n \"\"\"\n Return the flattened underlying data as an ndarray\n\n See also\n --------\n numpy.ndarray.ravel\n \"\"\"\n return self.values.ravel(order=order)\n\n def compress(self, condition, axis=0, out=None, **kwargs):\n \"\"\"\n Return selected slices of an array along given axis as a Series\n\n See also\n --------\n numpy.ndarray.compress\n \"\"\"\n return self[condition]\n\n def nonzero(self):\n \"\"\"\n Return the indices of the elements that are non-zero\n\n This method is equivalent to calling `numpy.nonzero` on the\n series data. 
For compatability with NumPy, the return value is\n the same (a tuple with an array of indices for each dimension),\n but it will always be a one-item tuple because series only have\n one dimension.\n\n Examples\n --------\n >>> s = pd.Series([0, 3, 0, 4])\n >>> s.nonzero()\n (array([1, 3]),)\n >>> s.iloc[s.nonzero()[0]]\n 1 3\n 3 4\n dtype: int64\n\n See Also\n --------\n numpy.nonzero\n \"\"\"\n return self.values.nonzero()\n\n def put(self, *args, **kwargs):\n \"\"\"\n return a ndarray with the values put\n\n See also\n --------\n numpy.ndarray.put\n \"\"\"\n self.values.put(*args, **kwargs)\n\n def __len__(self):\n \"\"\"\n return the length of the Series\n \"\"\"\n return len(self._data)\n\n def view(self, dtype=None):\n return self._constructor(self.values.view(dtype),\n index=self.index).__finalize__(self)\n\n def __array__(self, result=None):\n \"\"\"\n the array interface, return my values\n \"\"\"\n return self.get_values()\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc\n \"\"\"\n return self._constructor(result, index=self.index,\n copy=False).__finalize__(self)\n\n def __array_prepare__(self, result, context=None):\n \"\"\"\n Gets called prior to a ufunc\n \"\"\"\n\n # nice error message for non-ufunc types\n if context is not None and not isinstance(self.values, np.ndarray):\n obj = context[1][0]\n raise TypeError(\"{obj} with dtype {dtype} cannot perform \"\n \"the numpy op {op}\".format(obj=type(obj).__name__,\n dtype=getattr(obj,'dtype',None),\n op=context[0].__name__))\n return result\n\n # complex\n @property\n def real(self):\n return self.values.real\n\n @real.setter\n def real(self, v):\n self.values.real = v\n\n @property\n def imag(self):\n return self.values.imag\n\n @imag.setter\n def imag(self, v):\n self.values.imag = v\n\n # coercion\n __float__ = _coerce_method(float)\n __long__ = _coerce_method(int)\n __int__ = _coerce_method(int)\n\n # we are preserving name here\n def __getstate__(self):\n return dict(_data=self._data, name=self.name)\n\n def _unpickle_series_compat(self, state):\n if isinstance(state, dict):\n self._data = state['_data']\n self.name = state['name']\n self.index = self._data.index\n\n elif isinstance(state, tuple):\n\n # < 0.12 series pickle\n\n nd_state, own_state = state\n\n # recreate the ndarray\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n\n # backwards compat\n index, name = own_state[0], None\n if len(own_state) > 1:\n name = own_state[1]\n\n # recreate\n self._data = SingleBlockManager(data, index, fastpath=True)\n self._index = index\n self.name = name\n\n else:\n raise Exception(\"cannot unpickle legacy formats -> [%s]\" % state)\n\n # indexers\n @property\n def axes(self):\n return [self.index]\n\n def _ixs(self, i, axis=0):\n \"\"\"\n Return the i-th value or values in the Series by location\n\n Parameters\n ----------\n i : int, slice, or sequence of integers\n\n Returns\n -------\n value : scalar (int) or Series (slice, sequence)\n \"\"\"\n try:\n\n # dispatch to the values if we need\n values = self.values\n if isinstance(values, np.ndarray):\n return _index.get_value_at(values, i)\n else:\n return values[i]\n except IndexError:\n raise\n except:\n if isinstance(i, slice):\n indexer = self.index._convert_slice_indexer(i, typ='iloc')\n return self._get_values(indexer)\n else:\n label = self.index[i]\n if isinstance(label, Index):\n return self.take(i, axis=axis, convert=True)\n else:\n return _index.get_value_at(self, i)\n\n @property\n 
def _is_mixed_type(self):\n return False\n\n def _slice(self, slobj, axis=0, typ=None):\n slobj = self.index._convert_slice_indexer(slobj, typ=typ or 'getitem')\n return self._get_values(slobj)\n\n def __getitem__(self, key):\n try:\n result = self.index.get_value(self, key)\n\n if not np.isscalar(result):\n if is_list_like(result) and not isinstance(result, Series):\n\n # we need to box if we have a non-unique index here\n # otherwise have inline ndarray/lists\n if not self.index.is_unique:\n result = self._constructor(result,\n index=[key]*len(result)\n ,dtype=self.dtype).__finalize__(self)\n\n return result\n except InvalidIndexError:\n pass\n except (KeyError, ValueError):\n if isinstance(key, tuple) and isinstance(self.index, MultiIndex):\n # kludge\n pass\n elif key is Ellipsis:\n return self\n elif _is_bool_indexer(key):\n pass\n else:\n\n # we can try to coerce the indexer (or this will raise)\n new_key = self.index._convert_scalar_indexer(key)\n if type(new_key) != type(key):\n return self.__getitem__(new_key)\n raise\n\n except Exception:\n raise\n\n if com.is_iterator(key):\n key = list(key)\n\n if _is_bool_indexer(key):\n key = _check_bool_indexer(self.index, key)\n\n return self._get_with(key)\n\n def _get_with(self, key):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n indexer = self.index._convert_slice_indexer(key, typ='getitem')\n return self._get_values(indexer)\n elif isinstance(key, ABCDataFrame):\n raise TypeError('Indexing a Series with DataFrame is not supported, '\\\n 'use the appropriate DataFrame column')\n else:\n if isinstance(key, tuple):\n try:\n return self._get_values_tuple(key)\n except:\n if len(key) == 1:\n key = key[0]\n if isinstance(key, slice):\n return self._get_values(key)\n raise\n\n # pragma: no cover\n if not isinstance(key, (list, np.ndarray, Series, Index)):\n key = list(key)\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n else:\n key_type = lib.infer_dtype(key)\n\n if key_type == 'integer':\n if self.index.is_integer() or self.index.is_floating():\n return self.reindex(key)\n else:\n return self._get_values(key)\n elif key_type == 'boolean':\n return self._get_values(key)\n else:\n try:\n # handle the dup indexing case (GH 4246)\n if isinstance(key, (list, tuple)):\n return self.ix[key]\n\n return self.reindex(key)\n except Exception:\n # [slice(0, 5, None)] will break if you convert to ndarray,\n # e.g. 
as requested by np.median\n # hack\n if isinstance(key[0], slice):\n return self._get_values(key)\n raise\n\n def _get_values_tuple(self, key):\n # mpl hackaround\n if any(k is None for k in key):\n return self._get_values(key)\n\n if not isinstance(self.index, MultiIndex):\n raise ValueError('Can only tuple-index with a MultiIndex')\n\n # If key is contained, would have returned by now\n indexer, new_index = self.index.get_loc_level(key)\n return self._constructor(self.values[indexer],\n index=new_index).__finalize__(self)\n\n def _get_values(self, indexer):\n try:\n return self._constructor(self._data.get_slice(indexer),\n fastpath=True).__finalize__(self)\n except Exception:\n return self.values[indexer]\n\n def __setitem__(self, key, value):\n\n def setitem(key, value):\n try:\n self._set_with_engine(key, value)\n return\n except (SettingWithCopyError):\n raise\n except (KeyError, ValueError):\n values = self.values\n if (com.is_integer(key)\n and not self.index.inferred_type == 'integer'):\n\n values[key] = value\n return\n elif key is Ellipsis:\n self[:] = value\n return\n elif _is_bool_indexer(key):\n pass\n elif com.is_timedelta64_dtype(self.dtype):\n # reassign a null value to iNaT\n if isnull(value):\n value = tslib.iNaT\n\n try:\n self.index._engine.set_value(self.values, key, value)\n return\n except (TypeError):\n pass\n\n self.loc[key] = value\n return\n\n except TypeError as e:\n if isinstance(key, tuple) and not isinstance(self.index,\n MultiIndex):\n raise ValueError(\"Can only tuple-index with a MultiIndex\")\n\n # python 3 type errors should be raised\n if 'unorderable' in str(e): # pragma: no cover\n raise IndexError(key)\n\n if _is_bool_indexer(key):\n key = _check_bool_indexer(self.index, key)\n try:\n self.where(~key, value, inplace=True)\n return\n except (InvalidIndexError):\n pass\n\n self._set_with(key, value)\n\n # do the setitem\n cacher_needs_updating = self._check_is_chained_assignment_possible()\n setitem(key, value)\n if cacher_needs_updating:\n self._maybe_update_cacher()\n\n def _set_with_engine(self, key, value):\n values = self.values\n try:\n self.index._engine.set_value(values, key, value)\n return\n except KeyError:\n values[self.index.get_loc(key)] = value\n return\n\n def _set_with(self, key, value):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n indexer = self.index._convert_slice_indexer(key, typ='getitem')\n return self._set_values(indexer, value)\n else:\n if isinstance(key, tuple):\n try:\n self._set_values(key, value)\n except Exception:\n pass\n\n if not isinstance(key, (list, Series, np.ndarray, Series)):\n try:\n key = list(key)\n except:\n key = [ key ]\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n else:\n key_type = lib.infer_dtype(key)\n\n if key_type == 'integer':\n if self.index.inferred_type == 'integer':\n self._set_labels(key, value)\n else:\n return self._set_values(key, value)\n elif key_type == 'boolean':\n self._set_values(key.astype(np.bool_), value)\n else:\n self._set_labels(key, value)\n\n def _set_labels(self, key, value):\n if isinstance(key, Index):\n key = key.values\n else:\n key = _asarray_tuplesafe(key)\n indexer = self.index.get_indexer(key)\n mask = indexer == -1\n if mask.any():\n raise ValueError('%s not contained in the index'\n % str(key[mask]))\n self._set_values(indexer, value)\n\n def _set_values(self, key, value):\n if isinstance(key, Series):\n key = key.values\n self._data = self._data.setitem(indexer=key, value=value)\n self._maybe_update_cacher()\n\n # help out 
SparseSeries\n _get_val_at = ndarray.__getitem__\n\n def repeat(self, reps):\n \"\"\"\n return a new Series with the values repeated reps times\n\n See also\n --------\n numpy.ndarray.repeat\n \"\"\"\n new_index = self.index.repeat(reps)\n new_values = self.values.repeat(reps)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def reshape(self, *args, **kwargs):\n \"\"\"\n return an ndarray with the values shape\n if the specified shape matches exactly the current shape, then\n return self (for compat)\n\n See also\n --------\n numpy.ndarray.take\n \"\"\"\n if len(args) == 1 and hasattr(args[0], '__iter__'):\n shape = args[0]\n else:\n shape = args\n\n if tuple(shape) == self.shape:\n # XXX ignoring the \"order\" keyword.\n return self\n\n return self.values.reshape(shape, **kwargs)\n\n iget_value = _ixs\n iget = _ixs\n irow = _ixs\n\n def get_value(self, label, takeable=False):\n \"\"\"\n Quickly retrieve single value at passed index label\n\n Parameters\n ----------\n index : label\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n if takeable is True:\n return _maybe_box_datetimelike(self.values[label])\n return self.index.get_value(self.values, label)\n\n def set_value(self, label, value, takeable=False):\n \"\"\"\n Quickly set single value at passed label. If label is not contained, a\n new object is created with the label placed at the end of the result\n index\n\n Parameters\n ----------\n label : object\n Partial indexing with MultiIndex not allowed\n value : object\n Scalar value\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n series : Series\n If label is contained, will be reference to calling Series,\n otherwise a new object\n \"\"\"\n try:\n if takeable:\n self.values[label] = value\n else:\n self.index._engine.set_value(self.values, label, value)\n return self\n except KeyError:\n\n # set using a non-recursive method\n self.loc[label] = value\n return self\n\n def reset_index(self, level=None, drop=False, name=None, inplace=False):\n \"\"\"\n Analogous to the :meth:`pandas.DataFrame.reset_index` function, see\n docstring there.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. 
Removes all levels by\n default\n drop : boolean, default False\n Do not try to insert index into dataframe columns\n name : object, default None\n The name of the column corresponding to the Series values\n inplace : boolean, default False\n Modify the Series in place (do not create a new object)\n\n Returns\n ----------\n resetted : DataFrame, or Series if drop == True\n \"\"\"\n if drop:\n new_index = np.arange(len(self))\n if level is not None and isinstance(self.index, MultiIndex):\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < len(self.index.levels):\n new_index = self.index.droplevel(level)\n\n if inplace:\n self.index = new_index\n # set name if it was passed, otherwise, keep the previous name\n self.name = name or self.name\n else:\n return self._constructor(self.values.copy(),\n index=new_index).__finalize__(self)\n elif inplace:\n raise TypeError('Cannot reset_index inplace on a Series '\n 'to create a DataFrame')\n else:\n df = self.to_frame(name)\n return df.reset_index(level=level, drop=drop)\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular DataFrame\n\n Invoked by unicode(df) in py2 only. Yields a Unicode String in both\n py2/py3.\n \"\"\"\n width, height = get_terminal_size()\n max_rows = (height if get_option(\"display.max_rows\") == 0\n else get_option(\"display.max_rows\"))\n if max_rows and len(self.index) > max_rows:\n result = self._tidy_repr(min(30, max_rows - 4))\n elif len(self.index) > 0:\n result = self._get_repr(print_header=True,\n length=len(self) > 50,\n name=True,\n dtype=True)\n elif self.name is None:\n result = u('Series([], dtype: %s)') % (self.dtype)\n else:\n result = u('Series([], name: %s, dtype: %s)') % (self.name,\n self.dtype)\n return result\n\n def _tidy_repr(self, max_vals=20):\n \"\"\"\n\n Internal function, should always return unicode string\n \"\"\"\n if max_vals > 1:\n num = max_vals // 2\n else:\n num = 1\n max_vals = 2\n head = self.iloc[:num]._get_repr(print_header=True, length=False,\n dtype=False, name=False)\n tail = self.iloc[-(max_vals - num):]._get_repr(print_header=False,\n length=False,\n name=False,\n dtype=False)\n result = head + '\\n...\\n' + tail\n result = '%s\\n%s' % (result, self._repr_footer())\n\n return compat.text_type(result)\n\n def _repr_footer(self):\n\n namestr = u(\"Name: %s, \") % com.pprint_thing(\n self.name) if self.name is not None else \"\"\n\n # time series\n if self.is_time_series:\n if self.index.freq is not None:\n freqstr = u('Freq: %s, ') % self.index.freqstr\n else:\n freqstr = u('')\n\n return u('%s%sLength: %d') % (freqstr, namestr, len(self))\n\n # Categorical\n if com.is_categorical_dtype(self.dtype):\n level_info = self.values._repr_categories_info()\n return u('%sLength: %d, dtype: %s\\n%s') % (namestr,\n len(self),\n str(self.dtype.name),\n level_info)\n\n # reg series\n return u('%sLength: %d, dtype: %s') % (namestr,\n len(self),\n str(self.dtype.name))\n\n def to_string(self, buf=None, na_rep='NaN', float_format=None,\n length=False, dtype=False, name=False):\n \"\"\"\n Render a string representation of the Series\n\n Parameters\n ----------\n buf : StringIO-like, optional\n buffer to write to\n na_rep : string, optional\n string representation of NAN to use, default 'NaN'\n float_format : one-parameter function, optional\n formatter function to apply to columns' elements if they are floats\n default None\n length : boolean, default False\n Add the Series 
length\n dtype : boolean, default False\n Add the Series dtype\n name : boolean, default False\n Add the Series name (which may be None)\n\n Returns\n -------\n formatted : string (if not buffer passed)\n \"\"\"\n\n the_repr = self._get_repr(float_format=float_format, na_rep=na_rep,\n length=length, dtype=dtype, name=name)\n\n # catch contract violations\n if not isinstance(the_repr, compat.text_type):\n raise AssertionError(\"result must be of type unicode, type\"\n \" of result is {0!r}\"\n \"\".format(the_repr.__class__.__name__))\n\n if buf is None:\n return the_repr\n else:\n try:\n buf.write(the_repr)\n except AttributeError:\n with open(buf, 'w') as f:\n f.write(the_repr)\n\n def _get_repr(\n self, name=False, print_header=False, length=True, dtype=True,\n na_rep='NaN', float_format=None):\n \"\"\"\n\n Internal function, should always return unicode string\n \"\"\"\n\n formatter = fmt.SeriesFormatter(self, name=name, header=print_header,\n length=length, dtype=dtype,\n na_rep=na_rep,\n float_format=float_format)\n result = formatter.to_string()\n\n # TODO: following check prob. not neces.\n if not isinstance(result, compat.text_type):\n raise AssertionError(\"result must be of type unicode, type\"\n \" of result is {0!r}\"\n \"\".format(result.__class__.__name__))\n return result\n\n def __iter__(self):\n if com.is_categorical_dtype(self.dtype):\n return iter(self.values)\n elif np.issubdtype(self.dtype, np.datetime64):\n return (lib.Timestamp(x) for x in self.values)\n elif np.issubdtype(self.dtype, np.timedelta64):\n return (lib.Timedelta(x) for x in self.values)\n else:\n return iter(self.values)\n\n def iteritems(self):\n \"\"\"\n Lazily iterate over (index, value) tuples\n \"\"\"\n return zip(iter(self.index), iter(self))\n\n if compat.PY3: # pragma: no cover\n items = iteritems\n\n #----------------------------------------------------------------------\n # Misc public methods\n\n def keys(self):\n \"Alias for index\"\n return self.index\n\n def tolist(self):\n \"\"\" Convert Series to a nested list \"\"\"\n return list(self)\n\n def to_dict(self):\n \"\"\"\n Convert Series to {label -> value} dict\n\n Returns\n -------\n value_dict : dict\n \"\"\"\n return dict(compat.iteritems(self))\n\n def to_frame(self, name=None):\n \"\"\"\n Convert Series to DataFrame\n\n Parameters\n ----------\n name : object, default None\n The passed name should substitute for the series name (if it has\n one).\n\n Returns\n -------\n data_frame : DataFrame\n \"\"\"\n from pandas.core.frame import DataFrame\n if name is None:\n df = DataFrame(self)\n else:\n df = DataFrame({name: self})\n\n return df\n\n def to_sparse(self, kind='block', fill_value=None):\n \"\"\"\n Convert Series to SparseSeries\n\n Parameters\n ----------\n kind : {'block', 'integer'}\n fill_value : float, defaults to NaN (missing)\n\n Returns\n -------\n sp : SparseSeries\n \"\"\"\n from pandas.core.sparse import SparseSeries\n return SparseSeries(self, kind=kind,\n fill_value=fill_value).__finalize__(self)\n\n #----------------------------------------------------------------------\n # Statistics, overridden ndarray methods\n\n # TODO: integrate bottleneck\n\n def count(self, level=None):\n \"\"\"\n Return number of non-NA/null observations in the Series\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series\n\n Returns\n -------\n nobs : int or Series (if level specified)\n \"\"\"\n if level is not 
None:\n mask = notnull(self.values)\n\n if isinstance(level, compat.string_types):\n level = self.index._get_level_number(level)\n\n level_index = self.index.levels[level]\n\n if len(self) == 0:\n return self._constructor(0, index=level_index)\\\n .__finalize__(self)\n\n # call cython function\n max_bin = len(level_index)\n labels = com._ensure_int64(self.index.labels[level])\n counts = lib.count_level_1d(mask.view(np.uint8),\n labels, max_bin)\n return self._constructor(counts,\n index=level_index).__finalize__(self)\n\n return notnull(_values_from_object(self)).sum()\n\n def mode(self):\n \"\"\"Returns the mode(s) of the dataset.\n\n Empty if nothing occurs at least 2 times. Always returns Series even\n if only one value.\n\n Parameters\n ----------\n sort : bool, default True\n If True, will lexicographically sort values, if False skips\n sorting. Result ordering when ``sort=False`` is not defined.\n\n Returns\n -------\n modes : Series (sorted)\n \"\"\"\n # TODO: Add option for bins like value_counts()\n from pandas.core.algorithms import mode\n return mode(self)\n\n @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)\n def drop_duplicates(self, take_last=False, inplace=False):\n return super(Series, self).drop_duplicates(take_last=take_last,\n inplace=inplace)\n\n @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)\n def duplicated(self, take_last=False):\n return super(Series, self).duplicated(take_last=take_last)\n\n def idxmin(self, axis=None, out=None, skipna=True):\n \"\"\"\n Index of first occurrence of minimum of values.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values\n\n Returns\n -------\n idxmin : Index of minimum of values\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``.\n\n See Also\n --------\n DataFrame.idxmin\n numpy.ndarray.argmin\n \"\"\"\n i = nanops.nanargmin(_values_from_object(self), skipna=skipna)\n if i == -1:\n return np.nan\n return self.index[i]\n\n def idxmax(self, axis=None, out=None, skipna=True):\n \"\"\"\n Index of first occurrence of maximum of values.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values\n\n Returns\n -------\n idxmax : Index of maximum of values\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmax``.\n\n See Also\n --------\n DataFrame.idxmax\n numpy.ndarray.argmax\n \"\"\"\n i = nanops.nanargmax(_values_from_object(self), skipna=skipna)\n if i == -1:\n return np.nan\n return self.index[i]\n\n # ndarray compat\n argmin = idxmin\n argmax = idxmax\n\n @Appender(np.ndarray.round.__doc__)\n def round(self, decimals=0, out=None):\n \"\"\"\n\n \"\"\"\n result = _values_from_object(self).round(decimals, out=out)\n if out is None:\n result = self._constructor(result,\n index=self.index).__finalize__(self)\n\n return result\n\n def quantile(self, q=0.5):\n \"\"\"\n Return value at the given quantile, a la numpy.percentile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute\n\n Returns\n -------\n quantile : float or Series\n if ``q`` is an array, a Series will be returned where the\n index is ``q`` and the values are the quantiles.\n\n Examples\n --------\n\n >>> s = Series([1, 2, 3, 4])\n >>> s.quantile(.5)\n 2.5\n >>> s.quantile([.25, .5, .75])\n 0.25 1.75\n 0.50 2.50\n 0.75 3.25\n dtype: float64\n \"\"\"\n valid = self.dropna()\n\n def multi(values, qs):\n if com.is_list_like(qs):\n return Series([_quantile(values, x*100)\n 
for x in qs], index=qs)\n else:\n return _quantile(values, qs*100)\n\n return self._maybe_box(lambda values: multi(values, q), dropna=True)\n\n def ptp(self, axis=None, out=None):\n return _values_from_object(self).ptp(axis, out)\n\n def corr(self, other, method='pearson',\n min_periods=None):\n \"\"\"\n Compute correlation with `other` Series, excluding missing values\n\n Parameters\n ----------\n other : Series\n method : {'pearson', 'kendall', 'spearman'}\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n min_periods : int, optional\n Minimum number of observations needed to have a valid result\n\n\n Returns\n -------\n correlation : float\n \"\"\"\n this, other = self.align(other, join='inner', copy=False)\n if len(this) == 0:\n return np.nan\n return nanops.nancorr(this.values, other.values, method=method,\n min_periods=min_periods)\n\n def cov(self, other, min_periods=None):\n \"\"\"\n Compute covariance with Series, excluding missing values\n\n Parameters\n ----------\n other : Series\n min_periods : int, optional\n Minimum number of observations needed to have a valid result\n\n Returns\n -------\n covariance : float\n\n Normalized by N-1 (unbiased estimator).\n \"\"\"\n this, other = self.align(other, join='inner', copy=False)\n if len(this) == 0:\n return np.nan\n return nanops.nancov(this.values, other.values,\n min_periods=min_periods)\n\n def diff(self, periods=1):\n \"\"\"\n 1st discrete difference of object\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming difference\n\n Returns\n -------\n diffed : Series\n \"\"\"\n result = com.diff(_values_from_object(self), periods)\n return self._constructor(result, index=self.index).__finalize__(self)\n\n def autocorr(self):\n \"\"\"\n Lag-1 autocorrelation\n\n Returns\n -------\n autocorr : float\n \"\"\"\n return self.corr(self.shift(1))\n\n def dot(self, other):\n \"\"\"\n Matrix multiplication with DataFrame or inner-product with Series\n objects\n\n Parameters\n ----------\n other : Series or DataFrame\n\n Returns\n -------\n dot_product : scalar or Series\n \"\"\"\n from pandas.core.frame import DataFrame\n if isinstance(other, (Series, DataFrame)):\n common = self.index.union(other.index)\n if (len(common) > len(self.index) or\n len(common) > len(other.index)):\n raise ValueError('matrices are not aligned')\n\n left = self.reindex(index=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[0] != rvals.shape[0]:\n raise Exception('Dot product shape mismatch, %s vs %s' %\n (lvals.shape, rvals.shape))\n\n if isinstance(other, DataFrame):\n return self._constructor(np.dot(lvals, rvals),\n index=other.columns).__finalize__(self)\n elif isinstance(other, Series):\n return np.dot(lvals, rvals)\n elif isinstance(rvals, np.ndarray):\n return np.dot(lvals, rvals)\n else: # pragma: no cover\n raise TypeError('unsupported type: %s' % type(other))\n\n def searchsorted(self, v, side='left', sorter=None):\n \"\"\"Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted Series `self` such that, if the\n corresponding elements in `v` were inserted before the indices, the\n order of `self` would be preserved.\n\n Parameters\n ----------\n v : array_like\n Values to insert into `a`.\n side : {'left', 'right'}, optional\n If 'left', 
the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `a`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort `self` into ascending\n order. They are typically the result of ``np.argsort``.\n\n Returns\n -------\n indices : array of ints\n Array of insertion points with the same shape as `v`.\n\n See Also\n --------\n Series.sort\n Series.order\n numpy.searchsorted\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n Examples\n --------\n >>> x = pd.Series([1, 2, 3])\n >>> x\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> x.searchsorted(4)\n array([3])\n >>> x.searchsorted([0, 4])\n array([0, 3])\n >>> x.searchsorted([1, 3], side='left')\n array([0, 2])\n >>> x.searchsorted([1, 3], side='right')\n array([1, 3])\n >>> x.searchsorted([1, 2], side='right', sorter=[0, 2, 1])\n array([1, 3])\n \"\"\"\n if sorter is not None:\n sorter = com._ensure_platform_int(sorter)\n\n return self.values.searchsorted(Series(v).values, side=side,\n sorter=sorter)\n\n #------------------------------------------------------------------------------\n # Combination\n\n def append(self, to_append, verify_integrity=False):\n \"\"\"\n Concatenate two or more Series. The indexes must not overlap\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n verify_integrity : boolean, default False\n If True, raise Exception on creating index with duplicates\n\n Returns\n -------\n appended : Series\n \"\"\"\n from pandas.tools.merge import concat\n\n if isinstance(to_append, (list, tuple)):\n to_concat = [self] + to_append\n else:\n to_concat = [self, to_append]\n return concat(to_concat, ignore_index=False,\n verify_integrity=verify_integrity)\n\n def _binop(self, other, func, level=None, fill_value=None):\n \"\"\"\n Perform generic binary operation with optional fill value\n\n Parameters\n ----------\n other : Series\n func : binary operator\n fill_value : float or object\n Value to substitute for NA/null values. 
If both Series are NA in a\n location, the result will be NA regardless of the passed fill value\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n\n Returns\n -------\n combined : Series\n \"\"\"\n if not isinstance(other, Series):\n raise AssertionError('Other operand must be Series')\n\n new_index = self.index\n this = self\n\n if not self.index.equals(other.index):\n this, other = self.align(other, level=level, join='outer', copy=False)\n new_index = this.index\n\n this_vals = this.values\n other_vals = other.values\n\n if fill_value is not None:\n this_mask = isnull(this_vals)\n other_mask = isnull(other_vals)\n this_vals = this_vals.copy()\n other_vals = other_vals.copy()\n\n # one but not both\n mask = this_mask ^ other_mask\n this_vals[this_mask & mask] = fill_value\n other_vals[other_mask & mask] = fill_value\n\n result = func(this_vals, other_vals)\n name = _maybe_match_name(self, other)\n return self._constructor(result, index=new_index).__finalize__(self)\n\n def combine(self, other, func, fill_value=nan):\n \"\"\"\n Perform elementwise binary operation on two Series using given function\n with optional fill value when an index is missing from one Series or\n the other\n\n Parameters\n ----------\n other : Series or scalar value\n func : function\n fill_value : scalar value\n\n Returns\n -------\n result : Series\n \"\"\"\n if isinstance(other, Series):\n new_index = self.index.union(other.index)\n new_name = _maybe_match_name(self, other)\n new_values = np.empty(len(new_index), dtype=self.dtype)\n for i, idx in enumerate(new_index):\n lv = self.get(idx, fill_value)\n rv = other.get(idx, fill_value)\n new_values[i] = func(lv, rv)\n else:\n new_index = self.index\n new_values = func(self.values, other)\n new_name = self.name\n return self._constructor(new_values, index=new_index, name=new_name)\n\n def combine_first(self, other):\n \"\"\"\n Combine Series values, choosing the calling Series's values\n first. Result index will be the union of the two indexes\n\n Parameters\n ----------\n other : Series\n\n Returns\n -------\n y : Series\n \"\"\"\n new_index = self.index.union(other.index)\n this = self.reindex(new_index, copy=False)\n other = other.reindex(new_index, copy=False)\n name = _maybe_match_name(self, other)\n rs_vals = com._where_compat(isnull(this), other.values, this.values)\n return self._constructor(rs_vals, index=new_index).__finalize__(self)\n\n def update(self, other):\n \"\"\"\n Modify Series in place using non-NA values from passed\n Series. Aligns on index\n\n Parameters\n ----------\n other : Series\n \"\"\"\n other = other.reindex_like(self)\n mask = notnull(other)\n\n self._data = self._data.putmask(mask=mask, new=other, inplace=True)\n self._maybe_update_cacher()\n\n #----------------------------------------------------------------------\n # Reindexing, sorting\n\n def sort_index(self, ascending=True):\n \"\"\"\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n ascending : boolean or list, default True\n Sort ascending vs. descending. 
Specify list for multiple sort\n orders\n\n Examples\n --------\n >>> result1 = s.sort_index(ascending=False)\n >>> result2 = s.sort_index(ascending=[1, 0])\n\n Returns\n -------\n sorted_obj : Series\n \"\"\"\n index = self.index\n if isinstance(index, MultiIndex):\n from pandas.core.groupby import _lexsort_indexer\n indexer = _lexsort_indexer(index.labels, orders=ascending)\n indexer = com._ensure_platform_int(indexer)\n new_labels = index.take(indexer)\n else:\n new_labels, indexer = index.order(return_indexer=True,\n ascending=ascending)\n\n new_values = self.values.take(indexer)\n return self._constructor(new_values,\n index=new_labels).__finalize__(self)\n\n def argsort(self, axis=0, kind='quicksort', order=None):\n \"\"\"\n Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values\n\n Parameters\n ----------\n axis : int (can only be zero)\n kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See np.sort for more\n information. 'mergesort' is the only stable algorithm\n order : ignored\n\n Returns\n -------\n argsorted : Series, with -1 indicated where nan values are present\n\n See also\n --------\n numpy.ndarray.argsort\n \"\"\"\n values = self.values\n mask = isnull(values)\n\n if mask.any():\n result = Series(\n -1, index=self.index, name=self.name, dtype='int64')\n notmask = ~mask\n result[notmask] = np.argsort(values[notmask], kind=kind)\n return self._constructor(result,\n index=self.index).__finalize__(self)\n else:\n return self._constructor(\n np.argsort(values, kind=kind), index=self.index,\n dtype='int64').__finalize__(self)\n\n def rank(self, method='average', na_option='keep', ascending=True,\n pct=False):\n \"\"\"\n Compute data ranks (1 through n). Equal values are assigned a rank that\n is the average of the ranks of those values\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n na_option : {'keep'}\n keep: leave NA values where they are\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : Series\n \"\"\"\n from pandas.core.algorithms import rank\n ranks = rank(self.values, method=method, na_option=na_option,\n ascending=ascending, pct=pct)\n return self._constructor(ranks, index=self.index).__finalize__(self)\n\n def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True):\n \"\"\"\n Sort values and index labels by value. This is an inplace sort by default.\n Series.order is the equivalent but returns a new Series.\n\n Parameters\n ----------\n axis : int (can only be zero)\n ascending : boolean, default True\n Sort ascending. Passing False sorts descending\n kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See np.sort for more\n information. 
'mergesort' is the only stable algorithm\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n inplace : boolean, default True\n Do operation in place.\n\n See Also\n --------\n Series.order\n \"\"\"\n return self.order(ascending=ascending,\n kind=kind,\n na_position=na_position,\n inplace=inplace)\n\n def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last', inplace=False):\n \"\"\"\n Sorts Series object, by value, maintaining index-value link.\n This will return a new Series by default. Series.sort is the equivalent but as an inplace method.\n\n Parameters\n ----------\n na_last : boolean (optional, default=True) (DEPRECATED; use na_position)\n Put NaN's at beginning or end\n ascending : boolean, default True\n Sort ascending. Passing False sorts descending\n kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See np.sort for more\n information. 'mergesort' is the only stable algorithm\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n inplace : boolean, default False\n Do operation in place.\n\n Returns\n -------\n y : Series\n\n See Also\n --------\n Series.sort\n \"\"\"\n\n # GH 5856/5853\n if inplace and self._is_cached:\n raise ValueError(\"This Series is a view of some other array, to \"\n \"sort in-place you must create a copy\")\n\n if na_last is not None:\n warnings.warn((\"na_last is deprecated. Please use na_position instead\"),\n FutureWarning)\n na_position = 'last' if na_last else 'first'\n\n def _try_kind_sort(arr):\n # easier to ask forgiveness than permission\n try:\n # if kind==mergesort, it can fail for object dtype\n return arr.argsort(kind=kind)\n except TypeError:\n # stable sort not available for object dtype\n # uses the argsort default quicksort\n return arr.argsort(kind='quicksort')\n\n arr = self.values\n sortedIdx = np.empty(len(self), dtype=np.int32)\n\n bad = isnull(arr)\n\n good = ~bad\n idx = np.arange(len(self))\n\n argsorted = _try_kind_sort(arr[good])\n\n if not ascending:\n argsorted = argsorted[::-1]\n\n if na_position == 'last':\n n = good.sum()\n sortedIdx[:n] = idx[good][argsorted]\n sortedIdx[n:] = idx[bad]\n elif na_position == 'first':\n n = bad.sum()\n sortedIdx[n:] = idx[good][argsorted]\n sortedIdx[:n] = idx[bad]\n else:\n raise ValueError('invalid na_position: {!r}'.format(na_position))\n\n result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])\n\n if inplace:\n self._update_inplace(result)\n else:\n return result.__finalize__(self)\n\n def nlargest(self, n=5, take_last=False):\n \"\"\"Return the largest `n` elements.\n\n Parameters\n ----------\n n : int\n Return this many descending sorted values\n take_last : bool\n Where there are duplicate values, take the last duplicate\n\n Returns\n -------\n top_n : Series\n The n largest values in the Series, in sorted order\n\n Notes\n -----\n Faster than ``.order(ascending=False).head(n)`` for small `n` relative\n to the size of the ``Series`` object.\n\n See Also\n --------\n Series.nsmallest\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> s = pd.Series(np.random.randn(1e6))\n >>> s.nlargest(10) # only sorts up to the N requested\n \"\"\"\n return select_n(self, n=n, take_last=take_last, method='nlargest')\n\n def nsmallest(self, n=5, take_last=False):\n \"\"\"Return the smallest `n` elements.\n\n Parameters\n ----------\n n 
: int\n Return this many ascending sorted values\n take_last : bool\n Where there are duplicate values, take the last duplicate\n\n Returns\n -------\n bottom_n : Series\n The n smallest values in the Series, in sorted order\n\n Notes\n -----\n Faster than ``.order().head(n)`` for small `n` relative to\n the size of the ``Series`` object.\n\n See Also\n --------\n Series.nlargest\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> s = pd.Series(np.random.randn(1e6))\n >>> s.nsmallest(10) # only sorts up to the N requested\n \"\"\"\n return select_n(self, n=n, take_last=take_last, method='nsmallest')\n\n def sortlevel(self, level=0, ascending=True, sort_remaining=True):\n \"\"\"\n Sort Series with MultiIndex by chosen level. Data will be\n lexicographically sorted by the chosen level followed by the other\n levels (in order)\n\n Parameters\n ----------\n level : int or level name, default None\n ascending : bool, default True\n\n Returns\n -------\n sorted : Series\n \"\"\"\n if not isinstance(self.index, MultiIndex):\n raise TypeError('can only sort by level with a hierarchical index')\n\n new_index, indexer = self.index.sortlevel(level, ascending=ascending,\n sort_remaining=sort_remaining)\n new_values = self.values.take(indexer)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def swaplevel(self, i, j, copy=True):\n \"\"\"\n Swap levels i and j in a MultiIndex\n\n Parameters\n ----------\n i, j : int, string (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : Series\n \"\"\"\n new_index = self.index.swaplevel(i, j)\n return self._constructor(self.values, index=new_index,\n copy=copy).__finalize__(self)\n\n def reorder_levels(self, order):\n \"\"\"\n Rearrange index levels using input order. May not drop or duplicate\n levels\n\n Parameters\n ----------\n order: list of int representing new level order.\n (reference level by number or key)\n axis: where to reorder levels\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n if not isinstance(self.index, MultiIndex): # pragma: no cover\n raise Exception('Can only reorder levels on a hierarchical axis.')\n\n result = self.copy()\n result.index = result.index.reorder_levels(order)\n return result\n\n def unstack(self, level=-1):\n \"\"\"\n Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default last level\n Level(s) to unstack, can pass level name\n\n Examples\n --------\n >>> s\n one a 1.\n one b 2.\n two a 3.\n two b 4.\n\n >>> s.unstack(level=-1)\n a b\n one 1. 2.\n two 3. 4.\n\n >>> s.unstack(level=0)\n one two\n a 1. 2.\n b 3. 
4.\n\n Returns\n -------\n unstacked : DataFrame\n \"\"\"\n from pandas.core.reshape import unstack\n return unstack(self, level)\n\n #----------------------------------------------------------------------\n # function application\n\n def map(self, arg, na_action=None):\n \"\"\"\n Map values of Series using input correspondence (which can be\n a dict, Series, or function)\n\n Parameters\n ----------\n arg : function, dict, or Series\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values\n\n Examples\n --------\n >>> x\n one 1\n two 2\n three 3\n\n >>> y\n 1 foo\n 2 bar\n 3 baz\n\n >>> x.map(y)\n one foo\n two bar\n three baz\n\n Returns\n -------\n y : Series\n same index as caller\n \"\"\"\n values = self.values\n if com.is_datetime64_dtype(values.dtype):\n values = lib.map_infer(values, lib.Timestamp)\n\n if na_action == 'ignore':\n mask = isnull(values)\n\n def map_f(values, f):\n return lib.map_infer_mask(values, f, mask.view(np.uint8))\n else:\n map_f = lib.map_infer\n\n if isinstance(arg, (dict, Series)):\n if isinstance(arg, dict):\n arg = self._constructor(arg, index=arg.keys())\n\n indexer = arg.index.get_indexer(values)\n new_values = com.take_1d(arg.values, indexer)\n return self._constructor(new_values,\n index=self.index).__finalize__(self)\n else:\n mapped = map_f(values, arg)\n return self._constructor(mapped,\n index=self.index).__finalize__(self)\n\n def apply(self, func, convert_dtype=True, args=(), **kwds):\n \"\"\"\n Invoke function on values of Series. Can be ufunc (a NumPy function\n that applies to the entire Series) or a Python function that only works\n on single values\n\n Parameters\n ----------\n func : function\n convert_dtype : boolean, default True\n Try to find better dtype for elementwise function results. 
If\n False, leave as dtype=object\n args : tuple\n Positional arguments to pass to function in addition to the value\n Additional keyword arguments will be passed as keywords to the function\n\n See also\n --------\n Series.map: For element-wise operations\n\n Returns\n -------\n y : Series or DataFrame if func returns a Series\n \"\"\"\n if len(self) == 0:\n return Series()\n\n if kwds or args and not isinstance(func, np.ufunc):\n f = lambda x: func(x, *args, **kwds)\n else:\n f = func\n\n if isinstance(f, np.ufunc):\n return f(self)\n\n values = _values_from_object(self)\n if com.is_datetime64_dtype(values.dtype):\n values = lib.map_infer(values, lib.Timestamp)\n\n mapped = lib.map_infer(values, f, convert=convert_dtype)\n if len(mapped) and isinstance(mapped[0], Series):\n from pandas.core.frame import DataFrame\n return DataFrame(mapped.tolist(), index=self.index)\n else:\n return self._constructor(mapped,\n index=self.index).__finalize__(self)\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n \"\"\"\n perform a reduction operation\n\n if we have an ndarray as a value, then simply perform the operation,\n otherwise delegate to the object\n\n \"\"\"\n delegate = self.values\n if isinstance(delegate, np.ndarray):\n # Validate that 'axis' is consistent with Series's single axis.\n self._get_axis_number(axis)\n if numeric_only:\n raise NotImplementedError(\n 'Series.{0} does not implement numeric_only.'.format(name))\n return op(delegate, skipna=skipna, **kwds)\n\n return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,\n numeric_only=numeric_only,\n filter_type=filter_type, **kwds)\n\n def _maybe_box(self, func, dropna=False):\n \"\"\"\n evaluate a function with possible input/output conversion if we are i8\n\n Parameters\n ----------\n dropna : bool, default False\n whether to drop values if necessary\n\n \"\"\"\n if dropna:\n values = self.dropna().values\n else:\n values = self.values\n\n if com.needs_i8_conversion(self):\n boxer = com.i8_boxer(self)\n\n if len(values) == 0:\n return boxer(iNaT)\n\n values = values.view('i8')\n result = func(values)\n\n if com.is_list_like(result):\n result = result.map(boxer)\n else:\n result = boxer(result)\n\n else:\n\n # let the function return nan if appropriate\n if dropna:\n if len(values) == 0:\n return np.nan\n result = func(values)\n\n return result\n\n def _reindex_indexer(self, new_index, indexer, copy):\n if indexer is None:\n if copy:\n return self.copy()\n return self\n\n # be subclass-friendly\n new_values = com.take_1d(self.get_values(), indexer)\n return self._constructor(new_values, index=new_index)\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\" check if we do need a multi reindex; this is for compat with\n higher dims\n \"\"\"\n return False\n\n @Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, index=None, **kwargs):\n return super(Series, self).rename(index=index, **kwargs)\n\n @Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, index=None, **kwargs):\n return super(Series, self).reindex(index=index, **kwargs)\n\n def reindex_axis(self, labels, axis=0, **kwargs):\n \"\"\" for compatibility with higher dims \"\"\"\n if axis != 0:\n raise ValueError(\"cannot reindex series on non-zero axis!\")\n return self.reindex(index=labels, **kwargs)\n\n def take(self, indices, axis=0, convert=True, is_copy=False):\n \"\"\"\n return Series corresponding to requested indices\n\n Parameters\n 
----------\n indices : list / array of ints\n convert : translate negative to positive indices (default)\n\n Returns\n -------\n taken : Series\n\n See also\n --------\n numpy.ndarray.take\n \"\"\"\n # check/convert indicies here\n if convert:\n indices = _maybe_convert_indices(\n indices, len(self._get_axis(axis)))\n\n indices = com._ensure_platform_int(indices)\n new_index = self.index.take(indices)\n new_values = self.values.take(indices)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def isin(self, values):\n \"\"\"\n Return a boolean :class:`~pandas.Series` showing whether each element\n in the :class:`~pandas.Series` is exactly contained in the passed\n sequence of ``values``.\n\n Parameters\n ----------\n values : list-like\n The sequence of values to test. Passing in a single string will\n raise a ``TypeError``. Instead, turn a single string into a\n ``list`` of one element.\n\n Returns\n -------\n isin : Series (bool dtype)\n\n Raises\n ------\n TypeError\n * If ``values`` is a string\n\n See Also\n --------\n pandas.DataFrame.isin\n\n Examples\n --------\n\n >>> s = pd.Series(list('abc'))\n >>> s.isin(['a', 'c', 'e'])\n 0 True\n 1 False\n 2 True\n dtype: bool\n\n Passing a single string as ``s.isin('a')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['a'])\n 0 True\n 1 False\n 2 False\n dtype: bool\n\n \"\"\"\n if not com.is_list_like(values):\n raise TypeError(\"only list-like objects are allowed to be passed\"\n \" to Series.isin(), you passed a \"\n \"{0!r}\".format(type(values).__name__))\n\n # may need i8 conversion for proper membership testing\n comps = _values_from_object(self)\n if com.is_datetime64_dtype(self):\n from pandas.tseries.tools import to_datetime\n values = Series(to_datetime(values)).values.view('i8')\n comps = comps.view('i8')\n elif com.is_timedelta64_dtype(self):\n from pandas.tseries.timedeltas import to_timedelta\n values = Series(to_timedelta(values)).values.view('i8')\n comps = comps.view('i8')\n\n value_set = set(values)\n result = lib.ismember(comps, value_set)\n return self._constructor(result, index=self.index).__finalize__(self)\n\n def between(self, left, right, inclusive=True):\n \"\"\"\n Return boolean Series equivalent to left <= series <= right. NA values\n will be treated as False\n\n Parameters\n ----------\n left : scalar\n Left boundary\n right : scalar\n Right boundary\n\n Returns\n -------\n is_between : Series\n \"\"\"\n if inclusive:\n lmask = self >= left\n rmask = self <= right\n else:\n lmask = self > left\n rmask = self < right\n\n return lmask & rmask\n\n @classmethod\n def from_csv(cls, path, sep=',', parse_dates=True, header=None,\n index_col=0, encoding=None, infer_datetime_format=False):\n \"\"\"\n Read delimited file into Series\n\n Parameters\n ----------\n path : string file path or file handle / StringIO\n sep : string, default ','\n Field delimiter\n parse_dates : boolean, default True\n Parse dates. Different default from read_table\n header : int, default 0\n Row to use at header (skip prior rows)\n index_col : int or sequence, default 0\n Column to use for index. If a sequence is given, a MultiIndex\n is used. Different default from read_table\n encoding : string, optional\n a string representing the encoding to use if the contents are\n non-ascii, for python versions prior to 3\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. 
If the format\n can be inferred, there often will be a large parsing speed-up.\n\n Returns\n -------\n y : Series\n \"\"\"\n from pandas.core.frame import DataFrame\n df = DataFrame.from_csv(path, header=header, index_col=index_col,\n sep=sep, parse_dates=parse_dates,\n encoding=encoding,\n infer_datetime_format=infer_datetime_format)\n result = df.icol(0)\n result.index.name = result.name = None\n return result\n\n def to_csv(self, path, index=True, sep=\",\", na_rep='',\n float_format=None, header=False,\n index_label=None, mode='w', nanRep=None, encoding=None,\n date_format=None):\n \"\"\"\n Write Series to a comma-separated values (csv) file\n\n Parameters\n ----------\n path : string file path or file handle / StringIO. If None is provided\n the result is returned as a string.\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n header : boolean, default False\n Write out series name\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n mode : Python write mode, default 'w'\n sep : character, default \",\"\n Field delimiter for the output file.\n encoding : string, optional\n a string representing the encoding to use if the contents are\n non-ascii, for python versions prior to 3\n date_format: string, default None\n Format string for datetime objects.\n \"\"\"\n from pandas.core.frame import DataFrame\n df = DataFrame(self)\n # result is only a string if no path provided, otherwise None\n result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,\n float_format=float_format, header=header,\n index_label=index_label, mode=mode, nanRep=nanRep,\n encoding=encoding, date_format=date_format)\n if path is None:\n return result\n\n def dropna(self, axis=0, inplace=False, **kwargs):\n \"\"\"\n Return Series without null values\n\n Returns\n -------\n valid : Series\n inplace : boolean, default False\n Do operation in place.\n \"\"\"\n axis = self._get_axis_number(axis or 0)\n result = remove_na(self)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,\n **kwargs)\n\n def first_valid_index(self):\n \"\"\"\n Return label for first non-NA/null value\n \"\"\"\n if len(self) == 0:\n return None\n\n mask = isnull(self.values)\n i = mask.argmin()\n if mask[i]:\n return None\n else:\n return self.index[i]\n\n def last_valid_index(self):\n \"\"\"\n Return label for last non-NA/null value\n \"\"\"\n if len(self) == 0:\n return None\n\n mask = isnull(self.values[::-1])\n i = mask.argmin()\n if mask[i]:\n return None\n else:\n return self.index[len(self) - i - 1]\n\n #----------------------------------------------------------------------\n # Time series-oriented methods\n\n def asof(self, where):\n \"\"\"\n Return last good (non-NaN) value in TimeSeries if value is NaN for\n requested date.\n\n If there is no good value, NaN is returned.\n\n Parameters\n ----------\n where : date or array of dates\n\n Notes\n -----\n Dates are assumed to be sorted\n\n Returns\n -------\n value or NaN\n \"\"\"\n if isinstance(where, compat.string_types):\n where = datetools.to_datetime(where)\n\n values = self.values\n\n if not hasattr(where, '__iter__'):\n start = self.index[0]\n if 
isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq).ordinal\n start = start.ordinal\n\n if where < start:\n return np.nan\n loc = self.index.searchsorted(where, side='right')\n if loc > 0:\n loc -= 1\n while isnull(values[loc]) and loc > 0:\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where)\n\n locs = self.index.asof_locs(where, notnull(values))\n new_values = com.take_1d(values, locs)\n return self._constructor(new_values, index=where).__finalize__(self)\n\n @cache_readonly\n def str(self):\n from pandas.core.strings import StringMethods\n return StringMethods(self)\n\n def to_timestamp(self, freq=None, how='start', copy=True):\n \"\"\"\n Cast to datetimeindex of timestamps, at *beginning* of period\n\n Parameters\n ----------\n freq : string, default frequency of PeriodIndex\n Desired frequency\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. end\n\n Returns\n -------\n ts : TimeSeries with DatetimeIndex\n \"\"\"\n new_values = self.values\n if copy:\n new_values = new_values.copy()\n\n new_index = self.index.to_timestamp(freq=freq, how=how)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def to_period(self, freq=None, copy=True):\n \"\"\"\n Convert TimeSeries from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed)\n\n Parameters\n ----------\n freq : string, default\n\n Returns\n -------\n ts : TimeSeries with PeriodIndex\n \"\"\"\n new_values = self.values\n if copy:\n new_values = new_values.copy()\n\n new_index = self.index.to_period(freq=freq)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n #------------------------------------------------------------------------------\n # Datetimelike delegation methods\n\n @cache_readonly\n def dt(self):\n from pandas.tseries.common import maybe_to_datetimelike\n try:\n return maybe_to_datetimelike(self)\n except (Exception):\n raise TypeError(\"Can only use .dt accessor with datetimelike values\")\n\n #------------------------------------------------------------------------------\n # Categorical methods\n\n @cache_readonly\n def cat(self):\n from pandas.core.categorical import CategoricalAccessor\n if not com.is_categorical_dtype(self.dtype):\n raise TypeError(\"Can only use .cat accessor with a 'category' dtype\")\n return CategoricalAccessor(self.values, self.index)\n\nSeries._setup_axes(['index'], info_axis=0, stat_axis=0,\n aliases={'rows': 0})\nSeries._add_numeric_operations()\n_INDEX_TYPES = ndarray, Index, list, tuple\n\n#------------------------------------------------------------------------------\n# Supplementary functions\n\n\ndef remove_na(series):\n \"\"\"\n Return series containing only true/non-NaN values, possibly empty.\n \"\"\"\n return series[notnull(_values_from_object(series))]\n\n\ndef _sanitize_index(data, index, copy=False):\n \"\"\" sanitize an index type to return an ndarray of the underlying, pass thru a non-Index \"\"\"\n\n if len(data) != len(index):\n raise ValueError('Length of values does not match length of '\n 'index')\n\n if isinstance(data, PeriodIndex):\n data = data.asobject\n elif isinstance(data, DatetimeIndex):\n data = data._to_embed(keep_tz=True)\n if copy:\n data = data.copy()\n elif isinstance(data, np.ndarray):\n\n # coerce datetimelike types\n if data.dtype.kind in ['M','m']:\n data = _sanitize_array(data, index, copy=copy)\n\n return data\n\ndef _sanitize_array(data, 
index, dtype=None, copy=False,\n raise_cast_failure=False):\n \"\"\" sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified \"\"\"\n\n if dtype is not None:\n dtype = _coerce_to_dtype(dtype)\n\n if isinstance(data, ma.MaskedArray):\n mask = ma.getmaskarray(data)\n if mask.any():\n data, fill_value = _maybe_upcast(data, copy=True)\n data[mask] = fill_value\n else:\n data = data.copy()\n\n def _try_cast(arr, take_fast_path):\n\n # perf shortcut as this is the most common case\n if take_fast_path:\n if _possibly_castable(arr) and not copy and dtype is None:\n return arr\n\n try:\n arr = _possibly_cast_to_datetime(arr, dtype)\n subarr = np.array(arr, dtype=dtype, copy=copy)\n except (ValueError, TypeError):\n if com.is_categorical_dtype(dtype):\n subarr = Categorical(arr)\n elif dtype is not None and raise_cast_failure:\n raise\n else:\n subarr = np.array(arr, dtype=object, copy=copy)\n return subarr\n\n # GH #846\n if isinstance(data, (np.ndarray, Index, Series)):\n subarr = np.array(data, copy=False)\n if dtype is not None:\n\n # possibility of nan -> garbage\n if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):\n if not isnull(data).any():\n subarr = _try_cast(data, True)\n elif copy:\n subarr = data.copy()\n else:\n if (com.is_datetime64_dtype(data.dtype) and\n not com.is_datetime64_dtype(dtype)):\n if dtype == object:\n ints = np.asarray(data).view('i8')\n subarr = tslib.ints_to_pydatetime(ints)\n elif raise_cast_failure:\n raise TypeError('Cannot cast datetime64 to %s' % dtype)\n else:\n subarr = _try_cast(data, True)\n elif isinstance(data, Index):\n # don't coerce Index types\n # e.g. indexes can have different conversions (so don't fast path them)\n # GH 6140\n subarr = _sanitize_index(data, index, copy=True)\n else:\n subarr = _try_cast(data, True)\n\n if copy:\n subarr = data.copy()\n\n elif isinstance(data, Categorical):\n subarr = data\n\n if copy:\n subarr = data.copy()\n return subarr\n\n elif isinstance(data, list) and len(data) > 0:\n if dtype is not None:\n try:\n subarr = _try_cast(data, False)\n except Exception:\n if raise_cast_failure: # pragma: no cover\n raise\n subarr = np.array(data, dtype=object, copy=copy)\n subarr = lib.maybe_convert_objects(subarr)\n\n else:\n subarr = _possibly_convert_platform(data)\n\n subarr = _possibly_cast_to_datetime(subarr, dtype)\n\n else:\n subarr = _try_cast(data, False)\n\n # scalar like\n if subarr.ndim == 0:\n if isinstance(data, list): # pragma: no cover\n subarr = np.array(data, dtype=object)\n elif index is not None:\n value = data\n\n # figure out the dtype from the value (upcast if necessary)\n if dtype is None:\n dtype, value = _infer_dtype_from_scalar(value)\n else:\n # need to possibly convert the value here\n value = _possibly_cast_to_datetime(value, dtype)\n\n subarr = np.empty(len(index), dtype=dtype)\n subarr.fill(value)\n\n else:\n return subarr.item()\n\n # the result that we want\n elif subarr.ndim == 1:\n if index is not None:\n\n # a 1-element ndarray\n if len(subarr) != len(index) and len(subarr) == 1:\n value = subarr[0]\n subarr = np.empty(len(index), dtype=subarr.dtype)\n subarr.fill(value)\n\n elif subarr.ndim > 1:\n if isinstance(data, np.ndarray):\n raise Exception('Data must be 1-dimensional')\n else:\n subarr = _asarray_tuplesafe(data, dtype=dtype)\n\n # This is to prevent mixed-type Series getting all casted to\n # NumPy string type, e.g. 
NaN --> '-1#IND'.\n if issubclass(subarr.dtype.type, compat.string_types):\n subarr = np.array(data, dtype=object, copy=copy)\n\n return subarr\n\n# backwards compatiblity\nTimeSeries = Series\n\n#----------------------------------------------------------------------\n# Add plotting methods to Series\n\nimport pandas.tools.plotting as _gfx\n\nSeries.plot = _gfx.plot_series\nSeries.hist = _gfx.hist_series\n\n# Add arithmetic!\nops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs)\nops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.random.shuffle"
],
[
"pandas.core.common.is_integer_dtype",
"matplotlib.dates.AutoDateFormatter.__init__",
"matplotlib.ticker.AutoLocator",
"pandas.tseries.frequencies.get_freq",
"matplotlib.transforms.nonsingular",
"matplotlib.dates.DateLocator.__init__",
"numpy.arange",
"matplotlib.dates.RRuleLocator.get_unit_generic",
"pandas.core.common.is_float_dtype",
"pandas.tseries.period.Period",
"pandas.core.common.is_datetime64_ns_dtype",
"pandas.tseries.period.PeriodIndex",
"pandas.tseries.tools.to_datetime",
"pandas.tseries.frequencies.get_freq_group",
"numpy.zeros",
"numpy.logical_not",
"matplotlib.dates.DateFormatter",
"matplotlib.units.AxisInfo",
"pandas.core.common.is_integer",
"pandas.core.common.is_float",
"matplotlib.dates.AutoDateLocator.get_locator",
"matplotlib.dates.epoch2num",
"pandas.core.common._asarray_tuplesafe",
"numpy.compress",
"pandas.tseries.index.date_range",
"pandas.lib.Timestamp",
"matplotlib.dates.date2num",
"pandas.core.common.is_period_arraylike",
"pandas.compat.lrange"
],
[
"pandas.core.nanops.nancov",
"pandas.core.common.is_categorical_dtype",
"pandas.core.frame.DataFrame",
"pandas.tseries.common.maybe_to_datetimelike",
"pandas.compat.text_type",
"pandas.core.common._infer_dtype_from_scalar",
"pandas.tseries.period.Period",
"pandas.tseries.tools.to_datetime",
"pandas.util.decorators.Appender",
"pandas.core.common._try_sort",
"pandas.tools.merge.concat",
"pandas.core.common.is_integer",
"numpy.array",
"pandas.lib.map_infer",
"pandas.core.algorithms.select_n",
"pandas.core.groupby._lexsort_indexer",
"pandas.core.common._coerce_to_dtype",
"pandas.lib.Timestamp",
"pandas.core.generic.NDFrame.__init__",
"pandas.core.common._maybe_upcast",
"pandas.core.common.is_integer_dtype",
"numpy.asarray",
"pandas.core.common._ensure_platform_int",
"numpy.ma.getmaskarray",
"pandas.core.common.is_iterator",
"pandas.index.get_value_at",
"pandas.core.nanops.nancorr",
"pandas.core.generic.NDFrame._update_inplace",
"pandas.core.index._ensure_index",
"pandas.lib.Timedelta",
"pandas.core.common.take_1d",
"pandas.core.frame.DataFrame.from_csv",
"pandas.compat.u",
"pandas.core.algorithms.mode",
"pandas.core.internals.SingleBlockManager",
"pandas.lib.infer_dtype",
"numpy.isscalar",
"numpy.ndarray.__setstate__",
"numpy.empty",
"pandas.core.index.Index",
"numpy.issubdtype",
"pandas.compat.iteritems",
"pandas.util.terminal.get_terminal_size",
"pandas.core.common.needs_i8_conversion",
"pandas.lib.ismember",
"pandas.core.ops.add_special_arithmetic_methods",
"pandas.core.common._possibly_cast_to_datetime",
"pandas.core.common._ensure_int64",
"pandas.core.common._maybe_match_name",
"pandas.core.strings.StringMethods",
"pandas.core.categorical.Categorical",
"pandas.tseries.index.DatetimeIndex",
"pandas.core.common._possibly_convert_platform",
"pandas.core.algorithms.rank",
"pandas.lib.fast_multiget",
"numpy.argsort",
"pandas.core.indexing._check_bool_indexer",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.core.common._asarray_tuplesafe",
"pandas.core.common._maybe_box_datetimelike",
"numpy.percentile",
"pandas.core.common._is_bool_indexer",
"pandas.core.common._possibly_castable",
"pandas.core.common.pprint_thing",
"pandas.tseries.timedeltas.to_timedelta",
"pandas.core.common.is_list_like",
"numpy.dot",
"pandas.sparse.series.SparseSeries",
"pandas.core.common.is_timedelta64_dtype",
"pandas.lib.maybe_convert_objects",
"pandas.core.common.i8_boxer",
"pandas.core.common.notnull",
"pandas.core.categorical.CategoricalAccessor",
"pandas.core.datetools.to_datetime",
"pandas.core.config.get_option",
"pandas.tslib.ints_to_pydatetime",
"pandas.core.common.is_float_dtype",
"pandas.core.format.SeriesFormatter",
"pandas.core.reshape.unstack",
"pandas.core.common._values_from_object",
"pandas.core.common.isnull",
"pandas.core.common.is_datetime64_dtype"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
altair-viz/altair-transform
|
[
"b65bf854de1e80f931e063d8fb2ec938773826fb",
"b65bf854de1e80f931e063d8fb2ec938773826fb"
] |
[
"altair_transform/transform/fold.py",
"altair_transform/transform/tests/test_quantile.py"
] |
[
"import altair as alt\nimport pandas as pd\nfrom .visitor import visit\n\n\[email protected](alt.FoldTransform)\ndef visit_fold(transform: alt.FoldTransform, df: pd.DataFrame) -> pd.DataFrame:\n transform = transform.to_dict()\n fold = transform[\"fold\"]\n var_name, value_name = transform.get(\"as\", (\"key\", \"value\"))\n value_vars = [c for c in df.columns if c in fold]\n id_vars = [c for c in df.columns if c not in fold]\n\n # Add an index to track input order\n dfi = df.reset_index(drop=True).reset_index()\n index_name = dfi.columns[0]\n melted = dfi.melt(\n id_vars=[index_name] + id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n )\n return (\n pd.merge(melted, dfi, on=[index_name] + id_vars, how=\"left\")\n .sort_values(index_name)\n .drop(index_name, axis=1)\n .reset_index(drop=True)\n )\n",
"from typing import Any, Dict, List, Optional\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\nimport pytest\n\nimport altair_transform\n\n\[email protected]\ndef data() -> pd.DataFrame:\n rand = np.random.RandomState(42)\n return pd.DataFrame({\"x\": rand.randint(0, 100, 12), \"c\": list(\"AAABBBCCCDDD\")})\n\n\ndef test_quantile_transform(data: pd.DataFrame) -> None:\n transform = {\"quantile\": \"x\", \"step\": 0.1}\n out = altair_transform.apply(data, transform)\n assert list(out.columns) == [\"prob\", \"value\"]\n assert_allclose(out.prob, np.arange(0.05, 1, 0.1))\n assert_allclose(out.value, np.quantile(data.x, out.prob))\n\n\ndef test_quantile_transform_groupby(data: pd.DataFrame) -> None:\n group = \"c\"\n transform = {\"quantile\": \"x\", \"step\": 0.1, \"groupby\": [group]}\n out = altair_transform.apply(data, transform)\n assert list(out.columns) == [\"c\", \"prob\", \"value\"]\n\n for key in data[group].unique():\n out_group_1 = altair_transform.apply(data[data[group] == key], transform)\n out_group_2 = out[out[group] == key][out_group_1.columns].reset_index(drop=True)\n assert_frame_equal(out_group_1, out_group_2)\n\n\[email protected](\"step\", [None, 0.1])\[email protected](\"groupby\", [None, [\"c\"]])\[email protected](\"probs\", [None, [0.2 * i for i in range(6)]])\[email protected](\"as_\", [None, [\"p\", \"q\"]])\ndef test_quantile_against_js(\n driver,\n data: pd.DataFrame,\n step: Optional[float],\n groupby: Optional[List[str]],\n probs: Optional[List[float]],\n as_: Optional[List[str]],\n) -> None:\n transform: Dict[str, Any] = {\"quantile\": \"x\"}\n if step is not None:\n transform[\"step\"] = step\n if groupby is not None:\n transform[\"groupby\"] = groupby\n if probs is not None:\n transform[\"probs\"] = probs\n if as_ is not None:\n transform[\"as\"] = as_\n got = altair_transform.apply(data, transform)\n want = driver.apply(data, transform)\n assert_frame_equal(\n got[sorted(got.columns)],\n want[sorted(want.columns)],\n check_dtype=False,\n check_index_type=False,\n check_less_precise=True,\n )\n"
] |
[
[
"pandas.merge"
],
[
"numpy.arange",
"numpy.random.RandomState",
"numpy.quantile",
"pandas.testing.assert_frame_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
PaulLerner/pyannote-audio
|
[
"06f76a2c5a37c79cf42710167c7b7404658879d3"
] |
[
"pyannote/audio/labeling/tasks/base.py"
] |
[
"#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2018-2020 CNRS\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\nfrom typing import Optional\nfrom typing import Text\n\nimport torch\nimport torch.nn.functional as F\n\nimport numpy as np\nimport scipy.signal\n\nfrom pyannote.core import Segment\nfrom pyannote.core import SlidingWindow\nfrom pyannote.core import Timeline\nfrom pyannote.core import Annotation\nfrom pyannote.core import SlidingWindowFeature\n\nfrom pyannote.database import get_unique_identifier\nfrom pyannote.database import get_annotated\nfrom pyannote.database.protocol.protocol import Protocol\n\nfrom pyannote.core.utils.numpy import one_hot_encoding\n\nfrom pyannote.audio.features import RawAudio\nfrom pyannote.audio.features.wrapper import Wrapper, Wrappable\n\nfrom pyannote.core.utils.random import random_segment\nfrom pyannote.core.utils.random import random_subsegment\n\nfrom pyannote.audio.train.trainer import Trainer\nfrom pyannote.audio.train.generator import BatchGenerator\n\nfrom pyannote.audio.train.task import Task, TaskType, TaskOutput\n\nfrom pyannote.audio.train.model import Resolution\nfrom pyannote.audio.train.model import RESOLUTION_CHUNK\nfrom pyannote.audio.train.model import RESOLUTION_FRAME\nfrom pyannote.audio.train.model import Alignment\n\nSECONDS_IN_A_DAY = 24 * 60 * 60\n\n\nclass LabelingTaskGenerator(BatchGenerator):\n \"\"\"Base batch generator for various labeling tasks\n\n This class should be inherited from: it should not be used directy\n\n Parameters\n ----------\n task : Task\n Task\n feature_extraction : Wrappable\n Describes how features should be obtained.\n See pyannote.audio.features.wrapper.Wrapper documentation for details.\n protocol : Protocol\n subset : {'train', 'development', 'test'}, optional\n Protocol and subset.\n resolution : `pyannote.core.SlidingWindow`, optional\n Override `feature_extraction.sliding_window`. This is useful for\n models that include the feature extraction step (e.g. SincNet) and\n therefore output a lower sample rate than that of the input.\n Defaults to `feature_extraction.sliding_window`\n alignment : {'center', 'loose', 'strict'}, optional\n Which mode to use when cropping labels. This is useful for models that\n include the feature extraction step (e.g. SincNet) and therefore use a\n different cropping mode. Defaults to 'center'.\n duration : float, optional\n Duration of audio chunks. 
Defaults to 2s.\n batch_size : int, optional\n Batch size. Defaults to 32.\n per_epoch : float, optional\n Force total audio duration per epoch, in days.\n Defaults to total duration of protocol subset.\n exhaustive : bool, optional\n Ensure training files are covered exhaustively (useful in case of\n non-uniform label distribution).\n step : `float`, optional\n Ratio of audio chunk duration used as step between two consecutive\n audio chunks. Defaults to 0.1. Has not effect when exhaustive is False.\n mask : str, optional\n When provided, protocol files are expected to contain a key named after\n this `mask` variable and providing a `SlidingWindowFeature` instance.\n Generated batches will contain an additional \"mask\" key (on top of\n existing \"X\" and \"y\" keys) computed as an excerpt of `current_file[mask]`\n time-aligned with \"y\". Defaults to not add any \"mask\" key.\n local_labels : bool, optional\n Set to True to yield samples with local (file-level) labels.\n Defaults to use global (protocol-level) labels.\n \"\"\"\n\n def __init__(\n self,\n task: Task,\n feature_extraction: Wrappable,\n protocol: Protocol,\n subset: Text = \"train\",\n resolution: Optional[Resolution] = None,\n alignment: Optional[Alignment] = None,\n duration: float = 2.0,\n batch_size: int = 32,\n per_epoch: float = None,\n exhaustive: bool = False,\n step: float = 0.1,\n mask: Text = None,\n local_labels: bool = False,\n ):\n\n self.task = task\n self.feature_extraction = Wrapper(feature_extraction)\n self.duration = duration\n self.exhaustive = exhaustive\n self.step = step\n self.mask = mask\n self.local_labels = local_labels\n\n self.resolution_ = resolution\n\n if alignment is None:\n alignment = \"center\"\n self.alignment = alignment\n\n self.batch_size = batch_size\n\n # load metadata and estimate total duration of training data\n total_duration = self._load_metadata(protocol, subset=subset)\n\n #\n if per_epoch is None:\n\n # 1 epoch = covering the whole training set once\n #\n per_epoch = total_duration / SECONDS_IN_A_DAY\n\n # when exhaustive is False, this is not completely correct.\n # in practice, it will randomly sample audio chunk until their\n # overall duration reaches the duration of the training set.\n # but nothing guarantees that every single part of the training set\n # has been seen exactly once: it might be more than once, it might\n # be less than once. on average, however, after a certain amount of\n # epoch, this will be correct\n\n # when exhaustive is True, however, we can actually make sure every\n # single part of the training set has been seen. we just have to\n # make sur we account for the step used by the exhaustive sliding\n # window\n if self.exhaustive:\n per_epoch *= np.ceil(1 / self.step)\n\n self.per_epoch = per_epoch\n\n # TODO. 
use cached property (Python 3.8 only)\n # https://docs.python.org/fr/3/library/functools.html#functools.cached_property\n @property\n def resolution(self):\n\n if self.resolution_ in [None, RESOLUTION_FRAME]:\n return self.feature_extraction.sliding_window\n\n if self.resolution_ == RESOLUTION_CHUNK:\n return self.SlidingWindow(\n duration=self.duration, step=self.step * self.duration\n )\n\n return self.resolution_\n\n def postprocess_y(self, Y: np.ndarray) -> np.ndarray:\n \"\"\"This function does nothing but return its input.\n It should be overriden by subclasses.\n\n Parameters\n ----------\n Y : (n_samples, n_speakers) numpy.ndarray\n\n Returns\n -------\n postprocessed :\n\n \"\"\"\n return Y\n\n def initialize_y(self, current_file):\n \"\"\"Precompute y for the whole file\n\n Parameters\n ----------\n current_file : `dict`\n File as provided by a pyannote.database protocol.\n\n Returns\n -------\n y : `SlidingWindowFeature`\n Precomputed y for the whole file\n \"\"\"\n\n if self.local_labels:\n labels = current_file[\"annotation\"].labels()\n else:\n labels = self.segment_labels_\n\n y = one_hot_encoding(\n current_file[\"annotation\"],\n get_annotated(current_file),\n self.resolution,\n labels=labels,\n mode=\"center\",\n )\n\n y.data = self.postprocess_y(y.data)\n return y\n\n def crop_y(self, y, segment):\n \"\"\"Extract y for specified segment\n\n Parameters\n ----------\n y : `pyannote.core.SlidingWindowFeature`\n Output of `initialize_y` above.\n segment : `pyannote.core.Segment`\n Segment for which to obtain y.\n\n Returns\n -------\n cropped_y : (n_samples, dim) `np.ndarray`\n y for specified `segment`\n \"\"\"\n\n return y.crop(segment, mode=self.alignment, fixed=self.duration)\n\n def _load_metadata(self, protocol, subset=\"train\") -> float:\n \"\"\"Load training set metadata\n\n This function is called once at instantiation time, returns the total\n training set duration, and populates the following attributes:\n\n Attributes\n ----------\n data_ : dict\n\n {'segments': <list of annotated segments>,\n 'duration': <total duration of annotated segments>,\n 'current_file': <protocol dictionary>,\n 'y': <labels as numpy array>}\n\n segment_labels_ : list\n Sorted list of (unique) labels in protocol.\n\n file_labels_ : dict of list\n Sorted lists of (unique) file labels in protocol\n\n Returns\n -------\n duration : float\n Total duration of annotated segments, in seconds.\n \"\"\"\n\n self.data_ = {}\n segment_labels, file_labels = set(), dict()\n\n # loop once on all files\n for current_file in getattr(protocol, subset)():\n\n # ensure annotation/annotated are cropped to actual file duration\n support = Segment(start=0, end=current_file[\"duration\"])\n current_file[\"annotated\"] = get_annotated(current_file).crop(\n support, mode=\"intersection\"\n )\n current_file[\"annotation\"] = current_file[\"annotation\"].crop(\n support, mode=\"intersection\"\n )\n\n # keep track of unique segment labels\n segment_labels.update(current_file[\"annotation\"].labels())\n\n # keep track of unique file labels\n for key, value in current_file.items():\n if isinstance(value, (Annotation, Timeline, SlidingWindowFeature)):\n continue\n if key not in file_labels:\n file_labels[key] = set()\n file_labels[key].add(value)\n\n segments = [\n s for s in current_file[\"annotated\"] if s.duration > self.duration\n ]\n\n # corner case where no segment is long enough\n # and we removed them all...\n if not segments:\n continue\n\n # total duration of label in current_file (after removal of\n # short 
segments).\n duration = sum(s.duration for s in segments)\n\n # store all these in data_ dictionary\n datum = {\n \"segments\": segments,\n \"duration\": duration,\n \"current_file\": current_file,\n }\n uri = get_unique_identifier(current_file)\n self.data_[uri] = datum\n\n self.file_labels_ = {k: sorted(file_labels[k]) for k in file_labels}\n self.segment_labels_ = sorted(segment_labels)\n\n for uri in list(self.data_):\n current_file = self.data_[uri][\"current_file\"]\n y = self.initialize_y(current_file)\n self.data_[uri][\"y\"] = y\n if self.mask is not None:\n mask = current_file[self.mask]\n current_file[self.mask] = mask.align(y)\n\n return sum(datum[\"duration\"] for datum in self.data_.values())\n\n @property\n def specifications(self):\n \"\"\"Task & sample specifications\n\n Returns\n -------\n specs : `dict`\n ['task'] (`pyannote.audio.train.Task`) : task\n ['X']['dimension'] (`int`) : features dimension\n ['y']['classes'] (`list`) : list of classes\n \"\"\"\n\n specs = {\n \"task\": self.task,\n \"X\": {\"dimension\": self.feature_extraction.dimension},\n }\n\n if not self.local_labels:\n specs[\"y\"] = {\"classes\": self.segment_labels_}\n\n return specs\n\n def samples(self):\n if self.exhaustive:\n return self._sliding_samples()\n else:\n return self._random_samples()\n\n def _random_samples(self):\n \"\"\"Random samples\n\n Returns\n -------\n samples : generator\n Generator that yields {'X': ..., 'y': ...} samples indefinitely.\n \"\"\"\n\n uris = list(self.data_)\n durations = np.array([self.data_[uri][\"duration\"] for uri in uris])\n probabilities = durations / np.sum(durations)\n\n while True:\n\n # choose file at random with probability\n # proportional to its (annotated) duration\n uri = uris[np.random.choice(len(uris), p=probabilities)]\n\n datum = self.data_[uri]\n current_file = datum[\"current_file\"]\n\n # choose one segment at random with probability\n # proportional to its duration\n segment = next(random_segment(datum[\"segments\"], weighted=True))\n\n # choose fixed-duration subsegment at random\n subsegment = next(random_subsegment(segment, self.duration))\n\n X = self.feature_extraction.crop(\n current_file, subsegment, mode=\"center\", fixed=self.duration\n )\n\n y = self.crop_y(datum[\"y\"], subsegment)\n sample = {\"X\": X, \"y\": y}\n\n if self.mask is not None:\n mask = self.crop_y(current_file[self.mask], subsegment)\n sample[\"mask\"] = mask\n\n for key, classes in self.file_labels_.items():\n sample[key] = classes.index(current_file[key])\n\n yield sample\n\n def _sliding_samples(self):\n\n uris = list(self.data_)\n durations = np.array([self.data_[uri][\"duration\"] for uri in uris])\n probabilities = durations / np.sum(durations)\n sliding_segments = SlidingWindow(\n duration=self.duration, step=self.step * self.duration\n )\n\n while True:\n\n np.random.shuffle(uris)\n\n # loop on all files\n for uri in uris:\n\n datum = self.data_[uri]\n\n # make a copy of current file\n current_file = dict(datum[\"current_file\"])\n\n # compute features for the whole file\n features = self.feature_extraction(current_file)\n\n # randomly shift 'annotated' segments start time so that\n # we avoid generating exactly the same subsequence twice\n annotated = Timeline()\n for segment in get_annotated(current_file):\n shifted_segment = Segment(\n segment.start + np.random.random() * self.duration, segment.end\n )\n if shifted_segment:\n annotated.add(shifted_segment)\n\n samples = []\n for sequence in sliding_segments(annotated):\n\n X = features.crop(sequence, 
mode=\"center\", fixed=self.duration)\n y = self.crop_y(datum[\"y\"], sequence)\n sample = {\"X\": X, \"y\": y}\n\n if self.mask is not None:\n\n # extract mask for current sub-segment\n mask = current_file[self.mask].crop(\n sequence, mode=\"center\", fixed=self.duration\n )\n\n # it might happen that \"mask\" and \"y\" use different\n # sliding windows. therefore, we simply resample \"mask\"\n # to match \"y\"\n if len(mask) != len(y):\n mask = scipy.signal.resample(mask, len(y), axis=0)\n sample[\"mask\"] = mask\n\n for key, classes in self.file_labels_.items():\n sample[key] = classes.index(current_file[key])\n\n samples.append(sample)\n\n np.random.shuffle(samples)\n for sample in samples:\n yield sample\n\n @property\n def batches_per_epoch(self):\n \"\"\"Number of batches needed to complete an epoch\"\"\"\n duration_per_epoch = self.per_epoch * SECONDS_IN_A_DAY\n duration_per_batch = self.duration * self.batch_size\n return int(np.ceil(duration_per_epoch / duration_per_batch))\n\n\nclass LabelingTask(Trainer):\n \"\"\"Base class for various labeling tasks\n\n This class should be inherited from: it should not be used directy\n\n Parameters\n ----------\n duration : float, optional\n Duration of audio chunks. Defaults to 2s.\n batch_size : int, optional\n Batch size. Defaults to 32.\n per_epoch : float, optional\n Force total audio duration per epoch, in days.\n Defaults to total duration of protocol subset.\n exhaustive : bool, optional\n Ensure training files are covered exhaustively (useful in case of\n non-uniform label distribution).\n step : `float`, optional\n Ratio of audio chunk duration used as step between two consecutive\n audio chunks. Defaults to 0.1. Has not effect when exhaustive is False.\n \"\"\"\n\n def __init__(\n self,\n duration: float = 2.0,\n batch_size: int = 32,\n per_epoch: float = None,\n exhaustive: bool = False,\n step: float = 0.1,\n ):\n super(LabelingTask, self).__init__()\n self.duration = duration\n self.batch_size = batch_size\n self.per_epoch = per_epoch\n self.exhaustive = exhaustive\n self.step = step\n\n def get_batch_generator(\n self,\n feature_extraction: Wrappable,\n protocol: Protocol,\n subset: Text = \"train\",\n resolution: Optional[Resolution] = None,\n alignment: Optional[Alignment] = None,\n ) -> LabelingTaskGenerator:\n \"\"\"This method should be overriden by subclass\n\n Parameters\n ----------\n feature_extraction : Wrappable\n Describes how features should be obtained.\n See pyannote.audio.features.wrapper.Wrapper documentation for details.\n protocol : Protocol\n subset : {'train', 'development'}, optional\n Defaults to 'train'.\n resolution : `pyannote.core.SlidingWindow`, optional\n Override `feature_extraction.sliding_window`. This is useful for\n models that include the feature extraction step (e.g. SincNet) and\n therefore output a lower sample rate than that of the input.\n alignment : {'center', 'loose', 'strict'}, optional\n Which mode to use when cropping labels. This is useful for models\n that include the feature extraction step (e.g. SincNet) and\n therefore use a different cropping mode. 
Defaults to 'center'.\n\n Returns\n -------\n batch_generator : `LabelingTaskGenerator`\n \"\"\"\n\n return LabelingTaskGenerator(\n self.task,\n feature_extraction,\n protocol,\n subset=subset,\n resolution=resolution,\n alignment=alignment,\n duration=self.duration,\n per_epoch=self.per_epoch,\n batch_size=self.batch_size,\n exhaustive=self.exhaustive,\n step=self.step,\n )\n\n @property\n def weight(self):\n \"\"\"Class/task weights\n\n Returns\n -------\n weight : None or `torch.Tensor`\n \"\"\"\n return None\n\n def on_train_start(self):\n \"\"\"Set loss function (with support for class weights)\n\n loss_func_ = Function f(input, target, weight=None) -> loss value\n \"\"\"\n\n self.task_ = self.model_.task\n\n if self.task_.is_multiclass_classification:\n\n self.n_classes_ = len(self.model_.classes)\n\n def loss_func(input, target, weight=None, mask=None):\n if mask is None:\n return F.nll_loss(input, target, weight=weight, reduction=\"mean\")\n else:\n return torch.mean(\n mask\n * F.nll_loss(input, target, weight=weight, reduction=\"none\")\n )\n\n if self.task_.is_multilabel_classification:\n\n def loss_func(input, target, weight=None, mask=None):\n if mask is None:\n return F.binary_cross_entropy(\n input, target, weight=weight, reduction=\"mean\"\n )\n else:\n return torch.mean(\n mask\n * F.binary_cross_entropy(\n input, target, weight=weight, reduction=\"none\"\n )\n )\n\n if self.task_.is_regression:\n\n def loss_func(input, target, weight=None, mask=None):\n if mask is None:\n return F.mse_loss(input, target, reduction=\"mean\")\n else:\n return torch.mean(\n mask * F.mse_loss(input, target, reduction=\"none\")\n )\n\n self.loss_func_ = loss_func\n\n def batch_loss(self, batch):\n \"\"\"Compute loss for current `batch`\n\n Parameters\n ----------\n batch : `dict`\n ['X'] (`numpy.ndarray`)\n ['y'] (`numpy.ndarray`)\n ['mask'] (`numpy.ndarray`, optional)\n\n Returns\n -------\n batch_loss : `dict`\n ['loss'] (`torch.Tensor`) : Loss\n \"\"\"\n\n # forward pass\n X = torch.tensor(batch[\"X\"], dtype=torch.float32, device=self.device_)\n fX = self.model_(X)\n\n mask = None\n if self.task_.is_multiclass_classification:\n\n fX = fX.view((-1, self.n_classes_))\n\n target = (\n torch.tensor(batch[\"y\"], dtype=torch.int64, device=self.device_)\n .contiguous()\n .view((-1,))\n )\n\n if \"mask\" in batch:\n mask = (\n torch.tensor(\n batch[\"mask\"], dtype=torch.float32, device=self.device_\n )\n .contiguous()\n .view((-1,))\n )\n\n elif self.task_.is_multilabel_classification or self.task_.is_regression:\n\n target = torch.tensor(batch[\"y\"], dtype=torch.float32, device=self.device_)\n\n if \"mask\" in batch:\n mask = torch.tensor(\n batch[\"mask\"], dtype=torch.float32, device=self.device_\n )\n\n weight = self.weight\n if weight is not None:\n weight = weight.to(device=self.device_)\n\n return {\n \"loss\": self.loss_func_(fX, target, weight=weight, mask=mask),\n }\n\n @property\n def task(self):\n return Task(\n type=TaskType.MULTI_CLASS_CLASSIFICATION, output=TaskOutput.SEQUENCE\n )\n"
] |
[
[
"numpy.random.random",
"torch.nn.functional.nll_loss",
"numpy.random.shuffle",
"torch.tensor",
"numpy.ceil",
"torch.nn.functional.mse_loss",
"torch.nn.functional.binary_cross_entropy",
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lijun-Yu/pytorch-lightning
|
[
"4dc4c8cfa5b1bcc8732036f889eb54455cc97e36",
"4dc4c8cfa5b1bcc8732036f889eb54455cc97e36"
] |
[
"pytorch_lightning/trainer/data_loading.py",
"pytorch_lightning/accelerators/ddp_base_backend.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport multiprocessing\nimport platform\nfrom abc import ABC, abstractmethod\nfrom typing import Union, List, Tuple, Callable, Optional\n\nimport torch.distributed as torch_distrib\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.data import has_iterable_dataset, has_len\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.debugging import InternalDebugger\nfrom pytorch_lightning.utilities.model_utils import is_overridden\n\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\ntry:\n import horovod.torch as hvd\nexcept (ModuleNotFoundError, ImportError):\n HOROVOD_AVAILABLE = False\nelse:\n HOROVOD_AVAILABLE = True\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n global_rank: int\n use_ddp: bool\n use_ddp2: bool\n use_horovod: bool\n shown_warnings: ...\n val_check_interval: float\n use_tpu: bool\n tpu_local_core_rank: int\n train_dataloader: DataLoader\n num_training_batches: Union[int, float]\n val_check_batch: ...\n val_dataloaders: List[DataLoader]\n num_val_batches: List[Union[int, float]]\n test_dataloaders: List[DataLoader]\n num_test_batches: List[Union[int, float]]\n limit_train_batches: Union[int, float]\n limit_val_batches: Union[int, float]\n limit_test_batches: Union[int, float]\n replace_sampler_ddp: bool\n num_nodes: int\n num_processes: int\n distributed_backend: Optional[str]\n dev_debugger: InternalDebugger\n\n def _worker_check(self, dataloader: DataLoader, name: str) -> None:\n on_windows = platform.system() == 'Windows'\n\n # ddp_spawn + num_workers > 0 don't mix! 
tell the user\n is_dataloader = isinstance(dataloader, DataLoader)\n using_spawn = self.distributed_backend == 'ddp_spawn'\n if is_dataloader and not on_windows:\n if dataloader.num_workers > 0 and using_spawn:\n rank_zero_warn('Dataloader(num_workers>0) and ddp_spawn do not mix well!'\n ' Your performance might suffer dramatically.'\n ' Please consider setting distributed_backend=ddp to use num_workers > 0'\n ' (this is a bottleneck of Python .spawn() and PyTorch')\n\n elif dataloader.num_workers == 0 and using_spawn:\n rank_zero_warn('You are using `distributed_backend=ddp_spawn` with num_workers=0.'\n ' For much faster performance, switch to `distributed_backend=ddp`'\n ' and set `num_workers>0`')\n\n elif dataloader.num_workers <= 2 and multiprocessing.cpu_count() > 2 and not using_spawn:\n num_cpus = multiprocessing.cpu_count()\n rank_zero_warn(f'The dataloader, {name}, does not have many workers which may be a bottleneck.'\n ' Consider increasing the value of the `num_workers` argument`'\n f' (try {num_cpus} which is the number of cpus on this machine)'\n ' in the `DataLoader` init to improve performance.')\n\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n\n # don't do anything if it's not a dataloader\n is_dataloader = isinstance(dataloader, DataLoader)\n # don't manipulate iterable datasets\n is_iterable_ds = has_iterable_dataset(dataloader)\n\n if not is_dataloader or is_iterable_ds:\n return dataloader\n need_dist_sampler = (self.use_ddp or self.use_ddp2 or self.use_horovod or self.use_tpu)\n\n if self.replace_sampler_ddp and need_dist_sampler:\n if not isinstance(dataloader.sampler, (SequentialSampler, RandomSampler)):\n raise MisconfigurationException(\n 'You seem to have configured a sampler in your DataLoader. This will be replaced '\n ' by `DistributedSampler` since `replace_sampler_ddp` is True and you are using'\n ' distributed training. 
Either remove the sampler from your DataLoader or set'\n ' `replace_sampler_ddp`=False if you want to use your custom sampler.')\n\n # replace with distributed sampler\n sampler = self._get_distributed_sampler(dataloader, train)\n dataloader = self.replace_sampler(dataloader, sampler)\n\n return dataloader\n\n def replace_sampler(self, dataloader, sampler):\n skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']\n\n dl_args = {\n k: v for k, v in dataloader.__dict__.items() if not k.startswith('_') and k not in skip_keys\n }\n\n dl_args['sampler'] = sampler\n dataloader = type(dataloader)(**dl_args)\n return dataloader\n\n def _get_distributed_sampler(self, dataloader, train):\n if self.use_tpu:\n kwargs = dict(num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())\n elif self.use_horovod:\n kwargs = dict(num_replicas=hvd.size(), rank=hvd.rank())\n else:\n world_size = {\n 'ddp': self.num_nodes * self.num_processes,\n 'ddp_spawn': self.num_nodes * self.num_processes,\n 'ddp2': self.num_nodes,\n 'ddp_cpu': self.num_processes * self.num_nodes\n }\n assert self.distributed_backend is not None\n kwargs = dict(num_replicas=world_size[self.distributed_backend], rank=self.global_rank)\n\n kwargs['shuffle'] = train\n sampler = DistributedSampler(dataloader.dataset, **kwargs)\n return sampler\n\n def reset_train_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the train dataloader and initialises required variables\n (number of batches, when to validate, etc.).\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n\n # debugging\n self.dev_debugger.track_load_dataloader_call('train_dataloader', dataloaders=[self.train_dataloader])\n\n self.num_training_batches = 0\n\n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n\n self.num_training_batches = len(self.train_dataloader) if has_len(self.train_dataloader) else float('inf')\n self._worker_check(self.train_dataloader, 'train dataloader')\n\n if isinstance(self.limit_train_batches, int) or self.limit_train_batches == 0.0:\n self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))\n elif self.num_training_batches != float('inf'):\n self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)\n elif self.limit_train_batches != 1.0:\n raise MisconfigurationException(\n 'When using an IterableDataset for `limit_train_batches`,'\n ' `Trainer(limit_train_batches)` must be `0.0`, `1.0` or an int. An int k specifies'\n ' `num_training_batches` to use.')\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '\n f'to the number of the training batches ({self.num_training_batches}). '\n 'If you want to disable validation set `limit_val_batches` to 0.0 instead.')\n else:\n if not has_len(self.train_dataloader):\n if self.val_check_interval == 1.0:\n self.val_check_batch = float('inf')\n else:\n raise MisconfigurationException(\n 'When using an IterableDataset for `train_dataloader`,'\n ' `Trainer(val_check_interval)` must be `1.0` or an int. 
An int k specifies'\n ' checking validation every k training batches.')\n else:\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n def _reset_eval_dataloader(\n self,\n model: LightningModule,\n mode: str\n ) -> Tuple[List[Union[int, float]], List[DataLoader]]:\n \"\"\"Generic method to reset a dataloader for evaluation.\n\n Args:\n model: The current `LightningModule`\n mode: Either `'val'` or `'test'`\n\n Returns:\n Tuple (num_batches, dataloaders)\n \"\"\"\n # use the training loader as val and test when overfitting\n loader_name = f'{mode}_dataloader'\n if self.overfit_batches > 0:\n loader_name = 'train_dataloader'\n\n # load loaders\n dataloaders = self.request_dataloader(getattr(model, loader_name))\n\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n self.dev_debugger.track_load_dataloader_call(loader_name, dataloaders=dataloaders)\n\n for loader_i in range(len(dataloaders)):\n loader = dataloaders[loader_i]\n\n # shuffling in val and test set is bad practice\n if mode in ('val', 'test') and hasattr(loader, 'sampler') and isinstance(loader.sampler, RandomSampler):\n\n # when overfitting, the dataloader should not have sampler\n if self.overfit_batches > 0:\n rank_zero_warn('You requested to overfit but enabled training dataloader shuffling.'\n ' We are turning it off for you.')\n dataloaders[loader_i] = self.replace_sampler(loader, SequentialSampler(loader.dataset))\n\n else:\n rank_zero_warn(f'Your {mode}_dataloader has `shuffle=True`, it is best practice to turn'\n ' this off for validation and test dataloaders.')\n\n if any([dl is None for dl in dataloaders]):\n rank_zero_warn(\"One of given dataloaders is None and it will be skipped.\")\n\n # add samplers\n dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl is not None]\n\n loader_num_batches = []\n\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n for i, dataloader in enumerate(dataloaders):\n num_batches = len(dataloader) if has_len(dataloader) else float('inf')\n self._worker_check(dataloader, f'{mode} dataloader {i}')\n\n # percent or num_steps\n limit_eval_batches = getattr(self, f'limit_{mode}_batches')\n\n # limit num batches either as a percent or num steps\n if isinstance(limit_eval_batches, int) or limit_eval_batches == 0.0:\n num_batches = min(num_batches, int(limit_eval_batches))\n elif num_batches != float('inf'):\n num_batches = int(num_batches * limit_eval_batches)\n elif limit_eval_batches != 1.0:\n raise MisconfigurationException(\n 'When using an IterableDataset for `limit_{mode}_batches`,'\n f' `Trainer(limit_{mode}_batches)` must be `0.0`, `1.0` or an int. An int k specifies'\n f' `num_{mode}_batches` to use.')\n\n if num_batches == 0 and limit_eval_batches > 0.0 and isinstance(limit_eval_batches, float):\n min_pct = 1.0 / len(dataloader)\n raise MisconfigurationException(\n f'you requested to check {limit_eval_batches} of the {mode} dataloader but'\n f' {limit_eval_batches}*{num_batches} = 0. 
Please increase the limit_{mode}_batches.'\n f' Try at least limit_{mode}_batches={min_pct}'\n )\n\n loader_num_batches.append(num_batches)\n\n return loader_num_batches, dataloaders\n\n def reset_val_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n has_loader = is_overridden('val_dataloader', model)\n has_step = is_overridden('validation_step', model)\n if has_loader and has_step:\n self.num_val_batches, self.val_dataloaders = self._reset_eval_dataloader(model, 'val')\n\n def reset_test_dataloader(self, model) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n has_loader = is_overridden('test_dataloader', model)\n has_step = is_overridden('test_step', model)\n if has_loader and has_step:\n self.num_test_batches, self.test_dataloaders =\\\n self._reset_eval_dataloader(model, 'test')\n\n def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:\n \"\"\"Handles downloading data in the GPU or TPU case.\n\n Args:\n dataloader_fx: The bound dataloader getter\n\n Returns:\n The dataloader\n \"\"\"\n dataloader = dataloader_fx()\n\n # get the function we'll use to get data\n if self.use_ddp or self.use_ddp2:\n # all processes wait until data download has happened\n torch_distrib.barrier()\n\n # data download/load on TPU\n elif self.use_tpu and XLA_AVAILABLE:\n # all processes wait until data download has happened\n torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')\n\n elif self.use_horovod:\n # all processes wait until data download has happened\n hvd.join()\n\n return dataloader\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport re\nimport torch\n\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.accelerators.base_backend import Accelerator\nimport torch.distributed as torch_distrib\nimport torch.distributed as dist\nfrom pytorch_lightning.utilities.cloud_io import atomic_save\nfrom pytorch_lightning.utilities.distributed import rank_zero_warn, rank_zero_only\nfrom pytorch_lightning import _logger as log\n\ntry:\n from hydra.utils import to_absolute_path, get_original_cwd\n from hydra.core.hydra_config import HydraConfig\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n HYDRA_AVAILABLE = True\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\n\nclass DDPBase(Accelerator):\n\n def __init__(self, trainer):\n super().__init__(trainer)\n\n def training_step(self, args):\n if self.trainer.amp_backend == AMPType.NATIVE:\n with torch.cuda.amp.autocast():\n output = self.trainer.model(*args)\n else:\n output = self.trainer.model(*args)\n return output\n\n def validation_step(self, args):\n output = self.training_step(args)\n return output\n\n def test_step(self, args):\n output = self.training_step(args)\n return output\n\n def barrier(self, name: str = None):\n torch_distrib.barrier()\n\n def early_stopping_should_stop(self, pl_module):\n stop = torch.tensor(int(self.trainer.should_stop), device=pl_module.device)\n dist.all_reduce(stop, op=dist.reduce_op.SUM)\n dist.barrier()\n should_stop = stop == self.trainer.world_size\n return should_stop\n\n def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):\n if self.trainer.distributed_backend.lower() not in ['ddp_spawn', 'ddp_cpu', 'tpu']:\n return\n\n # track the best model path\n best_model_path = None\n if self.trainer.checkpoint_callback is not None:\n best_model_path = self.trainer.checkpoint_callback.best_model_path\n\n if self.trainer.global_rank == 0 and mp_queue is not None:\n rank_zero_warn('cleaning up ddp environment...')\n # todo, pass complete checkpoint as state dictionary\n mp_queue.put(best_model_path)\n mp_queue.put(results)\n\n # save the last weights\n last_path = None\n if not self.trainer.testing and best_model_path is not None and len(best_model_path) > 0:\n last_path = re.sub('.ckpt', '.tmp_end.ckpt', best_model_path)\n atomic_save(model.state_dict(), last_path)\n mp_queue.put(last_path)\n\n def ddp_train_tmp(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n\n Returns:\n\n \"\"\"\n # offset the process id if requested\n process_idx = process_idx + proc_offset\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.set_world_ranks(process_idx)\n\n # set warning rank\n 
rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n model.trainer = self.trainer\n model.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero:\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # move the model to the correct device\n self.model_to_device(model, process_idx, is_master)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)\n self.trainer.optimizers = optimizers\n self.trainer.lr_schedulers = lr_schedulers\n self.trainer.optimizer_frequencies = optimizer_frequencies\n\n # set model properties before going into wrapper\n self.trainer.model_connector.copy_trainer_model_properties(model)\n\n # AMP -\n # run through amp wrapper before going to distributed DP\n if self.trainer.amp_backend == AMPType.APEX:\n model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)\n self.trainer.optimizers = optimizers\n self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)\n\n # device ids change depending on the DDP setup\n device_ids = self.get_device_ids()\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # set up training routine\n self.trainer.train_loop.setup_training(model)\n\n # train or test\n results = self.train_or_test()\n\n # get original model\n model = self.trainer.get_model()\n\n # persist info in ddp_spawn\n self.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n # clean up memory\n torch.cuda.empty_cache()\n\n if self.trainer.global_rank == 0:\n return results\n\n def set_world_ranks(self, process_idx):\n raise NotImplementedError('to create a ddp backend, please implement set_world_ranks')\n\n def model_to_device(self, model, process_idx, is_master):\n raise NotImplementedError('to create a ddp backend, please implement model_to_device')\n\n def get_device_ids(self):\n raise NotImplementedError('to create a ddp backend, please implement get_device_ids')\n"
] |
[
[
"torch.distributed.barrier",
"torch.utils.data.SequentialSampler",
"torch.utils.data.distributed.DistributedSampler"
],
[
"torch.distributed.all_reduce",
"torch.cuda.empty_cache",
"torch.distributed.barrier",
"torch.cuda.amp.autocast"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jengvi/trimesh
|
[
"aeefe89a27ae17c3f4d6286b9a2ba1623329a286",
"aeefe89a27ae17c3f4d6286b9a2ba1623329a286",
"aeefe89a27ae17c3f4d6286b9a2ba1623329a286"
] |
[
"tests/generic.py",
"trimesh/exchange/dae.py",
"trimesh/viewer/windowed.py"
] |
[
"# flake8: noqa\n\"\"\"\nModule which contains most imports and data unit tests\nmight need, to reduce the amount of boilerplate.\n\"\"\"\nfrom trimesh.base import Trimesh\nfrom trimesh.constants import tol, tol_path\nfrom collections import deque\nfrom copy import deepcopy\nimport collections\nimport trimesh\nfrom distutils.spawn import find_executable\nimport os\nimport sys\nimport json\nimport copy\nimport time\nimport shutil\nimport timeit\nimport base64\nimport inspect\nimport logging\nimport platform\nimport tempfile\nimport unittest\nimport itertools\nimport subprocess\nimport contextlib\nimport threading\nimport warnings\n\ntry:\n # Python 3\n from http.server import SimpleHTTPRequestHandler\n import socketserver\nexcept ImportError:\n # Python 2\n from SimpleHTTPServer import SimpleHTTPRequestHandler\n import SocketServer as socketserver\n\nimport numpy as np\n\n# should we require all soft dependencies\n# this is set in the docker images to catch missing packages\nall_dep = 'alldep' in ''.join(sys.argv)\n\nif all_dep:\n # make sure pyembree is importable\n from pyembree import rtcore_scene\n\ntry:\n import sympy as sp\nexcept ImportError as E:\n if all_dep:\n raise E\n\n\n# make sure functions know they should run additional\n# potentially slow validation checks and raise exceptions\ntrimesh.util._STRICT = True\ntrimesh.constants.tol.strict = True\ntrimesh.constants.tol_path.strict = True\n\n\ntry:\n from shapely.geometry import Point, Polygon, LineString\n has_path = True\nexcept ImportError as E:\n if all_dep:\n raise E\n has_path = False\n\ntry:\n from scipy import spatial, sparse\nexcept BaseException as E:\n if all_dep:\n raise E\n\n# find_executable for binvox\nhas_binvox = trimesh.exchange.binvox.binvox_encoder is not None\n\n# Python version as an array, i.e. 
[3, 6]\npython_version = np.array([sys.version_info.major,\n sys.version_info.minor])\n\n# some repeatable homogeneous transforms to use in tests\ntransforms = [trimesh.transformations.euler_matrix(np.pi / 4, i, 0)\n for i in np.linspace(0.0, np.pi * 2.0, 100)]\n# should be a (100, 4, 4) float\ntransforms = np.array(transforms)\n\ntry:\n # do the imports for Python 2\n from cStringIO import StringIO\n _PY3 = False\nexcept ImportError:\n # if that didn't work we're probably on Python 3\n from io import StringIO\n from io import BytesIO\n _PY3 = True\n\n# are we on linux\nis_linux = 'linux' in platform.system().lower()\n\n# find the current absolute path using inspect\ndir_current = os.path.dirname(\n os.path.abspath(os.path.expanduser(__file__)))\n# the absolute path for our reference models\ndir_models = os.path.abspath(\n os.path.join(dir_current, '..', 'models'))\n# the absolute path for our 2D reference models\ndir_2D = os.path.abspath(\n os.path.join(dir_current, '..', 'models', '2D'))\n# the absolute path for our test data and truth\ndir_data = os.path.abspath(\n os.path.join(dir_current, 'data'))\n\n# a logger for tests to call\nlog = logging.getLogger('trimesh')\nlog.addHandler(logging.NullHandler())\n\n# turn strings / bytes into file- like objects\nio_wrap = trimesh.util.wrap_as_stream\n\n\ndef random(*args, **kwargs):\n \"\"\"\n A random function always seeded from the same value.\n\n Replaces: np.random.random(*args, **kwargs)\n \"\"\"\n state = np.random.RandomState(seed=1)\n return state.random_sample(*args, **kwargs)\n\n\ndef _load_data():\n \"\"\"\n Load the JSON files from our truth directory.\n \"\"\"\n data = {}\n for file_name in os.listdir(dir_data):\n name, extension = os.path.splitext(file_name)\n if extension != '.json':\n continue\n file_path = os.path.join(dir_data, file_name)\n with open(file_path, 'r') as file_obj:\n data[name] = json.load(file_obj)\n\n data['model_paths'] = [os.path.join(dir_models, f)\n for f in os.listdir(dir_models)]\n data['2D_files'] = [os.path.join(dir_2D, f) for f in os.listdir(dir_2D)]\n return data\n\n\ndef get_mesh(file_name, *args, **kwargs):\n \"\"\"\n Get a mesh from the models directory by name.\n\n Parameters\n -------------\n file_name : str\n Name of model in /models/\n *args : [str]\n Additional files to load\n\n Returns\n -----------\n meshes : trimesh.Trimesh or list\n Single mesh or list of meshes from args\n \"\"\"\n meshes = collections.deque()\n for name in np.append(file_name, args):\n location = os.path.join(dir_models, name)\n log.info('loading mesh from: %s', location)\n meshes.append(trimesh.load(location, **kwargs))\n if len(meshes) == 1:\n return meshes[0]\n return list(meshes)\n\n\[email protected]\ndef serve_meshes():\n \"\"\"\n This context manager serves meshes over HTTP at some\n available port.\n \"\"\"\n class _ServerThread(threading.Thread):\n def run(self):\n os.chdir(dir_models)\n Handler = SimpleHTTPRequestHandler\n self.httpd = socketserver.TCPServer(('', 0), Handler)\n _, self.port = self.httpd.server_address\n self.httpd.serve_forever()\n\n t = _ServerThread()\n t.daemon = False\n t.start()\n time.sleep(0.2)\n yield 'http://localhost:{}'.format(t.port)\n t.httpd.shutdown()\n t.join()\n\n\ndef get_meshes(count=np.inf,\n raise_error=False,\n only_watertight=True):\n \"\"\"\n Get meshes to test with.\n\n Parameters\n ----------\n count : int\n Approximate number of meshes you want\n raise_error : bool\n If True raise a ValueError if a mesh\n that should be loadable returns a non- Trimesh object.\n\n 
Returns\n ----------\n meshes : list\n Trimesh objects from models folder\n \"\"\"\n # use deterministic file name order\n file_names = sorted(os.listdir(dir_models))\n\n meshes = []\n for file_name in file_names:\n extension = trimesh.util.split_extension(file_name).lower()\n if extension in trimesh.available_formats():\n loaded = trimesh.util.make_sequence(get_mesh(file_name))\n for m in loaded:\n # is the loaded mesh a Geometry object or a subclass:\n # Trimesh, PointCloud, Scene\n type_ok = isinstance(m, trimesh.parent.Geometry)\n if raise_error and not type_ok:\n raise ValueError('%s returned a non- Trimesh object!',\n file_name)\n if not isinstance(m, trimesh.Trimesh) or (\n only_watertight and not m.is_watertight):\n continue\n meshes.append(m)\n yield m\n else:\n log.warning('%s has no loader, not running test on!',\n file_name)\n\n if len(meshes) >= count:\n break\n\n\ndef get_2D(count=None):\n \"\"\"\n Get Path2D objects to test with.\n\n Parameters\n --------------\n count : int\n Number of 2D drawings to return\n\n Yields\n --------------\n path : trimesh.path.Path2D\n Drawing from models folder\n \"\"\"\n # if no path loading return empty list\n if not has_path:\n raise StopIteration\n\n # all files in the 2D models directory\n listdir = sorted(os.listdir(dir_2D))\n # if count isn't passed return all files\n if count is None:\n count = len(listdir)\n # save resulting loaded paths\n paths = []\n for file_name in listdir:\n # check to see if the file is loadable\n ext = trimesh.util.split_extension(file_name)\n if ext not in trimesh.available_formats():\n continue\n # full path\n location = os.path.join(dir_2D, file_name)\n try:\n paths.append(trimesh.load(location))\n except BaseException as E:\n log.error('failed on: {}'.format(file_name),\n exc_info=True)\n raise E\n\n yield paths[-1]\n\n # if we don't need every path break\n if len(paths) >= count:\n break\n\n\ndef check_path2D(path):\n \"\"\"\n Make basic assertions on Path2D objects\n \"\"\"\n # root count should be the same as the closed polygons\n assert len(path.root) == len(path.polygons_full)\n\n # make sure polygons are really polygons\n assert all(type(i).__name__ == 'Polygon'\n for i in path.polygons_full)\n assert all(type(i).__name__ == 'Polygon'\n for i in path.polygons_closed)\n\n # these should all correspond to each other\n assert len(path.discrete) == len(path.polygons_closed)\n assert len(path.discrete) == len(path.paths)\n\n # make sure None polygons are not referenced in graph\n assert all(path.polygons_closed[i] is not None\n for i in path.enclosure_directed.nodes())\n\n assert path.colors.shape == (len(path.entities), 4)\n\n\ndef scene_equal(a, b):\n \"\"\"\n Do a simple check on two scenes and assert\n that they have the same geometry.\n\n Parameters\n ------------\n a : trimesh.Scene\n Object to be compared\n b : trimesh.Scene\n Object to be compared\n \"\"\"\n # should have the same number of geometries\n assert len(a.geometry) == len(b.geometry)\n for k, m in a.geometry.items():\n # each mesh should correspond by name\n # and have the same volume\n assert np.isclose(\n m.volume, b.geometry[k].volume, rtol=0.001)\n\n\nTemporaryDirectory = trimesh.util.TemporaryDirectory\n\n# all the JSON files with truth data\ndata = _load_data()\n\n# find executables to run with subprocess\n# formats supported by meshlab for export tests\nif any(find_executable(i) is None\n for i in ['xfvb-run', 'meshlabserver']):\n meshlab_formats = []\nelse:\n meshlab_formats = ['3ds', 'ply', 'stl', 'obj', 'qobj', 'off', 'ptx', 
'vmi',\n 'bre', 'dae', 'ctm', 'pts', 'apts', 'xyz', 'gts', 'pdb',\n 'tri', 'asc', 'x3d', 'x3dv', 'wrl']\n",
"import io\nimport copy\nimport uuid\n\nimport numpy as np\n\ntry:\n # pip install pycollada\n import collada\nexcept BaseException:\n collada = None\n\ntry:\n import PIL.Image\nexcept ImportError:\n pass\n\nfrom .. import util\nfrom .. import visual\n\nfrom ..constants import log\n\n\ndef load_collada(file_obj, resolver=None, **kwargs):\n \"\"\"\n Load a COLLADA (.dae) file into a list of trimesh kwargs.\n\n Parameters\n ----------\n file_obj : file object\n Containing a COLLADA file\n resolver : trimesh.visual.Resolver or None\n For loading referenced files, like texture images\n kwargs : **\n Passed to trimesh.Trimesh.__init__\n\n Returns\n -------\n loaded : list of dict\n kwargs for Trimesh constructor\n \"\"\"\n # load scene using pycollada\n c = collada.Collada(file_obj)\n\n # Create material map from Material ID to trimesh material\n material_map = {}\n for m in c.materials:\n effect = m.effect\n material_map[m.id] = _parse_material(effect, resolver)\n\n # name : kwargs\n meshes = {}\n # list of dict\n graph = []\n for node in c.scene.nodes:\n _parse_node(node=node,\n parent_matrix=np.eye(4),\n material_map=material_map,\n meshes=meshes,\n graph=graph,\n resolver=resolver)\n\n # create kwargs for load_kwargs\n result = {'class': 'Scene',\n 'graph': graph,\n 'geometry': meshes}\n\n return result\n\n\ndef export_collada(mesh, **kwargs):\n \"\"\"\n Export a mesh or a list of meshes as a COLLADA .dae file.\n\n Parameters\n -----------\n mesh: Trimesh object or list of Trimesh objects\n The mesh(es) to export.\n\n Returns\n -----------\n export: str, string of COLLADA format output\n \"\"\"\n meshes = mesh\n if not isinstance(mesh, (list, tuple, set, np.ndarray)):\n meshes = [mesh]\n\n c = collada.Collada()\n nodes = []\n for i, m in enumerate(meshes):\n\n # Load uv, colors, materials\n uv = None\n colors = None\n mat = _unparse_material(None)\n if m.visual.defined:\n if m.visual.kind == 'texture':\n mat = _unparse_material(m.visual.material)\n uv = m.visual.uv\n elif m.visual.kind == 'vertex':\n colors = (m.visual.vertex_colors / 255.0)[:, :3]\n c.effects.append(mat.effect)\n c.materials.append(mat)\n\n # Create geometry object\n vertices = collada.source.FloatSource(\n 'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))\n normals = collada.source.FloatSource(\n 'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))\n input_list = collada.source.InputList()\n input_list.addInput(0, 'VERTEX', '#verts-array')\n input_list.addInput(1, 'NORMAL', '#normals-array')\n arrays = [vertices, normals]\n if uv is not None:\n texcoords = collada.source.FloatSource(\n 'texcoords-array', uv.flatten(), ('U', 'V'))\n input_list.addInput(2, 'TEXCOORD', '#texcoords-array')\n arrays.append(texcoords)\n if colors is not None:\n idx = 2\n if uv:\n idx = 3\n colors = collada.source.FloatSource('colors-array',\n colors.flatten(), ('R', 'G', 'B'))\n input_list.addInput(idx, 'COLOR', '#colors-array')\n arrays.append(colors)\n geom = collada.geometry.Geometry(\n c, uuid.uuid4().hex, uuid.uuid4().hex, arrays\n )\n indices = np.repeat(m.faces.flatten(), len(arrays))\n\n matref = u'material{}'.format(i)\n triset = geom.createTriangleSet(indices, input_list, matref)\n geom.primitives.append(triset)\n c.geometries.append(geom)\n\n matnode = collada.scene.MaterialNode(matref, mat, inputs=[])\n geomnode = collada.scene.GeometryNode(geom, [matnode])\n node = collada.scene.Node(u'node{}'.format(i), children=[geomnode])\n nodes.append(node)\n scene = collada.scene.Scene('scene', nodes)\n c.scenes.append(scene)\n 
c.scene = scene\n\n b = io.BytesIO()\n c.write(b)\n b.seek(0)\n return b.read()\n\n\ndef _parse_node(node,\n parent_matrix,\n material_map,\n meshes,\n graph,\n resolver=None):\n \"\"\"\n Recursively parse COLLADA scene nodes.\n \"\"\"\n\n # Parse mesh node\n if isinstance(node, collada.scene.GeometryNode):\n geometry = node.geometry\n\n # Create local material map from material symbol to actual material\n local_material_map = {}\n for mn in node.materials:\n symbol = mn.symbol\n m = mn.target\n if m.id in material_map:\n local_material_map[symbol] = material_map[m.id]\n else:\n local_material_map[symbol] = _parse_material(m, resolver)\n\n # Iterate over primitives of geometry\n for i, primitive in enumerate(geometry.primitives):\n if isinstance(primitive, collada.polylist.Polylist):\n primitive = primitive.triangleset()\n if isinstance(primitive, collada.triangleset.TriangleSet):\n vertex = primitive.vertex\n vertex_index = primitive.vertex_index\n vertices = vertex[vertex_index].reshape(\n len(vertex_index) * 3, 3)\n\n # Get normals if present\n normals = None\n if primitive.normal is not None:\n normal = primitive.normal\n normal_index = primitive.normal_index\n normals = normal[normal_index].reshape(\n len(normal_index) * 3, 3)\n\n # Get colors if present\n colors = None\n s = primitive.sources\n if ('COLOR' in s and len(s['COLOR'])\n > 0 and len(primitive.index) > 0):\n color = s['COLOR'][0][4].data\n color_index = primitive.index[:, :, s['COLOR'][0][0]]\n colors = color[color_index].reshape(\n len(color_index) * 3, 3)\n\n faces = np.arange(\n vertices.shape[0]).reshape(\n vertices.shape[0] // 3, 3)\n\n # Get UV coordinates if possible\n vis = None\n if primitive.material in local_material_map:\n material = copy.copy(\n local_material_map[primitive.material])\n uv = None\n if len(primitive.texcoordset) > 0:\n texcoord = primitive.texcoordset[0]\n texcoord_index = primitive.texcoord_indexset[0]\n uv = texcoord[texcoord_index].reshape(\n (len(texcoord_index) * 3, 2))\n vis = visual.texture.TextureVisuals(\n uv=uv, material=material)\n\n primid = u'{}.{}'.format(geometry.id, i)\n meshes[primid] = {\n 'vertices': vertices,\n 'faces': faces,\n 'vertex_normals': normals,\n 'vertex_colors': colors,\n 'visual': vis}\n\n graph.append({'frame_to': primid,\n 'matrix': parent_matrix,\n 'geometry': primid})\n\n # recurse down tree for nodes with children\n elif isinstance(node, collada.scene.Node):\n if node.children is not None:\n for child in node.children:\n # create the new matrix\n matrix = np.dot(parent_matrix, node.matrix)\n # parse the child node\n _parse_node(\n node=child,\n parent_matrix=matrix,\n material_map=material_map,\n meshes=meshes,\n graph=graph,\n resolver=resolver)\n\n elif isinstance(node, collada.scene.CameraNode):\n # TODO: convert collada cameras to trimesh cameras\n pass\n elif isinstance(node, collada.scene.LightNode):\n # TODO: convert collada lights to trimesh lights\n pass\n\n\ndef _load_texture(file_name, resolver):\n \"\"\"\n Load a texture from a file into a PIL image.\n \"\"\"\n file_data = resolver.get(file_name)\n image = PIL.Image.open(util.wrap_as_stream(file_data))\n return image\n\n\ndef _parse_material(effect, resolver):\n \"\"\"\n Turn a COLLADA effect into a trimesh material.\n \"\"\"\n\n # Compute base color\n baseColorFactor = np.ones(4)\n baseColorTexture = None\n if isinstance(effect.diffuse, collada.material.Map):\n try:\n baseColorTexture = _load_texture(\n effect.diffuse.sampler.surface.image.path, resolver)\n except BaseException:\n 
log.warning('unable to load base texture',\n exc_info=True)\n elif effect.diffuse is not None:\n baseColorFactor = effect.diffuse\n\n # Compute emission color\n emissiveFactor = np.zeros(3)\n emissiveTexture = None\n if isinstance(effect.emission, collada.material.Map):\n try:\n emissiveTexture = _load_texture(\n effect.diffuse.sampler.surface.image.path, resolver)\n except BaseException:\n log.warning('unable to load emissive texture',\n exc_info=True)\n elif effect.emission is not None:\n emissiveFactor = effect.emission[:3]\n\n # Compute roughness\n roughnessFactor = 1.0\n if (not isinstance(effect.shininess, collada.material.Map)\n and effect.shininess is not None):\n roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess))\n\n # Compute metallic factor\n metallicFactor = 0.0\n\n # Compute normal texture\n normalTexture = None\n if effect.bumpmap is not None:\n try:\n normalTexture = _load_texture(\n effect.bumpmap.sampler.surface.image.path, resolver)\n except BaseException:\n log.warning('unable to load bumpmap',\n exc_info=True)\n\n # Compute opacity\n if (effect.transparent is not None\n and not isinstance(effect.transparent, collada.material.Map)):\n baseColorFactor = tuple(np.append(baseColorFactor[:3], effect.transparent[3]))\n\n return visual.material.PBRMaterial(\n emissiveFactor=emissiveFactor,\n emissiveTexture=emissiveTexture,\n normalTexture=normalTexture,\n baseColorTexture=baseColorTexture,\n baseColorFactor=baseColorFactor,\n metallicFactor=metallicFactor,\n roughnessFactor=roughnessFactor)\n\n\ndef _unparse_material(material):\n \"\"\"\n Turn a trimesh material into a COLLADA material.\n \"\"\"\n # TODO EXPORT TEXTURES\n if isinstance(material, visual.material.PBRMaterial):\n diffuse = material.baseColorFactor\n if diffuse is not None:\n diffuse = list(diffuse)\n\n emission = material.emissiveFactor\n if emission is not None:\n emission = [float(emission[0]), float(emission[1]),\n float(emission[2]), 1.0]\n\n shininess = material.roughnessFactor\n if shininess is not None:\n shininess = 2.0 / shininess**2 - 2.0\n\n effect = collada.material.Effect(\n uuid.uuid4().hex, params=[], shadingtype='phong',\n diffuse=diffuse, emission=emission,\n specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess)\n )\n material = collada.material.Material(\n uuid.uuid4().hex, 'pbrmaterial', effect\n )\n else:\n effect = collada.material.Effect(\n uuid.uuid4().hex, params=[], shadingtype='phong'\n )\n material = collada.material.Material(\n uuid.uuid4().hex, 'defaultmaterial', effect\n )\n return material\n\n\ndef load_zae(file_obj, resolver=None, **kwargs):\n \"\"\"\n Load a ZAE file, which is just a zipped DAE file.\n\n Parameters\n -------------\n file_obj : file object\n Contains ZAE data\n resolver : trimesh.visual.Resolver\n Resolver to load additional assets\n kwargs : dict\n Passed to load_collada\n\n Returns\n ------------\n loaded : dict\n Results of loading\n \"\"\"\n\n # a dict, {file name : file object}\n archive = util.decompress(file_obj,\n file_type='zip')\n\n # load the first file with a .dae extension\n file_name = next(i for i in archive.keys()\n if i.lower().endswith('.dae'))\n\n # a resolver so the loader can load textures / etc\n resolver = visual.resolvers.ZipResolver(archive)\n\n # run the regular collada loader\n loaded = load_collada(archive[file_name],\n resolver=resolver,\n **kwargs)\n return loaded\n\n\n# only provide loaders if `pycollada` is installed\n_collada_loaders = {}\n_collada_exporters = {}\nif collada is not None:\n _collada_loaders['dae'] = 
load_collada\n _collada_loaders['zae'] = load_zae\n _collada_exporters['dae'] = export_collada\n",
"\"\"\"\nwindowed.py\n---------------\n\nProvides a pyglet- based windowed viewer to preview\nTrimesh, Scene, PointCloud, and Path objects.\n\nWorks on all major platforms: Windows, Linux, and OSX.\n\"\"\"\nimport platform\nimport collections\nimport numpy as np\n\nimport pyglet\nimport pyglet.gl as gl\n\nfrom .trackball import Trackball\n\nfrom .. import util\nfrom .. import rendering\n\nfrom ..visual import to_rgba\nfrom ..transformations import translation_matrix\n\npyglet.options['shadow_window'] = False\n\n# smooth only when fewer faces than this\n_SMOOTH_MAX_FACES = 100000\n\n\nclass SceneViewer(pyglet.window.Window):\n\n def __init__(self,\n scene,\n smooth=True,\n flags=None,\n visible=True,\n resolution=None,\n start_loop=True,\n callback=None,\n callback_period=None,\n caption=None,\n fixed=None,\n offset_lines=True,\n background=None,\n window_conf=None,\n profile=False,\n ** kwargs):\n \"\"\"\n Create a window that will display a trimesh.Scene object\n in an OpenGL context via pyglet.\n\n Parameters\n ---------------\n scene : trimesh.scene.Scene\n Scene with geometry and transforms\n smooth : bool\n If True try to smooth shade things\n flags : dict\n If passed apply keys to self.view:\n ['cull', 'wireframe', etc]\n visible : bool\n Display window or not\n resolution : (2,) int\n Initial resolution of window\n start_loop : bool\n Call pyglet.app.run() at the end of init\n callback : function\n A function which can be called periodically to\n update things in the scene\n callback_period : float\n How often to call the callback, in seconds\n fixed : None or iterable\n List of keys in scene.geometry to skip view\n transform on to keep fixed relative to camera\n offset_lines : bool\n If True, will offset lines slightly so if drawn\n coplanar with mesh geometry they will be visible\n background : None or (4,) uint8\n Color for background\n window_conf : None, or gl.Config\n Passed to window init\n kwargs : dict\n Additional arguments to pass, including\n 'background' for to set background color\n \"\"\"\n self.scene = self._scene = scene\n\n self.callback = callback\n self.callback_period = callback_period\n self.scene._redraw = self._redraw\n self.offset_lines = bool(offset_lines)\n self.background = background\n # save initial camera transform\n self._initial_camera_transform = scene.camera_transform.copy()\n\n # a transform to offset lines slightly to avoid Z-fighting\n if self.offset_lines:\n self._line_offset = translation_matrix(\n [0, 0, scene.scale / 1000])\n\n self.reset_view(flags=flags)\n self.batch = pyglet.graphics.Batch()\n self._smooth = smooth\n\n self._profile = bool(profile)\n if self._profile:\n from pyinstrument import Profiler\n self.Profiler = Profiler\n\n # store kwargs\n self.kwargs = kwargs\n\n # store a vertexlist for an axis marker\n self._axis = None\n # store a vertexlist for a grid display\n self._grid = None\n # store scene geometry as vertex lists\n self.vertex_list = {}\n # store geometry hashes\n self.vertex_list_hash = {}\n # store geometry rendering mode\n self.vertex_list_mode = {}\n # store meshes that don't rotate relative to viewer\n self.fixed = fixed\n # store a hidden (don't not display) node.\n self._nodes_hidden = set()\n # name : texture\n self.textures = {}\n\n # if resolution isn't defined set a default value\n if resolution is None:\n resolution = scene.camera.resolution\n else:\n scene.camera.resolution = resolution\n\n # no window conf was passed so try to get the best looking one\n if window_conf is None:\n try:\n # try enabling 
antialiasing\n # if you have a graphics card this will probably work\n conf = gl.Config(sample_buffers=1,\n samples=4,\n depth_size=24,\n double_buffer=True)\n super(SceneViewer, self).__init__(config=conf,\n visible=visible,\n resizable=True,\n width=resolution[0],\n height=resolution[1],\n caption=caption)\n except pyglet.window.NoSuchConfigException:\n conf = gl.Config(double_buffer=True)\n super(SceneViewer, self).__init__(config=conf,\n resizable=True,\n visible=visible,\n width=resolution[0],\n height=resolution[1],\n caption=caption)\n else:\n # window config was manually passed\n super(SceneViewer, self).__init__(config=window_conf,\n resizable=True,\n visible=visible,\n width=resolution[0],\n height=resolution[1],\n caption=caption)\n\n # add scene geometry to viewer geometry\n self._update_vertex_list()\n\n # call after geometry is added\n self.init_gl()\n self.set_size(*resolution)\n self.update_flags()\n\n # someone has passed a callback to be called periodically\n if self.callback is not None:\n # if no callback period is specified set it to default\n if callback_period is None:\n # 30 times per second\n callback_period = 1.0 / 30.0\n # set up a do-nothing periodic task which will\n # trigger `self.on_draw` every `callback_period`\n # seconds if someone has passed a callback\n pyglet.clock.schedule_interval(lambda x: x,\n callback_period)\n if start_loop:\n pyglet.app.run()\n\n def _redraw(self):\n self.on_draw()\n\n def _update_vertex_list(self):\n # update vertex_list if needed\n for name, geom in self.scene.geometry.items():\n if geom.is_empty:\n continue\n if geometry_hash(geom) == self.vertex_list_hash.get(name):\n continue\n self.add_geometry(name=name,\n geometry=geom,\n smooth=bool(self._smooth))\n\n def _update_meshes(self):\n # call the callback if specified\n if self.callback is not None:\n self.callback(self.scene)\n self._update_vertex_list()\n self._update_perspective(self.width, self.height)\n\n def add_geometry(self, name, geometry, **kwargs):\n \"\"\"\n Add a geometry to the viewer.\n\n Parameters\n --------------\n name : hashable\n Name that references geometry\n geometry : Trimesh, Path2D, Path3D, PointCloud\n Geometry to display in the viewer window\n kwargs **\n Passed to rendering.convert_to_vertexlist\n \"\"\"\n # convert geometry to constructor args\n args = rendering.convert_to_vertexlist(geometry, **kwargs)\n # create the indexed vertex list\n self.vertex_list[name] = self.batch.add_indexed(*args)\n # save the MD5 of the geometry\n self.vertex_list_hash[name] = geometry_hash(geometry)\n # save the rendering mode from the constructor args\n self.vertex_list_mode[name] = args[1]\n\n try:\n # if a geometry has UV coordinates that match vertices\n assert len(geometry.visual.uv) == len(geometry.vertices)\n has_tex = True\n except BaseException:\n has_tex = False\n\n if has_tex:\n tex = rendering.material_to_texture(\n geometry.visual.material)\n if tex is not None:\n self.textures[name] = tex\n\n def cleanup_geometries(self):\n \"\"\"\n Remove any stored vertex lists that no longer\n exist in the scene.\n \"\"\"\n # shorthand to scene graph\n graph = self.scene.graph\n # which parts of the graph still have geometry\n geom_keep = set([graph[node][1] for\n node in graph.nodes_geometry])\n # which geometries no longer need to be kept\n geom_delete = [geom for geom in self.vertex_list\n if geom not in geom_keep]\n for geom in geom_delete:\n # remove stored vertex references\n self.vertex_list.pop(geom, None)\n self.vertex_list_hash.pop(geom, None)\n 
self.vertex_list_mode.pop(geom, None)\n self.textures.pop(geom, None)\n\n def unhide_geometry(self, node):\n \"\"\"\n If a node is hidden remove the flag and show the\n geometry on the next draw.\n\n Parameters\n -------------\n node : str\n Node to display\n \"\"\"\n self._nodes_hidden.discard(node)\n\n def hide_geometry(self, node):\n \"\"\"\n Don't display the geometry contained at a node on\n the next draw.\n\n Parameters\n -------------\n node : str\n Node to not display\n \"\"\"\n self._nodes_hidden.add(node)\n\n def reset_view(self, flags=None):\n \"\"\"\n Set view to the default view.\n\n Parameters\n --------------\n flags : None or dict\n If any view key passed override the default\n e.g. {'cull': False}\n \"\"\"\n self.view = {\n 'cull': True,\n 'axis': False,\n 'grid': False,\n 'fullscreen': False,\n 'wireframe': False,\n 'ball': Trackball(\n pose=self._initial_camera_transform,\n size=self.scene.camera.resolution,\n scale=self.scene.scale,\n target=self.scene.centroid)}\n try:\n # if any flags are passed override defaults\n if isinstance(flags, dict):\n for k, v in flags.items():\n if k in self.view:\n self.view[k] = v\n self.update_flags()\n except BaseException:\n pass\n\n def init_gl(self):\n \"\"\"\n Perform the magic incantations to create an\n OpenGL scene using pyglet.\n \"\"\"\n\n # if user passed a background color use it\n if self.background is None:\n # default background color is white\n background = np.ones(4)\n else:\n # convert to (4,) uint8 RGBA\n background = to_rgba(self.background)\n # convert to 0.0-1.0 float\n background = background.astype(np.float64) / 255.0\n\n self._gl_set_background(background)\n # use camera setting for depth\n self._gl_enable_depth(self.scene.camera)\n self._gl_enable_color_material()\n self._gl_enable_blending()\n self._gl_enable_smooth_lines()\n self._gl_enable_lighting(self.scene)\n\n @staticmethod\n def _gl_set_background(background):\n gl.glClearColor(*background)\n\n @staticmethod\n def _gl_unset_background():\n gl.glClearColor(*[0, 0, 0, 0])\n\n @staticmethod\n def _gl_enable_depth(camera):\n \"\"\"\n Enable depth test in OpenGL using distances\n from `scene.camera`.\n \"\"\"\n # set the culling depth from our camera object\n gl.glDepthRange(camera.z_near, camera.z_far)\n\n gl.glClearDepth(1.0)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glDepthFunc(gl.GL_LEQUAL)\n\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_CULL_FACE)\n\n @staticmethod\n def _gl_enable_color_material():\n # do some openGL things\n gl.glColorMaterial(gl.GL_FRONT_AND_BACK,\n gl.GL_AMBIENT_AND_DIFFUSE)\n gl.glEnable(gl.GL_COLOR_MATERIAL)\n gl.glShadeModel(gl.GL_SMOOTH)\n\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_AMBIENT,\n rendering.vector_to_gl(\n 0.192250, 0.192250, 0.192250))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_DIFFUSE,\n rendering.vector_to_gl(\n 0.507540, 0.507540, 0.507540))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_SPECULAR,\n rendering.vector_to_gl(\n .5082730, .5082730, .5082730))\n\n gl.glMaterialf(gl.GL_FRONT,\n gl.GL_SHININESS,\n .4 * 128.0)\n\n @staticmethod\n def _gl_enable_blending():\n # enable blending for transparency\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA,\n gl.GL_ONE_MINUS_SRC_ALPHA)\n\n @staticmethod\n def _gl_enable_smooth_lines():\n # make the lines from Path3D objects less ugly\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n # set the width of lines to 4 pixels\n gl.glLineWidth(4)\n # set PointCloud markers to 4 pixels in size\n gl.glPointSize(4)\n\n @staticmethod\n def 
_gl_enable_lighting(scene):\n \"\"\"\n Take the lights defined in scene.lights and\n apply them as openGL lights.\n \"\"\"\n gl.glEnable(gl.GL_LIGHTING)\n # opengl only supports 7 lights?\n for i, light in enumerate(scene.lights[:7]):\n # the index of which light we have\n lightN = eval('gl.GL_LIGHT{}'.format(i))\n\n # get the transform for the light by name\n matrix = scene.graph.get(light.name)[0]\n\n # convert light object to glLightfv calls\n multiargs = rendering.light_to_gl(\n light=light,\n transform=matrix,\n lightN=lightN)\n\n # enable the light in question\n gl.glEnable(lightN)\n # run the glLightfv calls\n for args in multiargs:\n gl.glLightfv(*args)\n\n def toggle_culling(self):\n \"\"\"\n Toggle back face culling.\n\n It is on by default but if you are dealing with\n non- watertight meshes you probably want to be able\n to see the back sides.\n \"\"\"\n self.view['cull'] = not self.view['cull']\n self.update_flags()\n\n def toggle_wireframe(self):\n \"\"\"\n Toggle wireframe mode\n\n Good for looking inside meshes, off by default.\n \"\"\"\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()\n\n def toggle_fullscreen(self):\n \"\"\"\n Toggle between fullscreen and windowed mode.\n \"\"\"\n self.view['fullscreen'] = not self.view['fullscreen']\n self.update_flags()\n\n def toggle_axis(self):\n \"\"\"\n Toggle a rendered XYZ/RGB axis marker:\n off, world frame, every frame\n \"\"\"\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()\n\n def toggle_grid(self):\n \"\"\"\n Toggle a rendered grid.\n \"\"\"\n # update state to next index\n self.view['grid'] = not self.view['grid']\n # perform gl actions\n self.update_flags()\n\n def update_flags(self):\n \"\"\"\n Check the view flags, and call required GL functions.\n \"\"\"\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. 
import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None\n\n if self.view['grid'] and self._grid is None:\n try:\n # create a grid marker\n from ..path.creation import grid\n bounds = self.scene.bounds\n center = bounds.mean(axis=0)\n # set the grid to the lowest Z position\n # also offset by the scale to avoid interference\n center[2] = bounds[0][2] - (bounds[:, 2].ptp() / 100)\n # choose the side length by maximum XY length\n side = bounds.ptp(axis=0)[:2].max()\n # create an axis marker sized relative to the scene\n grid_mesh = grid(\n side=side,\n count=4,\n transform=translation_matrix(center))\n # convert the path to vertexlist args\n args = rendering.convert_to_vertexlist(grid_mesh)\n # create ordered args for a vertex list\n self._grid = self.batch.add_indexed(*args)\n except BaseException:\n util.log.warning(\n 'failed to create grid!', exc_info=True)\n elif not self.view['grid'] and self._grid is not None:\n self._grid.delete()\n self._grid = None\n\n def _update_perspective(self, width, height):\n try:\n # for high DPI screens viewport size\n # will be different then the passed size\n width, height = self.get_viewport_size()\n except BaseException:\n # older versions of pyglet may not have this\n pass\n\n # set the new viewport size\n gl.glViewport(0, 0, width, height)\n gl.glMatrixMode(gl.GL_PROJECTION)\n gl.glLoadIdentity()\n\n # get field of view and Z range from camera\n camera = self.scene.camera\n\n # set perspective from camera data\n gl.gluPerspective(camera.fov[1],\n width / float(height),\n camera.z_near,\n camera.z_far)\n gl.glMatrixMode(gl.GL_MODELVIEW)\n\n return width, height\n\n def on_resize(self, width, height):\n \"\"\"\n Handle resized windows.\n \"\"\"\n width, height = self._update_perspective(width, height)\n self.scene.camera.resolution = (width, height)\n self.view['ball'].resize(self.scene.camera.resolution)\n self.scene.camera_transform[...] = self.view['ball'].pose\n\n def on_mouse_press(self, x, y, buttons, modifiers):\n \"\"\"\n Set the start point of the drag.\n \"\"\"\n self.view['ball'].set_state(Trackball.STATE_ROTATE)\n if (buttons == pyglet.window.mouse.LEFT):\n ctrl = (modifiers & pyglet.window.key.MOD_CTRL)\n shift = (modifiers & pyglet.window.key.MOD_SHIFT)\n if (ctrl and shift):\n self.view['ball'].set_state(Trackball.STATE_ZOOM)\n elif shift:\n self.view['ball'].set_state(Trackball.STATE_ROLL)\n elif ctrl:\n self.view['ball'].set_state(Trackball.STATE_PAN)\n elif (buttons == pyglet.window.mouse.MIDDLE):\n self.view['ball'].set_state(Trackball.STATE_PAN)\n elif (buttons == pyglet.window.mouse.RIGHT):\n self.view['ball'].set_state(Trackball.STATE_ZOOM)\n\n self.view['ball'].down(np.array([x, y]))\n self.scene.camera_transform[...] = self.view['ball'].pose\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n \"\"\"\n Pan or rotate the view.\n \"\"\"\n self.view['ball'].drag(np.array([x, y]))\n self.scene.camera_transform[...] 
= self.view['ball'].pose\n\n def on_mouse_scroll(self, x, y, dx, dy):\n \"\"\"\n Zoom the view.\n \"\"\"\n self.view['ball'].scroll(dy)\n self.scene.camera_transform[...] = self.view['ball'].pose\n\n def on_key_press(self, symbol, modifiers):\n \"\"\"\n Call appropriate functions given key presses.\n \"\"\"\n magnitude = 10\n if symbol == pyglet.window.key.W:\n self.toggle_wireframe()\n elif symbol == pyglet.window.key.Z:\n self.reset_view()\n elif symbol == pyglet.window.key.C:\n self.toggle_culling()\n elif symbol == pyglet.window.key.A:\n self.toggle_axis()\n elif symbol == pyglet.window.key.G:\n self.toggle_grid()\n elif symbol == pyglet.window.key.Q:\n self.on_close()\n elif symbol == pyglet.window.key.M:\n self.maximize()\n elif symbol == pyglet.window.key.F:\n self.toggle_fullscreen()\n\n if symbol in [\n pyglet.window.key.LEFT,\n pyglet.window.key.RIGHT,\n pyglet.window.key.DOWN,\n pyglet.window.key.UP]:\n self.view['ball'].down([0, 0])\n if symbol == pyglet.window.key.LEFT:\n self.view['ball'].drag([-magnitude, 0])\n elif symbol == pyglet.window.key.RIGHT:\n self.view['ball'].drag([magnitude, 0])\n elif symbol == pyglet.window.key.DOWN:\n self.view['ball'].drag([0, -magnitude])\n elif symbol == pyglet.window.key.UP:\n self.view['ball'].drag([0, magnitude])\n self.scene.camera_transform[...] = self.view['ball'].pose\n\n def on_draw(self):\n \"\"\"\n Run the actual draw calls.\n \"\"\"\n\n if self._profile:\n profiler = self.Profiler()\n profiler.start()\n\n self._update_meshes()\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n gl.glLoadIdentity()\n\n # pull the new camera transform from the scene\n transform_camera = np.linalg.inv(self.scene.camera_transform)\n\n # apply the camera transform to the matrix stack\n gl.glMultMatrixf(rendering.matrix_to_gl(transform_camera))\n\n # we want to render fully opaque objects first,\n # followed by objects which have transparency\n node_names = collections.deque(self.scene.graph.nodes_geometry)\n # how many nodes did we start with\n count_original = len(node_names)\n count = -1\n\n # if we are rendering an axis marker at the world\n if self._axis:\n # we stored it as a vertex list\n self._axis.draw(mode=gl.GL_TRIANGLES)\n if self._grid:\n self._grid.draw(mode=gl.GL_LINES)\n\n while len(node_names) > 0:\n count += 1\n current_node = node_names.popleft()\n\n if current_node in self._nodes_hidden:\n continue\n\n # get the transform from world to geometry and mesh name\n transform, geometry_name = self.scene.graph.get(current_node)\n\n # if no geometry at this frame continue without rendering\n if geometry_name is None:\n continue\n\n # if a geometry is marked as fixed apply the inverse view transform\n if self.fixed is not None and geometry_name in self.fixed:\n # remove altered camera transform from fixed geometry\n transform_fix = np.linalg.inv(\n np.dot(self._initial_camera_transform, transform_camera))\n # apply the transform so the fixed geometry doesn't move\n transform = np.dot(transform, transform_fix)\n\n # get a reference to the mesh so we can check transparency\n mesh = self.scene.geometry[geometry_name]\n if mesh.is_empty:\n continue\n\n # get the GL mode of the current geometry\n mode = self.vertex_list_mode[geometry_name]\n\n # if you draw a coplanar line with a triangle it will z-fight\n # the best way to do this is probably a shader but this works fine\n if mode == gl.GL_LINES:\n # apply the offset in camera space\n transform = util.multi_dot([\n transform,\n np.linalg.inv(transform_camera),\n self._line_offset,\n 
transform_camera])\n\n # add a new matrix to the model stack\n gl.glPushMatrix()\n # transform by the nodes transform\n gl.glMultMatrixf(rendering.matrix_to_gl(transform))\n\n # draw an axis marker for each mesh frame\n if self.view['axis'] == 'all':\n self._axis.draw(mode=gl.GL_TRIANGLES)\n\n # transparent things must be drawn last\n if (hasattr(mesh, 'visual') and\n hasattr(mesh.visual, 'transparency')\n and mesh.visual.transparency):\n # put the current item onto the back of the queue\n if count < count_original:\n # add the node to be drawn last\n node_names.append(current_node)\n # pop the matrix stack for now\n gl.glPopMatrix()\n # come back to this mesh later\n continue\n\n # if we have texture enable the target texture\n texture = None\n if geometry_name in self.textures:\n texture = self.textures[geometry_name]\n gl.glEnable(texture.target)\n gl.glBindTexture(texture.target, texture.id)\n\n # draw the mesh with its transform applied\n self.vertex_list[geometry_name].draw(mode=mode)\n # pop the matrix stack as we drew what we needed to draw\n gl.glPopMatrix()\n\n # disable texture after using\n if texture is not None:\n gl.glDisable(texture.target)\n\n if self._profile:\n profiler.stop()\n print(profiler.output_text(unicode=True, color=True))\n\n def save_image(self, file_obj):\n \"\"\"\n Save the current color buffer to a file object\n in PNG format.\n\n Parameters\n -------------\n file_obj: file name, or file- like object\n \"\"\"\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)\n\n\ndef geometry_hash(geometry):\n \"\"\"\n Get an MD5 for a geometry object\n\n Parameters\n ------------\n geometry : object\n\n Returns\n ------------\n MD5 : str\n \"\"\"\n if hasattr(geometry, 'md5'):\n # for most of our trimesh objects\n md5 = geometry.md5()\n elif hasattr(geometry, 'tostring'):\n # for unwrapped ndarray objects\n md5 = str(hash(geometry.tostring()))\n\n if hasattr(geometry, 'visual'):\n # if visual properties are defined\n md5 += str(geometry.visual.crc())\n return md5\n\n\ndef render_scene(scene,\n resolution=None,\n visible=True,\n **kwargs):\n \"\"\"\n Render a preview of a scene to a PNG.\n\n Parameters\n ------------\n scene : trimesh.Scene\n Geometry to be rendered\n resolution : (2,) int or None\n Resolution in pixels, or set from scene.camera\n kwargs : **\n Passed to SceneViewer\n\n Returns\n ---------\n render : bytes\n Image in PNG format\n \"\"\"\n window = SceneViewer(scene,\n start_loop=False,\n visible=visible,\n resolution=resolution,\n **kwargs)\n\n if visible is None:\n visible = platform.system() != 'Linux'\n\n from ..util import BytesIO\n\n # need to run loop twice to display anything\n for save in [False, False, True]:\n pyglet.clock.tick()\n window.switch_to()\n window.dispatch_events()\n window.dispatch_event('on_draw')\n window.flip()\n if save:\n # save the color buffer data to memory\n file_obj = BytesIO()\n window.save_image(file_obj)\n file_obj.seek(0)\n render = file_obj.read()\n window.close()\n\n return render\n"
] |
[
[
"numpy.linspace",
"numpy.append",
"numpy.array",
"numpy.random.RandomState",
"numpy.isclose"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.arange",
"numpy.eye",
"numpy.ones",
"numpy.append",
"numpy.zeros"
],
[
"numpy.linalg.inv",
"numpy.dot",
"numpy.array",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AI-Companion/ds-gear
|
[
"66e029b786579eaed337f51302f0ee34e9551089"
] |
[
"dsg/CNN_classifier.py"
] |
[
"import os\nimport pickle\nimport re\nimport subprocess\nimport time\nfrom itertools import compress\nimport numpy as np\nfrom cv2 import cv2\nfrom keras.applications.vgg16 import VGG16\nfrom keras.models import Model, load_model\nfrom keras.layers import Dense, Flatten, Dropout, BatchNormalization\nfrom keras.utils import to_categorical\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nimport matplotlib.pyplot as plt\nfrom dsg.base import BasePreprocessor, BaseNN\n\n\nclass CNNClassifierPreprocessor(BasePreprocessor):\n \"\"\"\n Utility class performing several data preprocessing steps\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def init_from_config(self, image_height: int, image_width: int, validation_split: float):\n self.validation_split = validation_split\n self.image_height = image_height\n self.image_width = image_width\n self.labels_to_idx = None\n\n def init_from_file(self, preprocessor_file: str):\n \"\"\"\n Loads preprocessing tools for the model\n Args:\n preprocessor_file: url to saved preprocessing file\n Return:\n preprocessed object\n \"\"\"\n with open(preprocessor_file, 'rb') as f:\n self.image_height = pickle.load(f)\n self.image_width = pickle.load(f)\n self.validation_split = pickle.load(f)\n self.labels_to_idx = pickle.load(f)\n\n def clean(self, X):\n return X\n\n def save(self, file_name_prefix, save_folder):\n \"\"\"\n Stores the data preprocessor under 'models folder'\n Args:\n file_name_prefix: a file name prefix having the following format 'named_entity_recognition_%Y%m%d_%H%M%S'\n save_folder: folder under which to save the files\n Return:\n None\n \"\"\"\n file_url = os.path.join(save_folder, file_name_prefix + \"_preprocessor.pkl\")\n with open(file_url, 'wb') as handle:\n pickle.dump(self.image_height, handle, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.image_width, handle, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.validation_split, handle, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.labels_to_idx, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"----> proprocessor object saved to %s\" % file_url)\n\n def preprocess(self, X, y=None):\n \"\"\"\n Loads an array containing training images ready to be injected in the CNN\n Args:\n X: list of image urls\n y: labels\n Returns:\n array having shape (n_images, image_height, image_width, 3)\n \"\"\"\n X_result = []\n for image_url in X:\n im = cv2.imread(image_url, 1)\n im = cv2.resize(im, (self.image_width, self.image_height))\n X_result.append(im)\n X_result = np.asarray(X_result)\n if y is not None:\n y_result = [self.labels_to_idx[k] for k in y]\n y_result = to_categorical(y_result, len(self.labels_to_idx))\n return X_result, y_result\n else:\n return X_result\n\n def split_train_test(self, X, y):\n \"\"\"\n Wrapper method to split training data into a validation set and a training set\n Args:\n X: tokenized predictors\n y: labels\n Return:\n tuple consisting of training predictors, training labels, validation predictors, validation labels\n \"\"\"\n print(\"===========> data split\")\n X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, stratify=y, test_size=self.validation_split)\n print(\"----> data splitted: validation ratio = %.1f\" % self.validation_split)\n return X_train, X_test, y_train, y_test\n \n def fit(self, X, y):\n \"\"\"\n updates the labels_to_idx dict with the training values\n Args:\n X: list of image urls\n y: labels\n Returns:\n None\n 
\"\"\"\n labels = list(set(y))\n self.labels_to_idx = {k:v for v,k in enumerate(labels)}\n return\n\n\nclass CNNClassifier(BaseNN):\n \"\"\"\n Handles the RNN model\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.use_pretrained_cnn = None\n self.pretrained_network_path = None\n self.pretrained_network_name = None\n self.pretrained_layer = None\n self.model = None\n self.n_labels = None\n self.idx_to_labels = None\n self.batch_size = None\n keys = kwargs.keys()\n if 'h5_file' in keys:\n self.init_from_files(h5_file=kwargs['h5_file'], idx_to_labels=kwargs['idx_to_labels'])\n else:\n self.init_from_config(idx_to_labels=kwargs['idx_to_labels'],\n pre_trained_cnn=kwargs['pre_trained_cnn'],\n pretrained_network_name=kwargs['pretrained_network_name'],\n n_iter=kwargs['n_iter'],\n image_height=kwargs['image_height'],\n image_width=kwargs['image_width'],\n batch_size=kwargs['batch_size'],\n pretrained_network_path=kwargs['pretrained_network_path'])\n\n def init_from_files(self, h5_file, idx_to_labels):\n \"\"\"\n Initializes the class from a previously saved model\n Args:\n h5_file: url to a saved class\n idx_to_labels: conversion from indices to original labels\n Return:\n None\n \"\"\"\n self.model = load_model(h5_file)\n self.idx_to_labels = idx_to_labels\n\n def init_from_config(self, idx_to_labels, pre_trained_cnn, pretrained_network_name,\n n_iter, image_height, image_width, batch_size, pretrained_network_path):\n \"\"\"\n initialize the class for the first time from a given configuration file and data processor\n Args:\n idx_to_labels: conversion from indices to original labels\n pre_trained_cnn: whether to use a pretrained network\n pretrained_network_name: name of the pretrained network to use\n n_iter: number of backprop iterations\n image_height: height in pixels\n image_width: width in pixels\n batch_size: back prop batch size\n pretrained_network_path: url for the pretrained network\n Return:\n None\n \"\"\"\n self.use_pretrained_cnn = pre_trained_cnn\n self.pretrained_cnn_name = pretrained_network_name\n self.model = None\n self.n_iter = n_iter\n self.image_height = image_height\n self.image_width = image_width\n self.idx_to_labels = idx_to_labels\n self.batch_size = batch_size\n self.n_labels = len(idx_to_labels)\n self.pretrained_network_path = pretrained_network_path\n self.model = self.build_model()\n\n def build_model(self):\n \"\"\"\n Builds an CNN model according to fixed architecture\n Return:\n None\n \"\"\"\n print(\"===========> build model\")\n vggmodel = VGG16(include_top=False, input_shape=(self.image_height, self.image_width, 3))\n for layer in vggmodel.layers:\n layer.trainable = False\n x = vggmodel.layers[-1].output\n x = Flatten()(x)\n # 1st connected\n x = Dense(8, activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n x = Dense(self.n_labels, activation='softmax')(x)\n # define new model\n model = Model(inputs=vggmodel.inputs, outputs=x)\n # summarize\n model.compile(loss='categorical_crossentropy', optimizer=\"adam\", metrics=['acc'])\n print(model.summary())\n return model\n\n def fit(self, X_train, y_train, X_test=None, y_test=None):\n \"\"\"\n Fits the model object to the data\n Args:\n X_train: numpy array containing encoded training features\n y_train: numpy array containing training targets\n X_test: numpy array containing encoded test features\n y_test: numpy array containing test targets\n Return:\n history of mertrics + classification report\n \"\"\"\n report = None\n if (X_test is not None) and (y_test is not None):\n history 
= self.model.fit(x=X_train, y=y_train, epochs=self.n_iter,\n batch_size=self.batch_size, validation_data=(X_test, y_test),\n verbose=2)\n y_hat = self.predict(X_test)\n y = np.argmax(y_test, axis=1)\n y = [self.idx_to_labels[i] for i in y]\n report = classification_report(y, y_hat, output_dict=True)\n df = pd.DataFrame(report).transpose().round(2)\n print(df)\n else:\n history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter, batch_size=self.batch_size, verbose=2)\n return history, report\n\n def predict(self, X_test):\n \"\"\"\n Inference method\n Args:\n X_test: predictors array\n Return:\n numpy array containing the class for token character in the sentence\n \"\"\"\n probs = self.model.predict(X_test)\n ids = np.argmax(probs, axis=1)\n max_probs = probs.max(axis=1) < 0.8\n labels = [self.idx_to_labels[v] if not max_probs[i] else \"other\" for i,v in enumerate(ids)]\n return labels\n\n def predict_proba(self, X_test):\n \"\"\"\n Inference method\n Args:\n X_test: array of predictors\n Return:\n numpy array containing the probabilities of a positive review for each list entry\n \"\"\"\n probs = self.model.predict(X_test)\n return probs\n\n def save(self, file_name_prefix, save_folder):\n \"\"\"\n Stores the data preprocessor under 'models folder'\n Args:\n file_name_prefix: a file name prefix having the following format 'sentiment_analysis_%Y%m%d_%H%M%S'\n save_folder: folder under which to save the files\n Return:\n None\n \"\"\"\n file_url_keras_model = os.path.join(save_folder, file_name_prefix + \"_rnn_model.h5\")\n self.model.save(file_url_keras_model)\n print(\"----> model saved to %s\" % file_url_keras_model)\n \n"
] |
[
[
"numpy.asarray",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"numpy.argmax",
"sklearn.metrics.classification_report"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
aerdem4/lofo-importance
|
[
"c3648b0421cc304ed4f3f2ebcbe962d70b9708fe"
] |
[
"lofo/utils.py"
] |
[
"import multiprocessing\nimport pandas as pd\n\n\ndef lofo_to_df(lofo_scores, feature_list):\n importance_df = pd.DataFrame()\n importance_df[\"feature\"] = feature_list\n importance_df[\"importance_mean\"] = lofo_scores.mean(axis=1)\n importance_df[\"importance_std\"] = lofo_scores.std(axis=1)\n\n for val_score in range(lofo_scores.shape[1]):\n importance_df[\"val_imp_{}\".format(val_score)] = lofo_scores[:, val_score]\n\n return importance_df.sort_values(\"importance_mean\", ascending=False)\n\n\ndef parallel_apply(cv_func, feature_list, n_jobs):\n pool = multiprocessing.Pool(n_jobs)\n manager = multiprocessing.Manager()\n result_queue = manager.Queue()\n\n for f in feature_list:\n pool.apply_async(cv_func, (f, result_queue))\n\n pool.close()\n pool.join()\n\n lofo_cv_result = [result_queue.get() for _ in range(len(feature_list))]\n return lofo_cv_result\n\n\ndef flatten_list(nested_list):\n return [item for sublist in nested_list for item in sublist]\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nikbaya/split
|
[
"fb65c01cb6807a8b161fc3b1f25e3ddd90e89f62"
] |
[
"python/compare_sumstats.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 27 13:15:22 2018\n\nUsed to generate QQ plots of rg split sumstats.\n\n@author: nbaya\n\"\"\"\n\nimport hail as hl\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\nsplitA = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/50_sim_inf_meta_A_n300_batch_1_s0.tsv.bgz', impute=True)\nsplitB = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/50_sim_inf_meta_B_n300_batch_1_s0.tsv.bgz', impute=True)\n\nsplitA = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/vds1_5.tsv.gz', impute=True)\nsplitB = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/vds2_5.tsv.gz', impute=True)\n\nsplitA = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/50_sim_inf_meta_A_n300_batch_1_s0_old.tsv.bgz', impute=True)\nsplitB = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/50_sim_inf_meta_B_n300_batch_1_s0_old.tsv.bgz', impute=True)\n\nsplitA = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/20160_meta_A_batch_1_s0.tsv.bgz', impute=True)\nsplitB = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/20160_meta_B_batch_1_s0.tsv.bgz', impute=True)\n\nsplitA = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/50_sim_inf_sample_A_batch_1.1_set84.tsv.bgz', impute=True)\nsplitB = hl.import_table('/Users/nbaya/Documents/lab/ukbb-sexdiff/rg_sex/sumstats/50_sim_inf_sample_A_batch_1.2_set84.tsv.bgz', impute=True)\n\ndf_A = splitA.to_pandas()\ndf_B = splitB.to_pandas()\n\nmean_chi2_A = np.mean(df_A.Z*df_A.Z)\nmean_chi2_B = np.mean(df_B.Z*df_B.Z)\n\ndf_A['P'] = stats.norm.sf(abs(df_A.Z))*2\ndf_B['P'] = stats.norm.sf(abs(df_B.Z))*2\n\nplt.subplot(2,1,1)\nplt.plot(-np.log10(np.linspace(1,1/df_A.shape[0],df_A.shape[0])), -np.log10(df_A.sort_values(by='P',ascending=False).P),'o', alpha=0.5)\nplt.plot([0,10],[0,10],'k--')\nplt.xlim([0,np.max(-np.log10(np.linspace(1,1/df_A.shape[0],df_A.shape[0])))*1.05])\nplt.ylim([0,np.max(-np.log10(df_A.sort_values(by='P',ascending=False).P))*1.05])\n\nplt.subplot(2,1,2)\nplt.plot(-np.log10(np.linspace(1,1/df_B.shape[0],df_B.shape[0])), -np.log10(df_B.sort_values(by='P',ascending=False).P),'o', alpha=0.5)\nplt.plot([0,10],[0,10],'k--')\nplt.xlim([0,np.max(-np.log10(np.linspace(1,1/df_B.shape[0],df_B.shape[0])))*1.05])\nplt.ylim([0,np.max(-np.log10(df_B.sort_values(by='P',ascending=False).P))*1.05])\nfig = plt.gcf()\nfig.set_size_inches(8, 12)\n"
] |
[
[
"numpy.linspace",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LauZyHou/sklearn-STS
|
[
"8dd90a8fcf37094ea03f06fa10ce74dcf2d57dd3"
] |
[
"supervised_learning/generalized_linear_model/Ridge.py"
] |
[
"from sklearn import linear_model\n\nif __name__ == '__main__':\n \"\"\"ridge regression\"\"\"\n reg = linear_model.Ridge(alpha=0.5)\n reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1])\n print(reg.coef_)\n \"\"\" Generalized Cross-Validation\"\"\"\n reg = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0])\n reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1])\n print(reg.coef_, reg.alpha_)\n"
] |
[
[
"sklearn.linear_model.RidgeCV",
"sklearn.linear_model.Ridge"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexis-roche/nipy
|
[
"b765f258621c886538b77115128511cdfd4600fe"
] |
[
"nipy/algorithms/diagnostics/tsdiffplot.py"
] |
[
"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n''' plot tsdiffana parameters '''\nfrom __future__ import absolute_import\n\nimport numpy as np\n\nimport nipy\nfrom .timediff import time_slice_diffs\n\nfrom nipy.externals.six import string_types\n\ndef plot_tsdiffs(results, axes=None):\n ''' Plotting routine for time series difference metrics\n\n Requires matplotlib\n\n Parameters\n ----------\n results : dict\n Results of format returned from\n :func:`nipy.algorithms.diagnostics.time_slice_diff`\n '''\n import matplotlib.pyplot as plt\n T = len(results['volume_means'])\n S = results['slice_mean_diff2'].shape[1]\n mean_means = np.mean(results['volume_means'])\n scaled_slice_diff = results['slice_mean_diff2'] / mean_means\n\n if axes is None:\n n_plots = 4\n fig = plt.figure()\n fig.set_size_inches([10,10])\n axes = [plt.subplot(n_plots, 1, i+1) for i in range(n_plots)]\n\n def xmax_labels(ax, val, xlabel, ylabel):\n xlims = ax.axis()\n ax.axis((0, val) + xlims[2:])\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n # plot of mean volume variance\n ax = axes[0]\n ax.plot(results['volume_mean_diff2'] / mean_means)\n xmax_labels(ax, T-1, 'Difference image number', 'Scaled variance')\n\n # plot of diff by slice\n ax = axes[1]\n #Set up the color map for the different slices:\n X, Y = np.meshgrid(np.arange(scaled_slice_diff.shape[0]),\n np.arange(scaled_slice_diff.shape[1]))\n\n # Use HSV in order to code the slices from bottom to top:\n ax.scatter(X.T.ravel(),scaled_slice_diff.ravel(),\n c=Y.T.ravel(),cmap=plt.cm.hsv,\n alpha=0.2)\n\n xmax_labels(ax, T-1,\n 'Difference image number',\n 'Slice by slice variance')\n\n # mean intensity\n ax = axes[2]\n ax.plot(results['volume_means'] / mean_means)\n xmax_labels(ax, T,\n 'Image number',\n 'Scaled mean \\n voxel intensity')\n\n # slice plots min max mean\n ax = axes[3]\n ax.hold(True)\n ax.plot(np.mean(scaled_slice_diff, 0), 'k')\n ax.plot(np.min(scaled_slice_diff, 0), 'b')\n ax.plot(np.max(scaled_slice_diff, 0), 'r')\n ax.hold(False)\n xmax_labels(ax, S+1,\n 'Slice number',\n 'Max/mean/min \\n slice variation')\n return axes\n\n\[email protected]_with_doc('Please see docstring for alternative code')\ndef plot_tsdiffs_image(img, axes=None, show=True):\n ''' Plot time series diagnostics for image\n\n This function is deprecated; please use something like::\n\n results = time_slice_diff_image(img, slice_axis=2)\n plot_tsdiffs(results)\n\n instead.\n\n Parameters\n ----------\n img : image-like or filename str\n image on which to do diagnostics\n axes : None or sequence, optional\n Axes on which to plot the diagnostics. If None, then we create a figure\n and subplots for the plots. Sequence should have length\n >=4.\n show : {True, False}, optional\n If True, show the figure after plotting it\n\n Returns\n -------\n axes : Matplotlib axes\n Axes on which we have done the plots. Will be same as `axes` input if\n `axes` input was not None\n '''\n if isinstance(img, string_types):\n title = img\n else:\n title = 'Difference plots'\n img = nipy.as_image(img)\n res = time_slice_diffs(img)\n axes = plot_tsdiffs(res, axes)\n axes[0].set_title(title)\n if show:\n # show the plot\n import matplotlib.pyplot as plt\n plt.show()\n return axes\n\n\n"
] |
[
[
"numpy.deprecate_with_doc",
"numpy.min",
"numpy.arange",
"numpy.max",
"matplotlib.pyplot.subplot",
"numpy.mean",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Neovairis/tensorpack
|
[
"d7a13cb74c9066bc791d7aafc3b744b60ee79a9f",
"d7a13cb74c9066bc791d7aafc3b744b60ee79a9f",
"242dc71cafb9642e68a2bfb58bcf6ad45ccbb35c"
] |
[
"tensorpack/graph_builder/training.py",
"examples/PennTreebank/PTB-LSTM.py",
"tensorpack/tfutils/symbolic_functions.py"
] |
[
"# -*- coding: utf-8 -*-\n# File: training.py\n\nimport copy\nimport pprint\nimport re\nfrom abc import ABCMeta, abstractmethod\nfrom contextlib import contextmanager\nimport six\nimport tensorflow as tf\nfrom six.moves import range, zip\n\nfrom ..compat import tfv1\nfrom ..tfutils.common import get_tf_version_tuple\nfrom ..tfutils.gradproc import ScaleGradient\nfrom ..tfutils.tower import TrainTowerContext\nfrom ..utils import logger\nfrom .utils import (\n GradientPacker, LeastLoadedDeviceSetter, aggregate_grads, allreduce_grads, allreduce_grads_hierarchical,\n merge_grad_list, override_to_local_variable, split_grad_list)\n\n__all__ = ['GraphBuilder',\n 'SyncMultiGPUParameterServerBuilder', 'DataParallelBuilder',\n 'SyncMultiGPUReplicatedBuilder', 'AsyncMultiGPUBuilder']\n\n\[email protected]_metaclass(ABCMeta)\nclass GraphBuilder(object):\n @abstractmethod\n def build(*args, **kwargs):\n pass\n\n\n@contextmanager\ndef _maybe_reuse_vs(reuse):\n if reuse:\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n yield\n else:\n yield\n\n\nclass DataParallelBuilder(GraphBuilder):\n def __init__(self, towers):\n \"\"\"\n Args:\n towers(list[int]): list of GPU ids.\n \"\"\"\n if len(towers) > 1:\n logger.info(\"[DataParallel] Training a model of {} towers.\".format(len(towers)))\n if not tf.test.is_built_with_cuda():\n logger.error(\"[DataParallel] TensorFlow was not built with CUDA support!\")\n\n self.towers = towers\n\n @staticmethod\n def _check_grad_list(grad_list):\n \"\"\"\n Args:\n grad_list: list of list of tuples, shape is Ngpu x Nvar x 2\n \"\"\"\n nvars = [len(k) for k in grad_list]\n\n def basename(x):\n return re.sub('tower[0-9]+/', '', x.op.name)\n\n if len(set(nvars)) != 1:\n names_per_gpu = [set([basename(k[1]) for k in grad_and_vars]) for grad_and_vars in grad_list]\n inters = copy.copy(names_per_gpu[0])\n for s in names_per_gpu:\n inters &= s\n for s in names_per_gpu:\n s -= inters\n logger.error(\"Unique trainable variables on towers: \" + pprint.pformat(names_per_gpu))\n raise ValueError(\"Number of gradients from each tower is different! \" + str(nvars))\n\n @staticmethod\n def call_for_each_tower(\n towers, func, devices=None, use_vs=None):\n \"\"\"\n Run `func` on all GPUs (towers) and return the results.\n\n Args:\n towers (list[int]): a list of GPU id.\n func: a lambda to be called inside each tower\n devices: a list of devices to be used. 
By default will use '/gpu:{tower}'\n use_vs (list[bool]): list of use_vs to passed to TowerContext\n\n Returns:\n List of outputs of ``func``, evaluated on each tower.\n \"\"\"\n\n ret = []\n if devices is not None:\n assert len(devices) == len(towers)\n if use_vs is not None:\n assert len(use_vs) == len(towers)\n\n tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]\n\n for idx, t in enumerate(towers):\n device = devices[idx] if devices is not None else '/gpu:{}'.format(t)\n usevs = use_vs[idx] if use_vs is not None else False\n reuse = not usevs and idx > 0\n with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(\n tower_names[idx],\n vs_name=tower_names[idx] if usevs else '',\n index=idx, total=len(towers)):\n if len(str(device)) < 10: # a device function doesn't have good string description\n logger.info(\"Building graph for training tower {} on device {} ...\".format(idx, device))\n else:\n logger.info(\"Building graph for training tower {} ...\".format(idx))\n\n # When use_vs is True, use LOCAL_VARIABLES,\n # so these duplicated variables won't be saved by default.\n with override_to_local_variable(enable=usevs):\n ret.append(func())\n return ret\n\n @staticmethod\n def build_on_towers(*args, **kwargs):\n return DataParallelBuilder.call_for_each_tower(*args, **kwargs)\n\n\nclass SyncMultiGPUParameterServerBuilder(DataParallelBuilder):\n \"\"\"\n Data-parallel training in 'ParameterServer' mode.\n It builds one tower on each GPU with\n shared variable scope. It synchronizes the gradients computed\n from each tower, averages them and applies to the shared variables.\n\n It is an equivalent of ``--variable_update=parameter_server`` in\n `tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.\n \"\"\"\n def __init__(self, towers, ps_device):\n \"\"\"\n Args:\n towers(list[int]): list of GPU id\n ps_device (str): either 'gpu' or 'cpu', where variables are stored.\n \"\"\"\n super(SyncMultiGPUParameterServerBuilder, self).__init__(towers)\n assert ps_device in ['cpu', 'gpu']\n self.ps_device = ps_device\n\n def call_for_each_tower(self, tower_fn):\n \"\"\"\n Call the function `tower_fn` under :class:`TowerContext` for each tower.\n\n Returns:\n a list, contains the return values of `tower_fn` on each tower.\n \"\"\"\n raw_devices = ['/gpu:{}'.format(k) for k in self.towers]\n if self.ps_device == 'gpu':\n devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]\n else:\n devices = [tf.train.replica_device_setter(\n worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices]\n\n return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)\n\n def build(self, grad_list, get_opt_fn):\n \"\"\"\n Reduce the gradients, apply them with the optimizer,\n and set self.grads to a list of (g, v), containing the averaged gradients.\n\n Args:\n grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. 
Each is the gradients computed on each GPU.\n get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer\n\n Returns:\n tf.Operation: the training op\n \"\"\"\n assert len(grad_list) == len(self.towers)\n DataParallelBuilder._check_grad_list(grad_list)\n\n # debug tower performance (without update):\n # ops = [k[0] for k in grad_list[1]] + [k[0] for k in grad_list[0]]\n # self.train_op = tf.group(*ops)\n # return\n\n self.grads = aggregate_grads(grad_list, colocation=True)\n # grads = grad_list[0]\n\n opt = get_opt_fn()\n if self.ps_device == 'cpu':\n with tf.device('/cpu:0'):\n train_op = opt.apply_gradients(self.grads, name='train_op')\n else:\n train_op = opt.apply_gradients(self.grads, name='train_op')\n return train_op\n\n\nclass SyncMultiGPUReplicatedBuilder(DataParallelBuilder):\n \"\"\"\n Data-parallel training in \"replicated\" mode,\n where each GPU contains a replicate of the whole model.\n It will build one tower on each GPU under its own variable scope.\n Each gradient update is averaged or summed across or GPUs through NCCL.\n\n It is an equivalent of ``--variable_update=replicated`` in\n `tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.\n \"\"\"\n\n def __init__(self, towers, average, mode):\n super(SyncMultiGPUReplicatedBuilder, self).__init__(towers)\n self._average = average\n assert mode in ['nccl', 'cpu', 'hierarchical'], mode\n self._mode = mode\n\n if self._mode == 'hierarchical' and len(towers) != 8:\n logger.warn(\"mode='hierarchical' require >= 8 GPUs. Fallback to mode='nccl'.\")\n self._mode = 'nccl'\n\n def call_for_each_tower(self, tower_fn):\n \"\"\"\n Call the function `tower_fn` under :class:`TowerContext` for each tower.\n\n Returns:\n a list, contains the return values of `tower_fn` on each tower.\n \"\"\"\n # if tower_fn returns [(grad, var), ...], this returns #GPU x #VAR x 2\n return DataParallelBuilder.build_on_towers(\n self.towers,\n tower_fn,\n # use no variable scope for the first tower\n use_vs=[False] + [True] * (len(self.towers) - 1))\n\n def build(self, grad_list, get_opt_fn):\n \"\"\"\n Reduce the gradients, apply them with the optimizer,\n and set self.grads to #GPU number of lists of (g, v), containing the all-reduced gradients on each device.\n\n Args:\n grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.\n get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer\n\n Returns:\n (tf.Operation, tf.Operation)\n\n 1. the training op.\n\n 2. the op which sync variables from GPU 0 to other GPUs.\n It has to be run before the training has started.\n And you can optionally run it later to sync non-trainable variables.\n \"\"\"\n assert len(grad_list) == len(self.towers)\n raw_devices = ['/gpu:{}'.format(k) for k in self.towers]\n\n DataParallelBuilder._check_grad_list(grad_list)\n\n dtypes = set([x[0].dtype.base_dtype for x in grad_list[0]])\n dtypes_nccl_supported = [tf.float32, tf.float64]\n if get_tf_version_tuple() >= (1, 8):\n dtypes_nccl_supported.append(tf.float16)\n valid_for_nccl = all([k in dtypes_nccl_supported for k in dtypes])\n if self._mode == 'nccl' and not valid_for_nccl:\n logger.warn(\"Cannot use mode='nccl' because some gradients have unsupported types. 
Fallback to mode='cpu'\")\n self._mode = 'cpu'\n\n if self._mode in ['nccl', 'hierarchical']:\n all_grads, all_vars = split_grad_list(grad_list)\n # use allreduce from tf-benchmarks\n # from .batch_allreduce import AllReduceSpecAlgorithm\n # algo = AllReduceSpecAlgorithm('nccl', list(range(8)), 0, 10)\n # all_grads, warmup_ops = algo.batch_all_reduce(all_grads, 1, True, False)\n # print(\"WARMUP OPS\", warmup_ops)\n\n if self._mode == 'nccl':\n all_grads = allreduce_grads(all_grads, average=self._average) # #gpu x #param\n else:\n packer = GradientPacker(len(raw_devices))\n succ = packer.compute_strategy(all_grads[0])\n if succ:\n packed_grads = packer.pack_all(all_grads, raw_devices)\n packed_grads_aggr = allreduce_grads_hierarchical(\n packed_grads, raw_devices, average=self._average)\n all_grads = packer.unpack_all(packed_grads_aggr, raw_devices)\n else:\n all_grads = allreduce_grads_hierarchical(all_grads, raw_devices, average=self._average)\n\n self.grads = merge_grad_list(all_grads, all_vars)\n elif self._mode == 'cpu':\n agg_grad_and_vars = aggregate_grads(\n grad_list, colocation=False,\n devices=['/cpu:0'], average=self._average) # #param x 2\n self.grads = [] # #gpu x #param x 2\n for grad_and_vars in grad_list: # grad_and_vars: #paramx2\n # take v from each tower, and g from average.\n self.grads.append(\n [(g, v) for (_, v), (g, _) in zip(grad_and_vars, agg_grad_and_vars)])\n\n train_ops = []\n opt = get_opt_fn()\n with tf.name_scope('apply_gradients'):\n for idx, grad_and_vars in enumerate(self.grads):\n with tf.device(raw_devices[idx]):\n # apply_gradients may create variables. Make them LOCAL_VARIABLES\n with override_to_local_variable(enable=idx > 0):\n train_ops.append(opt.apply_gradients(\n grad_and_vars, name='apply_grad_{}'.format(idx)))\n train_op = tf.group(*train_ops, name='train_op')\n\n with tf.name_scope('sync_variables'):\n post_init_op = SyncMultiGPUReplicatedBuilder.get_post_init_ops()\n return train_op, post_init_op\n\n# Adopt from https://github.com/tensorflow/benchmarks/blob/master/scripts/tf_cnn_benchmarks/variable_mgr.py\n @staticmethod\n def get_post_init_ops():\n \"\"\"\n Copy values of variables on GPU 0 to other GPUs.\n \"\"\"\n # literally all variables, because it's better to sync optimizer-internal variables as well\n all_vars = tf.global_variables() + tf.local_variables()\n var_by_name = dict([(v.name, v) for v in all_vars])\n trainable_names = set([x.name for x in tf.trainable_variables()])\n post_init_ops = []\n\n def log_failure(name, reason):\n logger.warn(\"[ReplicatedTrainer] Do not know how to sync variable '{}' across GPUs. \"\n \"Reason: {} \".format(name, reason))\n assert name not in trainable_names, \\\n \"The aforementioned variable is trainable, so this is probably a fatal error.\"\n logger.warn(\n \"[ReplicatedTrainer] This variable is non-trainable. 
\"\n \"Ignore this warning if you know it's OK to leave it out-of-sync.\")\n\n for v in all_vars:\n if not v.name.startswith('tower'):\n continue\n if v.name.startswith('tower0'):\n # in this trainer, the master name doesn't have the towerx/ prefix\n log_failure(v.name, \"Name should not have prefix 'tower0' in this trainer!\")\n continue # TODO some vars (EMA) may still startswith tower0\n\n split_name = v.name.split('/')\n prefix = split_name[0]\n realname = '/'.join(split_name[1:])\n if prefix in realname:\n log_failure(v.name, \"Prefix {} appears multiple times in its name!\".format(prefix))\n continue\n copy_from = var_by_name.get(realname)\n if copy_from is not None:\n post_init_ops.append(v.assign(copy_from.read_value()))\n else:\n log_failure(v.name, \"Cannot find {} in the graph!\".format(realname))\n logger.info(\n \"'sync_variables_from_main_tower' includes {} operations.\".format(len(post_init_ops)))\n return tf.group(*post_init_ops, name='sync_variables_from_main_tower')\n\n\nclass AsyncMultiGPUBuilder(DataParallelBuilder):\n \"\"\"\n Data-parallel training with async update.\n It builds one tower on each GPU with shared variable scope.\n Every tower computes the gradients and independently applies them to the\n variables, without synchronizing and averaging across towers.\n \"\"\"\n\n def __init__(self, towers, scale_gradient=True):\n \"\"\"\n Args:\n towers(list[int]): list of GPU ids.\n scale_gradient (bool): if True, will scale each gradient by ``1.0/nr_gpu``.\n \"\"\"\n super(AsyncMultiGPUBuilder, self).__init__(towers)\n self._scale_gradient = scale_gradient\n\n def call_for_each_tower(self, tower_fn):\n \"\"\"\n Call the function `tower_fn` under :class:`TowerContext` for each tower.\n\n Returns:\n a list, contains the return values of `tower_fn` on each tower.\n \"\"\"\n ps_device = 'cpu' if len(self.towers) >= 4 else 'gpu'\n\n raw_devices = ['/gpu:{}'.format(k) for k in self.towers]\n if ps_device == 'gpu':\n devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]\n else:\n devices = [tf.train.replica_device_setter(\n worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices]\n\n return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)\n\n def build(self, grad_list, get_opt_fn):\n \"\"\"\n Args:\n grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.\n get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer\n\n Returns:\n tf.Operation: the training op\n \"\"\"\n assert len(grad_list) == len(self.towers)\n DataParallelBuilder._check_grad_list(grad_list)\n\n if self._scale_gradient and len(self.towers) > 1:\n # pretend to average the grads, in order to make async and\n # sync have consistent effective learning rate\n gradproc = ScaleGradient(('.*', 1.0 / len(self.towers)), verbose=False)\n grad_list = [gradproc.process(gv) for gv in grad_list]\n # Ngpu x Nvar x 2\n\n train_ops = []\n opt = get_opt_fn()\n with tf.name_scope('async_apply_gradients'):\n for i, grad_and_vars in enumerate(zip(*grad_list)):\n # Ngpu x 2\n v = grad_and_vars[0][1]\n with tf.device(v.device):\n # will call apply_gradients (therefore gradproc) multiple times\n train_ops.append(opt.apply_gradients(\n grad_and_vars, name='apply_grad_{}'.format(i)))\n return tf.group(*train_ops, name='train_op')\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: PTB-LSTM.py\n# Author: Yuxin Wu\n\nimport argparse\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom tensorpack import *\nfrom tensorpack.tfutils import gradproc, optimizer, summary\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.argtools import memoized_ignoreargs\nfrom tensorpack.utils.fs import download, get_dataset_path\n\nimport reader as tfreader\nfrom reader import ptb_producer\n\nrnn = tf.contrib.rnn\n\nSEQ_LEN = 35\nHIDDEN_SIZE = 650\nNUM_LAYER = 2\nBATCH = 20\nDROPOUT = 0.5\nVOCAB_SIZE = None\nTRAIN_URL = 'https://raw.githubusercontent.com/tomsercu/lstm/master/data/ptb.train.txt'\nVALID_URL = 'https://raw.githubusercontent.com/tomsercu/lstm/master/data/ptb.valid.txt'\nTEST_URL = 'https://raw.githubusercontent.com/tomsercu/lstm/master/data/ptb.test.txt'\n\n\n@memoized_ignoreargs\ndef get_PennTreeBank(data_dir=None):\n if data_dir is None:\n data_dir = get_dataset_path('ptb_data')\n if not os.path.isfile(os.path.join(data_dir, 'ptb.train.txt')):\n download(TRAIN_URL, data_dir)\n download(VALID_URL, data_dir)\n download(TEST_URL, data_dir)\n word_to_id = tfreader._build_vocab(os.path.join(data_dir, 'ptb.train.txt'))\n data3 = [np.asarray(tfreader._file_to_word_ids(os.path.join(data_dir, fname), word_to_id))\n for fname in ['ptb.train.txt', 'ptb.valid.txt', 'ptb.test.txt']]\n return data3, word_to_id\n\n\nclass Model(ModelDesc):\n def inputs(self):\n return [tf.TensorSpec((None, SEQ_LEN), tf.int32, 'input'),\n tf.TensorSpec((None, SEQ_LEN), tf.int32, 'nextinput')]\n\n def build_graph(self, input, nextinput):\n is_training = get_current_tower_context().is_training\n initializer = tf.random_uniform_initializer(-0.05, 0.05)\n\n def get_basic_cell():\n cell = rnn.BasicLSTMCell(num_units=HIDDEN_SIZE, forget_bias=0.0, reuse=tf.get_variable_scope().reuse)\n if is_training:\n cell = rnn.DropoutWrapper(cell, output_keep_prob=1 - DROPOUT)\n return cell\n\n cell = rnn.MultiRNNCell([get_basic_cell() for _ in range(NUM_LAYER)])\n\n def get_v(n):\n return tf.get_variable(n, [BATCH, HIDDEN_SIZE],\n trainable=False,\n initializer=tf.constant_initializer())\n\n state_var = [rnn.LSTMStateTuple(\n get_v('c{}'.format(k)), get_v('h{}'.format(k))) for k in range(NUM_LAYER)]\n self.state = state_var = tuple(state_var)\n\n embeddingW = tf.get_variable('embedding', [VOCAB_SIZE, HIDDEN_SIZE], initializer=initializer)\n input_feature = tf.nn.embedding_lookup(embeddingW, input) # B x seqlen x hiddensize\n input_feature = Dropout(input_feature, keep_prob=1 - DROPOUT)\n\n with tf.variable_scope('LSTM', initializer=initializer):\n input_list = tf.unstack(input_feature, num=SEQ_LEN, axis=1) # seqlen x (Bxhidden)\n outputs, last_state = rnn.static_rnn(cell, input_list, state_var, scope='rnn')\n\n # update the hidden state after a rnn loop completes\n update_state_ops = []\n for k in range(NUM_LAYER):\n update_state_ops.extend([\n tf.assign(state_var[k].c, last_state[k].c),\n tf.assign(state_var[k].h, last_state[k].h)])\n\n # seqlen x (Bxrnnsize)\n output = tf.reshape(tf.concat(outputs, 1), [-1, HIDDEN_SIZE]) # (Bxseqlen) x hidden\n logits = FullyConnected('fc', output, VOCAB_SIZE,\n activation=tf.identity, kernel_initializer=initializer,\n bias_initializer=initializer)\n xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=tf.reshape(nextinput, [-1]))\n\n with tf.control_dependencies(update_state_ops):\n cost = tf.truediv(tf.reduce_sum(xent_loss),\n tf.cast(BATCH, tf.float32), name='cost') # 
log-perplexity\n\n perpl = tf.exp(cost / SEQ_LEN, name='perplexity')\n summary.add_moving_summary(perpl, cost)\n return cost\n\n def reset_lstm_state(self):\n s = self.state\n z = tf.zeros_like(s[0].c)\n ops = []\n for k in range(NUM_LAYER):\n ops.append(s[k].c.assign(z))\n ops.append(s[k].h.assign(z))\n return tf.group(*ops, name='reset_lstm_state')\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=1.0, trainable=False)\n opt = tf.train.GradientDescentOptimizer(lr)\n return optimizer.apply_grad_processors(\n opt, [gradproc.GlobalNormClip(5)])\n\n\ndef get_config():\n logger.auto_set_dir()\n\n data3, wd2id = get_PennTreeBank()\n global VOCAB_SIZE\n VOCAB_SIZE = len(wd2id)\n steps_per_epoch = (data3[0].shape[0] // BATCH - 1) // SEQ_LEN\n\n train_data = TensorInput(\n lambda: ptb_producer(data3[0], BATCH, SEQ_LEN),\n steps_per_epoch)\n val_data = TensorInput(\n lambda: ptb_producer(data3[1], BATCH, SEQ_LEN),\n (data3[1].shape[0] // BATCH - 1) // SEQ_LEN)\n\n test_data = TensorInput(\n lambda: ptb_producer(data3[2], BATCH, SEQ_LEN),\n (data3[2].shape[0] // BATCH - 1) // SEQ_LEN)\n\n M = Model()\n return TrainConfig(\n data=train_data,\n model=M,\n callbacks=[\n ModelSaver(),\n HyperParamSetterWithFunc(\n 'learning_rate',\n lambda e, x: x * 0.80 if e > 6 else x),\n RunOp(lambda: M.reset_lstm_state()),\n InferenceRunner(val_data, [ScalarStats(['cost'])]),\n RunOp(lambda: M.reset_lstm_state()),\n InferenceRunner(\n test_data,\n [ScalarStats(['cost'], prefix='test')], tower_name='InferenceTowerTest'),\n RunOp(lambda: M.reset_lstm_state()),\n CallbackFactory(\n trigger=lambda self:\n [self.trainer.monitors.put_scalar(\n 'validation_perplexity',\n np.exp(self.trainer.monitors.get_latest('validation_cost') / SEQ_LEN)),\n self.trainer.monitors.put_scalar(\n 'test_perplexity',\n np.exp(self.trainer.monitors.get_latest('test_cost') / SEQ_LEN))]\n ),\n ],\n max_epoch=70,\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=int, help='the GPU to use')\n parser.add_argument('--load', help='load model')\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\n\n config = get_config()\n if args.load:\n config.session_init = SaverRestore(args.load)\n launch_train_with_config(config, SimpleTrainer())\n",
"# -*- coding: utf-8 -*-\n# File: symbolic_functions.py\n\n\nimport tensorflow as tf\n\nfrom ..compat import tfv1\nfrom ..utils.develop import deprecated\n\n__all__ = ['print_stat', 'rms']\n\n\ndef print_stat(x, message=None):\n \"\"\" A simple print Op that might be easier to use than :meth:`tf.Print`.\n Use it like: ``x = print_stat(x, message='This is x')``.\n \"\"\"\n if message is None:\n message = x.op.name\n lst = [tf.shape(x), tf.reduce_mean(x)]\n if x.dtype.is_floating:\n lst.append(rms(x))\n return tf.Print(x, lst + [x], summarize=20,\n message=message, name='print_' + x.op.name)\n\n\n# for internal use only\ndef rms(x, name=None):\n \"\"\"\n Returns:\n root mean square of tensor x.\n \"\"\"\n if name is None:\n name = x.op.name + '/rms'\n with tfv1.name_scope(None): # name already contains the scope\n return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)\n return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)\n\n\n# don't hurt to leave it here\n@deprecated(\"Please implement it by yourself.\", \"2018-04-28\")\ndef psnr(prediction, ground_truth, maxp=None, name='psnr'):\n \"\"\"`Peak Signal to Noise Ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n\n .. math::\n\n PSNR = 20 \\cdot \\log_{10}(MAX_p) - 10 \\cdot \\log_{10}(MSE)\n\n Args:\n prediction: a :class:`tf.Tensor` representing the prediction signal.\n ground_truth: another :class:`tf.Tensor` with the same shape.\n maxp: maximum possible pixel value of the image (255 in in 8bit images)\n\n Returns:\n A scalar tensor representing the PSNR\n \"\"\"\n\n maxp = float(maxp)\n\n def log10(x):\n with tf.name_scope(\"log10\"):\n numerator = tf.log(x)\n denominator = tf.log(tf.constant(10, dtype=numerator.dtype))\n return numerator / denominator\n\n mse = tf.reduce_mean(tf.square(prediction - ground_truth))\n if maxp is None:\n psnr = tf.multiply(log10(mse), -10., name=name)\n else:\n psnr = tf.multiply(log10(mse), -10.)\n psnr = tf.add(tf.multiply(20., log10(maxp)), psnr, name=name)\n\n return psnr\n"
] |
[
[
"tensorflow.device",
"tensorflow.test.is_built_with_cuda",
"tensorflow.global_variables",
"tensorflow.local_variables",
"tensorflow.get_variable_scope",
"tensorflow.train.replica_device_setter",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.group"
],
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.unstack",
"tensorflow.random_uniform_initializer",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.exp",
"tensorflow.assign",
"tensorflow.get_variable_scope",
"tensorflow.constant_initializer",
"tensorflow.zeros_like",
"tensorflow.TensorSpec",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.variable_scope",
"tensorflow.group",
"tensorflow.nn.embedding_lookup"
],
[
"tensorflow.Print",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.log"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
Achazwl/cpm_kernels
|
[
"926d06461ad460dc8e80a66239328739eed16618",
"926d06461ad460dc8e80a66239328739eed16618"
] |
[
"tests/test_arith.py",
"cpm_kernels/torch/arith.py"
] |
[
"import cpm_kernels.torch as ct\nimport cpm_kernels.kernels as ck\nimport torch\nimport unittest\nimport random\n\nclass TestArith(unittest.TestCase):\n def test_global_scale(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 4, 5, 6, 8),\n (21, 66, 5, 3, 2),\n (11, 3, 5, 7, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n scale = random.random() * 10\n\n x1 = x.clone().requires_grad_()\n x2 = x.clone().requires_grad_()\n \n out = ct.global_scale(x1, scale)\n ans = ct.global_scaleTH(x2, scale)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out)\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n ct.global_scale_inplace(x, scale)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n\n def test_element_add(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 4, 5, 6, 8),\n (21, 66, 5, 3, 2),\n (11, 3, 5, 7, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n y = torch.randn(*shape, device=\"cuda\").half()\n\n x1 = x.clone().requires_grad_()\n x2 = x.clone().requires_grad_()\n y1 = y.clone().requires_grad_()\n y2 = y.clone().requires_grad_()\n \n out = ct.element_add(x1, y1)\n ans = ct.element_addTH(x2, y2)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out)\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n self.assertTrue(torch.isclose(y1.grad, y2.grad, 1e-2, 1e-2).all())\n\n ct.element_add_inplace(x, y)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n\n def test_element_mul(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 778, 1231),\n (31, 124, 321),\n (3, 4, 5, 6, 37),\n (21, 66, 5, 3, 2),\n (11, 3, 4, 7, 11)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n y = torch.randn(*shape, device=\"cuda\").half()\n\n x1 = x.clone().requires_grad_()\n y1 = y.clone().requires_grad_()\n x2 = x.clone().requires_grad_()\n y2 = y.clone().requires_grad_()\n \n out = ct.element_mul(x1, y1)\n ans = ct.element_mulTH(x2, y2)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out)\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n self.assertTrue(torch.isclose(y1.grad, y2.grad, 1e-2, 1e-2).all())\n\n ct.element_mul_inplace(x, y)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n \n def test_mask_inf(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 4, 5, 6, 8),\n (21, 66, 5, 3, 2),\n (11, 3, 5, 7, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n mask = torch.randn((shape[0], shape[2]), device=\"cuda\") < 0\n value = float(\"-inf\")\n \n x1 = x.clone().requires_grad_()\n x2 = x.clone().requires_grad_()\n\n ans = ct.maskTH(x1, mask, value)\n out = ct.mask(x2, mask, value)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out)\n out.backward(gradient=gradient_start)\n 
ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n ct.mask_inplace(x, mask, value)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n\n def test_mask_inf(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 5, 8),\n (21, 66, 2),\n (11, 3, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n mask = torch.randn((shape[0], shape[2]), device=\"cuda\") < 0\n value = 0\n \n x1 = x.clone().requires_grad_()\n x2 = x.clone().requires_grad_()\n\n ans = ct.maskTH(x1, mask, value)\n out = ct.mask(x2, mask, value)\n self.assertTrue(torch.isclose(out, ans, 1e-2).all())\n\n gradient_start = torch.randn_like(out)\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n ct.mask_inplace(x, mask, value)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n \n def test_batched_add(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 5, 8),\n (21, 66, 2),\n (11, 3, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n y = torch.randn(x.size()[1:], device=\"cuda\").half()\n\n x1 = x.clone().requires_grad_()\n y1 = y.clone().requires_grad_()\n x2 = x.clone().requires_grad_()\n y2 = y.clone().requires_grad_()\n\n out = ct.batched_add(x1, y1)\n ans = ct.batched_addTH(x2, y2)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out) / shape[0]\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n self.assertTrue(torch.isclose(y1.grad, y2.grad, 1e-2, 1e-2).all())\n\n ct.batched_add_inplace(x, y)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n \n def test_ln_mul_add(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 5, 8),\n (21, 66, 2),\n (11, 3, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n alpha = torch.randn(shape[1], device=\"cuda\").half()\n beta = torch.randn(shape[1], device=\"cuda\").half()\n\n x1 = x.clone().requires_grad_()\n alpha1 = alpha.clone().requires_grad_()\n beta1 = beta.clone().requires_grad_()\n\n x2 = x.clone().requires_grad_()\n alpha2 = alpha.clone().requires_grad_()\n beta2 = beta.clone().requires_grad_()\n\n out = ct.ln_mul_add(x1, alpha1, beta1)\n ans = ct.ln_mul_addTH(x2, alpha2, beta2)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out) / shape[0]\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n self.assertTrue(torch.isclose(alpha1.grad, alpha2.grad, 1e-2, 1e-2).all())\n\n self.assertTrue(torch.isclose(beta1.grad, beta2.grad, 1e-2, 1e-2).all())\n\n ct.ln_mul_add_inplace(x, alpha, beta)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n \n def test_ln_mul(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 5, 8),\n (21, 66, 2),\n (11, 3, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n alpha = (torch.randn(shape[1], device=\"cuda\").half() + 10) / 5\n\n x1 = x.clone().requires_grad_()\n 
alpha1 = alpha.clone().requires_grad_()\n\n x2 = x.clone().requires_grad_()\n alpha2 = alpha.clone().requires_grad_()\n\n out = ct.ln_mul(x1, alpha1)\n ans = ct.ln_mulTH(x2, alpha2)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out) / shape[0]\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n self.assertTrue(torch.isclose(alpha1.grad, alpha2.grad, 1e-2, 1e-2).all())\n\n ct.ln_mul_inplace(x, alpha)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n \n def test_ln_sub_div(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 5, 8),\n (21, 66, 2),\n (11, 3, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n alpha = (torch.randn(shape[1], device=\"cuda\").half() + 10) / 5\n beta = torch.randn(shape[1], device=\"cuda\").half()\n\n out = ct.ln_sub_div(x, alpha, beta)\n ans = ct.ln_sub_divTH(x, alpha, beta)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n ct.ln_sub_div_inplace(x, alpha, beta)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n\n def test_ln_div(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 5, 8),\n (21, 66, 2),\n (11, 3, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n alpha = (torch.randn(shape[1], device=\"cuda\").half() + 10) / 5\n\n out = ct.ln_div(x, alpha)\n ans = ct.ln_divTH(x, alpha)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n ct.ln_div_inplace(x, alpha)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n \n def test_ln_add(self):\n with torch.cuda.device(3):\n for shape in [\n (3, 5, 6),\n (17, 32, 128),\n (32, 1024, 4096),\n (33, 777, 1232),\n (31, 123, 566),\n (3, 5, 8),\n (21, 66, 2),\n (11, 3, 10)\n ]:\n x = torch.randn(*shape, device=\"cuda\").half()\n alpha = (torch.randn(shape[1], device=\"cuda\").half() + 10) / 5\n\n x1 = x.clone().requires_grad_()\n alpha1 = alpha.clone().requires_grad_()\n\n x2 = x.clone().requires_grad_()\n alpha2 = alpha.clone().requires_grad_()\n\n out = ct.ln_add(x1, alpha1)\n ans = ct.ln_addTH(x2, alpha2)\n self.assertTrue(torch.isclose(out, ans, 1e-2, 1e-2).all())\n\n gradient_start = torch.randn_like(out) / shape[0]\n out.backward(gradient=gradient_start)\n ans.backward(gradient=gradient_start)\n\n self.assertTrue(torch.isclose(x1.grad, x2.grad, 1e-2, 1e-2).all())\n\n self.assertTrue(torch.isclose(alpha1.grad, alpha2.grad, 1e-2, 1e-2).all())\n\n ct.ln_add_inplace(x, alpha)\n self.assertTrue(torch.isclose(x, ans, 1e-2, 1e-2).all())\n \n def test_batched_mul_add(self):\n with torch.cuda.device(3):\n for shape in [\n (3 * 5, 6),\n (17 * 32, 128),\n (32 * 1024, 4096),\n (33 * 777, 1232),\n (31 * 123, 566),\n (3 * 5, 8),\n (21 * 66, 2),\n (11 * 3, 10)\n ]:\n x = torch.randn(*shape, 2, device=\"cuda\").half()\n alpha = (torch.randn(shape[1], device=\"cuda\").half() + 10) / 5\n beta = torch.randn(shape[1], device=\"cuda\").half()\n\n ans = torch.empty(shape + (2,), dtype=torch.half, device=\"cuda\")\n ck.arith_ln_mul_add(\n shape[0], shape[1], 2,\n x.data_ptr(),\n alpha.data_ptr(),\n beta.data_ptr(),\n ans.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\n x_0, x_1 = x[:, :, 0].contiguous(), x[:, :, 1].contiguous()\n \n out = torch.empty( shape, dtype=torch.half, device=\"cuda\")\n 
ck.arith_batch_mul_add(\n shape[0], shape[1],\n x_0.data_ptr(),\n alpha.data_ptr(),\n beta.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n self.assertTrue(torch.isclose(out, ans[:, :, 0], 1e-5, 1e-5).all())\n\n ck.arith_batch_mul_add(\n shape[0], shape[1],\n x_1.data_ptr(),\n alpha.data_ptr(),\n beta.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n self.assertTrue(torch.isclose(out, ans[:, :, 1], 1e-5, 1e-5).all())\n \n def test_batched_mul(self):\n with torch.cuda.device(3):\n for shape in [\n (3 * 5, 6),\n (17 * 32, 128),\n (32 * 1024, 4096),\n (33 * 777, 1232),\n (31 * 123, 566),\n (3 * 5, 8),\n (21 * 66, 2),\n (11 * 3, 10)\n ]:\n x = torch.randn(*shape, 2, device=\"cuda\").half()\n alpha = torch.randn(shape[1], device=\"cuda\").half()\n\n ans = torch.empty(shape + (2,), dtype=torch.half, device=\"cuda\")\n ck.arith_ln_mul(\n shape[0], shape[1], 2,\n x.data_ptr(),\n alpha.data_ptr(),\n ans.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\n x_0, x_1 = x[:, :, 0].contiguous(), x[:, :, 1].contiguous()\n\n out = torch.empty( shape, dtype=torch.half, device=\"cuda\")\n ck.arith_batch_mul(\n shape[0], shape[1],\n x_0.data_ptr(),\n alpha.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n self.assertTrue(torch.isclose(out, ans[:, :, 0], 1e-5, 1e-5).all())\n ck.arith_batch_mul(\n shape[0], shape[1],\n x_1.data_ptr(),\n alpha.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n self.assertTrue(torch.isclose(out, ans[:, :, 1], 1e-5, 1e-5).all())",
"import torch\nfrom ..kernels import arith\n\nclass OpGlobalScale(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x : torch.Tensor, scale : float):\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n out = torch.empty(x.size(), device=x.device, dtype=torch.half)\n\n arith.arith_global_scale(\n x.numel(), x.data_ptr(),\n scale,\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n ctx.scale = scale\n return out\n \n @staticmethod\n def backward(ctx, grad_output : torch.Tensor):\n assert grad_output.is_cuda and grad_output.is_contiguous() and grad_output.dtype == torch.half\n grad = torch.empty(grad_output.size(), device=grad_output.device, dtype=torch.half)\n arith.arith_global_scale(\n grad_output.numel(), grad_output.data_ptr(),\n ctx.scale,\n grad.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return grad, None\n\ndef global_scale(x : torch.Tensor, scale : float) -> torch.Tensor:\n \"\"\"\n out = x * scale\n \"\"\"\n return OpGlobalScale.apply(x, scale)\n\ndef global_scaleTH(x : torch.Tensor, scale : float) -> torch.Tensor:\n \"\"\"\n out = x * scale\n \"\"\"\n return x * scale\n\ndef global_scale_inplace(x : torch.Tensor, scale : float) -> None:\n \"\"\"\n x *= scale\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n arith.arith_global_scale(\n x.numel(), x.data_ptr(),\n scale,\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\nclass OpElementAdd(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x : torch.Tensor, y : torch.Tensor):\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert y.is_cuda and y.is_contiguous() and y.dtype == torch.half\n assert x.device == y.device and x.size() == y.size()\n out = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_element_add(\n x.size(0),\n x.stride(0),\n x.data_ptr(),\n y.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return out\n \n @staticmethod\n def backward(ctx, grad_output):\n return grad_output, grad_output\n\n\ndef element_add(x : torch.Tensor, y : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x + y\n \"\"\"\n return OpElementAdd.apply(x, y)\n\ndef element_add_inplace(x : torch.Tensor, y : torch.Tensor) -> None:\n \"\"\"\n x += y\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert y.is_cuda and y.is_contiguous() and y.dtype == torch.half\n assert x.device == y.device\n arith.arith_element_add(\n x.size(0),\n x.stride(0),\n x.data_ptr(),\n y.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\[email protected]\ndef element_addTH(x : torch.Tensor, y : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x + y\n \"\"\"\n return x + y\n\nclass OpElementMul(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x : torch.Tensor, y : torch.Tensor):\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert y.is_cuda and y.is_contiguous() and y.dtype == torch.half\n assert x.device == y.device and x.size() == y.size()\n out = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_element_mul(\n x.size(0),\n x.stride(0),\n x.data_ptr(),\n y.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n ctx.save_for_backward(x, y)\n return out\n \n @staticmethod\n def backward(ctx, grad_output):\n assert grad_output.is_cuda and grad_output.is_contiguous() and grad_output.dtype == torch.half\n x, y = ctx.saved_tensors\n grad_x = torch.empty(x.size(), 
device=x.device, dtype=torch.half)\n grad_y = torch.empty(y.size(), device=y.device, dtype=torch.half)\n arith.arith_element_mul(\n x.size(0),\n x.stride(0),\n grad_output.data_ptr(),\n y.data_ptr(),\n grad_x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n arith.arith_element_mul(\n y.size(0),\n y.stride(0),\n x.data_ptr(),\n grad_output.data_ptr(),\n grad_y.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return grad_x, grad_y\n\ndef element_mul(x : torch.Tensor, y : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x * y\n \"\"\"\n return OpElementMul.apply(x, y)\n\ndef element_mul_inplace(x : torch.Tensor, y : torch.Tensor) -> None:\n \"\"\"\n x *= y\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert y.is_cuda and y.is_contiguous() and y.dtype == torch.half\n assert x.device == y.device and x.size() == y.size()\n arith.arith_element_mul(\n x.size(0),\n x.stride(0),\n x.data_ptr(),\n y.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\[email protected]\ndef element_mulTH(x : torch.Tensor, y : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x * y\n \"\"\"\n return x * y\n\nclass OpBatchedAdd(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x : torch.Tensor, y : torch.Tensor):\n assert x.is_contiguous() and x.is_cuda and x.dtype == torch.float16\n assert y.is_contiguous() and y.is_cuda and y.dtype == torch.float16\n assert x.device == y.device\n assert x.size()[1:] == y.size()\n\n out = torch.empty(x.size(), device=x.device, dtype=x.dtype)\n arith.arith_batch_add_forward(\n x.size(0),\n x.stride(0),\n x.data_ptr(),\n y.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return out\n\n @staticmethod\n def backward(ctx, grad_output : torch.Tensor):\n assert grad_output.is_contiguous() and grad_output.is_cuda and grad_output.dtype == torch.float16\n grad_y = torch.empty( grad_output.size()[1:], device=grad_output.device, dtype=grad_output.dtype)\n arith.arith_batch_add_backward(\n grad_output.size(0),\n grad_output.stride(0),\n grad_output.data_ptr(),\n grad_y.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return grad_output, grad_y\n\ndef batched_add(x : torch.Tensor, y : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x + y[None, :]\n \"\"\"\n return OpBatchedAdd.apply(x, y)\n\ndef batched_add_inplace(x : torch.Tensor, y : torch.Tensor) -> None:\n \"\"\"\n x += y[None, :]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert y.is_cuda and y.is_contiguous() and y.dtype == torch.half\n assert x.device == y.device\n assert x.size()[1:] == y.size()\n\n arith.arith_batch_add_forward(\n x.size(0),\n x.stride(0),\n x.data_ptr(),\n y.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n \[email protected]\ndef batched_addTH(x : torch.Tensor, y : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x + y[None, :]\n \"\"\"\n return x + y[None, :]\n\nclass OpLnMulAdd(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x : torch.Tensor, alpha : torch.Tensor, beta : torch.Tensor):\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert beta.is_cuda and beta.is_contiguous() and beta.dtype == torch.half\n assert x.device == alpha.device and x.device == beta.device\n assert x.ndim == 3 and alpha.ndim == 1 and beta.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n and beta.size(0) == n\n\n out = 
torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_ln_mul_add(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n beta.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n ctx.save_for_backward(x, alpha)\n return out\n \n @staticmethod\n def backward(ctx, grad_output : torch.Tensor):\n assert grad_output.is_cuda and grad_output.is_contiguous() and grad_output.dtype == torch.half\n assert grad_output.ndim == 3\n batch, n, m = grad_output.size()\n x, alpha = ctx.saved_tensors\n grad_alpha = torch.empty(alpha.size(), device=alpha.device, dtype=torch.half)\n grad_x = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_ln_mul(\n batch, n, m,\n grad_output.data_ptr(),\n alpha.data_ptr(),\n grad_x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n arith.arith_ln_mul_backward(\n batch, n, m,\n x.data_ptr(),\n grad_output.data_ptr(),\n grad_alpha.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n grad_beta = torch.empty((n,), device=x.device, dtype=torch.half)\n arith.arith_ln_add_backward(\n batch, n, m,\n grad_output.data_ptr(),\n grad_beta.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return grad_x, grad_alpha, grad_beta\n\ndef ln_mul_add(x : torch.Tensor, alpha : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x * alpha[None, :, None] + beta[None, :, None]\n \"\"\"\n return OpLnMulAdd.apply(x, alpha, beta)\n\ndef ln_mul_add_inplace(x : torch.Tensor, alpha : torch.Tensor, beta : torch.Tensor) -> None:\n \"\"\"\n x = x * alpha[None, :, None] + beta[None, :, None]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert beta.is_cuda and beta.is_contiguous() and beta.dtype == torch.half\n assert x.device == alpha.device and x.device == beta.device\n assert x.ndim == 3 and alpha.ndim == 1 and beta.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n and beta.size(0) == n\n\n arith.arith_ln_mul_add(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n beta.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\[email protected]\ndef ln_mul_addTH(x : torch.Tensor, alpha : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x * alpha[None, :, None] + beta[None, :, None]\n \"\"\"\n return x * alpha[None, :, None] + beta[None, :, None]\n\nclass OpLnMul(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x : torch.Tensor, alpha : torch.Tensor):\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert x.device == alpha.device\n assert x.ndim == 3 and alpha.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n\n\n out = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_ln_mul(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n ctx.save_for_backward(x, alpha)\n return out\n \n @staticmethod\n def backward(ctx, grad_output : torch.Tensor):\n assert grad_output.is_cuda and grad_output.is_contiguous() and grad_output.dtype == torch.half\n assert grad_output.ndim == 3\n batch, n, m = grad_output.size()\n x, alpha = ctx.saved_tensors\n grad_alpha = torch.empty(alpha.size(), device=alpha.device, dtype=torch.half)\n grad_x = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_ln_mul(\n batch, n, m,\n grad_output.data_ptr(),\n 
alpha.data_ptr(),\n grad_x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n arith.arith_ln_mul_backward(\n batch, n, m,\n x.data_ptr(),\n grad_output.data_ptr(),\n grad_alpha.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return grad_x, grad_alpha\n\ndef ln_mul(x : torch.Tensor, alpha : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x * alpha[None, :, None]\n \"\"\"\n return OpLnMul.apply(x, alpha)\n\ndef ln_mul_inplace(x : torch.Tensor, alpha : torch.Tensor) -> None:\n \"\"\"\n x = x * alpha[None, :, None]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert x.device == alpha.device\n assert x.ndim == 3 and alpha.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n\n\n arith.arith_ln_mul(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\[email protected]\ndef ln_mulTH(x : torch.Tensor, alpha : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x * alpha[None, :, None]\n \"\"\"\n return x * alpha[None, :, None]\n\nclass OpLnAdd(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x : torch.Tensor, beta : torch.Tensor):\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert beta.is_cuda and beta.is_contiguous() and beta.dtype == torch.half\n assert x.device == beta.device\n assert x.ndim == 3 and beta.ndim == 1\n batch, n, m = x.size()\n assert beta.size(0) == n\n\n out = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_ln_add(\n batch, n, m,\n x.data_ptr(),\n beta.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return out\n \n @staticmethod\n def backward(ctx, grad_output : torch.Tensor):\n assert grad_output.is_cuda and grad_output.is_contiguous() and grad_output.dtype == torch.half\n assert grad_output.ndim == 3\n batch, n, m = grad_output.size()\n \n grad_beta = torch.empty((n,), device=grad_output.device, dtype=torch.half)\n arith.arith_ln_add_backward(\n batch, n, m,\n grad_output.data_ptr(),\n grad_beta.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return grad_output, grad_beta\n\ndef ln_add(x : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x + beta[None, :, None]\n \"\"\"\n return OpLnAdd.apply(x, beta)\n\ndef ln_add_inplace(x : torch.Tensor, beta : torch.Tensor) -> None:\n \"\"\"\n x = x + beta[None, :, None]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert beta.is_cuda and beta.is_contiguous() and beta.dtype == torch.half\n assert x.device == beta.device\n assert x.ndim == 3 and beta.ndim == 1\n batch, n, m = x.size()\n assert beta.size(0) == n\n\n arith.arith_ln_add(\n batch, n, m,\n x.data_ptr(),\n beta.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\ndef ln_addTH(x : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x + beta[None, :, None]\n \"\"\"\n return x + beta[None, :, None]\n\ndef ln_sub_div(x : torch.Tensor, alpha : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = (x - beta[None, :, None]) / alpha[None, :, None]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert beta.is_cuda and beta.is_contiguous() and beta.dtype == torch.half\n assert x.device == alpha.device and x.device == beta.device\n assert x.ndim == 3 and alpha.ndim == 1 and 
beta.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n and beta.size(0) == n\n\n out = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_ln_sub_div(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n beta.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return out\n\ndef ln_sub_div_inplace(x : torch.Tensor, alpha : torch.Tensor, beta : torch.Tensor) -> None:\n \"\"\"\n x = (x - beta[None, :, None]) / alpha[None, :, None]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert beta.is_cuda and beta.is_contiguous() and beta.dtype == torch.half\n assert x.device == alpha.device and x.device == beta.device\n assert x.ndim == 3 and alpha.ndim == 1 and beta.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n and beta.size(0) == n\n\n arith.arith_ln_sub_div(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n beta.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\[email protected]\ndef ln_sub_divTH(x : torch.Tensor, alpha : torch.Tensor, beta : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = (x - beta[None, :, None]) / alpha[None, :, None]\n \"\"\"\n return (x - beta[None, :, None]) / alpha[None, :, None]\n\n\ndef ln_div(x : torch.Tensor, alpha : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x / alpha[None, :, None]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert x.device == alpha.device\n assert x.ndim == 3 and alpha.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n\n\n out = torch.empty(x.size(), device=x.device, dtype=torch.half)\n arith.arith_ln_div(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n out.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n return out\n\ndef ln_div_inplace(x : torch.Tensor, alpha : torch.Tensor) -> None:\n \"\"\"\n x = x / alpha[None, :, None]\n \"\"\"\n assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half\n assert alpha.is_cuda and alpha.is_contiguous() and alpha.dtype == torch.half\n assert x.device == alpha.device\n assert x.ndim == 3 and alpha.ndim == 1\n batch, n, m = x.size()\n assert alpha.size(0) == n\n\n arith.arith_ln_div(\n batch, n, m,\n x.data_ptr(),\n alpha.data_ptr(),\n x.data_ptr(),\n torch.cuda.current_stream().cuda_stream\n )\n\ndef ln_divTH(x : torch.Tensor, alpha : torch.Tensor) -> torch.Tensor:\n \"\"\"\n out = x / alpha[None, :, None]\n \"\"\"\n return x / alpha[None, :, None]\n\n"
] |
[
[
"torch.randn_like",
"torch.empty",
"torch.cuda.current_stream",
"torch.randn",
"torch.cuda.device",
"torch.isclose"
],
[
"torch.cuda.current_stream",
"torch.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jreback/blaze
|
[
"85c39335cac4ef7f2921a7f621bc13525880fc44"
] |
[
"blaze/compute/numpy.py"
] |
[
"from __future__ import absolute_import, division, print_function\n\nimport datetime\n\nimport numpy as np\nfrom pandas import DataFrame, Series\nfrom datashape import to_numpy\n\nfrom ..expr import Reduction, Field, Projection, Broadcast, Selection, ndim\nfrom ..expr import Distinct, Sort, Head, Label, ReLabel, Expr, Slice\nfrom ..expr import std, var, count, nunique, Summary\nfrom ..expr import BinOp, UnaryOp, USub, Not, nelements\nfrom ..expr import UTCFromTimestamp, DateTimeTruncate\n\nfrom .core import base, compute\nfrom ..dispatch import dispatch\nfrom into import into\nimport pandas as pd\n\n__all__ = ['np']\n\n\n@dispatch(Field, np.ndarray)\ndef compute_up(c, x, **kwargs):\n if x.dtype.names and c._name in x.dtype.names:\n return x[c._name]\n if not x.dtype.names and x.shape[1] == len(c._child.fields):\n return x[:, c._child.fields.index(c._name)]\n raise NotImplementedError() # pragma: no cover\n\n\n@dispatch(Projection, np.ndarray)\ndef compute_up(t, x, **kwargs):\n if x.dtype.names and all(col in x.dtype.names for col in t.fields):\n return x[t.fields]\n if not x.dtype.names and x.shape[1] == len(t._child.fields):\n return x[:, [t._child.fields.index(col) for col in t.fields]]\n raise NotImplementedError() # pragma: no cover\n\n\n@dispatch(Broadcast, np.ndarray)\ndef compute_up(t, x, **kwargs):\n d = dict((t._child[c]._expr, x[c]) for c in t._child.fields)\n return compute(t._expr, d)\n\n\n@dispatch(BinOp, np.ndarray, (np.ndarray, base))\ndef compute_up(t, lhs, rhs, **kwargs):\n return t.op(lhs, rhs)\n\n\n@dispatch(BinOp, np.ndarray)\ndef compute_up(t, data, **kwargs):\n if isinstance(t.lhs, Expr):\n return t.op(data, t.rhs)\n else:\n return t.op(t.lhs, data)\n\n\n@dispatch(BinOp, base, np.ndarray)\ndef compute_up(t, lhs, rhs, **kwargs):\n return t.op(lhs, rhs)\n\n\n@dispatch(UnaryOp, np.ndarray)\ndef compute_up(t, x, **kwargs):\n return getattr(np, t.symbol)(x)\n\n\n@dispatch(Not, np.ndarray)\ndef compute_up(t, x, **kwargs):\n return ~x\n\n\n@dispatch(USub, np.ndarray)\ndef compute_up(t, x, **kwargs):\n return -x\n\n\n@dispatch(count, np.ndarray)\ndef compute_up(t, x, **kwargs):\n if np.issubdtype(x.dtype, np.float): # scalar dtype\n return pd.notnull(x).sum(keepdims=t.keepdims, axis=t.axis)\n else:\n return np.ones(x.shape).sum(keepdims=t.keepdims, axis=t.axis)\n\n\n@dispatch(nunique, np.ndarray)\ndef compute_up(t, x, **kwargs):\n assert t.axis == tuple(range(ndim(t._child)))\n result = len(np.unique(x))\n if t.keepdims:\n result = np.array([result])\n return result\n\n\n@dispatch(Reduction, np.ndarray)\ndef compute_up(t, x, **kwargs):\n return getattr(x, t.symbol)(axis=t.axis, keepdims=t.keepdims)\n\n\ndef axify(expr, axis):\n \"\"\" inject axis argument into expression\n\n Helper function for compute_up(Summary, np.ndarray)\n\n >>> from blaze import symbol\n >>> s = symbol('s', '10 * 10 * int')\n >>> expr = s.sum()\n >>> axify(expr, axis=0)\n sum(s, axis=(0,))\n \"\"\"\n return type(expr)(expr._child, axis=axis)\n\n@dispatch(Summary, np.ndarray)\ndef compute_up(expr, data, **kwargs):\n shape, dtype = to_numpy(expr.dshape)\n if shape:\n result = np.empty(shape=shape, dtype=dtype)\n for n, v in zip(expr.names, expr.values):\n result[n] = compute(axify(v, expr.axis), data)\n return result\n else:\n return tuple(compute(axify(v, expr.axis), data) for v in expr.values)\n\n\n@dispatch((std, var), np.ndarray)\ndef compute_up(t, x, **kwargs):\n return getattr(x, t.symbol)(ddof=t.unbiased)\n\n\n@dispatch(Distinct, np.ndarray)\ndef compute_up(t, x, **kwargs):\n return 
np.unique(x)\n\n\n@dispatch(Sort, np.ndarray)\ndef compute_up(t, x, **kwargs):\n if (t.key in x.dtype.names or\n isinstance(t.key, list) and all(k in x.dtype.names for k in t.key)):\n result = np.sort(x, order=t.key)\n elif t.key:\n raise NotImplementedError(\"Sort key %s not supported\" % str(t.key))\n else:\n result = np.sort(x)\n\n if not t.ascending:\n result = result[::-1]\n\n return result\n\n\n@dispatch(Head, np.ndarray)\ndef compute_up(t, x, **kwargs):\n return x[:t.n]\n\n@dispatch(Label, np.ndarray)\ndef compute_up(t, x, **kwargs):\n return np.array(x, dtype=[(t.label, x.dtype.type)])\n\n\n@dispatch(ReLabel, np.ndarray)\ndef compute_up(t, x, **kwargs):\n types = [x.dtype[i] for i in range(len(x.dtype))]\n return np.array(x, dtype=list(zip(t.fields, types)))\n\n\n@dispatch(Selection, np.ndarray)\ndef compute_up(sel, x, **kwargs):\n return x[compute(sel.predicate, {sel._child: x})]\n\n@dispatch(UTCFromTimestamp, np.ndarray)\ndef compute_up(expr, data, **kwargs):\n return (data * 1e6).astype('M8[us]')\n\n@dispatch(Slice, np.ndarray)\ndef compute_up(expr, x, **kwargs):\n return x[expr.index]\n\n\n@dispatch(Expr, np.ndarray)\ndef compute_up(t, x, **kwargs):\n ds = t._child.dshape\n if x.ndim > 1 or isinstance(x, np.recarray) or x.dtype.fields is not None:\n return compute_up(t, into(DataFrame, x, dshape=ds), **kwargs)\n else:\n return compute_up(t, into(Series, x, dshape=ds), **kwargs)\n\n\n@dispatch(nelements, np.ndarray)\ndef compute_up(expr, data, **kwargs):\n axis = expr.axis\n if expr.keepdims:\n shape = tuple(data.shape[i] if i not in axis else 1\n for i in range(ndim(expr._child)))\n else:\n shape = tuple(data.shape[i] for i in range(ndim(expr._child))\n if i not in axis)\n value = np.prod([data.shape[i] for i in axis])\n result = np.empty(shape)\n result.fill(value)\n result = result.astype('int64')\n\n return result\n\n\n\n# Note the use of 'week': 'M8[D]' here.\n\n# We truncate week offsets \"manually\" in the compute_up implementation by first\n# converting to days then multiplying our measure by 7 this simplifies our code\n# by only requiring us to calculate the week offset relative to the day of week.\n\nprecision_map = {'year': 'M8[Y]',\n 'month': 'M8[M]',\n 'week': 'M8[D]',\n 'day': 'M8[D]',\n 'hour': 'M8[h]',\n 'minute': 'M8[m]',\n 'second': 'M8[s]',\n 'millisecond': 'M8[ms]',\n 'microsecond': 'M8[us]',\n 'nanosecond': 'M8[ns]'}\n\n\n# these offsets are integers in units of their representation\n\nepoch = datetime.datetime(1970, 1, 1)\noffsets = {\n 'week': epoch.isoweekday(),\n 'day': epoch.toordinal() # number of days since *Python's* epoch (01/01/01)\n}\n\n\n@dispatch(DateTimeTruncate, (np.ndarray, np.datetime64))\ndef compute_up(expr, data, **kwargs):\n np_dtype = precision_map[expr.unit]\n offset = offsets.get(expr.unit, 0)\n measure = expr.measure * 7 if expr.unit == 'week' else expr.measure\n result = (((data.astype(np_dtype)\n .view('int64')\n + offset)\n // measure\n * measure\n - offset)\n .astype(np_dtype))\n return result\n\n\n@dispatch(np.ndarray)\ndef chunks(x, chunksize=1024):\n start = 0\n n = len(x)\n while start < n:\n yield x[start:start + chunksize]\n start += chunksize\n"
] |
[
[
"pandas.notnull",
"numpy.unique",
"numpy.issubdtype",
"numpy.sort",
"numpy.ones",
"numpy.prod",
"numpy.array",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MonitSharma/Computational-Methods-in-Physics
|
[
"e3b2db36c37dd5f64b9a37ba39e9bb267ba27d85",
"e3b2db36c37dd5f64b9a37ba39e9bb267ba27d85",
"e3b2db36c37dd5f64b9a37ba39e9bb267ba27d85"
] |
[
"PSET2/P2/RK4.py",
"PSET1/Exercise/Lecture 2/runge_kutta_fehlberg.py",
"PSET3/P1 & P2/leap2.py"
] |
[
"import math\r\nimport time # import time to use for performance analysis\r\nimport numpy as np # import numpy for array space\r\nimport matplotlib.pyplot as plt # import matplotlib for graphing functions\r\nfrom scipy.integrate import odeint # import scipy to use the ordinary differencial equation integral function\r\n\r\n# Ordinary Differential Equation\r\ndef dy_dx(y, x): # takes inputs y and x\r\n return x + 2*y # returns the function y/(e^x - 1)\r\n\r\n\r\n# Runge-Kutta Forumla based off the formula used in class\r\ndef rungeKutta(y, x, h): # takes y, x, and h inputs\r\n k1 = dy_dx(y, x) # solves k1 using the differential equation function\r\n k2 = dy_dx(y + ((0.5 * h) * k1), x + 0.5 * h) # solves k2 based on the answer from k1 using the differential equation function\r\n k3 = dy_dx(y + ((0.5 * h) * k2), x + 0.5 * h) # solves k3 based on the answer from k2 using the differential equation function\r\n k4 = dy_dx(y + (h * k3), x + h) # solves k4 based on the answer from k3 using the differential equation function\r\n\r\n t4 = (1.0 / 6.0) * (k1 + (2 * k2) + (2 * k3) + k4) # solves for t4 by taking a 6th of k1 + 2*k2 + 2*k3 + k4\r\n\r\n y = y + (t4 * h) # solves for y by taking the initial y value and adding it to t4*h\r\n return y # returns y\r\n\r\n# initial variable values\r\ny = 6\r\n # initial y value of 5 as outlined in the assignment\r\nx = 0 # initial x value of 1 as outlined in the assignment\r\nh = 0.02 # initial h value of 0.02 as outlined in the assignment\r\nn = 2000 # initial n value of 2000 chosen between 1000 or 2000 as outlined in the assignment\r\n\r\n# Runge-Kutta x and y arrays \r\nxsr = [] # x-space runge-kutta array to store the values of x for the runge-kutta function\r\nysr = [] # y-space runge-kutta array to store the values of y for the runge-kutta function\r\n\r\n# ODEint x and y arrays, solution, and time analysis\r\ntso = time.time() # time start for ODEint function solution\r\n\r\nxso = np.linspace(0.0, 0.2 , 30) # x-space ODEint from 1 to n*h (40) plus 1 with a step size of n\r\nyso = odeint(dy_dx, y, xso) # y-space ODEint useing the odeint function from scicpy to find the y-space\r\n\r\nteo = time.time() # time end fore ODEint function solution\r\ntto = teo - tso # total time the ODEint function to solve\r\n\r\n# graphing ODEint\r\nplt.title(\"ODE Function Analysis\") # set the title of the graph\r\nplt.xlabel(\"x\") # set the x label on the graph\r\nplt.ylabel(\"y\") # set the y label on the graph\r\nplt.plot(xso, yso, 'g-', label = \"ODEint\", linewidth = 2) # set the ODE line to be red and label it\r\n\r\nplt.legend() # shows the legend on the graph\r\nplt.savefig('Exact Solution.png') # displays the graph\r\n\r\n# Runge-Kutta solution and time analysis\r\ntsr = time.time() # time start for runge-kutta function solution\r\n\r\nwhile (x <= 0.2): # for loop to run the runge-kutta function n number of times\r\n xsr.append(x) # append the x value to the x-space runge-kutta array\r\n ysr.append(y) # append the y value to the y-space runge-kutta array\r\n \r\n y = rungeKutta(y, x, h) # update the y value using the rungeKutta function\r\n \r\n x += h # update the x value by moving one step forward (0.02)\r\n\r\nter = time.time() # time end for runge-kutta function solution\r\nttr = ter - tsr # total time the runge-kutta function to solve\r\n\r\ntd = ttr - tto # time difference between ODEint and runge-kutta function\r\n\r\n# graphing runge-kutta\r\nplt.title(\"Runge-Kutta Function Analysis\") # set the title of the graph\r\nplt.xlabel(\"x\") # set the x label 
on the graph\r\nplt.ylabel(\"y\") # set the y label on the graph\r\nplt.plot(xsr, ysr, 'b--', label = \"Runge Kutta\") # set the runge-kutta to be blue and label it\r\n\r\nplt.legend() # shows the legend on the graph\r\nplt.savefig('Runge_Kutta.png') # displays the graph\r\n\r\n# solutions\r\nprint(\"\\nODEint Solution: \", yso[-1]) # ODEint function solution\r\nprint(\"Runge-Kutta Solution: \", ysr[-1]) # Runge-Kutta function solution\r\n\r\n# Print statement for time difference\r\nprint(\"\\nODEint Time: \", tto) # print the ODEint time\r\nprint(\"Runge Kutta Time: \", ttr) # print the runge-kutta time\r\nprint(\"ODEint is \", td, \" seconds faster\\n\\n\") # print the difference between ODEint and runge-kutta\r\n\r\n# error calculation\r\nerror = 0 # initial error value of 0\r\nerrorRange = [] # array to store error over xn\r\nerrorSpace = np.linspace(0.0,0.2, 30) # error space for error analysis\r\nfor i in range(len(ysr)): # for loop to run through every x values\r\n error += (np.abs(ysr[i] - yso[i])/yso[i]) * 100 # sum all the error values using the percentage error formula\r\n errorRange.append((np.abs(ysr[i] - yso[i])/yso[i]) * 100)\r\n print(\"Percent Error at x =\", i, \":\", (np.abs(ysr[i] - yso[i])/yso[i]) * 100) # print error at each x value\r\n\r\nprint(\"\\nAverage Error Percent:\", error/((int)(n * h) + 1), \"\\n\") # print the total error divided by the total number of x values\r\n\r\n# graphing error\r\n#plt.title(\"Error Analysis\") # set the title of the graph\r\n#plt.xlabel(\"xn\") # set the x label on the graph\r\n#plt.ylabel(\"error\") # set the y label on the graph\r\n#plt.plot(errorSpace, errorRange, label = \"Error over Xn\") # create the line and label it\r\n#plt.legend() # shows the legend on the graph\r\n#plt.show() # displays the graph\r\n#\r\n# graphing both functions\r\nplt.title(\"Runge-Kutta and ODE Function Analysis\") # set the title of the graph\r\nplt.xlabel(\"x\") # set the x label on the graph\r\nplt.ylabel(\"y\") # set the y label on the graph\r\nplt.plot(xso, yso, 'r-', label = \"ODEint\", linewidth = 2) # set the ODE line to be red and label it\r\nplt.plot(xsr, ysr, 'bo', label = \"Runge Kutta\") # set the runge-kutta to be blue and label it\r\nplt.legend() # shows the legend on the graph\r\nplt.savefig('Comparison.png') ",
"from numpy import*\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\ndef rungeKutta(f, to, yo, tEnd, tau):\r\n def increment(f, t, y, tau): # поиск приближённого решения методом Рунге—Кутта—Фельберга.\r\n k1 = tau*f(t, y)\r\n k2 = tau*f(t+(1/4)*tau, y+(1/4)*k1)\r\n k3 = tau*f(t+(3/8)*tau, y+(3/32)*k1+(9/32)*k2)\r\n k4 = tau*f(t+(12/13)*tau, y+(1932/2197)*k1-(7200/2197)*k2+(7296/2197)*k3)\r\n k5 = tau*f(t+tau, y+(439/216)*k1-8*k2+(3680/513)*k3 -(845/4104)*k4)\r\n k6 = tau*f(t+(1/2)*tau, y-(8/27)*k1+2*k2-(3544/2565)*k3 +(1859/4104)*k4-(11/40)*k5)\r\n return (16/135)*k1+(6656/12825)*k3+(28561/56430)*k4-(9/50)*k5+(2/55)*k6\r\n\r\n t = [] # preparing an empty list t\r\n y = [] # preparing an empty list y\r\n t.append(to) # \r\n y.append(yo) # внесение в список y начального значения yo\r\n while to < tEnd: # внесение результатов расчёта в массивы t,y\r\n tau = min(tau, tEnd - to) # определение минимального шага tau\r\n yo = yo + increment(f, to, yo, tau) # расчёт значения в точке t0,y0 для задачи Коши\r\n to = to + tau # приращение времени\r\n t.append(to) # заполнение массива t\r\n y.append(yo) # заполнение массива y\r\n return array(t), array(y)\r\n\r\ndef f(t, y):\r\n return (1/power(t,2))*(t*y - power(y,2))\r\n\r\nto = 1 # начальный момент отсчёта времени\r\ntEnd = 3 # конечный момент отсчёта времени\r\n#yo = array([0,1,1,0]) # начальные условия\r\n#yo = array([0,1,1,0,1])\r\n#yo = 2\r\n#yo = array([0.2, 1, 1, 0, 1, 1.5])\r\ntau = 0.2\r\nt = to # шаг\r\ntt = [] # preparing an empty list tt\r\nyy = []\r\n\r\n\r\nwhile(to<=tEnd):\r\n t, y = rungeKutta(f, to, yo, tEnd, tau)\r\n t = t+ tau\r\n tt.append(t)\r\n yy.append(y)\r\ny1 = array([i[0] for i in y])\r\ny2 = array([i[1] for i in y])\r\ny3 = array([i[2] for i in y])\r\ny4 = array([i[3] for i in y])\r\ny5 = array([i[4] for i in y])\r\ny6 = array([i[5] for i in y])\r\n\r\n# визуализация\r\nplt.plot(t, y1, label = 'y1')\r\nplt.plot(t, y2, label = 'y2')\r\nplt.plot(t, y3, label = 'y3')\r\nplt.plot(t, y4, label = 'y4')\r\nplt.plot(t, y5, label = 'y5')\r\nplt.plot(t, y6, label = 'y6')\r\n\r\nprint(y1)\r\nprint(y2)\r\nprint(y3)\r\nprint(y4)\r\nprint(y5)\r\nprint(y6)\r\n\r\nplt.title(\"Results of the numerical solution of the ODE \\n system using the \\n Runge–Kutta–Felberg method\")\r\nplt.xlabel('t')\r\nplt.ylabel('Yn')\r\nplt.legend(loc='best')\r\n# plt.xlim(0, 8)\r\n# plt.ylim(-0.1, 2)\r\nplt.grid(True)\r\nplt.show()",
"# EqStrigMovMat.py: Animated leapfrog for string wi MatPlotLib\r\n\r\nfrom numpy import *\r\nimport numpy as np, matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\nrho = 0.01; ten = 40.; c = sqrt(ten/rho) # Density, tension \r\nc1 = c; ratio = c*c/(c1*c1) # CFL criterium = 1\r\nxi = np.zeros((101,3), float) # Declaration \r\nk = range(0,101)\r\n\r\ndef Initialize(): # Initial conditions\r\n for i in range(0, 81): xi[i, 0] = 0.00125*i \r\n for i in range (81, 101): xi[i, 0] = 0.1 - 0.005*(i - 80) \r\n \r\ndef animate(num): \r\n for i in range(1, 100): \r\n xi[i,2] = 2.*xi[i,1] - xi[i,0] + ratio*(xi[i+1,1]\r\n \t + xi[i-1,1] - 2*xi[i,1])\r\n line.set_data(k,xi[k,2]) # Data to plot ,x,y \r\n for m in range (0,101): \r\n xi[m, 0] = xi[m, 1] # Recycle array \r\n xi[m, 1] = xi[m, 2]\r\n return line \r\n \r\nInitialize() # Plot initial string \r\nfig = plt.figure() \r\nax = fig.add_subplot(111, autoscale_on=False, xlim=(0, 101), \r\n\tylim=(-0.15, 0.15))\r\nax.grid() # Plot grid\r\nplt.title(\"Vibrating String\")\r\nline, = ax.plot(k, xi[k,0], lw=2) \r\nfor i in range(1,100): \r\n xi[i,1] = xi[i,0] + 0.5*ratio*(xi[i+1,0] + xi[i-1,0] -2*xi[i,0]) \r\nani = animation.FuncAnimation(fig, animate,1) # Dummy 1 \r\nplt.show() \r\nprint(\"finished\")"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.abs",
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"scipy.integrate.odeint",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.title",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
danmalowany/trains-model-zoo
|
[
"2091100057afae9593b18ddcefd81b7d46724a96"
] |
[
"models/detection/SSD/priorbox_optimization/priors_optimization_utils.py"
] |
[
"from itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom trains import Task\n\nfrom models.detection.SSD.priorbox_optimization import PriorOptimizationInput, ImageSizeTuple\nfrom models.detection.SSD.priorbox_optimization.bbox_clustering import get_box_pairwise_iou\n\n\ndef collect_ground_truth_stats(ground_truth_loader):\n def just_meta_iter(loader):\n for gt in loader:\n yield gt[-1]\n\n gt = list(just_meta_iter(ground_truth_loader))\n gt_df = get_gt_df_from_gt(gt)\n\n return gt_df\n\n\ndef get_gt_df_from_gt(gt):\n # removing all \"crowd\" labels\n\n def process_meta_element(element):\n boxes = element['boxes']\n iscrowd = element['iscrowd']\n labels = element['labels']\n\n orig_boxes = [box for box, crowd in zip(boxes, iscrowd) if not crowd]\n orig_labels = [label for label, crowd in zip(labels, iscrowd) if not crowd]\n\n orig_boxes = np.around(orig_boxes)\n width = np.around(orig_boxes[:, 2] - orig_boxes[:, 0])\n height = np.around(orig_boxes[:, 3] - orig_boxes[:, 1])\n\n area = width * height\n good_boxes = np.where(area > 0)[0]\n if len(good_boxes) != len(orig_boxes):\n boxes = orig_boxes[good_boxes]\n labels = np.array(orig_labels)[good_boxes].tolist()\n height = height[good_boxes]\n width = width[good_boxes]\n else:\n boxes = orig_boxes\n labels = orig_labels\n\n pairwise_iou = get_box_pairwise_iou(boxes)\n score = np.around(pairwise_iou.sum(axis=0) - 1, decimals=2)\n\n return [(w, h, label, q) for w, h, label, q in zip(width, height, labels, score)]\n\n processed_gt = [process_meta_element(el) for elem in gt for el in elem if len(el['boxes']) > 0]\n all_gt = [elem for elements in processed_gt for elem in elements]\n column_names = ['width', 'height', 'label', 'overlap_score']\n\n return pd.DataFrame(all_gt, columns=column_names)\n\n\ndef get_optimization_input(ground_truth_df, fmap_sizes, input_priors, image_size):\n def fmap_to_pixel_fov(fmap_sizes):\n # fm = [np.array([fmap, fmap]) for fmap in fmap_sizes]\n # fm_np = np.vstack(fm)\n # fm_in_pixels = np.array(image_size) / fm_np\n fm_in_pixels = np.array(image_size) * \\\n np.array([3/fmap_sizes[-7], 3/fmap_sizes[-6], 3/(fmap_sizes[-5]+2), 3/(fmap_sizes[-4]+2),\n 3/(fmap_sizes[-3]+2), 3/(fmap_sizes[-2]+2), 1])\n fm_in_pixels = [np.array([fmap, fmap]) for fmap in fm_in_pixels]\n fm_in_pixels = np.vstack(fm_in_pixels)\n return pd.DataFrame(fm_in_pixels, columns=['width', 'height'])\n\n task = Task.current_task()\n fmap = [np.array([fmap, fmap]) for fmap in fmap_sizes]\n task.upload_artifact('feature_maps_sizes', pd.DataFrame(np.vstack(fmap), columns=['width', 'height']))\n\n fmap_df = fmap_to_pixel_fov(fmap_sizes)\n task.upload_artifact('feature_maps_pixel_fov', fmap_df)\n\n in_priors_df = pd.DataFrame(input_priors.numpy(), columns=['match_group', 'width', 'height'])\n target_image_size = ImageSizeTuple(w=image_size, h=image_size)\n\n return PriorOptimizationInput(\n target_image_size=target_image_size,\n gt_bbox=ground_truth_df,\n fmap_sizes=fmap_df,\n in_priors=in_priors_df,\n )\n\n\ndef convert_optimization_result_to_priors(fm_sizes, steps, opt_result):\n priors_output = opt_result.out_priors\n by_resolution = list(priors_output.groupby('match_group'))\n num_anchors_per_resolution = [len(priors[-1]) for priors in by_resolution]\n if len(num_anchors_per_resolution) < len(fm_sizes):\n print('Some resolution were empty - setting default prior per empty resolution')\n curr_match_groups = opt_result.out_priors.match_group.to_list()\n curr_prior_number = len(curr_match_groups)\n empty_match_groups 
= list(set(range(len(fm_sizes))) - set(np.unique(curr_match_groups)))\n for empty_match_group in empty_match_groups:\n prior_size = opt_result.target_image_size.w / fm_sizes[empty_match_group]\n new_prior = pd.DataFrame(np.array([empty_match_group, prior_size**2, 1, prior_size, prior_size]).reshape(1, 5),\n columns=['match_group', 'area', 'aspect_ratio', 'width', 'height'])\n new_prior['index'] = 'prior_{}'.format(curr_prior_number)\n new_prior = new_prior.set_index('index')\n priors_output = priors_output.append(new_prior)\n curr_prior_number += 1\n by_resolution.append((empty_match_group, new_prior))\n num_anchors_per_resolution.append(1)\n Task.current_task().register_artifact('priors_output', priors_output.sort_values('match_group'))\n by_resolution = list(priors_output.groupby('match_group'))\n\n boxes = []\n priors = []\n for i, (fm_size, new_priors) in enumerate(zip(fm_sizes, by_resolution)):\n for h, w in product(range(fm_size), repeat=2):\n cx = (w + 0.5) * steps[i]\n cy = (h + 0.5) * steps[i]\n\n for prior in new_priors[-1].iterrows():\n w = prior[-1].width\n h = prior[-1].height\n boxes.append((cx, cy, w, h))\n priors.append((i, w, h))\n\n return torch.Tensor(boxes), torch.Tensor(np.unique(np.array(priors), axis=0)), num_anchors_per_resolution\n"
] |
[
[
"torch.Tensor",
"numpy.unique",
"numpy.around",
"pandas.DataFrame",
"numpy.array",
"numpy.where",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
JYLFamily/Kannada_MNIST
|
[
"5bc4989d581c050ba9b9363cb83829fa35921c4a"
] |
[
"ResNet/ResNet.py"
] |
[
"# coding:utf-8\n\nimport os\nimport gc\nimport numpy as np\nimport pandas as pd\nfrom keras.layers import *\nfrom keras.models import Model\nfrom keras.utils import Sequence\nfrom keras.optimizers import Adam\nfrom matplotlib import pyplot as plt\nfrom keras.initializers import he_normal\nfrom sklearn.model_selection import KFold\nfrom scikitplot.metrics import plot_confusion_matrix\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nnp.random.seed(7)\npd.set_option(\"max_rows\", None)\npd.set_option(\"max_columns\", None)\n\n\ndef residual(input_tensor, filters, strides, flag):\n x = ZeroPadding2D(padding=1, data_format=\"channels_last\")(input_tensor)\n x = Conv2D(\n filters=filters,\n kernel_size=3,\n strides=strides,\n data_format=\"channels_last\",\n kernel_initializer=he_normal(7))(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n\n x = ZeroPadding2D(padding=1, data_format=\"channels_last\")(x)\n x = Conv2D(\n filters=filters,\n kernel_size=3,\n data_format=\"channels_last\",\n kernel_initializer=he_normal(7))(x)\n x = BatchNormalization()(x)\n\n if flag:\n input_tensor = Conv2D(\n filters=filters,\n kernel_size=1,\n strides=strides,\n data_format=\"channels_last\",\n kernel_initializer=he_normal(7))(input_tensor)\n\n return Activation(\"relu\")(Add()([x, input_tensor]))\n\n\ndef residual_net(output=\"binary\"):\n # input layer\n input_layer = Input(shape=(28, 28, 1))\n x = ZeroPadding2D(padding=1, data_format=\"channels_last\")(input_layer)\n x = Conv2D(\n filters=64,\n kernel_size=3,\n data_format=\"channels_last\",\n kernel_initializer=he_normal(7))(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n\n # residual block 1\n x = residual(input_tensor=x, filters=64, strides=1, flag=False)\n x = residual(input_tensor=x, filters=64, strides=1, flag=False)\n\n # residual block 2\n x = residual(input_tensor=x, filters=128, strides=2, flag=True)\n x = residual(input_tensor=x, filters=128, strides=1, flag=False)\n\n # residual block 3\n x = residual(input_tensor=x, filters=256, strides=2, flag=True)\n x = residual(input_tensor=x, filters=256, strides=1, flag=False)\n\n # output layer\n if output == \"binary\":\n x = GlobalAveragePooling2D()(x)\n output_layer = Dense(units=1, activation=\"sigmoid\", kernel_initializer=he_normal(7))(x)\n else:\n x = GlobalAveragePooling2D()(x)\n output_layer = Dense(units=10, activation=\"softmax\", kernel_initializer=he_normal(7))(x)\n\n return Model(inputs=input_layer, outputs=output_layer)\n\n\nclass FitGenerator(Sequence):\n def __init__(self, feature, label, batch_size, image_augment):\n self.__index = np.arange(feature.shape[0])\n self.__feature, self.__label = feature, label\n self.__batch_size, self.__image_augment = batch_size, image_augment\n\n def __len__(self):\n return self.__feature.shape[0] // self.__batch_size\n\n def __getitem__(self, idx):\n index = self.__index[idx * self.__batch_size: (idx + 1) * self.__batch_size]\n batch_feature, batch_label = (\n np.array([image / 255 for image in self.__feature[index]]), self.__label[index])\n\n if self.__image_augment is not None:\n batch_feature, batch_label = (\n next(self.__image_augment.flow(np.array(batch_feature), batch_label, batch_size=self.__batch_size)))\n\n return batch_feature, batch_label\n\n def on_epoch_end(self):\n np.random.shuffle(self.__index)\n\n\nclass PredictGenerator(Sequence):\n def __init__(self, feature):\n self.__index = np.arange(feature.shape[0])\n self.__feature = 
feature\n\n def __len__(self):\n return self.__feature.shape[0]\n\n def __getitem__(self, idx):\n index = self.__index[idx: (idx + 1)]\n\n batch_feature = np.array([image / 255 for image in self.__feature[index]])\n\n return batch_feature\n\n\nclass ResNet(object):\n def __init__(self, *, path):\n self.__path = path\n self.__train, self.__test = [None for _ in range(2)]\n self.__train_feature, self.__test_feature = [None for _ in range(2)]\n self.__train_label, self.__test_index = [None for _ in range(2)]\n\n self.__folds = None\n self.__sub_preds = None\n self.__sub_mixed = None\n self.__val_preds = None\n self.__val_mixed = None\n\n self.__image_data_generator = None\n self.__res_net = None\n\n def data_read(self):\n self.__train = pd.read_csv(os.path.join(self.__path, \"train.csv\"))\n self.__test = pd.read_csv(os.path.join(self.__path, \"test.csv\"))\n\n def data_prepare(self):\n self.__train_feature, self.__train_label = (\n self.__train.iloc[:, 1:].copy(deep=True), self.__train.iloc[:, 0].copy(deep=True))\n self.__test_feature, self.__test_index = (\n self.__test.iloc[:, 1:].copy(deep=True), self.__test.iloc[:, [0]].copy(deep=True))\n del self.__train, self.__test\n gc.collect()\n\n self.__train_feature, self.__train_label = self.__train_feature.to_numpy(), self.__train_label.to_numpy()\n self.__test_feature = self.__test_feature.to_numpy()\n\n self.__train_feature = self.__train_feature.reshape((-1, 28, 28, 1))\n self.__test_feature = self.__test_feature.reshape((-1, 28, 28, 1))\n\n self.__image_data_generator = ImageDataGenerator(\n rotation_range=15,\n zoom_range=0.1,\n width_shift_range=0.1,\n height_shift_range=0.1,\n data_format=\"channels_last\"\n )\n\n def model_fit_predict(self):\n self.__folds = KFold(n_splits=5, shuffle=True, random_state=7)\n self.__sub_preds = np.zeros(shape=(self.__test_feature.shape[0], 10))\n self.__val_preds = np.zeros(shape=(self.__train_feature.shape[0], 10))\n\n # 1. network\n for n_fold, (trn_idx, val_idx) in enumerate(self.__folds.split(\n X=self.__train_feature, y=self.__train_label)):\n print(\"Fold: \" + str(n_fold))\n trn_x = np.copy(self.__train_feature[trn_idx])\n val_x = np.copy(self.__train_feature[val_idx])\n tes_x = np.copy(self.__test_feature)\n\n trn_y = np.copy(self.__train_label[trn_idx])\n val_y = np.copy(self.__train_label[val_idx])\n\n self.__res_net = residual_net(output=\"softmax\")\n self.__res_net.compile(optimizer=Adam(), loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n self.__res_net.fit_generator(\n generator=FitGenerator(trn_x, trn_y, 256, self.__image_data_generator),\n steps_per_epoch=trn_x.shape[0] // 256,\n epochs=60,\n verbose=1,\n callbacks=[\n ReduceLROnPlateau(\n patience=3\n ),\n EarlyStopping(\n patience=6,\n restore_best_weights=True\n )\n ],\n validation_data=FitGenerator(val_x, val_y, 256, None),\n validation_steps=val_x.shape[0] // 256,\n workers=1,\n use_multiprocessing=False\n )\n\n self.__sub_preds += self.__res_net.predict_generator(\n generator=PredictGenerator(tes_x),\n steps=tes_x.shape[0],\n workers=1,\n use_multiprocessing=False) / self.__folds.n_splits\n\n self.__val_preds[val_idx, :] = self.__res_net.predict_generator(\n generator=PredictGenerator(val_x),\n steps=val_x.shape[0],\n workers=1,\n use_multiprocessing=False)\n\n # 2. 
network\n tra_index = np.where(np.logical_or(\n self.__train_label == 0,\n self.__train_label == 1\n ))[0].tolist()\n tes_index = np.where(np.logical_or(\n np.argmax(self.__sub_preds, axis=1) == 0,\n np.argmax(self.__sub_preds, axis=1) == 1\n ))[0].tolist()\n\n self.__folds = KFold(n_splits=5, shuffle=True, random_state=7)\n self.__sub_mixed = np.zeros(shape=(self.__test_feature[tes_index].shape[0], ))\n self.__val_mixed = np.zeros(shape=(self.__train_feature[tra_index].shape[0],))\n\n for n_fold, (trn_idx, val_idx) in enumerate(self.__folds.split(\n X=self.__train_feature[tra_index], y=self.__train_label[tra_index])):\n print(\"Fold: \" + str(n_fold))\n trn_x = np.copy(self.__train_feature[tra_index][trn_idx])\n val_x = np.copy(self.__train_feature[tra_index][val_idx])\n tes_x = np.copy(self.__test_feature[tes_index])\n\n trn_y = np.copy(self.__train_label[tra_index][trn_idx])\n val_y = np.copy(self.__train_label[tra_index][val_idx])\n\n self.__res_net = residual_net(output=\"binary\")\n self.__res_net.compile(optimizer=Adam(), loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n self.__res_net.fit_generator(\n generator=FitGenerator(trn_x, trn_y, 256, self.__image_data_generator),\n steps_per_epoch=trn_x.shape[0] // 256,\n epochs=60,\n verbose=1,\n callbacks=[\n ReduceLROnPlateau(\n patience=3\n ),\n EarlyStopping(\n patience=6,\n restore_best_weights=True\n )\n ],\n validation_data=FitGenerator(val_x, val_y, 256, None),\n validation_steps=val_x.shape[0] // 256,\n workers=1,\n use_multiprocessing=False\n )\n\n self.__sub_mixed += self.__res_net.predict_generator(\n generator=PredictGenerator(tes_x),\n steps=tes_x.shape[0],\n workers=1,\n use_multiprocessing=False).reshape(-1, ) / self.__folds.n_splits\n\n self.__val_mixed[val_idx] = self.__res_net.predict_generator(\n generator=PredictGenerator(val_x),\n steps=val_x.shape[0],\n workers=1,\n use_multiprocessing=False).reshape(-1, )\n\n self.__sub_preds = np.argmax(self.__sub_preds, axis=1)\n self.__sub_preds[tes_index] = np.where(self.__sub_mixed > 0.5, 1, 0)\n\n self.__val_preds = np.argmax(self.__val_preds, axis=1)\n self.__val_preds[tra_index] = np.where(self.__val_mixed > 0.5, 1, 0)\n\n def data_write(self):\n self.__test_index[\"label\"] = self.__sub_preds\n self.__test_index.to_csv(\"submission.csv\", index=False)\n\n def plot_image(self):\n plot_confusion_matrix(self.__train_label, self.__val_preds, normalize=True)\n plt.show()\n\n\nif __name__ == \"__main__\":\n rn = ResNet(path=\"D:\\\\Kaggle\\\\Kannada_MNIST\")\n rn.data_read()\n rn.data_prepare()\n rn.model_fit_predict()\n rn.data_write()\n\n"
] |
[
[
"numpy.random.seed",
"numpy.arange",
"numpy.random.shuffle",
"sklearn.model_selection.KFold",
"numpy.logical_or",
"numpy.copy",
"numpy.argmax",
"pandas.set_option",
"numpy.array",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cychu5/GenerativeLSTM
|
[
"33a945465bed5902aa9b101340a429c8a37c4415"
] |
[
"model_training/samples_creator.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 14 19:13:15 2020\n\n@author: Manuel Camargo\n\"\"\"\nimport itertools\nimport numpy as np\nimport random\n\nfrom nltk.util import ngrams\nimport keras.utils as ku\n\n\nclass SequencesCreator():\n\n def __init__(self, one_timestamp, ac_index, rl_index):\n \"\"\"constructor\"\"\"\n self.one_timestamp = one_timestamp\n self.ac_index = ac_index\n self.rl_index = rl_index\n self._vectorizers = dict()\n self._vec_dispatcher = {'basic': self._vectorize_seq,\n 'inter': self._vectorize_seq_inter,\n 'gan': self.gan_simple}\n\n def vectorize(self, model_type, log, params, add_cols):\n self.log = log\n columns = self.define_columns(add_cols, self.one_timestamp)\n loader = self._get_vectorizer(model_type)\n return loader(params, columns)\n\n def register_vectorizer(self, model_type, vectorizer):\n try:\n self._vectorizers[model_type] = self._vec_dispatcher[vectorizer]\n except KeyError:\n raise ValueError(vectorizer)\n\n def _get_vectorizer(self, model_type):\n vectorizer = self._vectorizers.get(model_type)\n if not vectorizer:\n raise ValueError(model_type)\n return vectorizer\n\n @staticmethod\n def define_columns(add_cols, one_timestamp):\n columns = ['ac_index', 'rl_index', 'dur_norm']\n add_cols = [x+'_norm' if x != 'weekday' else x for x in add_cols ]\n columns.extend(add_cols)\n if not one_timestamp:\n columns.extend(['wait_norm'])\n return columns\n\n def _vectorize_seq(self, parms, columns):\n \"\"\"\n Dataframe vectorizer.\n parms:\n columns: list of features to vectorize.\n parms (dict): parms for training the network\n Returns:\n dict: Dictionary that contains all the LSTM inputs.\n \"\"\"\n # TODO: reorganizar este metoo para poder vectorizar los tiempos\n # con uno o dos features de tiempo, posiblemente la idea es\n # hacer equi como si fueran intercases.\n times = ['dur_norm'] if parms['one_timestamp'] else ['dur_norm', 'wait_norm']\n equi = {'ac_index': 'activities', 'rl_index': 'roles'}\n vec = {'prefixes': dict(),\n 'next_evt': dict()}\n x_times_dict = dict()\n y_times_dict = dict()\n self.log = self.reformat_events(columns, parms['one_timestamp'])\n # n-gram definition\n for i, _ in enumerate(self.log):\n for x in columns:\n serie = list(ngrams(self.log[i][x], parms['n_size'],\n pad_left=True, left_pad_symbol=0))\n y_serie = [x[-1] for x in serie]\n serie = serie[:-1]\n y_serie = y_serie[1:]\n if x in list(equi.keys()):\n vec['prefixes'][equi[x]] = (vec['prefixes'][equi[x]] + serie\n if i > 0 else serie)\n vec['next_evt'][equi[x]] = (vec['next_evt'][equi[x]] + y_serie\n if i > 0 else y_serie)\n elif x in times:\n x_times_dict[x] = (\n x_times_dict[x] + serie if i > 0 else serie)\n y_times_dict[x] = (\n y_times_dict[x] + y_serie if i > 0 else y_serie)\n\n # Transform task, dur and role prefixes in vectors\n for value in equi.values():\n vec['prefixes'][value] = np.array(vec['prefixes'][value])\n vec['next_evt'][value] = np.array(vec['next_evt'][value])\n # one-hot encode target values\n vec['next_evt']['activities'] = ku.to_categorical(\n vec['next_evt']['activities'], num_classes=len(self.ac_index))\n vec['next_evt']['roles'] = ku.to_categorical(\n vec['next_evt']['roles'], num_classes=len(self.rl_index))\n # reshape times\n for key, value in x_times_dict.items():\n x_times_dict[key] = np.array(value)\n x_times_dict[key] = x_times_dict[key].reshape(\n (x_times_dict[key].shape[0], x_times_dict[key].shape[1], 1))\n vec['prefixes']['times'] = np.dstack(list(x_times_dict.values()))\n # Reshape y times attributes (suffixes, number of 
attributes)\n vec['next_evt']['times'] = np.dstack(list(y_times_dict.values()))[0]\n return vec\n\n def _vectorize_seq_inter(self, parms, columns):\n \"\"\"\n Dataframe vectorizer to process intercase or data atributes features.\n parms:\n columns: list of features to vectorize.\n parms (dict): parms for training the network\n Returns:\n dict: Dictionary that contains all the LSTM inputs.\n \"\"\"\n times = ['dur_norm'] if parms['one_timestamp'] else ['dur_norm', 'wait_norm']\n equi = {'ac_index': 'activities', 'rl_index': 'roles'}\n vec = {'prefixes': dict(),\n 'next_evt': dict()}\n x_weekday = list()\n y_weekday = list()\n # times\n x_times_dict = dict()\n y_times_dict = dict()\n # intercases\n x_inter_dict = dict()\n y_inter_dict = dict()\n # self.log = self.log[self.log.caseid.isin(['1', '1770'])].head(3)\n self.log = self.reformat_events(columns, parms['one_timestamp'])\n for i, _ in enumerate(self.log):\n for x in columns:\n serie = list(ngrams(self.log[i][x], parms['n_size'],\n pad_left=True, left_pad_symbol=0))\n y_serie = [x[-1] for x in serie]\n serie = serie[:-1]\n y_serie = y_serie[1:]\n if x in list(equi.keys()):\n vec['prefixes'][equi[x]] = (\n vec['prefixes'][equi[x]] + serie if i > 0 else serie)\n vec['next_evt'][equi[x]] = (\n vec['next_evt'][equi[x]] + y_serie\n if i > 0 else y_serie)\n elif x in times:\n x_times_dict[x] = (\n x_times_dict[x] + serie if i > 0 else serie)\n y_times_dict[x] = (\n y_times_dict[x] + y_serie if i > 0 else y_serie)\n elif x == 'weekday':\n x_weekday = (\n x_weekday + serie if i > 0 else serie)\n y_weekday = (\n y_weekday + y_serie if i > 0 else y_serie)\n else:\n x_inter_dict[x] = (\n x_inter_dict[x] + serie if i > 0 else serie)\n y_inter_dict[x] = (\n y_inter_dict[x] + y_serie if i > 0 else y_serie)\n # Transform task, dur and role prefixes in vectors\n for value in equi.values():\n vec['prefixes'][value] = np.array(vec['prefixes'][value])\n vec['next_evt'][value] = np.array(vec['next_evt'][value])\n # one-hot encode target values\n vec['next_evt']['activities'] = ku.to_categorical(\n vec['next_evt']['activities'], num_classes=len(self.ac_index))\n vec['next_evt']['roles'] = ku.to_categorical(\n vec['next_evt']['roles'], num_classes=len(self.rl_index))\n # reshape times\n for key, value in x_times_dict.items():\n x_times_dict[key] = np.array(value)\n x_times_dict[key] = x_times_dict[key].reshape(\n (x_times_dict[key].shape[0], x_times_dict[key].shape[1], 1))\n vec['prefixes']['times'] = np.dstack(list(x_times_dict.values()))\n # Reshape y times attributes (suffixes, number of attributes)\n vec['next_evt']['times'] = np.dstack(list(y_times_dict.values()))[0]\n # Reshape intercase attributes (prefixes, n-gram size, number of attributes)\n for key, value in x_inter_dict.items():\n x_inter_dict[key] = np.array(value)\n x_inter_dict[key] = x_inter_dict[key].reshape(\n (x_inter_dict[key].shape[0], x_inter_dict[key].shape[1], 1))\n vec['prefixes']['inter_attr'] = np.dstack(list(x_inter_dict.values()))\n # Reshape y intercase attributes (suffixes, number of attributes)\n vec['next_evt']['inter_attr'] = np.dstack(list(y_inter_dict.values()))[0]\n if 'weekday' in columns:\n # Onehot encode weekday\n x_weekday = ku.to_categorical(x_weekday, num_classes=7)\n y_weekday = ku.to_categorical(y_weekday, num_classes=7)\n vec['prefixes']['inter_attr'] = np.concatenate(\n [vec['prefixes']['inter_attr'], x_weekday], axis=2)\n vec['next_evt']['inter_attr'] = np.concatenate(\n [vec['next_evt']['inter_attr'], y_weekday], axis=1)\n return vec\n\n\n def 
gan_simple(self, parms, columns):\n print(columns)\n vec = {'training':dict()}\n pairs = self.log.copy()\n pairs = pairs[['ac_index', 'rl_index']]\n pairs = pairs.to_records(index=False).tolist()\n # Vectorize discriminator training real inputs\n vec['training']['activities'] = [x[0] for x in pairs]\n vec['training']['activities'] = ku.to_categorical(\n vec['training']['activities'], num_classes=len(self.ac_index))\n vec['training']['roles'] = [x[1] for x in pairs]\n vec['training']['roles'] = ku.to_categorical(\n vec['training']['roles'], num_classes=len(self.rl_index))\n vec['training']['class'] = np.zeros(len(pairs))\n \n # If the discriminator will be pretrained create pretraining examples\n if parms['gan_pretrain']:\n # one third of real events randomly selected\n n_positive = int(round(len(pairs)/3))\n negative_ratio = 2\n \n batch_size = n_positive * (1 + negative_ratio)\n batch = np.zeros((batch_size, 3))\n pairs_set = set(pairs)\n activities = list(self.ac_index.keys())\n roles = list(self.rl_index.keys())\n # randomly choose positive examples\n idx = 0\n for idx, (activity, role) in enumerate(\n random.sample(pairs, n_positive)):\n batch[idx, :] = (activity, role, 0)\n # Increment idx by 1\n idx += 1\n # Add negative examples until reach batch size\n while idx < batch_size:\n # random selection\n random_ac = random.randrange(len(activities))\n random_rl = random.randrange(len(roles))\n # Check to make sure this is not a positive example\n if (random_ac, random_rl) not in pairs_set:\n # Add to batch and increment index, 0 due classification task\n batch[idx, :] = (random_ac, random_rl, 1)\n idx += 1\n vec['pretraining'] = dict()\n # Make sure to shuffle order\n np.random.shuffle(batch)\n vec['pretraining']['activities'] = ku.to_categorical(\n batch[:, 0], num_classes=len(self.ac_index))\n vec['pretraining']['roles'] = ku.to_categorical(\n batch[:, 1], num_classes=len(self.rl_index))\n vec['pretraining']['class'] = batch[:, 2]\n return vec\n\n # =============================================================================\n # Reformat events\n # =============================================================================\n def reformat_events(self, columns, one_timestamp):\n \"\"\"Creates series of activities, roles and relative times per trace.\n parms:\n self.log: dataframe.\n ac_index (dict): index of activities.\n rl_index (dict): index of roles.\n Returns:\n list: lists of activities, roles and relative times.\n \"\"\"\n temp_data = list()\n log_df = self.log.to_dict('records')\n key = 'end_timestamp' if one_timestamp else 'start_timestamp'\n log_df = sorted(log_df, key=lambda x: (x['caseid'], key))\n for key, group in itertools.groupby(log_df, key=lambda x: x['caseid']):\n trace = list(group)\n temp_dict = dict()\n for x in columns:\n serie = [y[x] for y in trace]\n if x == 'ac_index':\n serie.insert(0, self.ac_index[('start')])\n serie.append(self.ac_index[('end')])\n elif x == 'rl_index':\n serie.insert(0, self.rl_index[('start')])\n serie.append(self.rl_index[('end')])\n else:\n serie.insert(0, 0)\n serie.append(0)\n temp_dict = {**{x: serie}, **temp_dict}\n temp_dict = {**{'caseid': key}, **temp_dict}\n temp_data.append(temp_dict)\n return temp_data"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.random.shuffle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Marky0/pandas
|
[
"d0dd9820668ddd4a7648ff9fbd581e67298c77db"
] |
[
"pandas/core/indexes/base.py"
] |
[
"from datetime import datetime, timedelta\nimport operator\nfrom textwrap import dedent\nfrom typing import Union\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n algos as libalgos, index as libindex, join as libjoin, lib)\nfrom pandas._libs.lib import is_datetime_array\nfrom pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp\nfrom pandas._libs.tslibs.timezones import tz_compare\nfrom pandas.compat import set_function_name\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\n\nfrom pandas.core.dtypes.cast import maybe_cast_to_integer_array\nfrom pandas.core.dtypes.common import (\n ensure_categorical, ensure_int64, ensure_object, ensure_platform_int,\n is_bool, is_bool_dtype, is_categorical, is_categorical_dtype,\n is_datetime64_any_dtype, is_datetime64tz_dtype, is_dtype_equal,\n is_dtype_union_equal, is_extension_array_dtype, is_float, is_float_dtype,\n is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator,\n is_list_like, is_object_dtype, is_period_dtype, is_scalar,\n is_signed_integer_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype,\n pandas_dtype)\nimport pandas.core.dtypes.concat as _concat\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame, ABCDateOffset, ABCDatetimeArray, ABCIndexClass,\n ABCMultiIndex, ABCPandasArray, ABCPeriodIndex, ABCSeries,\n ABCTimedeltaArray, ABCTimedeltaIndex)\nfrom pandas.core.dtypes.missing import array_equivalent, isna\n\nfrom pandas.core import ops\nfrom pandas.core.accessor import CachedAccessor, DirNamesMixin\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.base import IndexOpsMixin, PandasObject\nimport pandas.core.common as com\nfrom pandas.core.indexes.frozen import FrozenList\nimport pandas.core.missing as missing\nfrom pandas.core.ops import get_op_result_name, make_invalid_op\nimport pandas.core.sorting as sorting\nfrom pandas.core.strings import StringMethods\n\nfrom pandas.io.formats.printing import (\n default_pprint, format_object_attrs, format_object_summary, pprint_thing)\n\n__all__ = ['Index']\n\n_unsortable_types = frozenset(('mixed', 'mixed-integer'))\n\n_index_doc_kwargs = dict(klass='Index', inplace='',\n target_klass='Index',\n unique='Index', duplicated='np.ndarray')\n_index_shared_docs = dict()\n\n\ndef _try_get_item(x):\n try:\n return x.item()\n except AttributeError:\n return x\n\n\ndef _make_comparison_op(op, cls):\n def cmp_method(self, other):\n if isinstance(other, (np.ndarray, Index, ABCSeries)):\n if other.ndim > 0 and len(self) != len(other):\n raise ValueError('Lengths must match to compare')\n\n if is_object_dtype(self) and not isinstance(self, ABCMultiIndex):\n # don't pass MultiIndex\n with np.errstate(all='ignore'):\n result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)\n\n else:\n\n # numpy will show a DeprecationWarning on invalid elementwise\n # comparisons, this will raise in the future\n with warnings.catch_warnings(record=True):\n warnings.filterwarnings(\"ignore\", \"elementwise\", FutureWarning)\n with np.errstate(all='ignore'):\n result = op(self.values, np.asarray(other))\n\n # technically we could support bool dtyped Index\n # for now just return the indexing array directly\n if is_bool_dtype(result):\n return result\n try:\n return Index(result)\n except TypeError:\n return result\n\n name = '__{name}__'.format(name=op.__name__)\n # TODO: docstring?\n return set_function_name(cmp_method, name, 
cls)\n\n\ndef _make_arithmetic_op(op, cls):\n def index_arithmetic_method(self, other):\n if isinstance(other, (ABCSeries, ABCDataFrame)):\n return NotImplemented\n elif isinstance(other, ABCTimedeltaIndex):\n # Defer to subclass implementation\n return NotImplemented\n elif (isinstance(other, (np.ndarray, ABCTimedeltaArray)) and\n is_timedelta64_dtype(other)):\n # GH#22390; wrap in Series for op, this will in turn wrap in\n # TimedeltaIndex, but will correctly raise TypeError instead of\n # NullFrequencyError for add/sub ops\n from pandas import Series\n other = Series(other)\n out = op(self, other)\n return Index(out, name=self.name)\n\n other = self._validate_for_numeric_binop(other, op)\n\n # handle time-based others\n if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):\n return self._evaluate_with_timedelta_like(other, op)\n elif isinstance(other, (datetime, np.datetime64)):\n return self._evaluate_with_datetime_like(other, op)\n\n values = self.values\n with np.errstate(all='ignore'):\n result = op(values, other)\n\n result = missing.dispatch_missing(op, values, other, result)\n\n attrs = self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n if op is divmod:\n result = (Index(result[0], **attrs), Index(result[1], **attrs))\n else:\n result = Index(result, **attrs)\n return result\n\n name = '__{name}__'.format(name=op.__name__)\n # TODO: docstring?\n return set_function_name(index_arithmetic_method, name, cls)\n\n\nclass InvalidIndexError(Exception):\n pass\n\n\n_o_dtype = np.dtype(object)\n_Identity = object\n\n\ndef _new_Index(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__.\n \"\"\"\n # required for backward compat, because PI can't be instantiated with\n # ordinals through __new__ GH #13277\n if issubclass(cls, ABCPeriodIndex):\n from pandas.core.indexes.period import _new_PeriodIndex\n return _new_PeriodIndex(cls, **d)\n return cls.__new__(cls, **d)\n\n\nclass Index(IndexOpsMixin, PandasObject):\n \"\"\"\n Immutable ndarray implementing an ordered, sliceable set. 
The basic object\n storing axis labels for all pandas objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: object)\n If dtype is None, we find the dtype that best fits the data.\n If an actual dtype is provided, we coerce to that dtype if it's safe.\n Otherwise, an error will be raised.\n copy : bool\n Make a copy of input ndarray\n name : object\n Name to be stored in the index\n tupleize_cols : bool (default: True)\n When True, attempt to create a MultiIndex if possible\n\n See Also\n --------\n RangeIndex : Index implementing a monotonic integer range.\n CategoricalIndex : Index of :class:`Categorical` s.\n MultiIndex : A multi-level, or hierarchical, Index.\n IntervalIndex : An Index of :class:`Interval` s.\n DatetimeIndex, TimedeltaIndex, PeriodIndex\n Int64Index, UInt64Index, Float64Index\n\n Notes\n -----\n An Index instance can **only** contain hashable objects\n\n Examples\n --------\n >>> pd.Index([1, 2, 3])\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> pd.Index(list('abc'))\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n # tolist is not actually deprecated, just suppressed in the __dir__\n _deprecations = DirNamesMixin._deprecations | frozenset(['tolist'])\n\n # To hand over control to subclasses\n _join_precedence = 1\n\n # Cython methods; see github.com/cython/cython/issues/2647\n # for why we need to wrap these instead of making them class attributes\n # Moreover, cython will choose the appropriate-dtyped sub-function\n # given the dtypes of the passed arguments\n def _left_indexer_unique(self, left, right):\n return libjoin.left_join_indexer_unique(left, right)\n\n def _left_indexer(self, left, right):\n return libjoin.left_join_indexer(left, right)\n\n def _inner_indexer(self, left, right):\n return libjoin.inner_join_indexer(left, right)\n\n def _outer_indexer(self, left, right):\n return libjoin.outer_join_indexer(left, right)\n\n _typ = 'index'\n _data = None\n _id = None\n name = None\n asi8 = None\n _comparables = ['name']\n _attributes = ['name']\n _is_numeric_dtype = False\n _can_hold_na = True\n\n # would we like our indexing holder to defer to us\n _defer_to_indexing = False\n\n # prioritize current class for _shallow_copy_with_infer,\n # used to infer integers as datetime-likes\n _infer_as_myclass = False\n\n _engine_type = libindex.ObjectEngine\n\n _accessors = {'str'}\n\n str = CachedAccessor(\"str\", StringMethods)\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(cls, data=None, dtype=None, copy=False, name=None,\n fastpath=None, tupleize_cols=True, **kwargs):\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n\n if fastpath is not None:\n warnings.warn(\"The 'fastpath' keyword is deprecated, and will be \"\n \"removed in a future version.\",\n FutureWarning, stacklevel=2)\n if fastpath:\n return cls._simple_new(data, name)\n\n from .range import RangeIndex\n if isinstance(data, ABCPandasArray):\n # ensure users don't accidentally put a PandasArray in an index.\n data = data.to_numpy()\n\n # range\n if isinstance(data, RangeIndex):\n return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)\n elif isinstance(data, range):\n return RangeIndex.from_range(data, copy=copy, dtype=dtype,\n name=name)\n\n # categorical\n elif is_categorical_dtype(data) or is_categorical_dtype(dtype):\n from .category import CategoricalIndex\n return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,\n **kwargs)\n\n # interval\n elif 
((is_interval_dtype(data) or is_interval_dtype(dtype)) and\n not is_object_dtype(dtype)):\n from .interval import IntervalIndex\n closed = kwargs.get('closed', None)\n return IntervalIndex(data, dtype=dtype, name=name, copy=copy,\n closed=closed)\n\n elif (is_datetime64_any_dtype(data) or\n (dtype is not None and is_datetime64_any_dtype(dtype)) or\n 'tz' in kwargs):\n from pandas import DatetimeIndex\n\n if dtype is not None and is_dtype_equal(_o_dtype, dtype):\n # GH#23524 passing `dtype=object` to DatetimeIndex is invalid,\n # will raise in the where `data` is already tz-aware. So\n # we leave it out of this step and cast to object-dtype after\n # the DatetimeIndex construction.\n # Note we can pass copy=False because the .astype below\n # will always make a copy\n result = DatetimeIndex(data, copy=False, name=name, **kwargs)\n return result.astype(object)\n else:\n result = DatetimeIndex(data, copy=copy, name=name,\n dtype=dtype, **kwargs)\n return result\n\n elif (is_timedelta64_dtype(data) or\n (dtype is not None and is_timedelta64_dtype(dtype))):\n from pandas import TimedeltaIndex\n if dtype is not None and is_dtype_equal(_o_dtype, dtype):\n # Note we can pass copy=False because the .astype below\n # will always make a copy\n result = TimedeltaIndex(data, copy=False, name=name, **kwargs)\n return result.astype(object)\n else:\n result = TimedeltaIndex(data, copy=copy, name=name,\n dtype=dtype, **kwargs)\n return result\n\n elif is_period_dtype(data) and not is_object_dtype(dtype):\n from pandas import PeriodIndex\n result = PeriodIndex(data, copy=copy, name=name, **kwargs)\n return result\n\n # extension dtype\n elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype):\n data = np.asarray(data)\n if not (dtype is None or is_object_dtype(dtype)):\n\n # coerce to the provided dtype\n data = dtype.construct_array_type()._from_sequence(\n data, dtype=dtype, copy=False)\n\n # coerce to the object dtype\n data = data.astype(object)\n return Index(data, dtype=object, copy=copy, name=name,\n **kwargs)\n\n # index-like\n elif isinstance(data, (np.ndarray, Index, ABCSeries)):\n if dtype is not None:\n try:\n\n # we need to avoid having numpy coerce\n # things that look like ints/floats to ints unless\n # they are actually ints, e.g. 
'0' and 0.0\n # should not be coerced\n # GH 11836\n if is_integer_dtype(dtype):\n inferred = lib.infer_dtype(data, skipna=False)\n if inferred == 'integer':\n data = maybe_cast_to_integer_array(data, dtype,\n copy=copy)\n elif inferred in ['floating', 'mixed-integer-float']:\n if isna(data).any():\n raise ValueError('cannot convert float '\n 'NaN to integer')\n\n if inferred == \"mixed-integer-float\":\n data = maybe_cast_to_integer_array(data, dtype)\n\n # If we are actually all equal to integers,\n # then coerce to integer.\n try:\n return cls._try_convert_to_int_index(\n data, copy, name, dtype)\n except ValueError:\n pass\n\n # Return an actual float index.\n from .numeric import Float64Index\n return Float64Index(data, copy=copy, dtype=dtype,\n name=name)\n\n elif inferred == 'string':\n pass\n else:\n data = data.astype(dtype)\n elif is_float_dtype(dtype):\n inferred = lib.infer_dtype(data, skipna=False)\n if inferred == 'string':\n pass\n else:\n data = data.astype(dtype)\n else:\n data = np.array(data, dtype=dtype, copy=copy)\n\n except (TypeError, ValueError) as e:\n msg = str(e)\n if (\"cannot convert float\" in msg or\n \"Trying to coerce float values to integer\" in msg):\n raise\n\n # maybe coerce to a sub-class\n from pandas.core.indexes.period import (\n PeriodIndex, IncompatibleFrequency)\n\n if is_signed_integer_dtype(data.dtype):\n from .numeric import Int64Index\n return Int64Index(data, copy=copy, dtype=dtype, name=name)\n elif is_unsigned_integer_dtype(data.dtype):\n from .numeric import UInt64Index\n return UInt64Index(data, copy=copy, dtype=dtype, name=name)\n elif is_float_dtype(data.dtype):\n from .numeric import Float64Index\n return Float64Index(data, copy=copy, dtype=dtype, name=name)\n elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):\n subarr = data.astype('object')\n else:\n subarr = com.asarray_tuplesafe(data, dtype=object)\n\n # asarray_tuplesafe does not always copy underlying data,\n # so need to make sure that this happens\n if copy:\n subarr = subarr.copy()\n\n if dtype is None:\n inferred = lib.infer_dtype(subarr, skipna=False)\n if inferred == 'integer':\n try:\n return cls._try_convert_to_int_index(\n subarr, copy, name, dtype)\n except ValueError:\n pass\n\n return Index(subarr, copy=copy,\n dtype=object, name=name)\n elif inferred in ['floating', 'mixed-integer-float']:\n from .numeric import Float64Index\n return Float64Index(subarr, copy=copy, name=name)\n elif inferred == 'interval':\n from .interval import IntervalIndex\n return IntervalIndex(subarr, name=name, copy=copy)\n elif inferred == 'boolean':\n # don't support boolean explicitly ATM\n pass\n elif inferred != 'string':\n if inferred.startswith('datetime'):\n if (lib.is_datetime_with_singletz_array(subarr) or\n 'tz' in kwargs):\n # only when subarr has the same tz\n from pandas import DatetimeIndex\n try:\n return DatetimeIndex(subarr, copy=copy,\n name=name, **kwargs)\n except OutOfBoundsDatetime:\n pass\n\n elif inferred.startswith('timedelta'):\n from pandas import TimedeltaIndex\n return TimedeltaIndex(subarr, copy=copy, name=name,\n **kwargs)\n elif inferred == 'period':\n try:\n return PeriodIndex(subarr, name=name, **kwargs)\n except IncompatibleFrequency:\n pass\n return cls._simple_new(subarr, name)\n\n elif hasattr(data, '__array__'):\n return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,\n **kwargs)\n elif data is None or is_scalar(data):\n cls._scalar_data_error(data)\n else:\n if tupleize_cols and is_list_like(data):\n # GH21470: convert 
iterable to list before determining if empty\n if is_iterator(data):\n data = list(data)\n\n if data and all(isinstance(e, tuple) for e in data):\n # we must be all tuples, otherwise don't construct\n # 10697\n from .multi import MultiIndex\n return MultiIndex.from_tuples(\n data, names=name or kwargs.get('names'))\n # other iterable of some kind\n subarr = com.asarray_tuplesafe(data, dtype=object)\n return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)\n\n \"\"\"\n NOTE for new Index creation:\n\n - _simple_new: It returns new Index with the same type as the caller.\n All metadata (such as name) must be provided by caller's responsibility.\n Using _shallow_copy is recommended because it fills these metadata\n otherwise specified.\n\n - _shallow_copy: It returns new Index with the same type (using\n _simple_new), but fills caller's metadata otherwise specified. Passed\n kwargs will overwrite corresponding metadata.\n\n - _shallow_copy_with_infer: It returns new Index inferring its type\n from passed values. It fills caller's metadata otherwise specified as the\n same as _shallow_copy.\n\n See each method's docstring.\n \"\"\"\n\n @classmethod\n def _simple_new(cls, values, name=None, dtype=None, **kwargs):\n \"\"\"\n We require that we have a dtype compat for the values. If we are passed\n a non-dtype compat, then coerce using the constructor.\n\n Must be careful not to recurse.\n \"\"\"\n if not hasattr(values, 'dtype'):\n if (values is None or not len(values)) and dtype is not None:\n values = np.empty(0, dtype=dtype)\n else:\n values = np.array(values, copy=False)\n if is_object_dtype(values):\n values = cls(values, name=name, dtype=dtype,\n **kwargs)._ndarray_values\n\n if isinstance(values, (ABCSeries, ABCIndexClass)):\n # Index._data must always be an ndarray.\n # This is no-copy for when _values is an ndarray,\n # which should be always at this point.\n values = np.asarray(values._values)\n\n result = object.__new__(cls)\n result._data = values\n # _index_data is a (temporary?) fix to ensure that the direct data\n # manipulation we do in `_libs/reduction.pyx` continues to work.\n # We need access to the actual ndarray, since we're messing with\n # data buffers and strides. 
We don't re-use `_ndarray_values`, since\n # we actually set this value too.\n result._index_data = values\n result.name = name\n for k, v in kwargs.items():\n setattr(result, k, v)\n return result._reset_identity()\n\n @cache_readonly\n def _constructor(self):\n return type(self)\n\n # --------------------------------------------------------------------\n # Index Internals Methods\n\n def _get_attributes_dict(self):\n \"\"\"\n Return an attributes dict for my class.\n \"\"\"\n return {k: getattr(self, k, None) for k in self._attributes}\n\n _index_shared_docs['_shallow_copy'] = \"\"\"\n Create a new Index with the same class as the caller, don't copy the\n data, use the same object attributes with passed in attributes taking\n precedence.\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n kwargs : updates the default attributes for this Index\n \"\"\"\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, values=None, **kwargs):\n if values is None:\n values = self.values\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n if not len(values) and 'dtype' not in kwargs:\n attributes['dtype'] = self.dtype\n\n # _simple_new expects an the type of self._data\n values = getattr(values, '_values', values)\n if isinstance(values, ABCDatetimeArray):\n # `self.values` returns `self` for tz-aware, so we need to unwrap\n # more specifically\n values = values.asi8\n\n return self._simple_new(values, **attributes)\n\n def _shallow_copy_with_infer(self, values, **kwargs):\n \"\"\"\n Create a new Index inferring the class with passed value, don't copy\n the data, use the same object attributes with passed in attributes\n taking precedence.\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n kwargs : updates the default attributes for this Index\n \"\"\"\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n attributes['copy'] = False\n if not len(values) and 'dtype' not in kwargs:\n attributes['dtype'] = self.dtype\n if self._infer_as_myclass:\n try:\n return self._constructor(values, **attributes)\n except (TypeError, ValueError):\n pass\n return Index(values, **attributes)\n\n def _update_inplace(self, result, **kwargs):\n # guard when called from IndexOpsMixin\n raise TypeError(\"Index can't be updated inplace\")\n\n def is_(self, other):\n \"\"\"\n More flexible, faster check like ``is`` but that works through views.\n\n Note: this is *not* the same as ``Index.identical()``, which checks\n that metadata is also the same.\n\n Parameters\n ----------\n other : object\n other object to compare against.\n\n Returns\n -------\n True if both have same underlying data, False otherwise : bool\n \"\"\"\n # use something other than None to be clearer\n return self._id is getattr(\n other, '_id', Ellipsis) and self._id is not None\n\n def _reset_identity(self):\n \"\"\"\n Initializes or resets ``_id`` attribute with new object.\n \"\"\"\n self._id = _Identity()\n return self\n\n def _cleanup(self):\n self._engine.clear_mapping()\n\n @cache_readonly\n def _engine(self):\n # property, for now, slow to look up\n return self._engine_type(lambda: self._ndarray_values, len(self))\n\n # --------------------------------------------------------------------\n # Array-Like Methods\n\n # ndarray compat\n def __len__(self):\n \"\"\"\n Return the length of the Index.\n \"\"\"\n return len(self._data)\n\n def 
__array__(self, dtype=None):\n \"\"\"\n The array interface, return my values.\n \"\"\"\n return np.asarray(self._data, dtype=dtype)\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc.\n \"\"\"\n result = lib.item_from_zerodim(result)\n if is_bool_dtype(result) or lib.is_scalar(result):\n return result\n\n attrs = self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n return Index(result, **attrs)\n\n @cache_readonly\n def dtype(self):\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n return self._data.dtype\n\n @cache_readonly\n def dtype_str(self):\n \"\"\"\n Return the dtype str of the underlying data.\n \"\"\"\n return str(self.dtype)\n\n def ravel(self, order='C'):\n \"\"\"\n Return an ndarray of the flattened values of the underlying data.\n\n Returns\n -------\n numpy.ndarray\n Flattened array.\n\n See Also\n --------\n numpy.ndarray.ravel\n \"\"\"\n return self._ndarray_values.ravel(order=order)\n\n def view(self, cls=None):\n\n # we need to see if we are subclassing an\n # index type here\n if cls is not None and not hasattr(cls, '_typ'):\n result = self._data.view(cls)\n else:\n result = self._shallow_copy()\n if isinstance(result, Index):\n result._id = self._id\n return result\n\n _index_shared_docs['astype'] = \"\"\"\n Create an Index with values cast to dtypes. The class of a new Index\n is determined by dtype. When conversion is impossible, a ValueError\n exception is raised.\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n Note that any signed integer `dtype` is treated as ``'int64'``,\n and any unsigned integer `dtype` is treated as ``'uint64'``,\n regardless of the size.\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and internal requirements on dtype are\n satisfied, the original data is used to create a new Index\n or the original Index is returned.\n\n .. 
versionadded:: 0.19.0\n\n Returns\n -------\n Index\n Index with values cast to specified dtype.\n \"\"\"\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n if is_dtype_equal(self.dtype, dtype):\n return self.copy() if copy else self\n\n elif is_categorical_dtype(dtype):\n from .category import CategoricalIndex\n return CategoricalIndex(self.values, name=self.name, dtype=dtype,\n copy=copy)\n elif is_datetime64tz_dtype(dtype):\n # TODO(GH-24559): Remove this block, use the following elif.\n # avoid FutureWarning from DatetimeIndex constructor.\n from pandas import DatetimeIndex\n tz = pandas_dtype(dtype).tz\n return (DatetimeIndex(np.asarray(self))\n .tz_localize(\"UTC\").tz_convert(tz))\n\n elif is_extension_array_dtype(dtype):\n return Index(np.asarray(self), dtype=dtype, copy=copy)\n\n try:\n if is_datetime64tz_dtype(dtype):\n from pandas import DatetimeIndex\n return DatetimeIndex(self.values, name=self.name, dtype=dtype,\n copy=copy)\n return Index(self.values.astype(dtype, copy=copy), name=self.name,\n dtype=dtype)\n except (TypeError, ValueError):\n msg = 'Cannot cast {name} to dtype {dtype}'\n raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))\n\n _index_shared_docs['take'] = \"\"\"\n Return a new %(klass)s of the values selected by the indices.\n\n For internal compatibility with numpy arrays.\n\n Parameters\n ----------\n indices : list\n Indices to be taken\n axis : int, optional\n The axis over which to select values, always 0.\n allow_fill : bool, default True\n fill_value : bool, default None\n If allow_fill=True and fill_value is not None, indices specified by\n -1 is regarded as NA. If Index doesn't hold NA, raise ValueError\n\n Returns\n -------\n numpy.ndarray\n Elements of given indices.\n\n See Also\n --------\n numpy.ndarray.take\n \"\"\"\n\n @Appender(_index_shared_docs['take'] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True,\n fill_value=None, **kwargs):\n if kwargs:\n nv.validate_take(tuple(), kwargs)\n indices = ensure_platform_int(indices)\n if self._can_hold_na:\n taken = self._assert_take_fillable(self.values, indices,\n allow_fill=allow_fill,\n fill_value=fill_value,\n na_value=self._na_value)\n else:\n if allow_fill and fill_value is not None:\n msg = 'Unable to fill values because {0} cannot contain NA'\n raise ValueError(msg.format(self.__class__.__name__))\n taken = self.values.take(indices)\n return self._shallow_copy(taken)\n\n def _assert_take_fillable(self, values, indices, allow_fill=True,\n fill_value=None, na_value=np.nan):\n \"\"\"\n Internal method to handle NA filling of take.\n \"\"\"\n indices = ensure_platform_int(indices)\n\n # only fill if we are passing a non-None fill_value\n if allow_fill and fill_value is not None:\n if (indices < -1).any():\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n raise ValueError(msg)\n taken = algos.take(values,\n indices,\n allow_fill=allow_fill,\n fill_value=na_value)\n else:\n taken = values.take(indices)\n return taken\n\n _index_shared_docs['repeat'] = \"\"\"\n Repeat elements of a %(klass)s.\n\n Returns a new %(klass)s where each element of the current %(klass)s\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n %(klass)s.\n axis : None\n Must be ``None``. 
Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n repeated_index : %(klass)s\n Newly created %(klass)s with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n >>> idx.repeat(2)\n Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')\n >>> idx.repeat([1, 2, 3])\n Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')\n \"\"\"\n\n @Appender(_index_shared_docs['repeat'] % _index_doc_kwargs)\n def repeat(self, repeats, axis=None):\n nv.validate_repeat(tuple(), dict(axis=axis))\n return self._shallow_copy(self._values.repeat(repeats))\n\n # --------------------------------------------------------------------\n # Copying Methods\n\n _index_shared_docs['copy'] = \"\"\"\n Make a copy of this object. Name and dtype sets those attributes on\n the new object.\n\n Parameters\n ----------\n name : string, optional\n deep : boolean, default False\n dtype : numpy dtype or pandas type\n\n Returns\n -------\n copy : Index\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n \"\"\"\n\n @Appender(_index_shared_docs['copy'])\n def copy(self, name=None, deep=False, dtype=None, **kwargs):\n if deep:\n new_index = self._shallow_copy(self._data.copy())\n else:\n new_index = self._shallow_copy()\n\n names = kwargs.get('names')\n names = self._validate_names(name=name, names=names, deep=deep)\n new_index = new_index.set_names(names)\n\n if dtype:\n new_index = new_index.astype(dtype)\n return new_index\n\n def __copy__(self, **kwargs):\n return self.copy(**kwargs)\n\n def __deepcopy__(self, memo=None):\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. 
Unused\n \"\"\"\n if memo is None:\n memo = {}\n return self.copy(deep=True)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def __unicode__(self):\n \"\"\"\n Return a unicode string representation for this object.\n \"\"\"\n klass = self.__class__.__name__\n data = self._format_data()\n attrs = self._format_attrs()\n space = self._format_space()\n\n prepr = (\",%s\" %\n space).join(\"%s=%s\" % (k, v) for k, v in attrs)\n\n # no data provided, just attributes\n if data is None:\n data = ''\n\n res = \"%s(%s%s)\" % (klass, data, prepr)\n\n return res\n\n def _format_space(self):\n\n # using space here controls if the attributes\n # are line separated or not (the default)\n\n # max_seq_items = get_option('display.max_seq_items')\n # if len(self) > max_seq_items:\n # space = \"\\n%s\" % (' ' * (len(klass) + 1))\n return \" \"\n\n @property\n def _formatter_func(self):\n \"\"\"\n Return the formatter function.\n \"\"\"\n return default_pprint\n\n def _format_data(self, name=None):\n \"\"\"\n Return the formatted data as a unicode string.\n \"\"\"\n\n # do we want to justify (only do so for non-objects)\n is_justify = not (self.inferred_type in ('string', 'unicode') or\n (self.inferred_type == 'categorical' and\n is_object_dtype(self.categories)))\n\n return format_object_summary(self, self._formatter_func,\n is_justify=is_justify, name=name)\n\n def _format_attrs(self):\n \"\"\"\n Return a list of tuples of the (attr,formatted_value).\n \"\"\"\n return format_object_attrs(self)\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return self.values\n\n def format(self, name=False, formatter=None, **kwargs):\n \"\"\"\n Render a string representation of the Index.\n \"\"\"\n header = []\n if name:\n header.append(pprint_thing(self.name,\n escape_chars=('\\t', '\\r', '\\n')) if\n self.name is not None else '')\n\n if formatter is not None:\n return header + list(self.map(formatter))\n\n return self._format_with_header(header, **kwargs)\n\n def _format_with_header(self, header, na_rep='NaN', **kwargs):\n values = self.values\n\n from pandas.io.formats.format import format_array\n\n if is_categorical_dtype(values.dtype):\n values = np.array(values)\n\n elif is_object_dtype(values.dtype):\n values = lib.maybe_convert_objects(values, safe=1)\n\n if is_object_dtype(values.dtype):\n result = [pprint_thing(x, escape_chars=('\\t', '\\r', '\\n'))\n for x in values]\n\n # could have nans\n mask = isna(values)\n if mask.any():\n result = np.array(result)\n result[mask] = na_rep\n result = result.tolist()\n\n else:\n result = _trim_front(format_array(values, None, justify='left'))\n return header + result\n\n def to_native_types(self, slicer=None, **kwargs):\n \"\"\"\n Format specified values of `self` and return them.\n\n Parameters\n ----------\n slicer : int, array-like\n An indexer into `self` that specifies which values\n are used in the formatting process.\n kwargs : dict\n Options for specifying how the values should be formatted.\n These options include the following:\n\n 1) na_rep : str\n The value that serves as a placeholder for NULL values\n 2) quoting : bool or None\n Whether or not there are quoted values in `self`\n 3) date_format : str\n The format used to represent date-like values\n\n Returns\n -------\n numpy.ndarray\n Formatted values.\n \"\"\"\n\n values = self\n if slicer is not None:\n values = values[slicer]\n return values._format_native_types(**kwargs)\n\n def _format_native_types(self, na_rep='', quoting=None, 
**kwargs):\n \"\"\"\n Actually format specific types of the index.\n \"\"\"\n mask = isna(self)\n if not self.is_object() and not quoting:\n values = np.asarray(self).astype(str)\n else:\n values = np.array(self, dtype=object, copy=True)\n\n values[mask] = na_rep\n return values\n\n def _summary(self, name=None):\n \"\"\"\n Return a summarized representation.\n\n Parameters\n ----------\n name : str\n name to use in the summary representation\n\n Returns\n -------\n String with a summarized representation of the index\n \"\"\"\n if len(self) > 0:\n head = self[0]\n if hasattr(head, 'format') and not isinstance(head, str):\n head = head.format()\n tail = self[-1]\n if hasattr(tail, 'format') and not isinstance(tail, str):\n tail = tail.format()\n index_summary = ', %s to %s' % (pprint_thing(head),\n pprint_thing(tail))\n else:\n index_summary = ''\n\n if name is None:\n name = type(self).__name__\n return '%s: %s entries%s' % (name, len(self), index_summary)\n\n def summary(self, name=None):\n \"\"\"\n Return a summarized representation.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"'summary' is deprecated and will be removed in a \"\n \"future version.\", FutureWarning, stacklevel=2)\n return self._summary(name)\n\n # --------------------------------------------------------------------\n # Conversion Methods\n\n def to_flat_index(self):\n \"\"\"\n Identity method.\n\n .. versionadded:: 0.24.0\n\n This is implemented for compatability with subclass implementations\n when chaining.\n\n Returns\n -------\n pd.Index\n Caller.\n\n See Also\n --------\n MultiIndex.to_flat_index : Subclass implementation.\n \"\"\"\n return self\n\n def to_series(self, index=None, name=None):\n \"\"\"\n Create a Series with both index and values equal to the index keys\n useful with map for returning an indexer based on an index.\n\n Parameters\n ----------\n index : Index, optional\n index of resulting Series. If None, defaults to original index\n name : string, optional\n name of resulting Series. If None, defaults to name of original\n index\n\n Returns\n -------\n Series : dtype will be based on the type of the Index values.\n \"\"\"\n\n from pandas import Series\n\n if index is None:\n index = self._shallow_copy()\n if name is None:\n name = self.name\n\n return Series(self.values.copy(), index=index, name=name)\n\n def to_frame(self, index=True, name=None):\n \"\"\"\n Create a DataFrame with a column containing the Index.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n index : boolean, default True\n Set the index of the returned DataFrame as the original Index.\n\n name : object, default None\n The passed name should substitute for the index name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame containing the original Index data.\n\n See Also\n --------\n Index.to_series : Convert an Index to a Series.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')\n >>> idx.to_frame()\n animal\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n\n By default, the original Index is reused. 
To enforce a new Index:\n\n >>> idx.to_frame(index=False)\n animal\n 0 Ant\n 1 Bear\n 2 Cow\n\n To override the name of the resulting column, specify `name`:\n\n >>> idx.to_frame(index=False, name='zoo')\n zoo\n 0 Ant\n 1 Bear\n 2 Cow\n \"\"\"\n\n from pandas import DataFrame\n if name is None:\n name = self.name or 0\n result = DataFrame({name: self._values.copy()})\n\n if index:\n result.index = self\n return result\n\n # --------------------------------------------------------------------\n # Name-Centric Methods\n\n def _validate_names(self, name=None, names=None, deep=False):\n \"\"\"\n Handles the quirks of having a singular 'name' parameter for general\n Index and plural 'names' parameter for MultiIndex.\n \"\"\"\n from copy import deepcopy\n if names is not None and name is not None:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif names is None and name is None:\n return deepcopy(self.names) if deep else self.names\n elif names is not None:\n if not is_list_like(names):\n raise TypeError(\"Must pass list-like as `names`.\")\n return names\n else:\n if not is_list_like(name):\n return [name]\n return name\n\n def _get_names(self):\n return FrozenList((self.name, ))\n\n def _set_names(self, values, level=None):\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n\n Raises\n ------\n TypeError if each name is not hashable.\n \"\"\"\n if not is_list_like(values):\n raise ValueError('Names must be a list-like')\n if len(values) != 1:\n raise ValueError('Length of new names must be 1, got %d' %\n len(values))\n\n # GH 20527\n # All items in 'name' need to be hashable:\n for name in values:\n if not is_hashable(name):\n raise TypeError('{}.name must be a hashable type'\n .format(self.__class__.__name__))\n self.name = values[0]\n\n names = property(fset=_set_names, fget=_get_names)\n\n def set_names(self, names, level=None, inplace=False):\n \"\"\"\n Set Index or MultiIndex name.\n\n Able to set new names partially and by level.\n\n Parameters\n ----------\n names : label or list of label\n Name(s) to set.\n level : int, label or list of int or label, optional\n If the index is a MultiIndex, level(s) to set (None for all\n levels). Otherwise level must be None.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index\n The same type as the caller or None if inplace is True.\n\n See Also\n --------\n Index.rename : Able to set new names without level.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n >>> idx.set_names('quarter')\n Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... 
[2018, 2019]])\n >>> idx\n MultiIndex(levels=[['cobra', 'python'], [2018, 2019]],\n codes=[[1, 1, 0, 0], [0, 1, 0, 1]])\n >>> idx.set_names(['kind', 'year'], inplace=True)\n >>> idx\n MultiIndex(levels=[['cobra', 'python'], [2018, 2019]],\n codes=[[1, 1, 0, 0], [0, 1, 0, 1]],\n names=['kind', 'year'])\n >>> idx.set_names('species', level=0)\n MultiIndex(levels=[['cobra', 'python'], [2018, 2019]],\n codes=[[1, 1, 0, 0], [0, 1, 0, 1]],\n names=['species', 'year'])\n \"\"\"\n\n if level is not None and not isinstance(self, ABCMultiIndex):\n raise ValueError('Level must be None for non-MultiIndex')\n\n if level is not None and not is_list_like(level) and is_list_like(\n names):\n msg = \"Names must be a string when a single level is provided.\"\n raise TypeError(msg)\n\n if not is_list_like(names) and level is None and self.nlevels > 1:\n raise TypeError(\"Must pass list-like as `names`.\")\n\n if not is_list_like(names):\n names = [names]\n if level is not None and not is_list_like(level):\n level = [level]\n\n if inplace:\n idx = self\n else:\n idx = self._shallow_copy()\n idx._set_names(names, level=level)\n if not inplace:\n return idx\n\n def rename(self, name, inplace=False):\n \"\"\"\n Alter Index or MultiIndex name.\n\n Able to set new names without level. Defaults to returning new index.\n Length of names must match number of levels in MultiIndex.\n\n Parameters\n ----------\n name : label or list of labels\n Name(s) to set.\n inplace : boolean, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index\n The same type as the caller or None if inplace is True.\n\n See Also\n --------\n Index.set_names : Able to set new names partially and by level.\n\n Examples\n --------\n >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')\n >>> idx.rename('grade')\n Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... [2018, 2019]],\n ... 
names=['kind', 'year'])\n >>> idx\n MultiIndex(levels=[['cobra', 'python'], [2018, 2019]],\n codes=[[1, 1, 0, 0], [0, 1, 0, 1]],\n names=['kind', 'year'])\n >>> idx.rename(['species', 'year'])\n MultiIndex(levels=[['cobra', 'python'], [2018, 2019]],\n codes=[[1, 1, 0, 0], [0, 1, 0, 1]],\n names=['species', 'year'])\n >>> idx.rename('species')\n Traceback (most recent call last):\n TypeError: Must pass list-like as `names`.\n \"\"\"\n return self.set_names([name], inplace=inplace)\n\n # --------------------------------------------------------------------\n # Level-Centric Methods\n\n @property\n def nlevels(self):\n return 1\n\n def _sort_levels_monotonic(self):\n \"\"\"\n Compat with MultiIndex.\n \"\"\"\n return self\n\n def _validate_index_level(self, level):\n \"\"\"\n Validate index level.\n\n For single-level Index getting level number is a no-op, but some\n verification must be done like in MultiIndex.\n\n \"\"\"\n if isinstance(level, int):\n if level < 0 and level != -1:\n raise IndexError(\"Too many levels: Index has only 1 level,\"\n \" %d is not a valid level number\" % (level, ))\n elif level > 0:\n raise IndexError(\"Too many levels:\"\n \" Index has only 1 level, not %d\" %\n (level + 1))\n elif level != self.name:\n raise KeyError('Level %s must be same as name (%s)' %\n (level, self.name))\n\n def _get_level_number(self, level):\n self._validate_index_level(level)\n return 0\n\n def sortlevel(self, level=None, ascending=True, sort_remaining=None):\n \"\"\"\n For internal compatibility with with the Index API.\n\n Sort the Index. This is for compat with MultiIndex\n\n Parameters\n ----------\n ascending : boolean, default True\n False to sort in descending order\n\n level, sort_remaining are compat parameters\n\n Returns\n -------\n Index\n \"\"\"\n return self.sort_values(return_indexer=True, ascending=ascending)\n\n def _get_level_values(self, level):\n \"\"\"\n Return an Index of values for requested level.\n\n This is primarily useful to get an individual level of values from a\n MultiIndex, but is provided on Index as well for compatability.\n\n Parameters\n ----------\n level : int or str\n It is either the integer position or the name of the level.\n\n Returns\n -------\n Index\n Calling object, as there is only one level in the Index.\n\n See Also\n --------\n MultiIndex.get_level_values : Get values for a level of a MultiIndex.\n\n Notes\n -----\n For Index, level should be 0, since there are no multiple levels.\n\n Examples\n --------\n\n >>> idx = pd.Index(list('abc'))\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n\n Get level values by supplying `level` as integer:\n\n >>> idx.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n self._validate_index_level(level)\n return self\n\n get_level_values = _get_level_values\n\n def droplevel(self, level=0):\n \"\"\"\n Return index with requested level(s) removed.\n\n If resulting index has only 1 level left, the result will be\n of Index type, not MultiIndex.\n\n .. 
versionadded:: 0.23.1 (support for non-MultiIndex)\n\n Parameters\n ----------\n level : int, str, or list-like, default 0\n If a string is given, must be the name of a level\n If list-like, elements must be names or indexes of levels.\n\n Returns\n -------\n Index or MultiIndex\n \"\"\"\n if not isinstance(level, (tuple, list)):\n level = [level]\n\n levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]\n\n if len(level) == 0:\n return self\n if len(level) >= self.nlevels:\n raise ValueError(\"Cannot remove {} levels from an index with {} \"\n \"levels: at least one level must be \"\n \"left.\".format(len(level), self.nlevels))\n # The two checks above guarantee that here self is a MultiIndex\n\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n\n for i in levnums:\n new_levels.pop(i)\n new_codes.pop(i)\n new_names.pop(i)\n\n if len(new_levels) == 1:\n\n # set nan if needed\n mask = new_codes[0] == -1\n result = new_levels[0].take(new_codes[0])\n if mask.any():\n result = result.putmask(mask, np.nan)\n\n result.name = new_names[0]\n return result\n else:\n from .multi import MultiIndex\n return MultiIndex(levels=new_levels, codes=new_codes,\n names=new_names, verify_integrity=False)\n\n _index_shared_docs['_get_grouper_for_level'] = \"\"\"\n Get index grouper corresponding to an index level\n\n Parameters\n ----------\n mapper: Group mapping function or None\n Function mapping index values to groups\n level : int or None\n Index level\n\n Returns\n -------\n grouper : Index\n Index of values to group on.\n labels : ndarray of int or None\n Array of locations in level_index.\n uniques : Index or None\n Index of unique values for level.\n \"\"\"\n\n @Appender(_index_shared_docs['_get_grouper_for_level'])\n def _get_grouper_for_level(self, mapper, level=None):\n assert level is None or level == 0\n if mapper is None:\n grouper = self\n else:\n grouper = self.map(mapper)\n\n return grouper, None, None\n\n # --------------------------------------------------------------------\n # Introspection Methods\n\n @property\n def is_monotonic(self):\n \"\"\"\n Alias for is_monotonic_increasing.\n \"\"\"\n return self.is_monotonic_increasing\n\n @property\n def is_monotonic_increasing(self):\n \"\"\"\n Return if the index is monotonic increasing (only equal or\n increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3]).is_monotonic_increasing\n True\n >>> Index([1, 2, 2]).is_monotonic_increasing\n True\n >>> Index([1, 3, 2]).is_monotonic_increasing\n False\n \"\"\"\n return self._engine.is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self):\n \"\"\"\n Return if the index is monotonic decreasing (only equal or\n decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1]).is_monotonic_decreasing\n True\n >>> Index([3, 2, 2]).is_monotonic_decreasing\n True\n >>> Index([3, 1, 2]).is_monotonic_decreasing\n False\n \"\"\"\n return self._engine.is_monotonic_decreasing\n\n @property\n def _is_strictly_monotonic_increasing(self):\n \"\"\"\n Return if the index is strictly monotonic increasing\n (only increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3])._is_strictly_monotonic_increasing\n True\n >>> Index([1, 2, 2])._is_strictly_monotonic_increasing\n False\n >>> Index([1, 3, 2])._is_strictly_monotonic_increasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_increasing\n\n @property\n def _is_strictly_monotonic_decreasing(self):\n \"\"\"\n Return if the index is strictly monotonic 
decreasing\n (only decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing\n True\n >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing\n False\n >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_decreasing\n\n def is_lexsorted_for_tuple(self, tup):\n return True\n\n @cache_readonly\n def is_unique(self):\n \"\"\"\n Return if the index has unique values.\n \"\"\"\n return self._engine.is_unique\n\n @property\n def has_duplicates(self):\n return not self.is_unique\n\n def is_boolean(self):\n return self.inferred_type in ['boolean']\n\n def is_integer(self):\n return self.inferred_type in ['integer']\n\n def is_floating(self):\n return self.inferred_type in ['floating', 'mixed-integer-float']\n\n def is_numeric(self):\n return self.inferred_type in ['integer', 'floating']\n\n def is_object(self):\n return is_object_dtype(self.dtype)\n\n def is_categorical(self):\n \"\"\"\n Check if the Index holds categorical data.\n\n Returns\n -------\n boolean\n True if the Index is categorical.\n\n See Also\n --------\n CategoricalIndex : Index for categorical data.\n\n Examples\n --------\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... \"Watermelon\"]).astype(\"category\")\n >>> idx.is_categorical()\n True\n\n >>> idx = pd.Index([1, 3, 5, 7])\n >>> idx.is_categorical()\n False\n\n >>> s = pd.Series([\"Peter\", \"Victor\", \"Elisabeth\", \"Mar\"])\n >>> s\n 0 Peter\n 1 Victor\n 2 Elisabeth\n 3 Mar\n dtype: object\n >>> s.index.is_categorical()\n False\n \"\"\"\n return self.inferred_type in ['categorical']\n\n def is_interval(self):\n return self.inferred_type in ['interval']\n\n def is_mixed(self):\n return self.inferred_type in ['mixed']\n\n def holds_integer(self):\n return self.inferred_type in ['integer', 'mixed-integer']\n\n @cache_readonly\n def inferred_type(self):\n \"\"\"\n Return a string of the type inferred from the values.\n \"\"\"\n return lib.infer_dtype(self, skipna=False)\n\n @cache_readonly\n def is_all_dates(self):\n if self._data is None:\n return False\n return is_datetime_array(ensure_object(self.values))\n\n # --------------------------------------------------------------------\n # Pickle Methods\n\n def __reduce__(self):\n d = dict(data=self._data)\n d.update(self._get_attributes_dict())\n return _new_Index, (self.__class__, d), None\n\n def __setstate__(self, state):\n \"\"\"\n Necessary for making this object picklable.\n \"\"\"\n\n if isinstance(state, dict):\n self._data = state.pop('data')\n for k, v in state.items():\n setattr(self, k, v)\n\n elif isinstance(state, tuple):\n\n if len(state) == 2:\n nd_state, own_state = state\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n self.name = own_state[0]\n\n else: # pragma: no cover\n data = np.empty(state)\n np.ndarray.__setstate__(data, state)\n\n self._data = data\n self._reset_identity()\n else:\n raise Exception(\"invalid pickle state\")\n\n _unpickle_compat = __setstate__\n\n # --------------------------------------------------------------------\n # Null Handling Methods\n\n _na_value = np.nan\n \"\"\"The expected NA value to use with this index.\"\"\"\n\n @cache_readonly\n def _isnan(self):\n \"\"\"\n Return if each value is NaN.\n \"\"\"\n if self._can_hold_na:\n return isna(self)\n else:\n # shouldn't reach to this condition by checking hasnans beforehand\n values = np.empty(len(self), dtype=np.bool_)\n values.fill(False)\n return values\n\n 
@cache_readonly\n def _nan_idxs(self):\n if self._can_hold_na:\n w, = self._isnan.nonzero()\n return w\n else:\n return np.array([], dtype=np.int64)\n\n @cache_readonly\n def hasnans(self):\n \"\"\"\n Return if I have any nans; enables various perf speedups.\n \"\"\"\n if self._can_hold_na:\n return bool(self._isnan.any())\n else:\n return False\n\n def isna(self):\n \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get\n mapped to ``True`` values.\n Everything else get mapped to ``False`` values. Characters such as\n empty strings `''` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n numpy.ndarray\n A boolean array of whether my values are NA.\n\n See Also\n --------\n Index.notna : Boolean inverse of isna.\n Index.dropna : Omit entries with missing values.\n isna : Top-level isna.\n Series.isna : Detect missing values in Series object.\n\n Examples\n --------\n Show which entries in a pandas.Index are NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.isna()\n array([False, False, True], dtype=bool)\n\n Empty strings are not considered NA values. None is considered an NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.isna()\n array([False, False, False, True], dtype=bool)\n\n For datetimes, `NaT` (Not a Time) is considered as an NA value.\n\n >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),\n ... pd.Timestamp(''), None, pd.NaT])\n >>> idx\n DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],\n dtype='datetime64[ns]', freq=None)\n >>> idx.isna()\n array([False, True, True, True], dtype=bool)\n \"\"\"\n return self._isnan\n isnull = isna\n\n def notna(self):\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to ``True``. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``\n values.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n numpy.ndarray\n Boolean array to indicate which entries are not NA.\n\n See Also\n --------\n Index.notnull : Alias of notna.\n Index.isna: Inverse of notna.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in an Index are not NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.notna()\n array([ True, True, False])\n\n Empty strings are not considered NA values. None is considered a NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.notna()\n array([ True, True, True, False])\n \"\"\"\n return ~self.isna()\n notnull = notna\n\n _index_shared_docs['fillna'] = \"\"\"\n Fill NA/NaN values with the specified value\n\n Parameters\n ----------\n value : scalar\n Scalar value to use to fill holes (e.g. 
0).\n This value cannot be a list-likes.\n downcast : dict, default is None\n a dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible)\n\n Returns\n -------\n filled : Index\n \"\"\"\n\n @Appender(_index_shared_docs['fillna'])\n def fillna(self, value=None, downcast=None):\n self._assert_can_do_op(value)\n if self.hasnans:\n result = self.putmask(self._isnan, value)\n if downcast is None:\n # no need to care metadata other than name\n # because it can't have freq if\n return Index(result, name=self.name)\n return self._shallow_copy()\n\n _index_shared_docs['dropna'] = \"\"\"\n Return Index without NA/NaN values\n\n Parameters\n ----------\n how : {'any', 'all'}, default 'any'\n If the Index is a MultiIndex, drop the value when any or all levels\n are NaN.\n\n Returns\n -------\n valid : Index\n \"\"\"\n\n @Appender(_index_shared_docs['dropna'])\n def dropna(self, how='any'):\n if how not in ('any', 'all'):\n raise ValueError(\"invalid how option: {0}\".format(how))\n\n if self.hasnans:\n return self._shallow_copy(self.values[~self._isnan])\n return self._shallow_copy()\n\n # --------------------------------------------------------------------\n # Uniqueness Methods\n\n _index_shared_docs['index_unique'] = (\n \"\"\"\n Return unique values in the index. Uniques are returned in order\n of appearance, this does NOT sort.\n\n Parameters\n ----------\n level : int or str, optional, default None\n Only return values from specified level (for MultiIndex)\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n Index without duplicates\n\n See Also\n --------\n unique\n Series.unique\n \"\"\")\n\n @Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)\n def unique(self, level=None):\n if level is not None:\n self._validate_index_level(level)\n result = super().unique()\n return self._shallow_copy(result)\n\n def drop_duplicates(self, keep='first'):\n \"\"\"\n Return Index with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n Returns\n -------\n deduplicated : Index\n\n See Also\n --------\n Series.drop_duplicates : Equivalent method on Series.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Index.duplicated : Related method on Index, indicating duplicate\n Index values.\n\n Examples\n --------\n Generate an pandas.Index with duplicate values.\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])\n\n The `keep` parameter controls which duplicate values are removed.\n The value 'first' keeps the first occurrence for each\n set of duplicated entries. 
The default value of keep is 'first'.\n\n >>> idx.drop_duplicates(keep='first')\n Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')\n\n The value 'last' keeps the last occurrence for each set of duplicated\n entries.\n\n >>> idx.drop_duplicates(keep='last')\n Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')\n\n The value ``False`` discards all sets of duplicated entries.\n\n >>> idx.drop_duplicates(keep=False)\n Index(['cow', 'beetle', 'hippo'], dtype='object')\n \"\"\"\n return super().drop_duplicates(keep=keep)\n\n def duplicated(self, keep='first'):\n \"\"\"\n Indicate duplicate index values.\n\n Duplicated values are indicated as ``True`` values in the resulting\n array. Either all duplicates, all except the first, or all except the\n last occurrence of duplicates can be indicated.\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n The value or values in a set of duplicates to mark as missing.\n\n - 'first' : Mark duplicates as ``True`` except for the first\n occurrence.\n - 'last' : Mark duplicates as ``True`` except for the last\n occurrence.\n - ``False`` : Mark all duplicates as ``True``.\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.duplicated : Equivalent method on pandas.Series.\n DataFrame.duplicated : Equivalent method on pandas.DataFrame.\n Index.drop_duplicates : Remove duplicate values from Index.\n\n Examples\n --------\n By default, for each set of duplicated values, the first occurrence is\n set to False and all others to True:\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])\n >>> idx.duplicated()\n array([False, False, True, False, True])\n\n which is equivalent to\n\n >>> idx.duplicated(keep='first')\n array([False, False, True, False, True])\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True:\n\n >>> idx.duplicated(keep='last')\n array([ True, False, True, False, False])\n\n By setting keep on ``False``, all duplicates are True:\n\n >>> idx.duplicated(keep=False)\n array([ True, False, True, False, True])\n \"\"\"\n return super().duplicated(keep=keep)\n\n def get_duplicates(self):\n \"\"\"\n Extract duplicated index elements.\n\n .. deprecated:: 0.23.0\n Use idx[idx.duplicated()].unique() instead\n\n Returns a sorted list of index elements which appear more than once in\n the index.\n\n Returns\n -------\n array-like\n List of duplicated indexes.\n\n See Also\n --------\n Index.duplicated : Return boolean array denoting duplicates.\n Index.drop_duplicates : Return Index with duplicates removed.\n\n Examples\n --------\n\n Works on different Index of types.\n\n >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates() # doctest: +SKIP\n [2, 3]\n\n Note that for a DatetimeIndex, it does not return a list but a new\n DatetimeIndex:\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',\n ... '2018-01-03', '2018-01-04', '2018-01-04'],\n ... format='%Y-%m-%d')\n >>> pd.Index(dates).get_duplicates() # doctest: +SKIP\n DatetimeIndex(['2018-01-03', '2018-01-04'],\n dtype='datetime64[ns]', freq=None)\n\n Sorts duplicated elements even when indexes are unordered.\n\n >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates() # doctest: +SKIP\n [2, 3]\n\n Return empty array-like structure when all elements are unique.\n\n >>> pd.Index([1, 2, 3, 4]).get_duplicates() # doctest: +SKIP\n []\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],\n ... 
format='%Y-%m-%d')\n >>> pd.Index(dates).get_duplicates() # doctest: +SKIP\n DatetimeIndex([], dtype='datetime64[ns]', freq=None)\n \"\"\"\n warnings.warn(\"'get_duplicates' is deprecated and will be removed in \"\n \"a future release. You can use \"\n \"idx[idx.duplicated()].unique() instead\",\n FutureWarning, stacklevel=2)\n\n return self[self.duplicated()].unique()\n\n def _get_unique_index(self, dropna=False):\n \"\"\"\n Returns an index containing unique values.\n\n Parameters\n ----------\n dropna : bool\n If True, NaN values are dropped.\n\n Returns\n -------\n uniques : index\n \"\"\"\n if self.is_unique and not dropna:\n return self\n\n values = self.values\n\n if not self.is_unique:\n values = self.unique()\n\n if dropna:\n try:\n if self.hasnans:\n values = values[~isna(values)]\n except NotImplementedError:\n pass\n\n return self._shallow_copy(values)\n\n # --------------------------------------------------------------------\n # Arithmetic & Logical Methods\n\n def __add__(self, other):\n if isinstance(other, (ABCSeries, ABCDataFrame)):\n return NotImplemented\n return Index(np.array(self) + other)\n\n def __radd__(self, other):\n return Index(other + np.array(self))\n\n def __iadd__(self, other):\n # alias for __add__\n return self + other\n\n def __sub__(self, other):\n return Index(np.array(self) - other)\n\n def __rsub__(self, other):\n return Index(other - np.array(self))\n\n def __and__(self, other):\n return self.intersection(other)\n\n def __or__(self, other):\n return self.union(other)\n\n def __xor__(self, other):\n return self.symmetric_difference(other)\n\n def __nonzero__(self):\n raise ValueError(\"The truth value of a {0} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n .format(self.__class__.__name__))\n\n __bool__ = __nonzero__\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n def _get_reconciled_name_object(self, other):\n \"\"\"\n If the result of a set operation will be self,\n return self, unless the name changes, in which\n case make a shallow copy of self.\n \"\"\"\n name = get_op_result_name(self, other)\n if self.name != name:\n return self._shallow_copy(name=name)\n return self\n\n def _validate_sort_keyword(self, sort):\n if sort not in [None, False]:\n raise ValueError(\"The 'sort' keyword only takes the values of \"\n \"None or False; {0} was passed.\".format(sort))\n\n def union(self, other, sort=None):\n \"\"\"\n Form the union of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : bool or None, default None\n Whether to sort the resulting Index.\n\n * None : Sort the result, except when\n\n 1. `self` and `other` are equal.\n 2. `self` or `other` has length 0.\n 3. Some values in `self` or `other` cannot be compared.\n A RuntimeWarning is issued in this case.\n\n * False : do not sort the result.\n\n .. versionadded:: 0.24.0\n\n .. 
versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n union : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.union(idx2)\n Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if len(other) == 0 or self.equals(other):\n return self._get_reconciled_name_object(other)\n\n if len(self) == 0:\n return other._get_reconciled_name_object(self)\n\n # TODO: is_dtype_union_equal is a hack around\n # 1. buggy set ops with duplicates (GH #13432)\n # 2. CategoricalIndex lacking setops (GH #10186)\n # Once those are fixed, this workaround can be removed\n if not is_dtype_union_equal(self.dtype, other.dtype):\n this = self.astype('O')\n other = other.astype('O')\n return this.union(other, sort=sort)\n\n # TODO(EA): setops-refactor, clean all this up\n if is_period_dtype(self) or is_datetime64tz_dtype(self):\n lvals = self._ndarray_values\n else:\n lvals = self._values\n if is_period_dtype(other) or is_datetime64tz_dtype(other):\n rvals = other._ndarray_values\n else:\n rvals = other._values\n\n if sort is None and self.is_monotonic and other.is_monotonic:\n try:\n result = self._outer_indexer(lvals, rvals)[0]\n except TypeError:\n # incomparable objects\n result = list(lvals)\n\n # worth making this faster? a very unusual case\n value_set = set(lvals)\n result.extend([x for x in rvals if x not in value_set])\n else:\n indexer = self.get_indexer(other)\n indexer, = (indexer == -1).nonzero()\n\n if len(indexer) > 0:\n other_diff = algos.take_nd(rvals, indexer,\n allow_fill=False)\n result = _concat._concat_compat((lvals, other_diff))\n\n else:\n result = lvals\n\n if sort is None:\n try:\n result = sorting.safe_sort(result)\n except TypeError as e:\n warnings.warn(\"{}, sort order is undefined for \"\n \"incomparable objects\".format(e),\n RuntimeWarning, stacklevel=3)\n\n # for subclasses\n return self._wrap_setop_result(other, result)\n\n def _wrap_setop_result(self, other, result):\n return self._constructor(result, name=get_op_result_name(self, other))\n\n def intersection(self, other, sort=False):\n \"\"\"\n Form the intersection of two Index objects.\n\n This returns a new Index with elements common to the index and `other`.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n .. versionadded:: 0.24.0\n\n .. 
versionchanged:: 0.24.1\n\n Changed the default from ``True`` to ``False``, to match\n the behaviour of 0.23.4 and earlier.\n\n Returns\n -------\n intersection : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.intersection(idx2)\n Int64Index([3, 4], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if self.equals(other):\n return self._get_reconciled_name_object(other)\n\n if not is_dtype_equal(self.dtype, other.dtype):\n this = self.astype('O')\n other = other.astype('O')\n return this.intersection(other, sort=sort)\n\n # TODO(EA): setops-refactor, clean all this up\n if is_period_dtype(self):\n lvals = self._ndarray_values\n else:\n lvals = self._values\n if is_period_dtype(other):\n rvals = other._ndarray_values\n else:\n rvals = other._values\n\n if self.is_monotonic and other.is_monotonic:\n try:\n result = self._inner_indexer(lvals, rvals)[0]\n return self._wrap_setop_result(other, result)\n except TypeError:\n pass\n\n try:\n indexer = Index(rvals).get_indexer(lvals)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n except Exception:\n # duplicates\n indexer = algos.unique1d(\n Index(rvals).get_indexer_non_unique(lvals)[0])\n indexer = indexer[indexer != -1]\n\n taken = other.take(indexer)\n\n if sort is None:\n taken = sorting.safe_sort(taken.values)\n if self.name != other.name:\n name = None\n else:\n name = self.name\n return self._shallow_copy(taken, name=name)\n\n if self.name != other.name:\n taken.name = None\n\n return taken\n\n def difference(self, other, sort=None):\n \"\"\"\n Return a new Index with elements from the index that are not in\n `other`.\n\n This is the set difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default None\n Whether to sort the resulting index. By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n .. versionadded:: 0.24.0\n\n .. 
versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n difference : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([2, 1, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.difference(idx2)\n Int64Index([1, 2], dtype='int64')\n >>> idx1.difference(idx2, sort=False)\n Int64Index([2, 1], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n\n if self.equals(other):\n # pass an empty np.ndarray with the appropriate dtype\n return self._shallow_copy(self._data[:0])\n\n other, result_name = self._convert_can_do_setop(other)\n\n this = self._get_unique_index()\n\n indexer = this.get_indexer(other)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n\n label_diff = np.setdiff1d(np.arange(this.size), indexer,\n assume_unique=True)\n the_diff = this.values.take(label_diff)\n if sort is None:\n try:\n the_diff = sorting.safe_sort(the_diff)\n except TypeError:\n pass\n\n return this._shallow_copy(the_diff, name=result_name, freq=None)\n\n def symmetric_difference(self, other, result_name=None, sort=None):\n \"\"\"\n Compute the symmetric difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n result_name : str\n sort : False or None, default None\n Whether to sort the resulting index. By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n .. versionadded:: 0.24.0\n\n .. versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n symmetric_difference : Index\n\n Notes\n -----\n ``symmetric_difference`` contains elements that appear in either\n ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by\n ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates\n dropped.\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([2, 3, 4, 5])\n >>> idx1.symmetric_difference(idx2)\n Int64Index([1, 5], dtype='int64')\n\n You can also use the ``^`` operator:\n\n >>> idx1 ^ idx2\n Int64Index([1, 5], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name_update = self._convert_can_do_setop(other)\n if result_name is None:\n result_name = result_name_update\n\n this = self._get_unique_index()\n other = other._get_unique_index()\n indexer = this.get_indexer(other)\n\n # {this} minus {other}\n common_indexer = indexer.take((indexer != -1).nonzero()[0])\n left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,\n assume_unique=True)\n left_diff = this.values.take(left_indexer)\n\n # {other} minus {this}\n right_indexer = (indexer == -1).nonzero()[0]\n right_diff = other.values.take(right_indexer)\n\n the_diff = _concat._concat_compat([left_diff, right_diff])\n if sort is None:\n try:\n the_diff = sorting.safe_sort(the_diff)\n except TypeError:\n pass\n\n attribs = self._get_attributes_dict()\n attribs['name'] = result_name\n if 'freq' in attribs:\n attribs['freq'] = None\n return self._shallow_copy_with_infer(the_diff, **attribs)\n\n def _assert_can_do_setop(self, other):\n if not is_list_like(other):\n raise TypeError('Input must be Index or array-like')\n return True\n\n def _convert_can_do_setop(self, other):\n if not isinstance(other, Index):\n other = Index(other, name=self.name)\n result_name = self.name\n else:\n result_name = get_op_result_name(self, other)\n return other, result_name\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n _index_shared_docs['get_loc'] = \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n tolerance : optional\n Maximum distance from index value for inexact matches. The value of\n the index at the matching location most satisfy the equation\n ``abs(index[loc] - key) <= tolerance``.\n\n Tolerance may be a scalar\n value, which applies the same tolerance to all values, or\n list-like, which applies variable tolerance per element. List-like\n includes list, tuple, array, Series, and must be the same size as\n the index and its dtype must exactly match the index's type.\n\n .. 
versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> unique_index = pd.Index(list('abc'))\n >>> unique_index.get_loc('b')\n 1\n\n >>> monotonic_index = pd.Index(list('abbc'))\n >>> monotonic_index.get_loc('b')\n slice(1, 3, None)\n\n >>> non_monotonic_index = pd.Index(list('abcb'))\n >>> non_monotonic_index.get_loc('b')\n array([False, True, False, True], dtype=bool)\n \"\"\"\n\n @Appender(_index_shared_docs['get_loc'])\n def get_loc(self, key, method=None, tolerance=None):\n if method is None:\n if tolerance is not None:\n raise ValueError('tolerance argument only valid if using pad, '\n 'backfill or nearest lookups')\n try:\n return self._engine.get_loc(key)\n except KeyError:\n return self._engine.get_loc(self._maybe_cast_indexer(key))\n indexer = self.get_indexer([key], method=method, tolerance=tolerance)\n if indexer.ndim > 1 or indexer.size > 1:\n raise TypeError('get_loc requires scalar valued input')\n loc = indexer.item()\n if loc == -1:\n raise KeyError(key)\n return loc\n\n _index_shared_docs['get_indexer'] = \"\"\"\n Compute indexer and mask for new index given the current index. The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n limit : int, optional\n Maximum number of consecutive labels in ``target`` to match for\n inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n indexer : ndarray of int\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. Missing values\n in the target are marked by -1.\n\n Examples\n --------\n >>> index = pd.Index(['c', 'a', 'b'])\n >>> index.get_indexer(['a', 'b', 'x'])\n array([ 1, 2, -1])\n\n Notice that the return value is an array of locations in ``index``\n and ``x`` is marked by -1, as it is not in ``index``.\n \"\"\"\n\n @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n method = missing.clean_reindex_fill_method(method)\n target = ensure_index(target)\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, target)\n\n # Treat boolean labels passed to a numeric index as not found. 
Without\n # this fix False and True would be treated as 0 and 1 respectively.\n # (GH #16877)\n if target.is_boolean() and self.is_numeric():\n return ensure_platform_int(np.repeat(-1, target.size))\n\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer(ptarget, method=method, limit=limit,\n tolerance=tolerance)\n\n if not is_dtype_equal(self.dtype, target.dtype):\n this = self.astype(object)\n target = target.astype(object)\n return this.get_indexer(target, method=method, limit=limit,\n tolerance=tolerance)\n\n if not self.is_unique:\n raise InvalidIndexError('Reindexing only valid with uniquely'\n ' valued Index objects')\n\n if method == 'pad' or method == 'backfill':\n indexer = self._get_fill_indexer(target, method, limit, tolerance)\n elif method == 'nearest':\n indexer = self._get_nearest_indexer(target, limit, tolerance)\n else:\n if tolerance is not None:\n raise ValueError('tolerance argument only valid if doing pad, '\n 'backfill or nearest reindexing')\n if limit is not None:\n raise ValueError('limit argument only valid if doing pad, '\n 'backfill or nearest reindexing')\n\n indexer = self._engine.get_indexer(target._ndarray_values)\n\n return ensure_platform_int(indexer)\n\n def _convert_tolerance(self, tolerance, target):\n # override this method on subclasses\n tolerance = np.asarray(tolerance)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError('list-like tolerance size must match '\n 'target index size')\n return tolerance\n\n def _get_fill_indexer(self, target, method, limit=None, tolerance=None):\n if self.is_monotonic_increasing and target.is_monotonic_increasing:\n method = (self._engine.get_pad_indexer if method == 'pad' else\n self._engine.get_backfill_indexer)\n indexer = method(target._ndarray_values, limit)\n else:\n indexer = self._get_fill_indexer_searchsorted(target, method,\n limit)\n if tolerance is not None:\n indexer = self._filter_indexer_tolerance(target._ndarray_values,\n indexer,\n tolerance)\n return indexer\n\n def _get_fill_indexer_searchsorted(self, target, method, limit=None):\n \"\"\"\n Fallback pad/backfill get_indexer that works for monotonic decreasing\n indexes and non-monotonic targets.\n \"\"\"\n if limit is not None:\n raise ValueError('limit argument for %r method only well-defined '\n 'if index and target are monotonic' % method)\n\n side = 'left' if method == 'pad' else 'right'\n\n # find exact matches first (this simplifies the algorithm)\n indexer = self.get_indexer(target)\n nonexact = (indexer == -1)\n indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],\n side)\n if side == 'left':\n # searchsorted returns \"indices into a sorted array such that,\n # if the corresponding elements in v were inserted before the\n # indices, the order of a would be preserved\".\n # Thus, we need to subtract 1 to find values to the left.\n indexer[nonexact] -= 1\n # This also mapped not found values (values of 0 from\n # np.searchsorted) to -1, which conveniently is also our\n # sentinel for missing values\n else:\n # Mark indices to the right of the largest value as not found\n indexer[indexer == len(self)] = -1\n return indexer\n\n def _get_nearest_indexer(self, target, limit, tolerance):\n \"\"\"\n Get the indexer for the nearest index labels; requires an index with\n values that can be subtracted from each other (e.g., not strings or\n tuples).\n \"\"\"\n left_indexer = self.get_indexer(target, 'pad', limit=limit)\n right_indexer = 
self.get_indexer(target, 'backfill', limit=limit)\n\n target = np.asarray(target)\n left_distances = abs(self.values[left_indexer] - target)\n right_distances = abs(self.values[right_indexer] - target)\n\n op = operator.lt if self.is_monotonic_increasing else operator.le\n indexer = np.where(op(left_distances, right_distances) |\n (right_indexer == -1), left_indexer, right_indexer)\n if tolerance is not None:\n indexer = self._filter_indexer_tolerance(target, indexer,\n tolerance)\n return indexer\n\n def _filter_indexer_tolerance(self, target, indexer, tolerance):\n distance = abs(self.values[indexer] - target)\n indexer = np.where(distance <= tolerance, indexer, -1)\n return indexer\n\n # --------------------------------------------------------------------\n # Indexer Conversion Methods\n\n _index_shared_docs['_convert_scalar_indexer'] = \"\"\"\n Convert a scalar indexer.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'ix', 'loc', 'getitem', 'iloc'} or None\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n if kind == 'iloc':\n return self._validate_indexer('positional', key, kind)\n\n if len(self) and not isinstance(self, ABCMultiIndex,):\n\n # we can raise here if we are definitive that this\n # is positional indexing (eg. .ix on with a float)\n # or label indexing if we are using a type able\n # to be represented in the index\n\n if kind in ['getitem', 'ix'] and is_float(key):\n if not self.is_floating():\n return self._invalid_indexer('label', key)\n\n elif kind in ['loc'] and is_float(key):\n\n # we want to raise KeyError on string/mixed here\n # technically we *could* raise a TypeError\n # on anything but mixed though\n if self.inferred_type not in ['floating',\n 'mixed-integer-float',\n 'string',\n 'unicode',\n 'mixed']:\n return self._invalid_indexer('label', key)\n\n elif kind in ['loc'] and is_integer(key):\n if not self.holds_integer():\n return self._invalid_indexer('label', key)\n\n return key\n\n _index_shared_docs['_convert_slice_indexer'] = \"\"\"\n Convert a slice indexer.\n\n By definition, these are labels unless 'iloc' is passed in.\n Floats are not allowed as the start, step, or stop of the slice.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'ix', 'loc', 'getitem', 'iloc'} or None\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_slice_indexer'])\n def _convert_slice_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n # if we are not a slice, then we are done\n if not isinstance(key, slice):\n return key\n\n # validate iloc\n if kind == 'iloc':\n return slice(self._validate_indexer('slice', key.start, kind),\n self._validate_indexer('slice', key.stop, kind),\n self._validate_indexer('slice', key.step, kind))\n\n # potentially cast the bounds to integers\n start, stop, step = key.start, key.stop, key.step\n\n # figure out if this is a positional indexer\n def is_int(v):\n return v is None or is_integer(v)\n\n is_null_slicer = start is None and stop is None\n is_index_slice = is_int(start) and is_int(stop)\n is_positional = is_index_slice and not self.is_integer()\n\n if kind == 'getitem':\n \"\"\"\n called from the getitem slicers, validate that we are in fact\n integers\n \"\"\"\n if self.is_integer() or is_index_slice:\n return slice(self._validate_indexer('slice', key.start, kind),\n self._validate_indexer('slice', key.stop, kind),\n 
self._validate_indexer('slice', key.step, kind))\n\n # convert the slice to an indexer here\n\n # if we are mixed and have integers\n try:\n if is_positional and self.is_mixed():\n # Validate start & stop\n if start is not None:\n self.get_loc(start)\n if stop is not None:\n self.get_loc(stop)\n is_positional = False\n except KeyError:\n if self.inferred_type == 'mixed-integer-float':\n raise\n\n if is_null_slicer:\n indexer = key\n elif is_positional:\n indexer = key\n else:\n try:\n indexer = self.slice_indexer(start, stop, step, kind=kind)\n except Exception:\n if is_index_slice:\n if self.is_integer():\n raise\n else:\n indexer = key\n else:\n raise\n\n return indexer\n\n def _convert_listlike_indexer(self, keyarr, kind=None):\n \"\"\"\n Parameters\n ----------\n keyarr : list-like\n Indexer to convert.\n\n Returns\n -------\n indexer : numpy.ndarray or None\n Return an ndarray or None if cannot convert.\n keyarr : numpy.ndarray\n Return tuple-safe keys.\n \"\"\"\n if isinstance(keyarr, Index):\n keyarr = self._convert_index_indexer(keyarr)\n else:\n keyarr = self._convert_arr_indexer(keyarr)\n\n indexer = self._convert_list_indexer(keyarr, kind=kind)\n return indexer, keyarr\n\n _index_shared_docs['_convert_arr_indexer'] = \"\"\"\n Convert an array-like indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : array-like\n Indexer to convert.\n\n Returns\n -------\n converted_keyarr : array-like\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_arr_indexer'])\n def _convert_arr_indexer(self, keyarr):\n keyarr = com.asarray_tuplesafe(keyarr)\n return keyarr\n\n _index_shared_docs['_convert_index_indexer'] = \"\"\"\n Convert an Index indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : Index (or sub-class)\n Indexer to convert.\n\n Returns\n -------\n converted_keyarr : Index (or sub-class)\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_index_indexer'])\n def _convert_index_indexer(self, keyarr):\n return keyarr\n\n _index_shared_docs['_convert_list_indexer'] = \"\"\"\n Convert a list-like indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : Index (or sub-class)\n Indexer to convert.\n kind : iloc, ix, loc, optional\n\n Returns\n -------\n positional indexer or None\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_list_indexer'])\n def _convert_list_indexer(self, keyarr, kind=None):\n if (kind in [None, 'iloc', 'ix'] and\n is_integer_dtype(keyarr) and not self.is_floating() and\n not isinstance(keyarr, ABCPeriodIndex)):\n\n if self.inferred_type == 'mixed-integer':\n indexer = self.get_indexer(keyarr)\n if (indexer >= 0).all():\n return indexer\n # missing values are flagged as -1 by get_indexer and negative\n # indices are already converted to positive indices in the\n # above if-statement, so the negative flags are changed to\n # values outside the range of indices so as to trigger an\n # IndexError in maybe_convert_indices\n indexer[indexer < 0] = len(self)\n from pandas.core.indexing import maybe_convert_indices\n return maybe_convert_indices(indexer, len(self))\n\n elif not self.inferred_type == 'integer':\n keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)\n return keyarr\n\n return None\n\n def _invalid_indexer(self, form, key):\n \"\"\"\n Consistent invalid indexer message.\n \"\"\"\n raise TypeError(\"cannot do {form} indexing on {klass} with these \"\n \"indexers [{key}] of {kind}\".format(\n form=form, klass=type(self), key=key,\n kind=type(key)))\n\n # 
--------------------------------------------------------------------\n # Reindex Methods\n\n def _can_reindex(self, indexer):\n \"\"\"\n Check if we are allowing reindexing with this particular indexer.\n\n Parameters\n ----------\n indexer : an integer indexer\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n\n # trying to reindex on an axis with duplicates\n if not self.is_unique and len(indexer):\n raise ValueError(\"cannot reindex from a duplicate axis\")\n\n def reindex(self, target, method=None, level=None, limit=None,\n tolerance=None):\n \"\"\"\n Create index with target's values (move/add/delete values\n as necessary).\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray or None\n Indices of output values in original index.\n \"\"\"\n # GH6552: preserve names when reindexing to non-named target\n # (i.e. neither Index nor Series).\n preserve_names = not hasattr(target, 'name')\n\n # GH7774: preserve dtype/tz if target is empty and not an Index.\n target = _ensure_has_len(target) # target may be an iterator\n\n if not isinstance(target, Index) and len(target) == 0:\n attrs = self._get_attributes_dict()\n attrs.pop('freq', None) # don't preserve freq\n values = self._data[:0] # appropriately-dtyped empty array\n target = self._simple_new(values, dtype=self.dtype, **attrs)\n else:\n target = ensure_index(target)\n\n if level is not None:\n if method is not None:\n raise TypeError('Fill method not supported if level passed')\n _, indexer, _ = self._join_level(target, level, how='right',\n return_indexers=True)\n else:\n if self.equals(target):\n indexer = None\n else:\n\n if self.is_unique:\n indexer = self.get_indexer(target, method=method,\n limit=limit,\n tolerance=tolerance)\n else:\n if method is not None or limit is not None:\n raise ValueError(\"cannot reindex a non-unique index \"\n \"with a method or limit\")\n indexer, missing = self.get_indexer_non_unique(target)\n\n if preserve_names and target.nlevels == 1 and target.name != self.name:\n target = target.copy()\n target.name = self.name\n\n return target, indexer\n\n def _reindex_non_unique(self, target):\n \"\"\"\n Create a new index with target's values (move/add/delete values as\n necessary) use with non-unique Index and a possibly non-unique target.\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray or None\n Indices of output values in original index.\n\n \"\"\"\n\n target = ensure_index(target)\n indexer, missing = self.get_indexer_non_unique(target)\n check = indexer != -1\n new_labels = self.take(indexer[check])\n new_indexer = None\n\n if len(missing):\n length = np.arange(len(indexer))\n\n missing = ensure_platform_int(missing)\n missing_labels = target.take(missing)\n missing_indexer = ensure_int64(length[~check])\n cur_labels = self.take(indexer[check]).values\n cur_indexer = ensure_int64(length[check])\n\n new_labels = np.empty(tuple([len(indexer)]), dtype=object)\n new_labels[cur_indexer] = cur_labels\n new_labels[missing_indexer] = missing_labels\n\n # a unique indexer\n if target.is_unique:\n\n # see GH5553, make sure we use the right indexer\n new_indexer = np.arange(len(indexer))\n new_indexer[cur_indexer] = np.arange(len(cur_labels))\n new_indexer[missing_indexer] = -1\n\n # we have a non_unique selector, need to use the original\n # indexer here\n else:\n\n # need to retake to have the same size as the indexer\n 
indexer[~check] = -1\n\n # reset the new indexer to account for the new size\n new_indexer = np.arange(len(self.take(indexer)))\n new_indexer[~check] = -1\n\n new_index = self._shallow_copy_with_infer(new_labels, freq=None)\n return new_index, indexer, new_indexer\n\n # --------------------------------------------------------------------\n # Join Methods\n\n _index_shared_docs['join'] = \"\"\"\n Compute join_index and indexers to conform data\n structures to the new index.\n\n Parameters\n ----------\n other : Index\n how : {'left', 'right', 'inner', 'outer'}\n level : int or level name, default None\n return_indexers : boolean, default False\n sort : boolean, default False\n Sort the join keys lexicographically in the result Index. If False,\n the order of the join keys depends on the join type (how keyword)\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n join_index, (left_indexer, right_indexer)\n \"\"\"\n\n @Appender(_index_shared_docs['join'])\n def join(self, other, how='left', level=None, return_indexers=False,\n sort=False):\n self_is_mi = isinstance(self, ABCMultiIndex)\n other_is_mi = isinstance(other, ABCMultiIndex)\n\n # try to figure out the join level\n # GH3662\n if level is None and (self_is_mi or other_is_mi):\n\n # have the same levels/names so a simple join\n if self.names == other.names:\n pass\n else:\n return self._join_multi(other, how=how,\n return_indexers=return_indexers)\n\n # join on the level\n if level is not None and (self_is_mi or other_is_mi):\n return self._join_level(other, level, how=how,\n return_indexers=return_indexers)\n\n other = ensure_index(other)\n\n if len(other) == 0 and how in ('left', 'outer'):\n join_index = self._shallow_copy()\n if return_indexers:\n rindexer = np.repeat(-1, len(join_index))\n return join_index, None, rindexer\n else:\n return join_index\n\n if len(self) == 0 and how in ('right', 'outer'):\n join_index = other._shallow_copy()\n if return_indexers:\n lindexer = np.repeat(-1, len(join_index))\n return join_index, lindexer, None\n else:\n return join_index\n\n if self._join_precedence < other._join_precedence:\n how = {'right': 'left', 'left': 'right'}.get(how, how)\n result = other.join(self, how=how, level=level,\n return_indexers=return_indexers)\n if return_indexers:\n x, y, z = result\n result = x, z, y\n return result\n\n if not is_dtype_equal(self.dtype, other.dtype):\n this = self.astype('O')\n other = other.astype('O')\n return this.join(other, how=how, return_indexers=return_indexers)\n\n _validate_join_method(how)\n\n if not self.is_unique and not other.is_unique:\n return self._join_non_unique(other, how=how,\n return_indexers=return_indexers)\n elif not self.is_unique or not other.is_unique:\n if self.is_monotonic and other.is_monotonic:\n return self._join_monotonic(other, how=how,\n return_indexers=return_indexers)\n else:\n return self._join_non_unique(other, how=how,\n return_indexers=return_indexers)\n elif self.is_monotonic and other.is_monotonic:\n try:\n return self._join_monotonic(other, how=how,\n return_indexers=return_indexers)\n except TypeError:\n pass\n\n if how == 'left':\n join_index = self\n elif how == 'right':\n join_index = other\n elif how == 'inner':\n # TODO: sort=False here for backwards compat. It may\n # be better to use the sort parameter passed into join\n join_index = self.intersection(other, sort=False)\n elif how == 'outer':\n # TODO: sort=True here for backwards compat. 
It may\n # be better to use the sort parameter passed into join\n join_index = self.union(other)\n\n if sort:\n join_index = join_index.sort_values()\n\n if return_indexers:\n if join_index is self:\n lindexer = None\n else:\n lindexer = self.get_indexer(join_index)\n if join_index is other:\n rindexer = None\n else:\n rindexer = other.get_indexer(join_index)\n return join_index, lindexer, rindexer\n else:\n return join_index\n\n def _join_multi(self, other, how, return_indexers=True):\n from .multi import MultiIndex\n from pandas.core.reshape.merge import _restore_dropped_levels_multijoin\n\n # figure out join names\n self_names = set(com._not_none(*self.names))\n other_names = set(com._not_none(*other.names))\n overlap = self_names & other_names\n\n # need at least 1 in common\n if not overlap:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n self_is_mi = isinstance(self, MultiIndex)\n other_is_mi = isinstance(other, MultiIndex)\n\n if self_is_mi and other_is_mi:\n\n # Drop the non-matching levels from left and right respectively\n ldrop_names = list(self_names - overlap)\n rdrop_names = list(other_names - overlap)\n\n self_jnlevels = self.droplevel(ldrop_names)\n other_jnlevels = other.droplevel(rdrop_names)\n\n # Join left and right\n # Join on same leveled multi-index frames is supported\n join_idx, lidx, ridx = self_jnlevels.join(other_jnlevels, how,\n return_indexers=True)\n\n # Restore the dropped levels\n # Returned index level order is\n # common levels, ldrop_names, rdrop_names\n dropped_names = ldrop_names + rdrop_names\n\n levels, codes, names = (\n _restore_dropped_levels_multijoin(self, other,\n dropped_names,\n join_idx,\n lidx, ridx))\n\n # Re-create the multi-index\n multi_join_idx = MultiIndex(levels=levels, codes=codes,\n names=names, verify_integrity=False)\n\n multi_join_idx = multi_join_idx.remove_unused_levels()\n\n return multi_join_idx, lidx, ridx\n\n jl = list(overlap)[0]\n\n # Case where only one index is multi\n # make the indices into mi's that match\n flip_order = False\n if self_is_mi:\n self, other = other, self\n flip_order = True\n # flip if join method is right or left\n how = {'right': 'left', 'left': 'right'}.get(how, how)\n\n level = other.names.index(jl)\n result = self._join_level(other, level, how=how,\n return_indexers=return_indexers)\n\n if flip_order:\n if isinstance(result, tuple):\n return result[0], result[2], result[1]\n return result\n\n def _join_non_unique(self, other, how='left', return_indexers=False):\n from pandas.core.reshape.merge import _get_join_indexers\n\n left_idx, right_idx = _get_join_indexers([self._ndarray_values],\n [other._ndarray_values],\n how=how,\n sort=True)\n\n left_idx = ensure_platform_int(left_idx)\n right_idx = ensure_platform_int(right_idx)\n\n join_index = np.asarray(self._ndarray_values.take(left_idx))\n mask = left_idx == -1\n np.putmask(join_index, mask, other._ndarray_values.take(right_idx))\n\n join_index = self._wrap_joined_index(join_index, other)\n\n if return_indexers:\n return join_index, left_idx, right_idx\n else:\n return join_index\n\n def _join_level(self, other, level, how='left', return_indexers=False,\n keep_order=True):\n \"\"\"\n The join method *only* affects the level of the resulting\n MultiIndex. 
Otherwise it just exactly aligns the Index data to the\n labels of the level in the MultiIndex.\n\n If ```keep_order == True```, the order of the data indexed by the\n MultiIndex will not be changed; otherwise, it will tie out\n with `other`.\n \"\"\"\n from .multi import MultiIndex\n\n def _get_leaf_sorter(labels):\n \"\"\"\n Returns sorter for the inner most level while preserving the\n order of higher levels.\n \"\"\"\n if labels[0].size == 0:\n return np.empty(0, dtype='int64')\n\n if len(labels) == 1:\n lab = ensure_int64(labels[0])\n sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())\n return sorter\n\n # find indexers of beginning of each set of\n # same-key labels w.r.t all but last level\n tic = labels[0][:-1] != labels[0][1:]\n for lab in labels[1:-1]:\n tic |= lab[:-1] != lab[1:]\n\n starts = np.hstack(([True], tic, [True])).nonzero()[0]\n lab = ensure_int64(labels[-1])\n return lib.get_level_sorter(lab, ensure_int64(starts))\n\n if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):\n raise TypeError('Join on level between two MultiIndex objects '\n 'is ambiguous')\n\n left, right = self, other\n\n flip_order = not isinstance(self, MultiIndex)\n if flip_order:\n left, right = right, left\n how = {'right': 'left', 'left': 'right'}.get(how, how)\n\n level = left._get_level_number(level)\n old_level = left.levels[level]\n\n if not right.is_unique:\n raise NotImplementedError('Index._join_level on non-unique index '\n 'is not implemented')\n\n new_level, left_lev_indexer, right_lev_indexer = \\\n old_level.join(right, how=how, return_indexers=True)\n\n if left_lev_indexer is None:\n if keep_order or len(left) == 0:\n left_indexer = None\n join_index = left\n else: # sort the leaves\n left_indexer = _get_leaf_sorter(left.codes[:level + 1])\n join_index = left[left_indexer]\n\n else:\n left_lev_indexer = ensure_int64(left_lev_indexer)\n rev_indexer = lib.get_reverse_indexer(left_lev_indexer,\n len(old_level))\n\n new_lev_codes = algos.take_nd(rev_indexer, left.codes[level],\n allow_fill=False)\n\n new_codes = list(left.codes)\n new_codes[level] = new_lev_codes\n\n new_levels = list(left.levels)\n new_levels[level] = new_level\n\n if keep_order: # just drop missing values. o.w. 
keep order\n left_indexer = np.arange(len(left), dtype=np.intp)\n mask = new_lev_codes != -1\n if not mask.all():\n new_codes = [lab[mask] for lab in new_codes]\n left_indexer = left_indexer[mask]\n\n else: # tie out the order with other\n if level == 0: # outer most level, take the fast route\n ngroups = 1 + new_lev_codes.max()\n left_indexer, counts = libalgos.groupsort_indexer(\n new_lev_codes, ngroups)\n\n # missing values are placed first; drop them!\n left_indexer = left_indexer[counts[0]:]\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n else: # sort the leaves\n mask = new_lev_codes != -1\n mask_all = mask.all()\n if not mask_all:\n new_codes = [lab[mask] for lab in new_codes]\n\n left_indexer = _get_leaf_sorter(new_codes[:level + 1])\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n # left_indexers are w.r.t masked frame.\n # reverse to original frame!\n if not mask_all:\n left_indexer = mask.nonzero()[0][left_indexer]\n\n join_index = MultiIndex(levels=new_levels, codes=new_codes,\n names=left.names, verify_integrity=False)\n\n if right_lev_indexer is not None:\n right_indexer = algos.take_nd(right_lev_indexer,\n join_index.codes[level],\n allow_fill=False)\n else:\n right_indexer = join_index.codes[level]\n\n if flip_order:\n left_indexer, right_indexer = right_indexer, left_indexer\n\n if return_indexers:\n left_indexer = (None if left_indexer is None\n else ensure_platform_int(left_indexer))\n right_indexer = (None if right_indexer is None\n else ensure_platform_int(right_indexer))\n return join_index, left_indexer, right_indexer\n else:\n return join_index\n\n def _join_monotonic(self, other, how='left', return_indexers=False):\n if self.equals(other):\n ret_index = other if how == 'right' else self\n if return_indexers:\n return ret_index, None, None\n else:\n return ret_index\n\n sv = self._ndarray_values\n ov = other._ndarray_values\n\n if self.is_unique and other.is_unique:\n # We can perform much better than the general case\n if how == 'left':\n join_index = self\n lidx = None\n ridx = self._left_indexer_unique(sv, ov)\n elif how == 'right':\n join_index = other\n lidx = self._left_indexer_unique(ov, sv)\n ridx = None\n elif how == 'inner':\n join_index, lidx, ridx = self._inner_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n elif how == 'outer':\n join_index, lidx, ridx = self._outer_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n else:\n if how == 'left':\n join_index, lidx, ridx = self._left_indexer(sv, ov)\n elif how == 'right':\n join_index, ridx, lidx = self._left_indexer(ov, sv)\n elif how == 'inner':\n join_index, lidx, ridx = self._inner_indexer(sv, ov)\n elif how == 'outer':\n join_index, lidx, ridx = self._outer_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n\n if return_indexers:\n lidx = None if lidx is None else ensure_platform_int(lidx)\n ridx = None if ridx is None else ensure_platform_int(ridx)\n return join_index, lidx, ridx\n else:\n return join_index\n\n def _wrap_joined_index(self, joined, other):\n name = get_op_result_name(self, other)\n return Index(joined, name=name)\n\n # --------------------------------------------------------------------\n # Uncategorized Methods\n\n @property\n def values(self):\n \"\"\"\n Return an array representing the data in the Index.\n\n .. 
warning::\n\n We recommend using :attr:`Index.array` or\n :meth:`Index.to_numpy`, depending on whether you need\n a reference to the underlying data or a NumPy array.\n\n Returns\n -------\n array: numpy.ndarray or ExtensionArray\n\n See Also\n --------\n Index.array : Reference to the underlying data.\n Index.to_numpy : A NumPy array representing the underlying data.\n \"\"\"\n return self._data.view(np.ndarray)\n\n @property\n def _values(self) -> Union[ExtensionArray, ABCIndexClass, np.ndarray]:\n # TODO(EA): remove index types as they become extension arrays\n \"\"\"\n The best array representation.\n\n This is an ndarray, ExtensionArray, or Index subclass. This differs\n from ``_ndarray_values``, which always returns an ndarray.\n\n Both ``_values`` and ``_ndarray_values`` are consistent between\n ``Series`` and ``Index``.\n\n It may differ from the public '.values' method.\n\n index | values | _values | _ndarray_values |\n ----------------- | --------------- | ------------- | --------------- |\n Index | ndarray | ndarray | ndarray |\n CategoricalIndex | Categorical | Categorical | ndarray[int] |\n DatetimeIndex | ndarray[M8ns] | ndarray[M8ns] | ndarray[M8ns] |\n DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |\n PeriodIndex | ndarray[object] | PeriodArray | ndarray[int] |\n IntervalIndex | IntervalArray | IntervalArray | ndarray[object] |\n\n See Also\n --------\n values\n _ndarray_values\n \"\"\"\n return self._data\n\n def get_values(self):\n \"\"\"\n Return `Index` data as an `numpy.ndarray`.\n\n Returns\n -------\n numpy.ndarray\n A one-dimensional numpy array of the `Index` values.\n\n See Also\n --------\n Index.values : The attribute that get_values wraps.\n\n Examples\n --------\n Getting the `Index` values of a `DataFrame`:\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n ... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])\n >>> df\n A B C\n a 1 2 3\n b 4 5 6\n c 7 8 9\n >>> df.index.get_values()\n array(['a', 'b', 'c'], dtype=object)\n\n Standalone `Index` values:\n\n >>> idx = pd.Index(['1', '2', '3'])\n >>> idx.get_values()\n array(['1', '2', '3'], dtype=object)\n\n `MultiIndex` arrays also have only one dimension:\n\n >>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],\n ... names=('number', 'letter'))\n >>> midx.get_values()\n array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)\n >>> midx.get_values().ndim\n 1\n \"\"\"\n return self.values\n\n @Appender(IndexOpsMixin.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n result = super().memory_usage(deep=deep)\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n _index_shared_docs['where'] = \"\"\"\n Return an Index of same shape as self and whose corresponding\n entries are from self where cond is True and otherwise are from\n other.\n\n .. 
versionadded:: 0.19.0\n\n Parameters\n ----------\n cond : boolean array-like with the same length as self\n other : scalar, or array-like\n\n Returns\n -------\n Index\n \"\"\"\n\n @Appender(_index_shared_docs['where'])\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n\n dtype = self.dtype\n values = self.values\n\n if is_bool(other) or is_bool_dtype(other):\n\n # bools force casting\n values = values.astype(object)\n dtype = None\n\n values = np.where(cond, values, other)\n\n if self._is_numeric_dtype and np.any(isna(values)):\n # We can't coerce to the numeric dtype of \"self\" (unless\n # it's float) if there are NaN values in our output.\n dtype = None\n\n return self._shallow_copy_with_infer(values, dtype=dtype)\n\n # construction helpers\n @classmethod\n def _try_convert_to_int_index(cls, data, copy, name, dtype):\n \"\"\"\n Attempt to convert an array of data into an integer index.\n\n Parameters\n ----------\n data : The data to convert.\n copy : Whether to copy the data or not.\n name : The name of the index returned.\n\n Returns\n -------\n int_index : data converted to either an Int64Index or a\n UInt64Index\n\n Raises\n ------\n ValueError if the conversion was not successful.\n \"\"\"\n\n from .numeric import Int64Index, UInt64Index\n if not is_unsigned_integer_dtype(dtype):\n # skip int64 conversion attempt if uint-like dtype is passed, as\n # this could return Int64Index when UInt64Index is what's desrired\n try:\n res = data.astype('i8', copy=False)\n if (res == data).all():\n return Int64Index(res, copy=copy, name=name)\n except (OverflowError, TypeError, ValueError):\n pass\n\n # Conversion to int64 failed (possibly due to overflow) or was skipped,\n # so let's try now with uint64.\n try:\n res = data.astype('u8', copy=False)\n if (res == data).all():\n return UInt64Index(res, copy=copy, name=name)\n except (OverflowError, TypeError, ValueError):\n pass\n\n raise ValueError\n\n @classmethod\n def _scalar_data_error(cls, data):\n raise TypeError('{0}(...) 
must be called with a collection of some '\n 'kind, {1} was passed'.format(cls.__name__,\n repr(data)))\n\n @classmethod\n def _string_data_error(cls, data):\n raise TypeError('String dtype not supported, you may need '\n 'to explicitly cast to a numeric type')\n\n @classmethod\n def _coerce_to_ndarray(cls, data):\n \"\"\"\n Coerces data to ndarray.\n\n Converts other iterables to list first and then to array.\n Does not touch ndarrays.\n\n Raises\n ------\n TypeError\n When the data passed in is a scalar.\n \"\"\"\n\n if not isinstance(data, (np.ndarray, Index)):\n if data is None or is_scalar(data):\n cls._scalar_data_error(data)\n\n # other iterable of some kind\n if not isinstance(data, (ABCSeries, list, tuple)):\n data = list(data)\n data = np.asarray(data)\n return data\n\n def _coerce_scalar_to_index(self, item):\n \"\"\"\n We need to coerce a scalar to a compat for our index type.\n\n Parameters\n ----------\n item : scalar item to coerce\n \"\"\"\n dtype = self.dtype\n\n if self._is_numeric_dtype and isna(item):\n # We can't coerce to the numeric dtype of \"self\" (unless\n # it's float) if there are NaN values in our output.\n dtype = None\n\n return Index([item], dtype=dtype, **self._get_attributes_dict())\n\n def _to_safe_for_reshape(self):\n \"\"\"\n Convert to object if we are a categorical.\n \"\"\"\n return self\n\n def _convert_for_op(self, value):\n \"\"\"\n Convert value to be insertable to ndarray.\n \"\"\"\n return value\n\n def _assert_can_do_op(self, value):\n \"\"\"\n Check value is valid for scalar op.\n \"\"\"\n if not is_scalar(value):\n msg = \"'value' must be a scalar, passed: {0}\"\n raise TypeError(msg.format(type(value).__name__))\n\n @property\n def _has_complex_internals(self):\n # to disable groupby tricks in MultiIndex\n return False\n\n def _is_memory_usage_qualified(self):\n \"\"\"\n Return a boolean if we need a qualified .info display.\n \"\"\"\n return self.is_object()\n\n def is_type_compatible(self, kind):\n return kind == self.inferred_type\n\n _index_shared_docs['contains'] = \"\"\"\n Return a boolean indicating whether the provided key is in the index.\n\n Parameters\n ----------\n key : label\n The key to check if it is present in the index.\n\n Returns\n -------\n bool\n Whether the key search is in the index.\n\n See Also\n --------\n Index.isin : Returns an ndarray of boolean dtype indicating whether the\n list-like key is in the index.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n\n >>> idx.contains(2)\n True\n >>> idx.contains(6)\n False\n\n This is equivalent to:\n\n >>> 2 in idx\n True\n >>> 6 in idx\n False\n \"\"\"\n\n @Appender(_index_shared_docs['contains'] % _index_doc_kwargs)\n def __contains__(self, key):\n hash(key)\n try:\n return key in self._engine\n except (OverflowError, TypeError, ValueError):\n return False\n\n @Appender(_index_shared_docs['contains'] % _index_doc_kwargs)\n def contains(self, key):\n hash(key)\n try:\n return key in self._engine\n except (TypeError, ValueError):\n return False\n\n def __hash__(self):\n raise TypeError(\"unhashable type: %r\" % type(self).__name__)\n\n def __setitem__(self, key, value):\n raise TypeError(\"Index does not support mutable operations\")\n\n def __getitem__(self, key):\n \"\"\"\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned 
instead of\n corresponding `Index` subclass.\n\n \"\"\"\n # There's no custom logic to be implemented in __getslice__, so it's\n # not overloaded intentionally.\n getitem = self._data.__getitem__\n promote = self._shallow_copy\n\n if is_scalar(key):\n key = com.cast_scalar_indexer(key)\n return getitem(key)\n\n if isinstance(key, slice):\n # This case is separated from the conditional above to avoid\n # pessimization of basic indexing.\n return promote(getitem(key))\n\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n\n key = com.values_from_object(key)\n result = getitem(key)\n if not is_scalar(result):\n return promote(result)\n else:\n return result\n\n def _can_hold_identifiers_and_holds_name(self, name):\n \"\"\"\n Faster check for ``name in self`` when we know `name` is a Python\n identifier (e.g. in NDFrame.__getattr__, which hits this to support\n . key lookup). For indexes that can't hold identifiers (everything\n but object & categorical) we just return False.\n\n https://github.com/pandas-dev/pandas/issues/19764\n \"\"\"\n if self.is_object() or self.is_categorical():\n return name in self\n return False\n\n def append(self, other):\n \"\"\"\n Append a collection of Index options together.\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n appended : Index\n \"\"\"\n\n to_concat = [self]\n\n if isinstance(other, (list, tuple)):\n to_concat = to_concat + list(other)\n else:\n to_concat.append(other)\n\n for obj in to_concat:\n if not isinstance(obj, Index):\n raise TypeError('all inputs must be Index')\n\n names = {obj.name for obj in to_concat}\n name = None if len(names) > 1 else self.name\n\n return self._concat(to_concat, name)\n\n def _concat(self, to_concat, name):\n\n typs = _concat.get_dtype_kinds(to_concat)\n\n if len(typs) == 1:\n return self._concat_same_dtype(to_concat, name=name)\n return _concat._concat_index_asobject(to_concat, name=name)\n\n def _concat_same_dtype(self, to_concat, name):\n \"\"\"\n Concatenate to_concat which has the same class.\n \"\"\"\n # must be overridden in specific classes\n return _concat._concat_index_asobject(to_concat, name)\n\n def putmask(self, mask, value):\n \"\"\"\n Return a new Index of the values set with the mask.\n\n Returns\n -------\n Index\n\n See Also\n --------\n numpy.ndarray.putmask\n \"\"\"\n values = self.values.copy()\n try:\n np.putmask(values, mask, self._convert_for_op(value))\n return self._shallow_copy(values)\n except (ValueError, TypeError) as err:\n if is_object_dtype(self):\n raise err\n\n # coerces to object\n return self.astype(object).putmask(mask, value)\n\n def equals(self, other):\n \"\"\"\n Determine if two Index objects contain the same elements.\n\n Returns\n -------\n bool\n If two Index objects have equal elements True, otherwise False.\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if is_object_dtype(self) and not is_object_dtype(other):\n # if other is not object, use other's logic for coercion\n return other.equals(self)\n\n try:\n return array_equivalent(com.values_from_object(self),\n com.values_from_object(other))\n except Exception:\n return False\n\n def identical(self, other):\n \"\"\"\n Similar to equals, but check that other comparable attributes are\n also equal.\n\n Returns\n -------\n bool\n If two Index objects have equal elements and same type True,\n otherwise False.\n \"\"\"\n return (self.equals(other) and\n all((getattr(self, c, None) == getattr(other, c, None)\n for 
c in self._comparables)) and\n type(self) == type(other))\n\n def asof(self, label):\n \"\"\"\n Return the label from the index, or, if not present, the previous one.\n\n Assuming that the index is sorted, return the passed index label if it\n is in the index, or return the previous index label if the passed one\n is not in the index.\n\n Parameters\n ----------\n label : object\n The label up to which the method returns the latest index label.\n\n Returns\n -------\n object\n The passed label if it is in the index. The previous label if the\n passed label is not in the sorted index or `NaN` if there is no\n such label.\n\n See Also\n --------\n Series.asof : Return the latest value in a Series up to the\n passed index.\n merge_asof : Perform an asof merge (similar to left join but it\n matches on nearest key rather than equal key).\n Index.get_loc : An `asof` is a thin wrapper around `get_loc`\n with method='pad'.\n\n Examples\n --------\n `Index.asof` returns the latest index label up to the passed label.\n\n >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])\n >>> idx.asof('2014-01-01')\n '2013-12-31'\n\n If the label is in the index, the method returns the passed label.\n\n >>> idx.asof('2014-01-02')\n '2014-01-02'\n\n If all of the labels in the index are later than the passed label,\n NaN is returned.\n\n >>> idx.asof('1999-01-02')\n nan\n\n If the index is not sorted, an error is raised.\n\n >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',\n ... '2014-01-03'])\n >>> idx_not_sorted.asof('2013-12-31')\n Traceback (most recent call last):\n ValueError: index must be monotonic increasing or decreasing\n \"\"\"\n try:\n loc = self.get_loc(label, method='pad')\n except KeyError:\n return self._na_value\n else:\n if isinstance(loc, slice):\n loc = loc.indices(len(self))[-1]\n return self[loc]\n\n def asof_locs(self, where, mask):\n \"\"\"\n Find the locations (indices) of the labels from the index for\n every entry in the `where` argument.\n\n As in the `asof` function, if the label (a particular entry in\n `where`) is not in the index, the latest index label upto the\n passed label is chosen and its index returned.\n\n If all of the labels in the index are later than a label in `where`,\n -1 is returned.\n\n `mask` is used to ignore NA values in the index during calculation.\n\n Parameters\n ----------\n where : Index\n An Index consisting of an array of timestamps.\n mask : array-like\n Array of booleans denoting where values in the original\n data are not NA.\n\n Returns\n -------\n numpy.ndarray\n An array of locations (indices) of the labels from the Index\n which correspond to the return values of the `asof` function\n for every element in `where`.\n \"\"\"\n locs = self.values[mask].searchsorted(where.values, side='right')\n locs = np.where(locs > 0, locs - 1, 0)\n\n result = np.arange(len(self))[mask].take(locs)\n\n first = mask.argmax()\n result[(locs == 0) & (where.values < self.values[first])] = -1\n\n return result\n\n def sort_values(self, return_indexer=False, ascending=True):\n \"\"\"\n Return a sorted copy of the index.\n\n Return a sorted copy of the index, and optionally return the indices\n that sorted the index itself.\n\n Parameters\n ----------\n return_indexer : bool, default False\n Should the indices that would sort the index be returned.\n ascending : bool, default True\n Should the index values be sorted in an ascending order.\n\n Returns\n -------\n sorted_index : pandas.Index\n Sorted copy of the index.\n indexer : numpy.ndarray, 
optional\n The indices that the index itself was sorted by.\n\n See Also\n --------\n Series.sort_values : Sort values of a Series.\n DataFrame.sort_values : Sort values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([10, 100, 1, 1000])\n >>> idx\n Int64Index([10, 100, 1, 1000], dtype='int64')\n\n Sort values in ascending order (default behavior).\n\n >>> idx.sort_values()\n Int64Index([1, 10, 100, 1000], dtype='int64')\n\n Sort values in descending order, and also get the indices `idx` was\n sorted by.\n\n >>> idx.sort_values(ascending=False, return_indexer=True)\n (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))\n \"\"\"\n _as = self.argsort()\n if not ascending:\n _as = _as[::-1]\n\n sorted_index = self.take(_as)\n\n if return_indexer:\n return sorted_index, _as\n else:\n return sorted_index\n\n def sort(self, *args, **kwargs):\n raise TypeError(\"cannot sort an Index object in-place, use \"\n \"sort_values instead\")\n\n def shift(self, periods=1, freq=None):\n \"\"\"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or string, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.Index\n Shifted index.\n\n See Also\n --------\n Series.shift : Shift values of Series.\n\n Notes\n -----\n This method is only implemented for datetime-like index classes,\n i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.\n\n Examples\n --------\n Put the first 5 month starts of 2011 into an index.\n\n >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')\n >>> month_starts\n DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',\n '2011-05-01'],\n dtype='datetime64[ns]', freq='MS')\n\n Shift the index by 10 days.\n\n >>> month_starts.shift(10, freq='D')\n DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',\n '2011-05-11'],\n dtype='datetime64[ns]', freq=None)\n\n The default value of `freq` is the `freq` attribute of the index,\n which is 'MS' (month start) in this example.\n\n >>> month_starts.shift(10)\n DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',\n '2012-03-01'],\n dtype='datetime64[ns]', freq='MS')\n \"\"\"\n raise NotImplementedError(\"Not supported for type %s\" %\n type(self).__name__)\n\n def argsort(self, *args, **kwargs):\n \"\"\"\n Return the integer indices that would sort the index.\n\n Parameters\n ----------\n *args\n Passed to `numpy.ndarray.argsort`.\n **kwargs\n Passed to `numpy.ndarray.argsort`.\n\n Returns\n -------\n numpy.ndarray\n Integer indices that would sort the index if used as\n an indexer.\n\n See Also\n --------\n numpy.argsort : Similar method for NumPy arrays.\n Index.sort_values : Return sorted copy of Index.\n\n Examples\n --------\n >>> idx = pd.Index(['b', 'a', 'd', 'c'])\n >>> idx\n Index(['b', 'a', 'd', 'c'], dtype='object')\n\n >>> order = idx.argsort()\n >>> order\n array([1, 0, 3, 2])\n\n >>> idx[order]\n Index(['a', 'b', 'c', 'd'], dtype='object')\n \"\"\"\n result = self.asi8\n if result is None:\n result = np.array(self)\n return result.argsort(*args, **kwargs)\n\n def get_value(self, series, key):\n 
\"\"\"\n Fast lookup of value from 1-dimensional ndarray. Only use this if you\n know what you're doing.\n\n Returns\n -------\n scalar\n A value in the Series with the index of the key value in self.\n \"\"\"\n\n # if we have something that is Index-like, then\n # use this, e.g. DatetimeIndex\n # Things like `Series._get_value` (via .at) pass the EA directly here.\n s = getattr(series, '_values', series)\n if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):\n # GH 20882, 21257\n # Unify Index and ExtensionArray treatment\n # First try to convert the key to a location\n # If that fails, raise a KeyError if an integer\n # index, otherwise, see if key is an integer, and\n # try that\n try:\n iloc = self.get_loc(key)\n return s[iloc]\n except KeyError:\n if (len(self) > 0 and\n (self.holds_integer() or self.is_boolean())):\n raise\n elif is_integer(key):\n return s[key]\n\n s = com.values_from_object(series)\n k = com.values_from_object(key)\n\n k = self._convert_scalar_indexer(k, kind='getitem')\n try:\n return self._engine.get_value(s, k,\n tz=getattr(series.dtype, 'tz', None))\n except KeyError as e1:\n if len(self) > 0 and (self.holds_integer() or self.is_boolean()):\n raise\n\n try:\n return libindex.get_value_box(s, key)\n except IndexError:\n raise\n except TypeError:\n # generator/iterator-like\n if is_iterator(key):\n raise InvalidIndexError(key)\n else:\n raise e1\n except Exception: # pragma: no cover\n raise e1\n except TypeError:\n # python 3\n if is_scalar(key): # pragma: no cover\n raise IndexError(key)\n raise InvalidIndexError(key)\n\n def set_value(self, arr, key, value):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray.\n\n Notes\n -----\n Only use this if you know what you're doing.\n \"\"\"\n self._engine.set_value(com.values_from_object(arr),\n com.values_from_object(key), value)\n\n _index_shared_docs['get_indexer_non_unique'] = \"\"\"\n Compute indexer and mask for new index given the current index. The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n\n Returns\n -------\n indexer : ndarray of int\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. 
Missing values\n in the target are marked by -1.\n missing : ndarray of int\n An indexer into the target of the values not found.\n These correspond to the -1 in the indexer array.\n \"\"\"\n\n @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n target = ensure_index(target)\n if is_categorical(target):\n target = target.astype(target.dtype.categories.dtype)\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer_non_unique(ptarget)\n\n if self.is_all_dates:\n self = Index(self.asi8)\n tgt_values = target.asi8\n else:\n tgt_values = target._ndarray_values\n\n indexer, missing = self._engine.get_indexer_non_unique(tgt_values)\n return ensure_platform_int(indexer), missing\n\n def get_indexer_for(self, target, **kwargs):\n \"\"\"\n Guaranteed return of an indexer even when non-unique.\n\n This dispatches to get_indexer or get_indexer_nonunique\n as appropriate.\n\n Returns\n -------\n numpy.ndarray\n List of indices.\n \"\"\"\n if self.is_unique:\n return self.get_indexer(target, **kwargs)\n indexer, _ = self.get_indexer_non_unique(target, **kwargs)\n return indexer\n\n def _maybe_promote(self, other):\n # A hack, but it works\n from pandas import DatetimeIndex\n if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):\n return DatetimeIndex(self), other\n elif self.inferred_type == 'boolean':\n if not is_object_dtype(self.dtype):\n return self.astype('object'), other.astype('object')\n return self, other\n\n def groupby(self, values):\n \"\"\"\n Group the index labels by a given array of values.\n\n Parameters\n ----------\n values : array\n Values used to determine the groups.\n\n Returns\n -------\n groups : dict\n {group name -> group labels}\n \"\"\"\n\n # TODO: if we are a MultiIndex, we can do better\n # that converting to tuples\n if isinstance(values, ABCMultiIndex):\n values = values.values\n values = ensure_categorical(values)\n result = values._reverse_indexer()\n\n # map to the label\n result = {k: self.take(v) for k, v in result.items()}\n\n return result\n\n def map(self, mapper, na_action=None):\n \"\"\"\n Map values using input correspondence (a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n applied : Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n \"\"\"\n\n from .multi import MultiIndex\n new_values = super()._map_values(mapper, na_action=na_action)\n\n attributes = self._get_attributes_dict()\n\n # we can return a MultiIndex\n if new_values.size and isinstance(new_values[0], tuple):\n if isinstance(self, MultiIndex):\n names = self.names\n elif attributes.get('name'):\n names = [attributes.get('name')] * len(new_values[0])\n else:\n names = None\n return MultiIndex.from_tuples(new_values,\n names=names)\n\n attributes['copy'] = False\n if not new_values.size:\n # empty\n attributes['dtype'] = self.dtype\n\n return Index(new_values, **attributes)\n\n def isin(self, values, level=None):\n \"\"\"\n Return a boolean array where the index values are in `values`.\n\n Compute boolean array of whether each index value is found in the\n passed set of values. 
The length of the returned boolean array matches\n the length of the index.\n\n Parameters\n ----------\n values : set or list-like\n Sought values.\n\n .. versionadded:: 0.18.1\n\n Support for values as a set.\n\n level : str or int, optional\n Name or position of the index level to use (if the index is a\n `MultiIndex`).\n\n Returns\n -------\n is_contained : ndarray\n NumPy array of boolean values.\n\n See Also\n --------\n Series.isin : Same for Series.\n DataFrame.isin : Same method for DataFrames.\n\n Notes\n -----\n In the case of `MultiIndex` you must either specify `values` as a\n list-like object containing tuples that are the same length as the\n number of levels, or specify `level`. Otherwise it will raise a\n ``ValueError``.\n\n If `level` is specified:\n\n - if it is the name of one *and only one* index level, use that level;\n - otherwise it should be a number indicating level position.\n\n Examples\n --------\n >>> idx = pd.Index([1,2,3])\n >>> idx\n Int64Index([1, 2, 3], dtype='int64')\n\n Check whether each index value in a list of values.\n >>> idx.isin([1, 4])\n array([ True, False, False])\n\n >>> midx = pd.MultiIndex.from_arrays([[1,2,3],\n ... ['red', 'blue', 'green']],\n ... names=('number', 'color'))\n >>> midx\n MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],\n codes=[[0, 1, 2], [2, 0, 1]],\n names=['number', 'color'])\n\n Check whether the strings in the 'color' level of the MultiIndex\n are in a list of colors.\n\n >>> midx.isin(['red', 'orange', 'yellow'], level='color')\n array([ True, False, False])\n\n To check across the levels of a MultiIndex, pass a list of tuples:\n\n >>> midx.isin([(1, 'red'), (3, 'red')])\n array([ True, False, False])\n\n For a DatetimeIndex, string values in `values` are converted to\n Timestamps.\n\n >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']\n >>> dti = pd.to_datetime(dates)\n >>> dti\n DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],\n dtype='datetime64[ns]', freq=None)\n\n >>> dti.isin(['2000-03-11'])\n array([ True, False, False])\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n return algos.isin(self, values)\n\n def _get_string_slice(self, key, use_lhs=True, use_rhs=True):\n # this is for partial string indexing,\n # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex\n raise NotImplementedError\n\n def slice_indexer(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n For an ordered or unique index, compute the slice indexer for input\n labels and step.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning\n end : label, default None\n If None, defaults to the end\n step : int, default None\n kind : string, default None\n\n Returns\n -------\n indexer : slice\n\n Raises\n ------\n KeyError : If key does not exist, or key is not unique and index is\n not ordered.\n\n Notes\n -----\n This function assumes that the data is sorted, so use at your own peril\n\n Examples\n --------\n This is a method on all index types. 
For example you can do:\n\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_indexer(start='b', end='c')\n slice(1, 3)\n\n >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])\n >>> idx.slice_indexer(start='b', end=('c', 'g'))\n slice(1, 3)\n \"\"\"\n start_slice, end_slice = self.slice_locs(start, end, step=step,\n kind=kind)\n\n # return a slice\n if not is_scalar(start_slice):\n raise AssertionError(\"Start slice bound is non-scalar\")\n if not is_scalar(end_slice):\n raise AssertionError(\"End slice bound is non-scalar\")\n\n return slice(start_slice, end_slice, step)\n\n def _maybe_cast_indexer(self, key):\n \"\"\"\n If we have a float key and are not a floating index, then try to cast\n to an int if equivalent.\n \"\"\"\n\n if is_float(key) and not self.is_floating():\n try:\n ckey = int(key)\n if ckey == key:\n key = ckey\n except (OverflowError, ValueError, TypeError):\n pass\n return key\n\n def _validate_indexer(self, form, key, kind):\n \"\"\"\n If we are positional indexer, validate that we have appropriate\n typed bounds must be an integer.\n \"\"\"\n assert kind in ['ix', 'loc', 'getitem', 'iloc']\n\n if key is None:\n pass\n elif is_integer(key):\n pass\n elif kind in ['iloc', 'getitem']:\n self._invalid_indexer(form, key)\n return key\n\n _index_shared_docs['_maybe_cast_slice_bound'] = \"\"\"\n This function should be overloaded in subclasses that allow non-trivial\n casting on label-slice bounds, e.g. datetime-like indices allowing\n strings containing formatted datetimes.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n \"\"\"\n\n @Appender(_index_shared_docs['_maybe_cast_slice_bound'])\n def _maybe_cast_slice_bound(self, label, side, kind):\n assert kind in ['ix', 'loc', 'getitem', None]\n\n # We are a plain index here (sub-class override this method if they\n # wish to have special treatment for floats/ints, e.g. 
Float64Index and\n # datetimelike Indexes\n # reject them\n if is_float(label):\n if not (kind in ['ix'] and (self.holds_integer() or\n self.is_floating())):\n self._invalid_indexer('slice', label)\n\n # we are trying to find integer bounds on a non-integer based index\n # this is rejected (generally .loc gets you here)\n elif is_integer(label):\n self._invalid_indexer('slice', label)\n\n return label\n\n def _searchsorted_monotonic(self, label, side='left'):\n if self.is_monotonic_increasing:\n return self.searchsorted(label, side=side)\n elif self.is_monotonic_decreasing:\n # np.searchsorted expects ascending sort order, have to reverse\n # everything for it to work (element ordering, search side and\n # resulting value).\n pos = self[::-1].searchsorted(label, side='right' if side == 'left'\n else 'left')\n return len(self) - pos\n\n raise ValueError('index must be monotonic increasing or decreasing')\n\n def _get_loc_only_exact_matches(self, key):\n \"\"\"\n This is overridden on subclasses (namely, IntervalIndex) to control\n get_slice_bound.\n \"\"\"\n return self.get_loc(key)\n\n def get_slice_bound(self, label, side, kind):\n \"\"\"\n Calculate slice bound that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if ``side=='right'``) position\n of given label.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n int\n Index of label.\n \"\"\"\n assert kind in ['ix', 'loc', 'getitem', None]\n\n if side not in ('left', 'right'):\n raise ValueError(\"Invalid value for side kwarg,\"\n \" must be either 'left' or 'right': %s\" %\n (side, ))\n\n original_label = label\n\n # For datetime indices label may be a string that has to be converted\n # to datetime boundary according to its resolution.\n label = self._maybe_cast_slice_bound(label, side, kind)\n\n # we need to look up the label\n try:\n slc = self._get_loc_only_exact_matches(label)\n except KeyError as err:\n try:\n return self._searchsorted_monotonic(label, side)\n except ValueError:\n # raise the original KeyError\n raise err\n\n if isinstance(slc, np.ndarray):\n # get_loc may return a boolean array or an array of indices, which\n # is OK as long as they are representable by a slice.\n if is_bool_dtype(slc):\n slc = lib.maybe_booleans_to_slice(slc.view('u1'))\n else:\n slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))\n if isinstance(slc, np.ndarray):\n raise KeyError(\"Cannot get %s slice bound for non-unique \"\n \"label: %r\" % (side, original_label))\n\n if isinstance(slc, slice):\n if side == 'left':\n return slc.start\n else:\n return slc.stop\n else:\n if side == 'right':\n return slc + 1\n else:\n return slc\n\n def slice_locs(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n Compute slice locations for input labels.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning\n end : label, default None\n If None, defaults to the end\n step : int, defaults None\n If None, defaults to 1\n kind : {'ix', 'loc', 'getitem'} or None\n\n Returns\n -------\n start, end : int\n\n See Also\n --------\n Index.get_loc : Get location for a single label.\n\n Notes\n -----\n This method only works if the index is monotonic or unique.\n\n Examples\n --------\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_locs(start='b', end='c')\n (1, 3)\n \"\"\"\n inc = (step is None or step >= 0)\n\n if not inc:\n # If it's a reverse slice, temporarily swap bounds.\n start, end = end, start\n\n 
# GH 16785: If start and end happen to be date strings with UTC offsets\n # attempt to parse and check that the offsets are the same\n if (isinstance(start, (str, datetime))\n and isinstance(end, (str, datetime))):\n try:\n ts_start = Timestamp(start)\n ts_end = Timestamp(end)\n except (ValueError, TypeError):\n pass\n else:\n if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):\n raise ValueError(\"Both dates must have the \"\n \"same UTC offset\")\n\n start_slice = None\n if start is not None:\n start_slice = self.get_slice_bound(start, 'left', kind)\n if start_slice is None:\n start_slice = 0\n\n end_slice = None\n if end is not None:\n end_slice = self.get_slice_bound(end, 'right', kind)\n if end_slice is None:\n end_slice = len(self)\n\n if not inc:\n # Bounds at this moment are swapped, swap them back and shift by 1.\n #\n # slice_locs('B', 'A', step=-1): s='B', e='A'\n #\n # s='A' e='B'\n # AFTER SWAP: | |\n # v ------------------> V\n # -----------------------------------\n # | | |A|A|A|A| | | | | |B|B| | | | |\n # -----------------------------------\n # ^ <------------------ ^\n # SHOULD BE: | |\n # end=s-1 start=e-1\n #\n end_slice, start_slice = start_slice - 1, end_slice - 1\n\n # i == -1 triggers ``len(self) + i`` selection that points to the\n # last element, not before-the-first one, subtracting len(self)\n # compensates that.\n if end_slice == -1:\n end_slice -= len(self)\n if start_slice == -1:\n start_slice -= len(self)\n\n return start_slice, end_slice\n\n def delete(self, loc):\n \"\"\"\n Make new Index with passed location(-s) deleted.\n\n Returns\n -------\n new_index : Index\n \"\"\"\n return self._shallow_copy(np.delete(self._data, loc))\n\n def insert(self, loc, item):\n \"\"\"\n Make new Index inserting new item at location.\n\n Follows Python list.append semantics for negative values.\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : Index\n \"\"\"\n _self = np.asarray(self)\n item = self._coerce_scalar_to_index(item)._ndarray_values\n idx = np.concatenate((_self[:loc], item, _self[loc:]))\n return self._shallow_copy_with_infer(idx)\n\n def drop(self, labels, errors='raise'):\n \"\"\"\n Make new Index with passed list of labels deleted.\n\n Parameters\n ----------\n labels : array-like\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n Returns\n -------\n dropped : Index\n\n Raises\n ------\n KeyError\n If not all of the labels are found in the selected axis\n \"\"\"\n arr_dtype = 'object' if self.dtype == 'object' else None\n labels = com.index_labels_to_array(labels, dtype=arr_dtype)\n indexer = self.get_indexer(labels)\n mask = indexer == -1\n if mask.any():\n if errors != 'ignore':\n raise KeyError(\n '{} not found in axis'.format(labels[mask]))\n indexer = indexer[~mask]\n return self.delete(indexer)\n\n # --------------------------------------------------------------------\n # Generated Arithmetic, Comparison, and Unary Methods\n\n def _evaluate_with_timedelta_like(self, other, op):\n # Timedelta knows how to operate with np.array, so dispatch to that\n # operation and then wrap the results\n if self._is_numeric_dtype and op.__name__ in ['add', 'sub',\n 'radd', 'rsub']:\n raise TypeError(\"Operation {opname} between {cls} and {other} \"\n \"is invalid\".format(opname=op.__name__,\n cls=self.dtype,\n other=type(other).__name__))\n\n other = Timedelta(other)\n values = self.values\n\n with np.errstate(all='ignore'):\n result = op(values, other)\n\n attrs = 
self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n if op == divmod:\n return Index(result[0], **attrs), Index(result[1], **attrs)\n return Index(result, **attrs)\n\n def _evaluate_with_datetime_like(self, other, op):\n raise TypeError(\"can only perform ops with datetime like values\")\n\n @classmethod\n def _add_comparison_methods(cls):\n \"\"\"\n Add in comparison methods.\n \"\"\"\n cls.__eq__ = _make_comparison_op(operator.eq, cls)\n cls.__ne__ = _make_comparison_op(operator.ne, cls)\n cls.__lt__ = _make_comparison_op(operator.lt, cls)\n cls.__gt__ = _make_comparison_op(operator.gt, cls)\n cls.__le__ = _make_comparison_op(operator.le, cls)\n cls.__ge__ = _make_comparison_op(operator.ge, cls)\n\n @classmethod\n def _add_numeric_methods_add_sub_disabled(cls):\n \"\"\"\n Add in the numeric add/sub methods to disable.\n \"\"\"\n cls.__add__ = make_invalid_op('__add__')\n cls.__radd__ = make_invalid_op('__radd__')\n cls.__iadd__ = make_invalid_op('__iadd__')\n cls.__sub__ = make_invalid_op('__sub__')\n cls.__rsub__ = make_invalid_op('__rsub__')\n cls.__isub__ = make_invalid_op('__isub__')\n\n @classmethod\n def _add_numeric_methods_disabled(cls):\n \"\"\"\n Add in numeric methods to disable other than add/sub.\n \"\"\"\n cls.__pow__ = make_invalid_op('__pow__')\n cls.__rpow__ = make_invalid_op('__rpow__')\n cls.__mul__ = make_invalid_op('__mul__')\n cls.__rmul__ = make_invalid_op('__rmul__')\n cls.__floordiv__ = make_invalid_op('__floordiv__')\n cls.__rfloordiv__ = make_invalid_op('__rfloordiv__')\n cls.__truediv__ = make_invalid_op('__truediv__')\n cls.__rtruediv__ = make_invalid_op('__rtruediv__')\n cls.__mod__ = make_invalid_op('__mod__')\n cls.__divmod__ = make_invalid_op('__divmod__')\n cls.__neg__ = make_invalid_op('__neg__')\n cls.__pos__ = make_invalid_op('__pos__')\n cls.__abs__ = make_invalid_op('__abs__')\n cls.__inv__ = make_invalid_op('__inv__')\n\n def _maybe_update_attributes(self, attrs):\n \"\"\"\n Update Index attributes (e.g. freq) depending on op.\n \"\"\"\n return attrs\n\n def _validate_for_numeric_unaryop(self, op, opstr):\n \"\"\"\n Validate if we can perform a numeric unary operation.\n \"\"\"\n if not self._is_numeric_dtype:\n raise TypeError(\"cannot evaluate a numeric op \"\n \"{opstr} for type: {typ}\"\n .format(opstr=opstr, typ=type(self).__name__))\n\n def _validate_for_numeric_binop(self, other, op):\n \"\"\"\n Return valid other; evaluate or raise TypeError if we are not of\n the appropriate type.\n\n Notes\n -----\n This is an internal method called by ops.\n \"\"\"\n opstr = '__{opname}__'.format(opname=op.__name__)\n # if we are an inheritor of numeric,\n # but not actually numeric (e.g. 
DatetimeIndex/PeriodIndex)\n if not self._is_numeric_dtype:\n raise TypeError(\"cannot evaluate a numeric op {opstr} \"\n \"for type: {typ}\"\n .format(opstr=opstr, typ=type(self).__name__))\n\n if isinstance(other, Index):\n if not other._is_numeric_dtype:\n raise TypeError(\"cannot evaluate a numeric op \"\n \"{opstr} with type: {typ}\"\n .format(opstr=opstr, typ=type(other)))\n elif isinstance(other, np.ndarray) and not other.ndim:\n other = other.item()\n\n if isinstance(other, (Index, ABCSeries, np.ndarray)):\n if len(self) != len(other):\n raise ValueError(\"cannot evaluate a numeric op with \"\n \"unequal lengths\")\n other = com.values_from_object(other)\n if other.dtype.kind not in ['f', 'i', 'u']:\n raise TypeError(\"cannot evaluate a numeric op \"\n \"with a non-numeric dtype\")\n elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):\n # higher up to handle\n pass\n elif isinstance(other, (datetime, np.datetime64)):\n # higher up to handle\n pass\n else:\n if not (is_float(other) or is_integer(other)):\n raise TypeError(\"can only perform ops with scalar values\")\n\n return other\n\n @classmethod\n def _add_numeric_methods_binary(cls):\n \"\"\"\n Add in numeric methods.\n \"\"\"\n cls.__add__ = _make_arithmetic_op(operator.add, cls)\n cls.__radd__ = _make_arithmetic_op(ops.radd, cls)\n cls.__sub__ = _make_arithmetic_op(operator.sub, cls)\n cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls)\n cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls)\n cls.__pow__ = _make_arithmetic_op(operator.pow, cls)\n\n cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls)\n cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls)\n\n # TODO: rmod? rdivmod?\n cls.__mod__ = _make_arithmetic_op(operator.mod, cls)\n cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls)\n cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls)\n cls.__divmod__ = _make_arithmetic_op(divmod, cls)\n cls.__mul__ = _make_arithmetic_op(operator.mul, cls)\n cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls)\n\n @classmethod\n def _add_numeric_methods_unary(cls):\n \"\"\"\n Add in numeric unary methods.\n \"\"\"\n def _make_evaluate_unary(op, opstr):\n\n def _evaluate_numeric_unary(self):\n\n self._validate_for_numeric_unaryop(op, opstr)\n attrs = self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n return Index(op(self.values), **attrs)\n\n _evaluate_numeric_unary.__name__ = opstr\n return _evaluate_numeric_unary\n\n cls.__neg__ = _make_evaluate_unary(operator.neg, '__neg__')\n cls.__pos__ = _make_evaluate_unary(operator.pos, '__pos__')\n cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')\n cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')\n\n @classmethod\n def _add_numeric_methods(cls):\n cls._add_numeric_methods_unary()\n cls._add_numeric_methods_binary()\n\n @classmethod\n def _add_logical_methods(cls):\n \"\"\"\n Add in logical methods.\n \"\"\"\n _doc = \"\"\"\n %(desc)s\n\n Parameters\n ----------\n *args\n These parameters will be passed to numpy.%(outname)s.\n **kwargs\n These parameters will be passed to numpy.%(outname)s.\n\n Returns\n -------\n %(outname)s : bool or array_like (if axis is specified)\n A single element array_like may be converted to bool.\"\"\"\n\n _index_shared_docs['index_all'] = dedent(\"\"\"\n\n See Also\n --------\n Index.any : Return whether any element in an Index is True.\n Series.any : Return whether any element in a Series is True.\n Series.all : Return whether all elements in a Series are True.\n\n Notes\n -----\n Not a 
Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n **all**\n\n True, because nonzero integers are considered True.\n\n >>> pd.Index([1, 2, 3]).all()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 1, 2]).all()\n False\n\n **any**\n\n True, because ``1`` is considered True.\n\n >>> pd.Index([0, 0, 1]).any()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 0, 0]).any()\n False\n \"\"\")\n\n _index_shared_docs['index_any'] = dedent(\"\"\"\n\n See Also\n --------\n Index.all : Return whether all elements are True.\n Series.all : Return whether all elements are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n >>> index = pd.Index([0, 1, 2])\n >>> index.any()\n True\n\n >>> index = pd.Index([0, 0, 0])\n >>> index.any()\n False\n \"\"\")\n\n def _make_logical_function(name, desc, f):\n @Substitution(outname=name, desc=desc)\n @Appender(_index_shared_docs['index_' + name])\n @Appender(_doc)\n def logical_func(self, *args, **kwargs):\n result = f(self.values)\n if (isinstance(result, (np.ndarray, ABCSeries, Index)) and\n result.ndim == 0):\n # return NumPy type\n return result.dtype.type(result.item())\n else: # pragma: no cover\n return result\n\n logical_func.__name__ = name\n return logical_func\n\n cls.all = _make_logical_function('all', 'Return whether all elements '\n 'are True.',\n np.all)\n cls.any = _make_logical_function('any',\n 'Return whether any element is True.',\n np.any)\n\n @classmethod\n def _add_logical_methods_disabled(cls):\n \"\"\"\n Add in logical methods to disable.\n \"\"\"\n cls.all = make_invalid_op('all')\n cls.any = make_invalid_op('any')\n\n\nIndex._add_numeric_methods_disabled()\nIndex._add_logical_methods()\nIndex._add_comparison_methods()\n\n\ndef ensure_index_from_sequences(sequences, names=None):\n \"\"\"\n Construct an index from sequences of data.\n\n A single sequence returns an Index. 
Many sequences returns a\n MultiIndex.\n\n Parameters\n ----------\n sequences : sequence of sequences\n names : sequence of str\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> ensure_index_from_sequences([[1, 2, 3]], names=['name'])\n Int64Index([1, 2, 3], dtype='int64', name='name')\n\n >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']],\n names=['L1', 'L2'])\n MultiIndex(levels=[['a'], ['a', 'b']],\n codes=[[0, 0], [0, 1]],\n names=['L1', 'L2'])\n\n See Also\n --------\n ensure_index\n \"\"\"\n from .multi import MultiIndex\n\n if len(sequences) == 1:\n if names is not None:\n names = names[0]\n return Index(sequences[0], name=names)\n else:\n return MultiIndex.from_arrays(sequences, names=names)\n\n\ndef ensure_index(index_like, copy=False):\n \"\"\"\n Ensure that we have an index from some index-like object.\n\n Parameters\n ----------\n index : sequence\n An Index or other sequence\n copy : bool\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> ensure_index(['a', 'b'])\n Index(['a', 'b'], dtype='object')\n\n >>> ensure_index([('a', 'a'), ('b', 'c')])\n Index([('a', 'a'), ('b', 'c')], dtype='object')\n\n >>> ensure_index([['a', 'a'], ['b', 'c']])\n MultiIndex(levels=[['a'], ['b', 'c']],\n codes=[[0, 0], [0, 1]])\n\n See Also\n --------\n ensure_index_from_sequences\n \"\"\"\n if isinstance(index_like, Index):\n if copy:\n index_like = index_like.copy()\n return index_like\n if hasattr(index_like, 'name'):\n return Index(index_like, name=index_like.name, copy=copy)\n\n if is_iterator(index_like):\n index_like = list(index_like)\n\n # must check for exactly list here because of strict type\n # check in clean_index_list\n if isinstance(index_like, list):\n if type(index_like) != list:\n index_like = list(index_like)\n\n converted, all_arrays = lib.clean_index_list(index_like)\n\n if len(converted) > 0 and all_arrays:\n from .multi import MultiIndex\n return MultiIndex.from_arrays(converted)\n else:\n index_like = converted\n else:\n # clean_index_list does the equivalent of copying\n # so only need to do this if not list instance\n if copy:\n from copy import copy\n index_like = copy(index_like)\n\n return Index(index_like)\n\n\ndef _ensure_has_len(seq):\n \"\"\"\n If seq is an iterator, put its values into a list.\n \"\"\"\n try:\n len(seq)\n except TypeError:\n return list(seq)\n else:\n return seq\n\n\ndef _trim_front(strings):\n \"\"\"\n Trims zeros and decimal points.\n \"\"\"\n trimmed = strings\n while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):\n trimmed = [x[1:] for x in trimmed]\n return trimmed\n\n\ndef _validate_join_method(method):\n if method not in ['left', 'right', 'inner', 'outer']:\n raise ValueError('do not recognize join method %s' % method)\n\n\ndef default_index(n):\n from pandas.core.index import RangeIndex\n return RangeIndex(0, n, name=None)\n"
] |
[
[
"pandas.core.dtypes.common.ensure_object",
"numpy.where",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.dtypes.concat._concat_index_asobject",
"pandas.core.common.cast_scalar_indexer",
"pandas.core.common._not_none",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas._libs.join.outer_join_indexer",
"pandas.core.dtypes.common.is_list_like",
"numpy.delete",
"numpy.array",
"pandas.core.algorithms.take",
"pandas._libs.lib.is_datetime_with_singletz_array",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.TimedeltaIndex",
"pandas.core.dtypes.missing.isna",
"pandas.io.formats.printing.pprint_thing",
"pandas._libs.tslibs.Timestamp",
"pandas.Series",
"numpy.asarray",
"pandas._libs.join.inner_join_indexer",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.concatenate",
"pandas._libs.lib.clean_index_list",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.core.index.RangeIndex.from_range",
"pandas.core.common.asarray_tuplesafe",
"pandas.io.formats.format.format_array",
"pandas.core.algorithms.take_nd",
"numpy.errstate",
"pandas._libs.algos.groupsort_indexer",
"pandas.core.ops._comp_method_OBJECT_ARRAY",
"pandas.core.sorting.safe_sort",
"pandas.core.dtypes.common.is_integer",
"pandas._libs.lib.infer_dtype",
"numpy.ndarray.__setstate__",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty",
"pandas.core.dtypes.cast.maybe_cast_to_integer_array",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.ensure_categorical",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.lib.is_scalar",
"pandas.core.ops.make_invalid_op",
"pandas.core.indexes.frozen.FrozenList",
"pandas.core.reshape.merge._get_join_indexers",
"pandas.core.common.values_from_object",
"numpy.hstack",
"pandas.util._decorators.Substitution",
"pandas.DatetimeIndex",
"pandas.core.dtypes.common.ensure_int64",
"pandas._libs.index.get_value_box",
"numpy.repeat",
"pandas.core.index.RangeIndex",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.core.dtypes.common.is_dtype_union_equal",
"pandas._libs.join.left_join_indexer_unique",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.is_categorical",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_hashable",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.core.indexes.period._new_PeriodIndex",
"pandas.core.reshape.merge._restore_dropped_levels_multijoin",
"pandas.core.dtypes.common.is_bool",
"pandas._libs.tslibs.Timedelta",
"pandas.core.algorithms.isin",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.accessor.CachedAccessor",
"pandas.io.formats.printing.format_object_summary",
"numpy.dtype",
"pandas.core.indexes.period.PeriodIndex",
"pandas.core.dtypes.common.is_signed_integer_dtype",
"numpy.arange",
"pandas.core.ops.get_op_result_name",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.io.formats.printing.format_object_attrs",
"pandas.core.dtypes.concat.get_dtype_kinds",
"pandas._libs.join.left_join_indexer",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.concat._concat_compat",
"pandas._libs.lib.maybe_convert_objects",
"pandas.core.common.index_labels_to_array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
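The `asof_locs` method in the pandas `Index` code recorded above combines `searchsorted(..., side='right')` with a clamp-and-mask step to find, for each query, the position of the latest index label not exceeding it. A minimal NumPy-only sketch of that lookup follows; it ignores the NA `mask` argument and uses made-up arrays, so it is an illustration of the idea rather than the pandas implementation itself.

import numpy as np

def asof_locs_sketch(index_values, where):
    # For each query in `where`, return the position of the latest entry in the
    # sorted `index_values` that is <= the query, or -1 when every entry is later.
    index_values = np.asarray(index_values)
    where = np.asarray(where)

    # side='right' places each query after equal entries; stepping back by one
    # (clamped at 0) then points at the latest label <= the query.
    locs = np.searchsorted(index_values, where, side='right')
    locs = np.where(locs > 0, locs - 1, 0)

    result = locs.copy()
    # Queries that fall before the first index entry have no valid match.
    result[(locs == 0) & (where < index_values[0])] = -1
    return result

print(asof_locs_sketch([10, 20, 30], [5, 10, 25, 40]))   # [-1  0  1  2]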
Debanitrkl/MLAlgorithms
|
[
"f53a267897e4d0babdcbae7c271c5042e07549ca"
] |
[
"mla/rbm.py"
] |
[
"# coding:utf-8\nimport logging\n\nfrom mla.base import BaseEstimator\nfrom scipy.special import expit\nimport numpy as np\n\nfrom mla.utils import batch_iterator\n\nnp.random.seed(9999)\nsigmoid = expit\n\n\"\"\"\nReferences:\nA Practical Guide to Training Restricted Boltzmann Machines https://www.cs.toronto.edu/~hinton/absps/guideTR.pdf\n\"\"\"\n\n\nclass RBM(BaseEstimator):\n y_required = False\n\n def __init__(self, n_hidden=128, learning_rate=0.1, batch_size=10, max_epochs=100):\n \"\"\"Bernoulli Restricted Boltzmann Machine (RBM)\n\n Parameters\n ----------\n\n n_hidden : int, default 128\n The number of hidden units.\n learning_rate : float, default 0.1\n batch_size : int, default 10\n max_epochs : int, default 100\n \"\"\"\n self.max_epochs = max_epochs\n self.batch_size = batch_size\n self.lr = learning_rate\n self.n_hidden = n_hidden\n\n def fit(self, X, y=None):\n self.n_visible = X.shape[1]\n self._init_weights()\n self._setup_input(X, y)\n self._train()\n\n def _init_weights(self):\n\n self.W = np.random.randn(self.n_visible, self.n_hidden) * 0.1\n\n # Bias for visible and hidden units\n self.bias_v = np.zeros(self.n_visible, dtype=np.float32)\n self.bias_h = np.zeros(self.n_hidden, dtype=np.float32)\n\n self.errors = []\n\n def _train(self):\n \"\"\"Use CD-1 training procedure, basically an exact inference for `positive_associations`,\n followed by a \"non burn-in\" block Gibbs Sampling for the `negative_associations`.\"\"\"\n\n for i in range(self.max_epochs):\n error = 0\n for batch in batch_iterator(self.X, batch_size=self.batch_size):\n positive_hidden = sigmoid(np.dot(batch, self.W) + self.bias_h)\n hidden_states = self._sample(positive_hidden) # sample hidden state h1\n positive_associations = np.dot(batch.T, positive_hidden)\n\n negative_visible = sigmoid(np.dot(hidden_states, self.W.T) + self.bias_v)\n negative_visible = self._sample(negative_visible) # use the samped hidden state h1 to sample v1\n negative_hidden = sigmoid(np.dot(negative_visible, self.W) + self.bias_h)\n negative_associations = np.dot(negative_visible.T, negative_hidden)\n\n lr = self.lr / float(batch.shape[0])\n self.W += lr * ((positive_associations - negative_associations) / float(self.batch_size))\n self.bias_h += lr * (negative_hidden.sum(axis=0) - negative_associations.sum(axis=0))\n self.bias_v += lr * (np.asarray(batch.sum(axis=0)).squeeze() - negative_visible.sum(axis=0))\n\n error += np.sum((batch - negative_visible) ** 2)\n\n self.errors.append(error)\n logging.info(\"Iteration %s, error %s\" % (i, error))\n logging.debug(\"Weights: %s\" % self.W)\n logging.debug(\"Hidden bias: %s\" % self.bias_h)\n logging.debug(\"Visible bias: %s\" % self.bias_v)\n\n def _sample(self, X):\n return X > np.random.random_sample(size=X.shape)\n\n def _predict(self, X=None):\n return sigmoid(np.dot(X, self.W) + self.bias_h)\n"
] |
[
[
"numpy.dot",
"numpy.random.seed",
"numpy.random.random_sample",
"numpy.random.randn",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
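The `mla/rbm.py` entry above trains a Bernoulli RBM with CD-1: an exact positive phase followed by a single block-Gibbs step for the negative phase. Below is a stripped-down NumPy sketch of one such update. The bias-update terms here follow the Hinton practical guide cited in that file and differ slightly from the snippet's own bias updates, and all array names are illustrative.

import numpy as np
from scipy.special import expit as sigmoid

rng = np.random.default_rng(0)

def cd1_step(batch, W, bias_v, bias_h, lr=0.1):
    # Positive phase: exact hidden probabilities given the data batch.
    pos_hidden = sigmoid(batch @ W + bias_h)
    hidden_states = (pos_hidden > rng.random(pos_hidden.shape)).astype(float)
    pos_assoc = batch.T @ pos_hidden

    # Negative phase: one block-Gibbs step, v1 ~ p(v | h1), then p(h | v1).
    neg_visible = sigmoid(hidden_states @ W.T + bias_v)
    neg_visible = (neg_visible > rng.random(neg_visible.shape)).astype(float)
    neg_hidden = sigmoid(neg_visible @ W + bias_h)
    neg_assoc = neg_visible.T @ neg_hidden

    # Parameter updates averaged over the batch (in place, like the class attributes).
    n = batch.shape[0]
    W += lr * (pos_assoc - neg_assoc) / n
    bias_v += lr * (batch.sum(axis=0) - neg_visible.sum(axis=0)) / n
    bias_h += lr * (pos_hidden.sum(axis=0) - neg_hidden.sum(axis=0)) / n
    return np.sum((batch - neg_visible) ** 2)   # reconstruction error

# Tiny illustrative run on random binary data.
X = (rng.random((16, 6)) > 0.5).astype(float)
W = rng.normal(scale=0.1, size=(6, 4))
bv, bh = np.zeros(6), np.zeros(4)
print(cd1_step(X, W, bv, bh))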
dataflowr/Project-Neural-Bootstrapper
|
[
"36278a7f6884438553d90d9cdc12eaf0da1bc7bf"
] |
[
"utils/metrics.py"
] |
[
"import torch\nfrom torch.nn.functional import one_hot\n\nimport numpy as np\nfrom scipy.special import softmax\n\n\nclass NbsLoss(torch.nn.Module):\n def __init__(self, reduction='mean',\n base_loss=torch.nn.CrossEntropyLoss(reduction='none')):\n super().__init__()\n self.reduction = reduction\n self.base_loss = base_loss\n\n def forward(self, input, target, w=None):\n out = self.base_loss(input, target)\n if w is not None:\n out = out * w\n if self.reduction == 'mean':\n return out.mean()\n elif self.reduction == 'sum':\n return out.sum()\n else:\n return out\n\n\nclass BrierLoss(torch.nn.Module):\n def __init__(self, reduction='mean', num_classes=10):\n super().__init__()\n self.reduction = reduction\n self.num_classes = num_classes\n self.mse = torch.nn.MSELoss(reduction='none')\n\n def forward(self, input, target):\n target_onehot = one_hot(target, self.num_classes).float()\n out = self.mse(input.softmax(-1), target_onehot).sum(-1)\n if self.reduction == 'mean':\n return out.mean()\n elif self.reduction == 'sum':\n return out.sum()\n else:\n return out\n\n\nclass CrossEntropyLossWithSoftLabel(torch.nn.Module):\n def __init__(self, reduction='mean'):\n super().__init__()\n self.reduction = reduction\n self.logsoftmax = torch.nn.LogSoftmax(dim=1)\n\n def forward(self, input, target):\n log_probs = self.logsoftmax(input)\n loss = (-target * log_probs).sum(dim=1)\n\n if self.reduction == 'mean':\n loss = loss.mean()\n elif self.reduction == 'sum':\n loss = loss.sum()\n return loss\n\n\nclass Accuracy(torch.nn.Module):\n def __init__(self, reduction='mean', nlabels=5):\n super().__init__()\n self.reduction = reduction\n self.nlabels = nlabels\n\n def forward(self, input, target):\n if self.nlabels == 1:\n pred = input.sigmoid().gt(.5).type_as(target)\n else:\n pred = input.argmax(1)\n acc = pred == target\n if self.reduction == 'mean':\n acc = acc.float().mean()\n elif self.reduction == 'sum':\n acc = acc.float().sum()\n return acc\n\n\nclass MeanIOU(torch.nn.Module):\n def __init__(self, reduction='mean', nlabels=5):\n super().__init__()\n self.reduction = reduction\n self.nlabels = nlabels\n self.eps = 0.001\n\n def forward(self, input, target):\n if self.nlabels == 1:\n pred = input.sigmoid().gt(.5).type_as(target)\n else:\n pred = input.argmax(1)\n \n jccs = []\n for l in range(1, self.nlabels):\n _pred = pred.eq(l).float()\n _label = target.eq(l).float()\n\n _cm = _pred * 2 - _label\n dims = list(set(range(target.dim())) - set([0]))\n tp = _cm.eq(1).float().sum(dim=dims)\n tn = _cm.eq(0).float().sum(dim=dims)\n fp = _cm.eq(2).float().sum(dim=dims)\n fn = _cm.eq(-1).float().sum(dim=dims)\n\n jcc = (tp + self.eps) / (fn + fp + tp + self.eps)\n jccs += [jcc[:, None]]\n\n return torch.cat(jccs, dim=1).mean(1)\n\n\nclass ConfusionMatrix(torch.nn.Module):\n def __init__(self, nlabels=5):\n super().__init__()\n self.nlabels = nlabels\n\n def forward(self, input, target):\n if self.nlabels == 1:\n pred = input.sigmoid().gt(.5).type_as(target)\n else:\n pred = input.argmax(1)\n\n cm = torch.zeros([self.nlabels, 4]).cuda()\n for l in range(self.nlabels):\n if self.nlabels == 1:\n _pred = pred.eq(1).float()\n _label = target.eq(l).float()\n else:\n _pred = pred.eq(l).float()\n _label = target.eq(l).float()\n\n _cm = _pred * 2 - _label\n tp = _cm.eq(1).float().sum()\n tn = _cm.eq(0).float().sum()\n fp = _cm.eq(2).float().sum()\n fn = _cm.eq(-1).float().sum()\n\n for j, j_ in zip(cm[l], [tp, tn, fp, fn]):\n j += j_\n\n return cm\n\n\n# class ECE(nn.Module):\n# def __init__(self, num_bins=15, 
is_mc=False):\n# super().__init__()\n# self.num_bins = num_bins\n# self.is_mc = is_mc\n\n# def forward(self, input, target):\n \n\n\n# ECE\ndef calc_ece(softmax, label, bins=15):\n bin_boundaries = torch.linspace(0, 1, bins + 1)\n bin_lowers = bin_boundaries[:-1]\n bin_uppers = bin_boundaries[1:]\n\n softmax = torch.tensor(softmax)\n labels = torch.tensor(label)\n\n softmax_max, predictions = torch.max(softmax, 1)\n correctness = predictions.eq(labels)\n\n ece = torch.zeros(1)\n\n for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):\n in_bin = softmax_max.gt(bin_lower.item()) * softmax_max.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n\n if prop_in_bin.item() > 0.0:\n accuracy_in_bin = correctness[in_bin].float().mean()\n avg_confidence_in_bin = softmax_max[in_bin].mean()\n\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n return ece.item() * 100\n\n\ndef one_hot_np(array, num=None):\n if not num:\n num = array.max() + 1\n return np.eye(num)[array]\n\n\n# NLL & Brier Score\ndef calc_nll_brier(logit, label, num_classes):\n label_onehot = one_hot_np(label, num_classes)\n logit_softmax = softmax(logit, -1)\n brier_score = np.mean(np.sum((logit_softmax - label_onehot) ** 2, axis=1))\n\n logit = torch.tensor(logit, dtype=torch.float)\n label = torch.tensor(label, dtype=torch.int)\n logsoftmax = torch.nn.LogSoftmax(dim=1)\n\n log_softmax = logsoftmax(logit)\n nll = calc_nll(log_softmax, label)\n\n return nll.item() * 10, brier_score * 100\n\n# NLL & Brier Score\ndef calc_nll_brier_mc(logit, label, num_classes):\n label_onehot = one_hot_np(label, num_classes)\n logit_softmax = softmax(logit, -1).mean(0)\n brier_score = np.mean(np.sum((logit_softmax - label_onehot) ** 2, axis=1))\n\n logit = logit.mean(0)\n logit = torch.tensor(logit, dtype=torch.float)\n label = torch.tensor(label, dtype=torch.int)\n logsoftmax = torch.nn.LogSoftmax(dim=-1)\n\n log_softmax = logsoftmax(logit)\n nll = calc_nll(log_softmax, label)\n\n return nll.item() * 10, brier_score * 100\n\n\n# Calc NLL\ndef calc_nll(log_softmax, label):\n out = torch.zeros_like(label, dtype=torch.float)\n for i in range(len(label)):\n out[i] = log_softmax[i][label[i]]\n\n return -out.sum() / len(out)\n\n\ndef get_metrics(output, label, num_classes):\n acc = (output.argmax(1) == label).mean() * 100\n ece = calc_ece(softmax(output, -1), label)\n nll, brier = calc_nll_brier(output, label, num_classes)\n return acc, ece, nll, brier\n\n\ndef get_metrics_mc(output, label, num_classes):\n acc = (output.mean(0).argmax(-1) == label).mean() * 100\n ece = calc_ece(softmax(output, -1).mean(0), label)\n nll, brier = calc_nll_brier_mc(output, label, num_classes)\n return acc, ece, nll, brier\n\n\nif __name__ == \"__main__\":\n Acc = Accuracy()\n # a = torch.rand(8, 5, 64, 256, 256).float()\n # b = torch.randint(5, [8, 64, 256, 256])\n a = torch.rand(1, 3, 5)\n b = torch.randint(3, (1, 5))\n print(a)\n print(a.argmax(1))\n print(b)\n # print(Acc(a, b))\n\n dice = MeanIOU(reduction='mean', nlabels=3)\n # dice = Dice(reduction='index', index=0)\n # dice = Dice()\n print(dice(a, b).numpy())\n"
] |
[
[
"torch.abs",
"torch.linspace",
"torch.nn.LogSoftmax",
"torch.randint",
"torch.max",
"torch.nn.CrossEntropyLoss",
"torch.zeros",
"numpy.sum",
"torch.cat",
"numpy.eye",
"torch.zeros_like",
"torch.tensor",
"torch.rand",
"torch.nn.functional.one_hot",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
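`calc_ece` in the `utils/metrics.py` entry above is the usual binned expected-calibration-error estimate: group predictions into confidence bins, then average the |accuracy - confidence| gap weighted by the fraction of samples in each bin. A NumPy-only sketch of the same computation, with hypothetical inputs:

import numpy as np

def expected_calibration_error(probs, labels, n_bins=15):
    # Binned ECE, mirroring calc_ece above; returned in percent.
    probs = np.asarray(probs)        # (N, C) softmax outputs
    labels = np.asarray(labels)      # (N,) integer class labels
    confidences = probs.max(axis=1)
    correct = probs.argmax(axis=1) == labels

    edges = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = (confidences > lo) & (confidences <= hi)
        prop = in_bin.mean()
        if prop > 0:
            acc = correct[in_bin].mean()
            conf = confidences[in_bin].mean()
            ece += np.abs(conf - acc) * prop
    return ece * 100

# Hypothetical usage:
probs = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8]])
labels = np.array([0, 1, 1])
print(expected_calibration_error(probs, labels))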
mil-ad/prospr
|
[
"a92177989f4480f1f2b43a48b3e18a6597ebba6d"
] |
[
"utils.py"
] |
[
"import json\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom pathlib import Path\nfrom time import sleep\nfrom typing import List, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\ndef create_logdir(root: Union[str, Path] = None):\n\n if (root is None) or (root == \"\"):\n root = Path.cwd()\n else:\n root = Path(root)\n\n # When running multiple jobs in parallel (e.g. Slurm) we could get the same\n # timestamp so let's allow ourselves to try a few times\n for _ in range(10):\n try:\n timestamp = datetime.now().strftime(\"%Y-%m-%d-%A-%H-%M-%S\")\n\n log_dir = root / \"runs\" / timestamp\n\n log_dir.mkdir(parents=True)\n except FileExistsError:\n sleep(1)\n continue\n else:\n break\n else:\n raise SystemExit(\"Could not create logdir.\")\n\n return log_dir\n\n\ndef save_repo_status(path: Union[str, Path]):\n path = Path(path)\n\n with (path / \"git_commit.txt\").open(\"w\") as f:\n subprocess.run([\"git\", \"rev-parse\", \"HEAD\"], stdout=f)\n\n with (path / \"workspace_changes.diff\").open(\"w\") as f:\n subprocess.run([\"git\", \"diff\"], stdout=f)\n\n\ndef save_command_line(path: Union[str, Path]):\n path = Path(path)\n\n with open(path / \"command_line.txt\", \"w\") as f:\n f.write(\"python \" + \" \".join(sys.argv))\n\n\ndef set_seed(seed: int, allow_nondeterminism: bool):\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n if allow_nondeterminism is False:\n # This can make the training slower\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef unconcatenate(x: torch.Tensor, orig_list: List[torch.Tensor]):\n result = []\n\n processed = 0\n for ref in orig_list:\n result.append(x[processed : processed + ref.numel()].reshape(ref.shape))\n processed += ref.numel()\n\n return result\n\n\ndef save_checkpoint(\n logdir,\n model: torch.nn.Module,\n optimiser: torch.optim.Optimizer,\n lr_scheduler: torch.optim.lr_scheduler._LRScheduler,\n epoch: int,\n max_checkpoints=None,\n):\n\n state = {\n \"model\": model.state_dict(),\n \"optimiser\": optimiser.state_dict(),\n \"lr_scheduler\": lr_scheduler.state_dict(),\n }\n\n p = logdir / f\"chkpt_epoch_{epoch}.pt\"\n torch.save(state, p)\n\n if max_checkpoints:\n chkpts = sorted(logdir.glob(\"chkpt_e[0-9]*.pt\"), key=os.path.getmtime)\n num_unwanted_chckpts = len(chkpts) - max_checkpoints\n if num_unwanted_chckpts > 0:\n for c in chkpts[0:num_unwanted_chckpts]:\n c.unlink()\n\n\ndef load_checkpoint(\n path: Union[Path, str],\n model: torch.nn.Module,\n optimiser: torch.optim.Optimizer,\n lr_scheduler: torch.optim.lr_scheduler._LRScheduler,\n):\n\n path = Path(path)\n if not path.exists():\n raise FileNotFoundError\n\n print(f\"🛻 Loading from checkpoint file {path}.\")\n\n chkpt = torch.load(path)\n\n model.load_state_dict(chkpt[\"model\"])\n print(\"✅ Loaded the model.\")\n\n optimiser.load_state_dict(chkpt[\"optimiser\"])\n print(\"✅ Loaded the optimiser.\")\n\n lr_scheduler.load_state_dict(chkpt[\"lr_scheduler\"])\n print(\"✅ Loaded the LR scheduler.\")\n\n\n@contextmanager\ndef eval_mode(model: nn.Module):\n \"\"\"\n Sets training mode to False and restores it when exiting.\n \"\"\"\n is_training = model.training\n try:\n model.eval()\n yield model\n finally:\n if is_training:\n model.train()\n\n\nclass Hyperparameters:\n def __init__(self, **kwargs):\n self.from_dict(kwargs)\n\n def from_argparse(self, args):\n self.from_dict(args.__dict__)\n\n def from_dict(self, d):\n for k, v in d.items():\n setattr(self, k, 
v)\n\n def as_dict(self):\n return {k: getattr(self, k) for k in self.__dict__}\n\n def from_json(self, j):\n d = json.loads(j)\n return self.from_dict(d)\n\n def to_json(self, path: Path):\n j = json.dumps(self.as_dict(), indent=4, sort_keys=True)\n path.write_text(j)\n\n def __contains__(self, k):\n return k in self.__dict__\n\n def __str__(self):\n s = [f\"{k}={v}\" for k, v in self.as_dict().items()]\n return \",\".join(s)\n"
] |
[
[
"torch.manual_seed",
"torch.load",
"numpy.random.seed",
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ethanjperez/semanticRetrievalMRS
|
[
"765e00d6e7693e0eaba20ef1407fad0be4a7a92b",
"765e00d6e7693e0eaba20ef1407fad0be4a7a92b"
] |
[
"src/inspect_wikidump/stats_info.py",
"src/fever_models/nli/evidence_adjustment.py"
] |
[
"import json\nimport config\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef flatten_counter_info(counter_dict, range_min=None, range_max=None):\n max_key = max(counter_dict.keys()) if range_max is None else range_max\n min_key = min(counter_dict.keys()) if range_min is None else range_min\n\n print(f\"Range from {min_key} to {max_key}.\")\n\n x = list(range(min_key, max_key + 1))\n y = []\n for i in x:\n if i in counter_dict:\n y.append(counter_dict[i])\n else:\n y.append(0)\n\n return x, y\n\n\nif __name__ == '__main__':\n # with open(config.PDATA_ROOT / \"stats_info/sent_per_para_counter.json\") as in_f:\n # with open(config.PDATA_ROOT / \"stats_info/total_para_counter.json\") as in_f:\n # with open(config.PDATA_ROOT / \"stats_info/total_sent_counter.json\") as in_f:\n # with open(config.PDATA_ROOT / \"stats_info/t_a_counter.json\") as in_f:\n with open(config.PDATA_ROOT / \"stats_info/t_p_counter.json\") as in_f:\n # with open(config.PDATA_ROOT / \"stats_info/t_s_counter.json\") as in_f:\n pain_info = json.load(in_f)\n stats_info = {}\n for k, v in pain_info.items():\n stats_info[int(k)] = v\n\n print(sorted(stats_info, reverse=True))\n\n x, y = flatten_counter_info(stats_info, range_min=0, range_max=80)\n\n print(x)\n print(y)\n print(max(y))\n\n total_num = 0\n total_count = 0\n for count, num in zip(y, x):\n total_num += count * num\n total_count += count\n\n print(\"Ave:\", total_num / total_count)\n\n # exit(-1)\n\n plt.figure(dpi=200)\n sns.barplot(x, y, color=\"lightskyblue\")\n sns.set(rc={\"font.size\": 0.01, \"axes.labelsize\": 5})\n # # Decoration\n # plt.title('Number of Sentences Per Paragraph')\n # plt.title('Number of Sentences Per Article')\n # plt.title('Number of Paragraph Per Article')\n # plt.title('Number of Token Per Article')\n # plt.title('Number of Token Per Paragraph')\n plt.title('Number of Token Per Sentence')\n # plt.legend()\n plt.show()\n # plt.savefig(\"fig.pdf\")\n",
"import json\nimport config\nfrom evaluation import fever_scorer\nimport copy\n\nfrom fever_sampler.nli_new_sampler import get_nli_pair\nfrom utils import common, list_dict_data_tool\nimport numpy as np\n\n\ndef ensemble_nli_results(nli_r_list):\n id2label = {\n 0: \"SUPPORTS\",\n 1: \"REFUTES\",\n 2: \"NOT ENOUGH INFO\",\n }\n\n r_len = len(nli_r_list[0])\n for nli_r in nli_r_list:\n assert len(nli_r) == r_len\n\n new_list = copy.deepcopy(nli_r_list[0])\n logits_list = []\n for i in range(r_len):\n logits_current_logits_list = []\n for nli_r in nli_r_list:\n assert nli_r[i]['oid'] == new_list[i]['oid']\n logits_current_logits_list.append(np.asarray(nli_r[i]['logits'], dtype=np.float32)) # [(3)]\n logits_current_logits = np.stack(logits_current_logits_list, axis=0) # [num, 3]\n current_mean_logits = np.mean(logits_current_logits, axis=0) # [3]\n logits_list.append(current_mean_logits)\n\n logits = np.stack(logits_list, axis=0) # (len, 3)\n y_ = np.argmax(logits, axis=1) # (len)\n assert y_.shape[0] == len(new_list)\n\n for i in range(r_len):\n new_list[i]['predicted_label'] = id2label[y_[i]]\n\n return new_list\n\n\ndef build_submission_file(d_list, filename):\n with open(filename, encoding='utf-8', mode='w') as out_f:\n for item in d_list:\n instance_item = dict()\n instance_item['id'] = int(item['id'])\n instance_item['claim'] = item['claim']\n instance_item['predicted_label'] = item['predicted_label']\n instance_item['predicted_evidence'] = item['predicted_evidence']\n out_f.write(json.dumps(instance_item) + \"\\n\")\n\n\ndef delete_unused_evidence(d_list):\n for item in d_list:\n if item['predicted_label'] == 'NOT ENOUGH INFO':\n item['predicted_evidence'] = []\n\n\ndef evidence_adjustment(tag, sent_file, label_file, filter_prob=0.2, top_k=5):\n dev_sent_filtering_prob = filter_prob\n\n # dev_list = common.load_jsonl(config.FEVER_DEV)\n dev_sent_results_list = common.load_jsonl(sent_file)\n\n dev_fitems, dev_list = get_nli_pair(tag, is_training=False,\n sent_level_results_list=dev_sent_results_list, debug=False,\n sent_top_k=top_k, sent_filter_value=dev_sent_filtering_prob)\n\n cur_eval_results_list = common.load_jsonl(label_file)\n\n ema_results_dict = list_dict_data_tool.list_to_dict(cur_eval_results_list, 'oid')\n copied_dev_list = copy.deepcopy(dev_list)\n list_dict_data_tool.append_item_from_dict_to_list(copied_dev_list, ema_results_dict,\n 'id', 'predicted_label')\n\n mode = {'standard': True}\n # delete_unused_evidence(copied_dev_list)\n strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_list, dev_list,\n mode=mode, max_evidence=5)\n logging_item = {\n 'ss': strict_score, 'ac': acc_score,\n 'pr': pr, 'rec': rec, 'f1': f1,\n }\n\n print(logging_item)\n\n\ndef eval_ensemble():\n sent_file = config.PRO_ROOT / \"data/p_fever/fever_sentence_level/04-24-00-11-19_fever_v0_slevel_retri_(ignore_non_verifiable-True)/fever_s_level_dev_results.jsonl\"\n dev_sent_filtering_prob = 0.01\n tag = 'dev'\n top_k = 5\n\n # dev_list = common.load_jsonl(config.FEVER_DEV)\n dev_sent_results_list = common.load_jsonl(sent_file)\n\n dev_fitems, dev_list = get_nli_pair(tag, is_training=False,\n sent_level_results_list=dev_sent_results_list, debug=False,\n sent_top_k=top_k, sent_filter_value=dev_sent_filtering_prob)\n\n pred_file_list = [\n config.PRO_ROOT / 
\"data/p_fever/fever_nli/04-25-22:02:53_fever_v2_nli_th0.2/ema_i(20000)|e(3)|ss(0.7002700270027002)|ac(0.746024602460246)|pr(0.6141389138913633)|rec(0.8627362736273627)|f1(0.7175148212089147)|seed(12)/nli_dev_label_results_th0.2.jsonl\",\n config.PRO_ROOT / \"data/p_fever/fever_nli/04-26-10:15:39_fever_v2_nli_th0.2/ema_i(14000)|e(2)|ss(0.6991199119911992)|ac(0.7492249224922493)|pr(0.7129412941294097)|rec(0.8338583858385838)|f1(0.7686736484619933)|seed(12)/nli_dev_label_results_th0.2.jsonl\",\n config.PRO_ROOT / \"data/p_fever/fever_nli/04-27-10:03:27_fever_v2_nli_th0.2/ema_i(26000)|e(3)|ss(0.6958695869586958)|ac(0.7447744774477447)|pr(0.7129412941294097)|rec(0.8338583858385838)|f1(0.7686736484619933)|seed(12)/nli_dev_label_results_th0.2.jsonl\",\n ]\n pred_d_list = [common.load_jsonl(file) for file in pred_file_list]\n final_list = ensemble_nli_results(pred_d_list)\n pred_list = final_list\n\n ema_results_dict = list_dict_data_tool.list_to_dict(pred_list, 'oid')\n copied_dev_list = copy.deepcopy(dev_list)\n list_dict_data_tool.append_item_from_dict_to_list(copied_dev_list, ema_results_dict,\n 'id', 'predicted_label')\n\n dev_list = common.load_jsonl(config.FEVER_DEV)\n mode = {'standard': True}\n strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_list, dev_list,\n mode=mode, max_evidence=5)\n logging_item = {\n 'ss': strict_score, 'ac': acc_score,\n 'pr': pr, 'rec': rec, 'f1': f1,\n }\n\n print(logging_item)\n\n\nif __name__ == '__main__':\n eval_ensemble()\n # Get sentence file:\n # evidence_adjustment('dev',\n # config.PRO_ROOT / \"data/p_fever/fever_sentence_level/04-24-00-11-19_fever_v0_slevel_retri_(ignore_non_verifiable-True)/fever_s_level_dev_results.jsonl\",\n # config.PRO_ROOT / \"data/p_fever/fever_nli/04-25-22:02:53_fever_v2_nli_th0.2/ema_i(20000)|e(3)|ss(0.7002700270027002)|ac(0.746024602460246)|pr(0.6141389138913633)|rec(0.8627362736273627)|f1(0.7175148212089147)|seed(12)/nli_dev_label_results_th0.2.jsonl\",\n # )\n\n # dev_list = common.load_jsonl(config.FEVER_DEV)\n # prediction_file = config.PRO_ROOT / \"data/p_fever/fever_nli/04-25-22:02:53_fever_v2_nli_th0.2/ema_i(20000)|e(3)|ss(0.7002700270027002)|ac(0.746024602460246)|pr(0.6141389138913633)|rec(0.8627362736273627)|f1(0.7175148212089147)|seed(12)/nli_dev_cp_results_th0.2.jsonl\"\n # pred_list = common.load_jsonl(prediction_file)\n # mode = {'standard': True}\n # strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(pred_list, dev_list,\n # mode=mode, max_evidence=5)\n # logging_item = {\n # 'ss': strict_score, 'ac': acc_score,\n # 'pr': pr, 'rec': rec, 'f1': f1,\n # }\n #\n # print(logging_item)\n\n\n # build_submission_file(\n # common.load_jsonl(config.PRO_ROOT / \"data/p_fever/fever_nli/04-25-22:02:53_fever_v2_nli_th0.2/ema_i(20000)|e(3)|ss(0.7002700270027002)|ac(0.746024602460246)|pr(0.6141389138913633)|rec(0.8627362736273627)|f1(0.7175148212089147)|seed(12)/nli_test_cp_results_th0.2.jsonl\"),\n # \"pred.jsonl\",\n # )"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure"
],
[
"numpy.asarray",
"numpy.argmax",
"numpy.mean",
"numpy.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lzx1413/PytorchSSD
|
[
"320fe34f394f40aaa3b8a34d1ceed46e7ffecd46"
] |
[
"models/SSD_HarDNet85.py"
] |
[
"import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom layers import *\n\n\n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\nclass Flatten(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, x):\n return x.view(x.data.size(0),-1)\n\n\n\n\nclass CombConvLayer(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel=1, stride=1, dropout=0.1, bias=False):\n super().__init__()\n self.add_module('layer1',ConvLayer(in_channels, out_channels, kernel))\n self.add_module('layer2',DWConvLayer(out_channels, out_channels, stride=stride))\n\n def forward(self, x):\n return super().forward(x)\n\nclass DWConvLayer(nn.Sequential):\n def __init__(self, in_channels, out_channels, stride=1, bias=False):\n super().__init__()\n out_ch = out_channels\n\n groups = in_channels\n kernel = 3\n #print(kernel, 'x', kernel, 'x', out_channels, 'x', out_channels, 'DepthWise')\n\n self.add_module('dwconv', nn.Conv2d(groups, groups, kernel_size=3,\n stride=stride, padding=1, groups=groups, bias=bias))\n\n self.add_module('norm', nn.BatchNorm2d(groups))\n def forward(self, x):\n return super().forward(x)\n \nclass ConvLayer(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel=3, stride=1, padding=0, bias=False):\n super().__init__()\n self.out_channels = out_channels\n out_ch = out_channels\n groups = 1\n #print(kernel, 'x', kernel, 'x', in_channels, 'x', out_channels)\n pad = kernel//2 if padding == 0 else padding\n self.add_module('conv', nn.Conv2d(in_channels, out_ch, kernel_size=kernel,\n stride=stride, padding=pad, groups=groups, bias=bias))\n self.add_module('norm', nn.BatchNorm2d(out_ch))\n self.add_module('relu', nn.ReLU(True))\n def forward(self, x):\n return super().forward(x)\n\n\nclass HarDBlock(nn.Module):\n def get_link(self, layer, base_ch, growth_rate, grmul):\n if layer == 0:\n return base_ch, 0, []\n out_channels = growth_rate\n link = []\n for i in range(10):\n dv = 2 ** i\n if layer % dv == 0:\n k = layer - dv\n link.append(k)\n if i > 0:\n out_channels *= grmul\n out_channels = int(int(out_channels + 1) / 2) * 2\n in_channels = 0\n for i in link:\n ch,_,_ = self.get_link(i, base_ch, growth_rate, grmul)\n in_channels += ch\n return out_channels, in_channels, link\n\n def get_out_ch(self):\n return self.out_channels\n\n def __init__(self, in_channels, growth_rate, grmul, n_layers, keepBase=False, residual_out=False, dwconv=False):\n super().__init__()\n self.keepBase = keepBase\n self.links = []\n layers_ = []\n self.out_channels = 0\n \n for i in range(n_layers):\n outch, inch, link = self.get_link(i+1, in_channels, growth_rate, grmul)\n self.links.append(link)\n use_relu = residual_out\n if dwconv:\n layers_.append(CombConvLayer(inch, outch))\n else:\n layers_.append(ConvLayer(inch, outch))\n\n if (i % 2 == 0) or (i == n_layers - 1):\n self.out_channels += outch\n #print(\"Blk out =\",self.out_channels)\n self.layers = nn.ModuleList(layers_)\n\n def forward(self, x):\n layers_ = [x]\n for layer in range(len(self.layers)):\n link = self.links[layer]\n tin = []\n for i in link:\n tin.append(layers_[i])\n x = torch.cat(tin, 1)\n out = self.layers[layer](x)\n layers_.append(out)\n t = len(layers_)\n out_ = []\n for i in range(t):\n if (i == 0 and self.keepBase) or \\\n (i == t-1) or (i%2 == 1):\n out_.append(layers_[i])\n out = torch.cat(out_, 1)\n return out\n \n \nclass HarDNetBase(nn.Module):\n def __init__(self, depth_wise=False):\n 
super().__init__()\n first_ch = [48, 96]\n second_kernel = 3\n \n ch_list = [ 192, 256, 320, 480, 720]\n grmul = 1.7\n gr = [ 24, 24, 28, 36, 48]\n n_layers = [ 8, 16, 16, 16, 16]\n\n if depth_wise:\n second_kernel = 1\n first_ch = [24, 48]\n\n blks = len(n_layers)\n self.base = nn.ModuleList([])\n\n # First Layer: Standard Conv3x3, Stride=2\n self.base.append (\n ConvLayer(in_channels=3, out_channels=first_ch[0], kernel=3,\n stride=2, bias=False) )\n\n # Second Layer\n self.base.append ( ConvLayer(first_ch[0], first_ch[1], kernel=second_kernel) )\n\n # Maxpooling or DWConv3x3 downsampling\n self.base.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n\n # Build all HarDNet blocks\n ch = first_ch[1]\n for i in range(blks):\n blk = HarDBlock(ch, gr[i], grmul, n_layers[i], dwconv=depth_wise)\n ch = blk.get_out_ch()\n self.base.append ( blk )\n\n self.base.append ( ConvLayer(ch, ch_list[i], kernel=1) )\n ch = ch_list[i]\n if i== 0:\n self.base.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))\n elif i != blks-1 and i != 1 and i != 3:\n self.base.append(nn.MaxPool2d(kernel_size=2, stride=2))\n\n\n\nclass SSD(nn.Module):\n \"\"\"Single Shot Multibox Architecture\n The network is composed of a base VGG network followed by the\n added multibox conv layers. Each multibox layer branches into\n 1) conv2d for class conf scores\n 2) conv2d for localization predictions\n 3) associated priorbox layer to produce default bounding\n boxes specific to the layer's feature map size.\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n\n Args:\n phase: (string) Can be \"test\" or \"train\"\n base: Harmonic DenseNet 70bn for input, \n extras: extra layers that feed to multibox loc and conf layers\n head: \"multibox head\" consists of loc and conf conv layers\n \"\"\"\n\n def __init__(self, extras, head, num_classes,size):\n super(SSD, self).__init__()\n self.num_classes = num_classes\n self.size = size\n\n\n self.base = HarDNetBase().base\n\n # Additional bridge model without pretaining\n # (please initialize this module before training)\n self.bridge = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1),\n ConvLayer(720, 960),\n ConvLayer(960, 720, kernel=1) )\n \n\n # Layer learns to scale the l2 normalized features from conv4_3\n self.dropout = nn.Dropout2d( p=0.1, inplace=False )\n self.extras = nn.ModuleList(extras)\n self.L2Norm = L2Norm(320, 20)\n\n self.loc = nn.ModuleList(head[0])\n self.conf = nn.ModuleList(head[1])\n\n self.softmax = nn.Softmax()\n\n def forward(self, x, test=False):\n \"\"\"Applies network layers and ops on input image(s) x.\n\n Args:\n x: input image or batch of images. Shape: [batch,3*batch,300,300].\n\n Return:\n Depending on phase:\n test:\n Variable(tensor) of output class label predictions,\n confidence score, and corresponding location predictions for\n each object detected. 
Shape: [batch,topk,7]\n\n train:\n list of concat outputs from:\n 1: confidence layers, Shape: [batch*num_priors,num_classes]\n 2: localization layers, Shape: [batch,num_priors*4]\n 3: priorbox layers, Shape: [2,num_priors*4]\n \"\"\"\n sources = list()\n loc = list()\n conf = list()\n\n for k in range(10):\n x = self.base[k](x)\n s = self.L2Norm(x)\n sources.append(s)\n\n for k in range(10, len(self.base)):\n x = self.base[k](x)\n # Additional bridge model\n x = self.bridge(x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if test:\n output = (\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n )\n return output\n\n def load_weights(self, base_file):\n other, ext = os.path.splitext(base_file)\n if ext == '.pkl' or '.pth':\n print('Loading weights into state dict...')\n self.load_state_dict(torch.load(base_file, map_location=lambda storage, loc: storage))\n print('Finished!')\n else:\n print('Sorry only .pth and .pkl files supported.')\n\n\ndef add_extras(cfg, i, batch_norm=False, size=300):\n # Extra layers added to VGG for feature scaling\n layers = []\n in_channels = i\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1],\n kernel_size=(1, 3)[flag], stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n if size == 512:\n layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))\n layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))\n return layers\n\n\ndef multibox( extra_layers, cfg, num_classes):\n loc_layers = []\n conf_layers = []\n vgg_source = [24, -2]\n ch = [320, 720]\n source = [0, 1]\n for k, v in enumerate(source):\n loc_layers += [nn.Conv2d(ch[v],\n cfg[k] * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(ch[v],\n cfg[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n loc_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * num_classes, kernel_size=3, padding=1)]\n return extra_layers, (loc_layers, conf_layers)\n\n\n\n\nextras = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],\n}\nmbox = {\n '300': [6, 6, 6, 6, 4, 4], # number of boxes per feature map location\n '512': [6, 6, 6, 6, 6, 4, 4],\n}\n\n\ndef build_net(size=300, num_classes=21):\n if size != 300 and size != 512:\n print(\"Error: Sorry only SSD300 and SSD512 is supported currently!\")\n return\n\n return SSD(*multibox(add_extras(extras[str(size)], 720, size=size),\n mbox[str(size)], num_classes), num_classes=num_classes,size=size)\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Dropout2d",
"torch.cat",
"torch.load",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ewlumpkin/suzieq
|
[
"9d55a46a631f01535d5b8ab1c0b870f840bbc526",
"9d55a46a631f01535d5b8ab1c0b870f840bbc526",
"9d55a46a631f01535d5b8ab1c0b870f840bbc526"
] |
[
"suzieq/engines/pandas/bgp.py",
"tests/integration/test_sqcmds.py",
"suzieq/poller/services/ospfIf.py"
] |
[
"import pandas as pd\nimport numpy as np\n\nfrom .engineobj import SqPandasEngine\nfrom suzieq.utils import build_query_str, humanize_timestamp\n\n\nclass BgpObj(SqPandasEngine):\n\n @staticmethod\n def table_name():\n return 'bgp'\n\n def get(self, **kwargs):\n \"\"\"Replacing the original interface name in returned result\"\"\"\n\n addnl_fields = kwargs.pop('addnl_fields', [])\n columns = kwargs.get('columns', ['default'])\n vrf = kwargs.pop('vrf', None)\n peer = kwargs.pop('peer', None)\n hostname = kwargs.pop('hostname', None)\n user_query = kwargs.pop('query_str', None)\n\n drop_cols = ['origPeer', 'peerHost']\n addnl_fields.extend(['origPeer'])\n sch = self.schema\n fields = sch.get_display_fields(columns)\n\n for col in ['peerIP', 'updateSource', 'state', 'namespace', 'vrf',\n 'peer', 'hostname']:\n if col not in fields:\n addnl_fields.append(col)\n drop_cols.append(col)\n\n try:\n df = super().get(addnl_fields=addnl_fields, **kwargs)\n except KeyError as ex:\n if ('afi' in str(ex)) or ('safi' in str(ex)):\n df = pd.DataFrame(\n {'error':\n ['ERROR: Migrate BGP data first using sq-coalescer']})\n return df\n\n if df.empty:\n return df\n\n if 'afiSafi' in columns or (columns == ['*']):\n df['afiSafi'] = df['afi'] + ' ' + df['safi']\n query_str = build_query_str([], sch, vrf=vrf, peer=peer,\n hostname=hostname,\n ignore_regex=False)\n if 'peer' in df.columns:\n df['peer'] = np.where(df['origPeer'] != \"\",\n df['origPeer'], df['peer'])\n\n # Convert old data into new 2.0 data format\n if 'peerHostname' in df.columns:\n mdf = self._get_peer_matched_df(df)\n drop_cols = [x for x in drop_cols if x in mdf.columns]\n drop_cols.extend(list(mdf.filter(regex='_y')))\n else:\n mdf = df\n\n mdf = self._handle_user_query_str(mdf, user_query)\n\n if query_str:\n return mdf.query(query_str).drop(columns=drop_cols,\n errors='ignore')\n else:\n return mdf.drop(columns=drop_cols, errors='ignore')\n\n def summarize(self, **kwargs) -> pd.DataFrame:\n \"\"\"Summarize key information about BGP\"\"\"\n\n self._init_summarize(self.iobj._table, **kwargs)\n if self.summary_df.empty or ('error' in self.summary_df.columns):\n return self.summary_df\n\n self.summary_df['afiSafi'] = (\n self.summary_df['afi'] + ' ' + self.summary_df['safi'])\n\n afi_safi_count = self.summary_df.groupby(by=['namespace'])['afiSafi'] \\\n .nunique()\n\n self.summary_df = self.summary_df \\\n .set_index(['namespace', 'hostname', 'vrf',\n 'peer']) \\\n .query('~index.duplicated(keep=\"last\")') \\\n .reset_index()\n self.ns = {i: {} for i in self.summary_df['namespace'].unique()}\n self.nsgrp = self.summary_df.groupby(by=[\"namespace\"],\n observed=True)\n\n self._summarize_on_add_field = [\n ('deviceCnt', 'hostname', 'nunique'),\n ('totalPeerCnt', 'peer', 'count'),\n ('uniqueAsnCnt', 'asn', 'nunique'),\n ('uniqueVrfsCnt', 'vrf', 'nunique')\n ]\n\n self._summarize_on_add_with_query = [\n ('failedPeerCnt', 'state == \"NotEstd\"', 'peer'),\n ('iBGPPeerCnt', 'asn == peerAsn', 'peer'),\n ('eBGPPeerCnt', 'asn != peerAsn', 'peer'),\n ('rrClientPeerCnt', 'rrclient.str.lower() == \"true\"', 'peer',\n 'count'),\n ]\n\n self._gen_summarize_data()\n\n {self.ns[i].update({'activeAfiSafiCnt': afi_safi_count[i]})\n for i in self.ns.keys()}\n self.summary_row_order.append('activeAfiSafiCnt')\n\n self.summary_df['estdTime'] = humanize_timestamp(\n self.summary_df.estdTime,\n self.cfg.get('analyzer', {}).get('timezone', None))\n\n self.summary_df['estdTime'] = (\n self.summary_df['timestamp'] - self.summary_df['estdTime'])\n 
self.summary_df['estdTime'] = self.summary_df['estdTime'] \\\n .apply(lambda x: x.round('s'))\n # Now come the BGP specific ones\n established = self.summary_df.query(\"state == 'Established'\") \\\n .groupby(by=['namespace'])\n\n uptime = established[\"estdTime\"]\n rx_updates = established[\"updatesRx\"]\n tx_updates = established[\"updatesTx\"]\n self._add_stats_to_summary(uptime, 'upTimeStat')\n self._add_stats_to_summary(rx_updates, 'updatesRxStat')\n self._add_stats_to_summary(tx_updates, 'updatesTxStat')\n\n self.summary_row_order.extend(['upTimeStat', 'updatesRxStat',\n 'updatesTxStat'])\n\n self._post_summarize()\n return self.ns_df.convert_dtypes()\n\n def _get_peer_matched_df(self, df) -> pd.DataFrame:\n \"\"\"Get a BGP dataframe that also contains a session's matching peer\"\"\"\n\n if 'peerHostname' not in df.columns:\n return df\n\n # We have to separate out the Established and non Established entries\n # for the merge. Otherwise we end up with a mess\n df_1 = df[['namespace', 'hostname', 'vrf', 'peer', 'peerIP',\n 'updateSource']] \\\n .drop_duplicates() \\\n .reset_index(drop=True)\n df_2 = df[['namespace', 'hostname', 'vrf', 'updateSource']] \\\n .drop_duplicates() \\\n .reset_index(drop=True)\n\n mdf = df_1.merge(df_2,\n left_on=['namespace', 'peerIP'],\n right_on=['namespace', 'updateSource'],\n suffixes=('', '_y')) \\\n .drop_duplicates(subset=['namespace', 'hostname', 'vrf',\n 'peerIP']) \\\n .rename(columns={'hostname_y': 'peerHost'}) \\\n .fillna(value={'peerHostname': '', 'peerHost': ''}) \\\n .reset_index(drop=True)\n\n df = df.merge(mdf[['namespace', 'hostname', 'vrf', 'peer',\n 'peerHost']],\n on=['namespace', 'hostname', 'vrf', 'peer'], how='left')\n\n df['peerHostname'] = np.where((df['peerHostname'] == '') &\n (df['state'] == \"Established\"),\n df['peerHost'],\n df['peerHostname'])\n df = df.fillna(value={'peerHostname': ''}) \\\n .drop(columns=['peerHost'])\n\n for i in df.select_dtypes(include='category'):\n df[i].cat.add_categories('', inplace=True)\n\n return df\n\n def aver(self, **kwargs) -> pd.DataFrame:\n \"\"\"BGP Assert\"\"\"\n\n def _check_if_state(row, if_df):\n\n if not if_df.empty:\n thisif = if_df.query(f'namespace==\"{row.namespace}\" and '\n f'hostname==\"{row.hostname}\" and '\n f'ifname==\"{row.ifname}\"')\n if not thisif.empty:\n if thisif.adminState.unique()[0] != 'up':\n return ['interface admin down']\n elif thisif.state.unique()[0] != 'up':\n return ['interface down']\n else:\n return []\n\n return []\n\n assert_cols = [\"namespace\", \"hostname\", \"vrf\", \"peer\", \"peerHostname\",\n \"afi\", \"safi\", \"asn\", \"state\", \"peerAsn\", \"bfdStatus\",\n \"reason\", \"notificnReason\", \"afisAdvOnly\", 'ifname',\n \"afisRcvOnly\", \"peerIP\", \"updateSource\", \"timestamp\"]\n\n kwargs.pop(\"columns\", None) # Loose whatever's passed\n status = kwargs.pop(\"status\", 'all')\n\n df = self.get(columns=assert_cols, state='!dynamic', **kwargs)\n if 'error' in df:\n return df\n\n if df.empty:\n if status != \"pass\":\n df['assert'] = 'fail'\n df['assertReason'] = 'No data'\n return df\n\n df = self._get_peer_matched_df(df)\n if df.empty:\n if status != \"pass\":\n df['assert'] = 'fail'\n df['assertReason'] = 'No data'\n return df\n\n # We can get rid of sessions with duplicate peer info since we're\n # interested only in session info here\n df = df.drop_duplicates(\n subset=['namespace', 'hostname', 'vrf', 'peer'])\n\n failed_df = df.query(\"state != 'Established'\").reset_index(drop=True)\n passed_df = df.query(\"state == 
'Established'\").reset_index(drop=True)\n\n # Get the interface information\n if_df = self._get_table_sqobj('interfaces').get(\n namespace=failed_df.namespace.unique().tolist(),\n hostname=failed_df.hostname.unique().tolist(),\n ifname=failed_df.ifname.unique().tolist(),\n columns=['namespace', 'hostname', 'ifname', 'state', 'adminState']\n )\n\n failed_df['assertReason'] = [[] for _ in range(len(failed_df))]\n passed_df['assertReason'] = [[] for _ in range(len(passed_df))]\n\n if not failed_df.empty:\n # For not established entries, check if route/ARP entry exists\n failed_df['assertReason'] += failed_df.apply(\n lambda x, ifdf: _check_if_state(x, ifdf),\n args=(if_df,), axis=1)\n\n failed_df['assertReason'] += failed_df.apply(\n lambda x: [\"asn mismatch\"]\n if (x['peerHostname'] and ((x[\"asn\"] != x[\"peerAsn_y\"]) or\n (x['asn_y'] != x['peerAsn'])))\n else [], axis=1)\n\n failed_df['assertReason'] += failed_df.apply(\n lambda x: [f\"{x['reason']}:{x['notificnReason']}\"]\n if ((x['reason'] and x['reason'] != 'None' and\n x['reason'] != \"No error\"))\n else [], axis=1)\n\n # Get list of peer IP addresses for peer not in Established state\n # Returning to performing checks even if we didn't get LLDP/Intf info\n\n passed_df['assertReason'] += passed_df.apply(\n lambda x: ['Not all Afi/Safis enabled']\n if x['afisAdvOnly'].any() or x['afisRcvOnly'].any() else [],\n axis=1)\n\n df = pd.concat([failed_df, passed_df])\n\n df['assert'] = df.apply(lambda x: 'pass'\n if not len(x.assertReason) else 'fail',\n axis=1)\n\n result = df[['namespace', 'hostname', 'vrf', 'peer', 'asn',\n 'peerAsn', 'state', 'peerHostname', 'assert',\n 'assertReason', 'timestamp']] \\\n .explode(column=\"assertReason\") \\\n .fillna({'assertReason': '-'})\n\n if status == \"fail\":\n return result.query('assertReason != \"-\"')\n elif status == \"pass\":\n return result.query('assertReason == \"-\"')\n\n return result\n",
"from _pytest.mark.structures import Mark, MarkDecorator\nimport pytest\nfrom suzieq.cli.sqcmds import *\nfrom nubia import context\nimport os\nfrom tests.conftest import (commands, load_up_the_tests, tables, DATADIR,\n create_dummy_config_file)\nimport json\nfrom tests.conftest import setup_sqcmds\nimport pandas as pd\n\nfrom .utils import assert_df_equal\n\nfrom suzieq.sqobjects import get_sqobject, get_tables\n\n\nbasic_verbs = ['show', 'summarize']\n\n\n# TODO\n# columns length, column names?\n# specific data?\[email protected]\[email protected](\"command, verbs, args\", [\n ('AddressCmd', basic_verbs, [None, None]),\n ('EvpnVniCmd', basic_verbs + ['aver'], [None, None, None]),\n ('InterfaceCmd', basic_verbs + ['top', 'aver'],\n [None, None, None]),\n ('LldpCmd', basic_verbs, [None, None]),\n ('MacCmd', basic_verbs, [None, None]),\n ('MlagCmd', basic_verbs, [None, None, None]),\n ('OspfCmd', basic_verbs + ['aver'], [None, None, None, None]),\n ('RouteCmd', basic_verbs + ['lpm'],\n [None, None, {'address': '10.0.0.1'}]),\n ('DevconfigCmd', basic_verbs, [None, None]),\n ('TopologyCmd', basic_verbs, [None, None]),\n ('TableCmd', ['show', 'describe'], [None, {'table': 'device'}]),\n # ('TopcpuCmd', basic_verbs, [None, None]),\n # ('TopmemCmd', basic_verbs, [None, None]),\n ('VlanCmd', basic_verbs, [None, None])\n])\ndef test_commands(setup_nubia, command, verbs, args):\n \"\"\" runs through all of the commands for each of the sqcmds\n command: one of the sqcmds\n verbs: for each command, the list of verbs\n args: arguments\n \"\"\"\n for v, arg in zip(verbs, args):\n _test_command(command, v, arg)\n\n\ndef _test_command(cmd, verb, arg, filter=None):\n\n s = execute_cmd(cmd, verb, arg, filter)\n assert isinstance(s, int)\n return s\n\n\ndef test_summary_exception(setup_nubia):\n s = None\n with pytest.raises(AttributeError):\n s = execute_cmd('DeviceCmd', 'foop', None, )\n assert s is None\n\n\ngood_commands = commands[:]\n\ncolumn_commands = good_commands[:]\n\n\[email protected](\"cmd\", column_commands)\ndef test_all_columns(setup_nubia, cmd):\n s = _test_command(cmd, 'show', None, filter={'columns': '*'})\n assert s == 0\n\n\[email protected]\[email protected](\"cmd\", good_commands)\ndef test_hostname_show_filter(setup_nubia, cmd):\n s = _test_command(cmd, 'show', None, {'hostname': 'leaf01'})\n assert s == 0\n\n\[email protected]\[email protected](\"cmd\", good_commands)\ndef test_engine_show_filter(setup_nubia, cmd):\n s = _test_command(cmd, 'show', None, {'engine': 'pandas'})\n assert s == 0\n\n\[email protected]\[email protected](\"cmd\", good_commands)\ndef test_namespace_show_filter(setup_nubia, cmd):\n s = _test_command(cmd, 'show', None, {'namespace': 'dual-bgp'})\n assert s == 0\n\n\[email protected]\[email protected](\"cmd\", good_commands)\ndef test_view_show_filter(setup_nubia, cmd):\n s = _test_command(cmd, 'show', None, {'view': 'all'})\n assert s == 0\n\n\[email protected]\[email protected](\"cmd\", good_commands)\ndef test_start_time_show_filter(setup_nubia, cmd):\n s = _test_command(cmd, 'show', None, {\n 'start_time': '2020-01-01 21:43:30.048'})\n assert s == 0\n\n\nshow_columns_commands = good_commands[:]\n\n\[email protected]\[email protected]\[email protected](\"cmd\", show_columns_commands)\ndef test_columns_show_filter(setup_nubia, cmd):\n s = _test_command(cmd, 'show', None, {'columns': 'namespace'})\n assert s == 0\n\n\nbad_hostname_commands = commands[:]\n\n\[email protected]\[email protected](\"cmd\", bad_hostname_commands)\ndef 
test_bad_show_hostname_filter(setup_nubia, cmd):\n filter = {'hostname': 'unknown'}\n _ = _test_bad_show_filter(cmd, filter)\n\n\nbad_engine_commands = commands[:]\nbad_engine_commands.pop(4) # EvpnVniCmd\nbad_engine_commands.pop(8) # Ospfcmd\n\n\[email protected]\[email protected](\"cmd\", bad_engine_commands)\ndef test_bad_show_engine_filter(setup_nubia, cmd):\n filter = {'engine': 'unknown'}\n _ = _test_bad_show_filter(cmd, filter)\n\n\nbad_start_time_commands = commands[:]\n\n\n# this because I need to xfail these for this bug, I can't xfail individual\n# ones for the filenotfound\n# so I must remove those from the stack\nbad_start_time_commands.pop(3) # EvpnVniCmd\nbad_start_time_commands.pop(7) # Ospfcmd\n\n\[email protected]\[email protected](\"cmd\", bad_start_time_commands)\ndef test_bad_start_time_filter(setup_nubia, cmd):\n filter = {'start_time': 'unknown'}\n _ = _test_bad_show_filter(cmd, filter, True)\n\n\nbad_namespace_commands = bad_hostname_commands[:]\n\n\n# TODO\n# this is just like hostname filtering\[email protected]\[email protected](\"cmd\", bad_namespace_commands)\ndef test_bad_show_namespace_filter(setup_nubia, cmd):\n filter = {'namespace': 'unknown'}\n _ = _test_bad_show_filter(cmd, filter)\n\n\ndef _test_bad_show_filter(cmd, filter, assert_error=False):\n assert len(filter) == 1\n s = _test_command(cmd, 'show', None, filter=filter)\n if assert_error:\n assert s == 1\n else:\n assert s == 0\n return s\n\n\ngood_filters = [{'hostname': ['leaf01']}]\n\n\n# TODO?\n# these only check good cases, I'm assuming the bad cases work the same\n# as the rest of the filtering, and that is too messy to duplicate right now\[email protected]\[email protected]('cmd', good_commands)\ndef test_context_filtering(setup_nubia, cmd):\n for filter in good_filters:\n s = _test_context_filtering(cmd, filter)\n assert s == 0\n\n\ncontext_namespace_commands = commands[:]\n\n\[email protected]\[email protected]('cmd', context_namespace_commands)\ndef test_context_namespace_filtering(setup_nubia, cmd):\n s = _test_context_filtering(cmd, {'namespace': ['dual-bgp']})\n # this has to be list or it will fail, different from any other filtering,\n # namespace is special because it's part of the directory structure\n assert s == 0\n\n\[email protected]\[email protected]('cmd', good_commands)\ndef test_context_engine_filtering(setup_nubia, cmd):\n s = _test_context_filtering(cmd, {'engine': 'pandas'})\n assert s == 0\n\n\[email protected]\[email protected]('cmd', good_commands)\ndef test_context_start_time_filtering(setup_nubia, cmd):\n # before the latest data, so might be more data than the default\n s = _test_context_filtering(cmd, {'start_time': '2020-01-20 0:0:0'})\n assert s == 0\n\n\[email protected]('table', tables)\ndef test_table_describe(setup_nubia, table):\n out = _test_command('TableCmd', 'describe', {\"table\": table})\n assert out == 0\n\n\n@ pytest.mark.parametrize('table',\n [pytest.param(\n x,\n marks=MarkDecorator(Mark(x, [], {})))\n for x in get_tables()\n if x not in ['path', 'topmem', 'topcpu',\n 'topmem', 'time', 'ifCounters',\n 'network', 'inventory']\n ])\n@ pytest.mark.parametrize('datadir', DATADIR)\ndef test_sqcmds_regex_hostname(table, datadir):\n\n cfgfile = create_dummy_config_file(datadir=datadir)\n\n df = get_sqobject(table)(config_file=cfgfile).get(\n hostname=['~leaf.*', '~exit.*'])\n\n if table == 'tables':\n if 'junos' in datadir:\n assert df[df.table == 'device']['deviceCnt'].tolist() == [4]\n elif not any(x in datadir for x in ['vmx', 'mixed']):\n # The 
hostnames for these output don't match the hostname regex\n assert df[df.table == 'device']['deviceCnt'].tolist() == [6]\n return\n\n if not any(x in datadir for x in ['vmx', 'mixed', 'junos']):\n assert not df.empty\n if table not in ['mlag']:\n assert set(df.hostname.unique()) == set(['leaf01', 'leaf02',\n 'leaf03', 'leaf04',\n 'exit01', 'exit02'])\n else:\n assert set(df.hostname.unique()) == set(['leaf01', 'leaf02',\n 'leaf03', 'leaf04'])\n elif 'junos' in datadir:\n if table == 'mlag':\n # Our current Junos tests don't have MLAG\n return\n assert not df.empty\n if table == 'macs':\n assert set(df.hostname.unique()) == set(['leaf01', 'leaf02'])\n else:\n assert set(df.hostname.unique()) == set(['leaf01', 'leaf02',\n 'exit01', 'exit02'])\n\n\n@ pytest.mark.parametrize('table',\n [pytest.param(\n x,\n marks=MarkDecorator(Mark(x, [], {})))\n for x in get_tables()\n if x not in ['path', 'inventory']\n ])\n@ pytest.mark.parametrize('datadir', ['tests/data/multidc/parquet-out/'])\ndef test_sqcmds_regex_namespace(table, datadir):\n\n cfgfile = create_dummy_config_file(datadir=datadir)\n\n df = get_sqobject(table)(config_file=cfgfile).get(\n hostname=['~leaf.*', '~exit.*'], namespace=['~ospf.*'])\n\n assert not df.empty\n if table == 'tables':\n assert df[df.table == 'device']['namespaces'].tolist() == [2]\n return\n\n if table in ['mlag', 'evpnVni', 'devconfig', 'bgp']:\n # why devconfig is empty for ospf-single needs investigation\n assert set(df.namespace.unique()) == set(['ospf-ibgp'])\n else:\n assert set(df.namespace.unique()) == set(['ospf-ibgp', 'ospf-single'])\n\n if table in ['network']:\n # network show has no hostname\n return\n\n if table not in ['mlag']:\n assert set(df.hostname.unique()) == set(['leaf01', 'leaf02',\n 'leaf03', 'leaf04',\n 'exit01', 'exit02'])\n else:\n assert set(df.hostname.unique()) == set(['leaf01', 'leaf02',\n 'leaf03', 'leaf04'])\n\n\ndef _test_context_filtering(cmd, filter):\n assert len(filter) == 1\n ctx = context.get_context()\n k = next(iter(filter))\n v = filter[k]\n setattr(ctx, k, v)\n s = _test_command(cmd, 'show', None)\n setattr(ctx, k, \"\") # reset ctx back to no filtering\n return s\n\n\ndef execute_cmd(cmd, verb, arg, filter=None):\n # expect the cmd class are in the module cmd and also named cmd\n module = globals()[cmd]\n instance = getattr(module, cmd)\n if filter is None:\n filter = {}\n # filter = {'format': 'dataframe'}\n # else:\n # filter['format'] = 'dataframe'\n instance = instance(**filter)\n\n c = getattr(instance, verb)\n if arg is not None:\n return c(**arg)\n else:\n return c()\n\n\ndef _test_sqcmds(testvar, context_config):\n output, error = setup_sqcmds(testvar, context_config)\n\n if output:\n try:\n jout = json.loads(output.decode('utf-8').strip())\n except json.JSONDecodeError:\n jout = output\n\n if 'ignore-columns' in testvar:\n ignore_cols = testvar['ignore-columns'].split()\n else:\n ignore_cols = []\n\n if 'output' in testvar:\n if testvar.get('format', '') == \"text\":\n assert output.decode('utf8') == testvar['output']\n return\n\n # pandas uses ujson and needs to escape \"/\" in any string its trying\n # to decode. 
This is true in the case of NXOS' LLDP description which\n # contains a URL causing read_json to abort with weird error messages.\n expected_df = pd.read_json(\n testvar['output'].strip().replace('/', '\\/'))\n\n try:\n got_df = pd.read_json(output.decode('utf8').strip())\n except AttributeError:\n if output:\n got_df = pd.read_json(output)\n else:\n got_df = pd.DataFrame()\n\n # expected_df.sort_values(by=expected_df.columns[:1].tolist()) \\\n # .reset_index(drop=True)\n # got_df = got_df.sort_values(by=got_df.columns[:1].tolist()) \\\n # .reset_index(drop=True)\n # assert(expected_df.shape == got_df.shape)\n assert_df_equal(expected_df, got_df, ignore_cols)\n\n elif not error and 'xfail' in testvar:\n # this was marked to fail, but it succeeded so we must return\n return\n elif error and 'xfail' in testvar and 'error' in testvar['xfail']:\n if jout.decode(\"utf-8\") == testvar['xfail']['error']:\n assert False\n else:\n assert True\n elif error and 'error' in testvar and 'error' in testvar['error']:\n try:\n got_df = pd.DataFrame(json.loads(error.decode('utf-8').strip()))\n except json.JSONDecodeError:\n got_df = pd.DataFrame({'error': [error.decode('utf-8').strip()]})\n\n expected_df = pd.DataFrame(json.loads(testvar['error']['error']))\n\n assert_df_equal(expected_df, got_df, ignore_cols)\n else:\n raise Exception(f\"either xfail or output requried {error}\")\n\n\n@ pytest.mark.smoke\n@ pytest.mark.sqcmds\n@ pytest.mark.parametrize(\n \"testvar\",\n load_up_the_tests(os.scandir(os.path.abspath(os.curdir) +\n '/tests/integration/sqcmds/cumulus-samples')))\ndef test_cumulus_sqcmds(testvar, create_context_config):\n _test_sqcmds(testvar, create_context_config)\n\n\n@ pytest.mark.smoke\n@ pytest.mark.sqcmds\n@ pytest.mark.parametrize(\n \"testvar\",\n load_up_the_tests(os.scandir(os.path.abspath(os.curdir) +\n '/tests/integration/sqcmds/nxos-samples')))\ndef test_nxos_sqcmds(testvar, create_context_config):\n _test_sqcmds(testvar, create_context_config)\n\n\n@ pytest.mark.smoke\n@ pytest.mark.sqcmds\n@ pytest.mark.parametrize(\n \"testvar\",\n load_up_the_tests(os.scandir(os.path.abspath(os.curdir) +\n '/tests/integration/sqcmds/junos-samples')))\ndef test_junos_sqcmds(testvar, create_context_config):\n _test_sqcmds(testvar, create_context_config)\n\n\n@ pytest.mark.smoke\n@ pytest.mark.sqcmds\n@ pytest.mark.parametrize(\n \"testvar\",\n load_up_the_tests(os.scandir(os.path.abspath(os.curdir) +\n '/tests/integration/sqcmds/eos-samples')))\ndef test_eos_sqcmds(testvar, create_context_config):\n _test_sqcmds(testvar, create_context_config)\n\n\n@ pytest.mark.smoke\n@ pytest.mark.sqcmds\n@ pytest.mark.parametrize(\n \"testvar\",\n load_up_the_tests(os.scandir(os.path.abspath(os.curdir) +\n '/tests/integration/sqcmds/mixed-samples')))\ndef test_mixed_sqcmds(testvar, create_context_config):\n _test_sqcmds(testvar, create_context_config)\n\n\n@ pytest.mark.smoke\n@ pytest.mark.sqcmds\n@ pytest.mark.parametrize(\n \"testvar\",\n load_up_the_tests(os.scandir(os.path.abspath(os.curdir) +\n '/tests/integration/sqcmds/vmx-samples')))\ndef test_vmx_sqcmds(testvar, create_context_config):\n _test_sqcmds(testvar, create_context_config)\n\n\n@ pytest.mark.smoke\n@ pytest.mark.sqcmds\n@ pytest.mark.parametrize(\n \"testvar\",\n load_up_the_tests(os.scandir(os.path.abspath(os.curdir) +\n '/tests/integration/sqcmds/common-samples')))\ndef test_common_sqcmds(testvar, create_context_config):\n _test_sqcmds(testvar, create_context_config)\n",
"import numpy as np\n\nfrom suzieq.poller.services.service import Service\nfrom ipaddress import ip_address, IPv4Interface\n\n\nclass OspfIfService(Service):\n \"\"\"OSPF Interface service. Output needs to be munged\"\"\"\n\n def _clean_linux_data(self, processed_data, raw_data):\n for entry in processed_data:\n entry[\"vrf\"] = \"default\"\n entry[\"networkType\"] = entry[\"networkType\"].lower()\n if entry['networkType'] == 'pointopoint':\n entry['networkType'] = 'p2p'\n entry[\"passive\"] = entry[\"passive\"] == \"Passive\"\n entry[\"isUnnumbered\"] = entry[\"isUnnumbered\"] == \"UNNUMBERED\"\n\n return processed_data\n\n def _clean_cumulus_data(self, processed_data, raw_data):\n return self._clean_linux_data(processed_data, raw_data)\n\n def _clean_sonic_data(self, processed_data, raw_data):\n return self._clean_linux_data(processed_data, raw_data)\n\n def _clean_eos_data(self, processed_data, raw_data):\n\n vrf_loip = {}\n vrf_rtrid = {}\n drop_indices = []\n for i, entry in enumerate(processed_data):\n\n if '_entryType' in entry:\n # Retrieve the VRF and routerID\n vrf_rtrid[entry.get('vrf', 'default')] = \\\n entry.get('routerId', '')\n drop_indices.append(i)\n continue\n\n if not entry.get('ifname', ''):\n drop_indices.append(i)\n continue\n\n vrf = entry.get('vrf', '')\n if entry['ifname'].startswith(\"Loopback\"):\n if vrf not in vrf_loip or not vrf_loip[vrf]:\n vrf_loip[vrf] = entry.get('ipAddress', '')\n if entry.get('passive', False):\n entry['bfdStatus'] = \"invalid\"\n entry[\"networkType\"] = entry[\"networkType\"].lower()\n entry[\"isUnnumbered\"] = False\n if entry.get('state', '') in ['dr', 'p2p', 'backupDr']:\n entry['state'] = 'up'\n\n for i, entry in enumerate(processed_data):\n if entry.get('ipAddress', '') == vrf_loip.get(\n entry.get('vrf', ''), ''):\n if not entry.get('type', '') == \"loopback\":\n entry['isUnnumbered'] = True\n if entry['vrf'] in vrf_rtrid:\n entry['routerId'] = vrf_rtrid[entry['vrf']]\n\n processed_data = np.delete(processed_data, drop_indices).tolist()\n return processed_data\n\n def _clean_junos_data(self, processed_data, raw_data):\n\n drop_indices = []\n\n for i, entry in enumerate(processed_data):\n if entry['_entryType'] == 'overview':\n routerId = entry['routerId']\n continue\n\n if not entry.get('ifname', ''):\n drop_indices.append(i)\n continue\n\n entry['routerId'] = routerId\n # Is this right? 
Don't have a down interface example\n entry['state'] = 'up'\n entry['passive'] = entry['passive'] == \"Passive\"\n if entry['networkType'] == \"LAN\":\n entry['networkType'] = \"broadcast\"\n entry['stub'] = not entry['stub'] == 'Not Stub'\n entry['ipAddress'] = IPv4Interface(\n f'{entry[\"ipAddress\"]}/{entry[\"maskLen\"]}').with_prefixlen\n entry['maskLen'] = int(entry['ipAddress'].split('/')[1])\n entry['vrf'] = 'default' # Juniper doesn't provide this info\n entry['authType'] = entry['authType'].lower()\n entry['networkType'] = entry['networkType'].lower()\n\n # Skip the original record as we don't need the overview record\n processed_data = np.delete(processed_data, drop_indices).tolist()\n return processed_data[1:]\n\n def _clean_nxos_data(self, processed_data, raw_data):\n areas = {} # Need to come back to fixup entries\n drop_indices = []\n\n for i, entry in enumerate(processed_data):\n if not entry.get('ifname', ''):\n drop_indices.append(i)\n continue\n\n if entry['_entryType'] == 'interfaces':\n entry[\"networkType\"] = entry[\"networkType\"].lower()\n if entry['ifname'].startswith('loopback'):\n entry['passive'] = True\n entry['ipAddress'] = \\\n f\"{entry['ipAddress']}/{entry['maskLen']}\"\n if entry['area'] not in areas:\n areas[entry['area']] = []\n\n if entry.get('_adminState', '') == \"down\":\n entry['state'] = \"adminDown\"\n\n areas[entry['area']].append(entry)\n else:\n # ifname is really the area name\n if not entry.get('ifname', []):\n drop_indices.append(i)\n continue\n\n for j, area in enumerate(entry['ifname']):\n for ifentry in areas.get(area, []):\n ifentry['routerId'] = entry['routerId']\n ifentry['authType'] = entry['authType'][j]\n ifentry['isBackbone'] = area == \"0.0.0.0\"\n drop_indices.append(i)\n\n processed_data = np.delete(processed_data, drop_indices).tolist()\n return processed_data\n\n def _clean_ios_data(self, processed_data, raw_data):\n\n drop_indices = []\n\n for i, entry in enumerate(processed_data):\n if not entry.get('ifname', ''):\n drop_indices.append(i)\n continue\n\n area = entry.get('area', '')\n if area and area.isdecimal():\n entry['area'] = str(ip_address(int(area)))\n entry[\"networkType\"] = entry[\"networkType\"].lower()\n entry[\"passive\"] = entry[\"passive\"] == \"stub\"\n entry[\"isUnnumbered\"] = entry[\"isUnnumbered\"] == \"yes\"\n entry['areaStub'] = entry['areaStub'] == \"yes\"\n entry['helloTime'] = int(\n entry['helloTime']) if entry['helloTime'] else 10 # def value\n entry['deadTime'] = int(\n entry['deadTime']) if entry['deadTime'] else 40 # def value\n entry['retxTime'] = int(\n entry['retxTime']) if entry['retxTime'] else 5 # def value\n entry['vrf'] = 'default' # IOS doesn't provide this info\n entry['authType'] = entry.get('authType', '').lower()\n entry['nbrCount'] = int(\n entry['nbrCount']) if entry['nbrCount'] else 0\n entry['noSummary'] = entry.get('noSummary', False)\n if entry['state'] == \"administratively down\":\n entry['state'] = \"down\"\n else:\n entry['state'] = entry['state'].lower()\n\n processed_data = np.delete(processed_data, drop_indices).tolist()\n return processed_data\n\n def _clean_iosxe_data(self, processed_data, raw_data):\n return self._clean_ios_data(processed_data, raw_data)\n"
] |
[
[
"pandas.concat",
"numpy.where",
"pandas.DataFrame"
],
[
"pandas.read_json",
"pandas.DataFrame"
],
[
"numpy.delete"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Chaztikov/probability
|
[
"9d64bfd0a7907f220f910dae134bc30258f25b5e"
] |
[
"tensorflow_probability/python/distributions/distribution_properties_test.py"
] |
[
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Property-based testing for TFP distributions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport inspect\n\nfrom absl import logging\nfrom absl.testing import parameterized\nimport hypothesis as hp\nfrom hypothesis import strategies as hps\nimport numpy as np\nimport six\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python import util as tfp_util\nfrom tensorflow_probability.python.bijectors import hypothesis_testlib as bijector_hps\nfrom tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_util\n\n\nTF2_FRIENDLY_DISTS = (\n 'Bernoulli',\n 'Beta',\n 'Binomial',\n 'Chi',\n 'Chi2',\n 'CholeskyLKJ',\n 'Categorical',\n 'Cauchy',\n 'Deterministic',\n 'Dirichlet',\n 'DirichletMultinomial',\n 'DoublesidedMaxwell',\n 'Empirical',\n 'Exponential',\n 'FiniteDiscrete',\n 'Gamma',\n 'GammaGamma',\n 'GeneralizedPareto',\n 'Geometric',\n 'Gumbel',\n 'HalfCauchy',\n 'HalfNormal',\n 'Horseshoe',\n 'InverseGamma',\n 'InverseGaussian',\n 'Kumaraswamy',\n 'Laplace',\n 'LKJ',\n 'LogNormal',\n 'Logistic',\n 'Normal',\n 'Multinomial',\n 'NegativeBinomial',\n 'OneHotCategorical',\n 'Pareto',\n 'PERT',\n 'PlackettLuce',\n 'Poisson',\n # 'PoissonLogNormalQuadratureCompound' TODO(b/137956955): Add support\n # for hypothesis testing\n 'ProbitBernoulli',\n 'RelaxedBernoulli',\n 'ExpRelaxedOneHotCategorical',\n # 'SinhArcsinh' TODO(b/137956955): Add support for hypothesis testing\n 'StudentT',\n 'Triangular',\n 'TruncatedNormal',\n 'Uniform',\n 'VonMises',\n 'VonMisesFisher',\n 'WishartTriL',\n 'Zipf',\n)\n\nNO_SAMPLE_PARAM_GRADS = {\n 'Deterministic': ('atol', 'rtol'),\n}\nNO_LOG_PROB_PARAM_GRADS = ('Deterministic', 'Empirical')\nNO_KL_PARAM_GRADS = ('Deterministic',)\n\nMUTEX_PARAMS = (\n set(['logits', 'probs']),\n set(['probits', 'probs']),\n set(['rate', 'log_rate']),\n set(['scale', 'scale_tril', 'scale_diag', 'scale_identity_multiplier']),\n)\n\nSPECIAL_DISTS = (\n 'BatchReshape',\n 'Distribution',\n 'Empirical',\n 'Independent',\n 'MixtureSameFamily',\n 'TransformedDistribution',\n)\n\n# Batch slicing requires implementing `_params_event_ndims`. Generic\n# instantiation (per `instantiable_base_dists`, below) also requires\n# `_params_event_ndims`, but some special distributions can be instantiated\n# without that. 
Of those, this variable lists the ones that do not support\n# batch slicing.\nINSTANTIABLE_BUT_NOT_SLICABLE = (\n 'BatchReshape',\n)\n\nEXTRA_TENSOR_CONVERSION_DISTS = {\n 'RelaxedBernoulli': 1,\n 'WishartTriL': 3, # not concretizing linear operator scale\n 'Chi': 2, # subclasses `Chi2`, runs redundant checks on `df` parameter\n}\n\n# Whitelist of underlying distributions for QuantizedDistribution (must have\n# continuous, infinite support -- QuantizedDistribution also works for finite-\n# support distributions for which the length of the support along each dimension\n# is at least 1, though it is difficult to construct draws of these\n# distributions in general, and wouldn't contribute much to test coverage.)\nQUANTIZED_BASE_DISTS = (\n 'Chi2',\n 'Exponential',\n 'LogNormal',\n 'Logistic',\n 'Normal',\n 'Pareto',\n 'Poisson',\n 'StudentT',\n)\n\n\n# TODO(b/130815467) All distributions should be auto-vectorizeable.\n# The lists below contain distributions from INSTANTIABLE_BASE_DISTS that are\n# blacklisted by the autovectorization tests. Since not all distributions are\n# in INSTANTIABLE_BASE_DISTS, these should not be taken as exhaustive.\nSAMPLE_AUTOVECTORIZATION_IS_BROKEN = [\n 'Binomial', # No converter for While\n 'Categorical', # No converter for SparseSoftmaxCrossEntropyWithLogits\n 'DirichletMultinomial', # No converter for TensorListFromTensor\n 'FiniteDiscrete', # No converter for SparseSoftmaxCrossEntropyWithLogits\n 'Multinomial', # No converter for TensorListFromTensor\n 'PlackettLuce', # No converter for TopKV2\n 'TruncatedNormal', # No converter for ParameterizedTruncatedNormal\n 'VonMises', # No converter for While\n 'VonMisesFisher', # No converter for While\n 'Zipf', # No converter for While\n]\n\nLOGPROB_AUTOVECTORIZATION_IS_BROKEN = [\n 'Categorical', # No converter for SparseSoftmaxCrossEntropyWithLogits\n 'DirichletMultinomial', # Same as Multinomial.\n 'FiniteDiscrete', # No converter for SparseSoftmaxCrossEntropyWithLogits\n 'Multinomial', # Seemingly runs, but gives `NaN`s sometimes.\n 'OneHotCategorical', # Seemingly runs, but gives `NaN`s sometimes.\n 'PlackettLuce', # Shape error because pfor gather ignores `batch_dims`.\n 'ProbitBernoulli', # Seemingly runs, but gives `NaN`s sometimes.\n 'TruncatedNormal', # Numerical problem: b/145554459\n 'VonMisesFisher', # No converter for CheckNumerics\n 'Wishart', # Actually works, but disabled because log_prob of sample is\n # ill-conditioned for reasons unrelated to pfor.\n 'WishartTriL', # Same as Wishart.\n]\n\nEVENT_SPACE_BIJECTOR_IS_BROKEN = [\n 'InverseGamma', # TODO(b/143090143): Enable this when the bug is fixed.\n # (Reciprocal(Softplus(x)) -> inf for small x)\n]\n\n# Vectorization can rewrite computations in ways that (apparently) lead to\n# minor floating-point inconsistency.\n# TODO(b/142827327): Bring tolerance down to 0 for all distributions.\nVECTORIZED_LOGPROB_ATOL = collections.defaultdict(lambda: 1e-6)\nVECTORIZED_LOGPROB_ATOL.update({\n 'CholeskyLKJ': 1e-4,\n 'LKJ': 1e-3,\n 'StudentT': 5e-5,\n 'TruncatedNormal': 1e-1,\n})\n\n\nclass DistInfo(collections.namedtuple(\n 'DistInfo', ['cls', 'params_event_ndims'])):\n \"\"\"Sufficient information to instantiate a Distribution.\n\n To wit\n\n - The Python class `cls` giving the class, and\n - A Python dict `params_event_ndims` giving the event dimensions for the\n parameters (so that parameters can be built with predictable batch shapes).\n\n Specifically, the `params_event_ndims` dict maps string parameter names to\n Python integers. 
Each integer gives how many (trailing) dimensions of that\n parameter are part of the event.\n \"\"\"\n __slots__ = ()\n\n\ndef instantiable_base_dists():\n \"\"\"Computes the table of mechanically instantiable base Distributions.\n\n A Distribution is mechanically instantiable if\n\n - The class appears as a symbol binding in `tfp.distributions`;\n - The class defines a `_params_event_ndims` method (necessary\n to generate parameter Tensors with predictable batch shapes); and\n - The name is not blacklisted in `SPECIAL_DISTS`.\n\n Additionally, the Empricial distribution is hardcoded with special\n instantiation rules for each choice of event_ndims among 0, 1, and 2.\n\n Compound distributions like TransformedDistribution have their own\n instantiation rules hard-coded in the `distributions` strategy.\n\n Returns:\n instantiable_base_dists: A Python dict mapping distribution name\n (as a string) to a `DistInfo` carrying the information necessary to\n instantiate it.\n \"\"\"\n result = {}\n for (dist_name, dist_class) in six.iteritems(tfd.__dict__):\n if (not inspect.isclass(dist_class) or\n not issubclass(dist_class, tfd.Distribution) or\n dist_name in SPECIAL_DISTS):\n continue\n try:\n params_event_ndims = dist_class._params_event_ndims()\n except NotImplementedError:\n msg = 'Unable to test tfd.%s: _params_event_ndims not implemented'\n logging.warning(msg, dist_name)\n continue\n result[dist_name] = DistInfo(dist_class, params_event_ndims)\n\n # Empirical._params_event_ndims depends on `self.event_ndims`, so we have to\n # explicitly list these entries.\n result['Empirical|event_ndims=0'] = DistInfo( #\n functools.partial(tfd.Empirical, event_ndims=0), dict(samples=1))\n result['Empirical|event_ndims=1'] = DistInfo( #\n functools.partial(tfd.Empirical, event_ndims=1), dict(samples=2))\n result['Empirical|event_ndims=2'] = DistInfo( #\n functools.partial(tfd.Empirical, event_ndims=2), dict(samples=3))\n\n return result\n\n\n# INSTANTIABLE_BASE_DISTS is a map from str->(DistClass, params_event_ndims)\nINSTANTIABLE_BASE_DISTS = instantiable_base_dists()\ndel instantiable_base_dists\n\nINSTANTIABLE_META_DISTS = (\n 'BatchReshape',\n 'Independent',\n 'MixtureSameFamily',\n 'TransformedDistribution',\n 'QuantizedDistribution',\n)\n\n# pylint is unable to handle @hps.composite (e.g. complains \"No value for\n# argument 'batch_shape' in function call\"), so disable this lint for the file.\n\n# pylint: disable=no-value-for-parameter\n\n\[email protected]\ndef valid_slices(draw, batch_shape):\n \"\"\"Samples a legal (possibly empty) slice for shape batch_shape.\"\"\"\n # We build up a list of slices in several stages:\n # 1. Choose 0 to batch_rank slices to come before an Ellipsis (...).\n # 2. Decide whether or not to add an Ellipsis; if using, updating the indexing\n # used (e.g. batch_shape[i]) to identify safe bounds.\n # 3. Choose 0 to [remaining_dims] slices to come last.\n # 4. Decide where to insert between 0 and 3 newaxis slices.\n batch_shape = tf.TensorShape(batch_shape).as_list()\n slices = []\n batch_rank = len(batch_shape)\n arbitrary_slices = hps.tuples(\n hps.one_of(hps.just(None), hps.integers(min_value=-100, max_value=100)),\n hps.one_of(hps.just(None), hps.integers(min_value=-100, max_value=100)),\n hps.one_of(\n hps.just(None),\n hps.integers(min_value=-100, max_value=100).filter(lambda x: x != 0))\n ).map(lambda tup: slice(*tup))\n\n # 1. 
Choose 0 to batch_rank slices to come before an Ellipsis (...).\n nslc_before_ellipsis = draw(hps.integers(min_value=0, max_value=batch_rank))\n for i in range(nslc_before_ellipsis):\n slc = draw(\n hps.one_of(\n hps.integers(min_value=0, max_value=batch_shape[i] - 1),\n arbitrary_slices))\n slices.append(slc)\n # 2. Decide whether or not to add an Ellipsis; if using, updating the indexing\n # used (e.g. batch_shape[i]) to identify safe bounds.\n has_ellipsis = draw(hps.booleans().map(lambda x: (Ellipsis, x)))[1]\n nslc_after_ellipsis = draw(\n hps.integers(min_value=0, max_value=batch_rank - nslc_before_ellipsis))\n if has_ellipsis:\n slices.append(Ellipsis)\n remain_start, remain_end = (batch_rank - nslc_after_ellipsis, batch_rank)\n else:\n remain_start = nslc_before_ellipsis\n remain_end = nslc_before_ellipsis + nslc_after_ellipsis\n # 3. Choose 0 to [remaining_dims] slices to come last.\n for i in range(remain_start, remain_end):\n slc = draw(\n hps.one_of(\n hps.integers(min_value=0, max_value=batch_shape[i] - 1),\n arbitrary_slices))\n slices.append(slc)\n # 4. Decide where to insert between 0 and 3 newaxis slices.\n newaxis_positions = draw(\n hps.lists(hps.integers(min_value=0, max_value=len(slices)), max_size=3))\n for i in sorted(newaxis_positions, reverse=True):\n slices.insert(i, tf.newaxis)\n slices = tuple(slices)\n # Since `d[0]` ==> `d.__getitem__(0)` instead of `d.__getitem__((0,))`;\n # and similarly `d[:3]` ==> `d.__getitem__(slice(None, 3))` instead of\n # `d.__getitem__((slice(None, 3),))`; it is useful to test such scenarios.\n if len(slices) == 1 and draw(hps.booleans()):\n # Sometimes only a single item non-tuple.\n return slices[0]\n return slices\n\n\ndef stringify_slices(slices):\n \"\"\"Returns a list of strings describing the items in `slices`.\n\n Each returned string (in order) encodes what to do with one dimension of the\n slicee:\n\n - That number for a single integer slice;\n - 'a:b:c' for a start-stop-step slice, omitting any missing components;\n - 'tf.newaxis' for an axis insertion; or\n - The ellipsis '...' 
for an arbitrary-rank gap.\n\n Args:\n slices: A single-dimension slice or a Python tuple of single-dimension\n slices.\n\n Returns:\n pretty_slices: A list of Python strings encoding each slice.\n \"\"\"\n pretty_slices = []\n slices = slices if isinstance(slices, tuple) else (slices,)\n for slc in slices:\n if slc == Ellipsis:\n pretty_slices.append('...')\n elif isinstance(slc, slice):\n pretty_slices.append('{}:{}:{}'.format(\n *['' if s is None else s for s in (slc.start, slc.stop, slc.step)]))\n elif isinstance(slc, int) or tf.is_tensor(slc):\n pretty_slices.append(str(slc))\n elif slc is tf.newaxis:\n pretty_slices.append('tf.newaxis')\n else:\n raise ValueError('Unexpected slice type: {}'.format(type(slc)))\n return pretty_slices\n\n\ndef depths():\n return hps.integers(min_value=0, max_value=4)\n\n\[email protected]\ndef broadcasting_params(draw,\n dist_name,\n batch_shape,\n event_dim=None,\n enable_vars=False):\n \"\"\"Strategy for drawing parameters broadcasting to `batch_shape`.\"\"\"\n if dist_name not in INSTANTIABLE_BASE_DISTS:\n raise ValueError('Unknown Distribution name {}'.format(dist_name))\n\n params_event_ndims = INSTANTIABLE_BASE_DISTS[dist_name].params_event_ndims\n\n def _constraint(param):\n return constraint_for(dist_name, param)\n\n return draw(\n tfp_hps.broadcasting_params(\n batch_shape,\n params_event_ndims,\n event_dim=event_dim,\n enable_vars=enable_vars,\n constraint_fn_for=_constraint,\n mutex_params=MUTEX_PARAMS))\n\n\ndef params_used(dist):\n return [k for k, v in six.iteritems(dist.parameters) if v is not None]\n\n\[email protected]\ndef batch_reshapes(\n draw, batch_shape=None, event_dim=None,\n enable_vars=False, depth=None,\n eligibility_filter=lambda name: True, validate_args=True):\n \"\"\"Strategy for drawing `BatchReshape` distributions.\n\n The underlying distribution is drawn from the `distributions` strategy.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n `BatchReshape` distribution. Note that the underlying distribution will\n in general have a different batch shape, to make the reshaping\n non-trivial. Hypothesis will pick one if omitted.\n event_dim: Optional Python int giving the size of each of the underlying\n distribution's parameters' event dimensions. This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}\n depth: Python `int` giving maximum nesting depth of compound Distributions.\n eligibility_filter: Optional Python callable. 
Blacklists some Distribution\n class names so they will not be drawn.\n validate_args: Python `bool`; whether to enable runtime assertions.\n\n Returns:\n dists: A strategy for drawing `BatchReshape` distributions with the\n specified `batch_shape` (or an arbitrary one if omitted).\n \"\"\"\n if depth is None:\n depth = draw(depths())\n\n if batch_shape is None:\n batch_shape = draw(tfp_hps.shapes(min_ndims=1, max_side=4))\n\n # TODO(b/142135119): Wanted to draw general input and output shapes like the\n # following, but Hypothesis complained about filtering out too many things.\n # underlying_batch_shape = draw(tfp_hps.shapes(min_ndims=1))\n # hp.assume(\n # batch_shape.num_elements() == underlying_batch_shape.num_elements())\n underlying_batch_shape = [tf.TensorShape(batch_shape).num_elements()]\n\n underlying = draw(\n distributions(\n batch_shape=underlying_batch_shape,\n event_dim=event_dim,\n enable_vars=enable_vars,\n depth=depth - 1,\n eligibility_filter=eligibility_filter,\n validate_args=validate_args))\n hp.note('Forming BatchReshape with underlying dist {}; '\n 'parameters {}; batch_shape {}'.format(\n underlying, params_used(underlying), batch_shape))\n result_dist = tfd.BatchReshape(\n underlying, batch_shape=batch_shape, validate_args=True)\n return result_dist\n\n\[email protected]\ndef independents(\n draw, batch_shape=None, event_dim=None,\n enable_vars=False, depth=None, eligibility_filter=lambda name: True,\n validate_args=True):\n \"\"\"Strategy for drawing `Independent` distributions.\n\n The underlying distribution is drawn from the `distributions` strategy.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n `Independent` distribution. Note that the underlying distribution will in\n general have a higher-rank batch shape, to make room for reinterpreting\n some of those dimensions as the `Independent`'s event. Hypothesis will\n pick one if omitted.\n event_dim: Optional Python int giving the size of each of the underlying\n distribution's parameters' event dimensions. This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}\n depth: Python `int` giving maximum nesting depth of compound Distributions.\n eligibility_filter: Optional Python callable. 
Blacklists some Distribution\n class names so they will not be drawn.\n validate_args: Python `bool`; whether to enable runtime assertions.\n\n Returns:\n dists: A strategy for drawing `Independent` distributions with the specified\n `batch_shape` (or an arbitrary one if omitted).\n \"\"\"\n if depth is None:\n depth = draw(depths())\n\n reinterpreted_batch_ndims = draw(hps.integers(min_value=0, max_value=2))\n\n if batch_shape is None:\n batch_shape = draw(\n tfp_hps.shapes(min_ndims=reinterpreted_batch_ndims))\n else: # This independent adds some batch dims to its underlying distribution.\n batch_shape = tensorshape_util.concatenate(\n batch_shape,\n draw(tfp_hps.shapes(\n min_ndims=reinterpreted_batch_ndims,\n max_ndims=reinterpreted_batch_ndims)))\n\n underlying = draw(\n distributions(\n batch_shape=batch_shape,\n event_dim=event_dim,\n enable_vars=enable_vars,\n depth=depth - 1,\n eligibility_filter=eligibility_filter,\n validate_args=validate_args))\n hp.note('Forming Independent with underlying dist {}; '\n 'parameters {}; reinterpreted_batch_ndims {}'.format(\n underlying, params_used(underlying), reinterpreted_batch_ndims))\n result_dist = tfd.Independent(\n underlying,\n reinterpreted_batch_ndims=reinterpreted_batch_ndims,\n validate_args=validate_args)\n expected_shape = batch_shape[:len(batch_shape) - reinterpreted_batch_ndims]\n if expected_shape != result_dist.batch_shape:\n msg = ('Independent strategy generated a bad batch shape '\n 'for {}, should have been {}.').format(result_dist, expected_shape)\n raise AssertionError(msg)\n return result_dist\n\n\[email protected]\ndef transformed_distributions(draw,\n batch_shape=None,\n event_dim=None,\n enable_vars=False,\n depth=None,\n eligibility_filter=lambda name: True,\n validate_args=True):\n \"\"\"Strategy for drawing `TransformedDistribution`s.\n\n The transforming bijector is drawn from the\n `bijectors.hypothesis_testlib.unconstrained_bijectors` strategy.\n\n The underlying distribution is drawn from the `distributions` strategy, except\n that it must be compatible with the bijector according to\n `bijectors.hypothesis_testlib.distribution_filter_for` (these generally check\n that vector bijectors are not combined with scalar distributions, etc).\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n `TransformedDistribution`. The underlying distribution will sometimes\n have the same `batch_shape`, and sometimes have scalar batch shape.\n Hypothesis will pick a `batch_shape` if omitted.\n event_dim: Optional Python int giving the size of each of the underlying\n distribution's parameters' event dimensions. This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}\n depth: Python `int` giving maximum nesting depth of compound Distributions.\n eligibility_filter: Optional Python callable. 
Blacklists some Distribution\n class names so they will not be drawn.\n validate_args: Python `bool`; whether to enable runtime assertions.\n\n Returns:\n dists: A strategy for drawing `TransformedDistribution`s with the specified\n `batch_shape` (or an arbitrary one if omitted).\n \"\"\"\n if depth is None:\n depth = draw(depths())\n\n bijector = draw(bijector_hps.unconstrained_bijectors())\n hp.note('Drawing TransformedDistribution with bijector {}'.format(bijector))\n if batch_shape is None:\n batch_shape = draw(tfp_hps.shapes())\n underlying_batch_shape = batch_shape\n batch_shape_arg = None\n if draw(hps.booleans()):\n # Use batch_shape overrides.\n underlying_batch_shape = tf.TensorShape([]) # scalar underlying batch\n batch_shape_arg = batch_shape\n underlyings = distributions(\n batch_shape=underlying_batch_shape,\n event_dim=event_dim,\n enable_vars=enable_vars,\n depth=depth - 1,\n eligibility_filter=eligibility_filter,\n validate_args=validate_args).filter(\n bijector_hps.distribution_filter_for(bijector))\n to_transform = draw(underlyings)\n hp.note('Forming TransformedDistribution with '\n 'underlying distribution {}; parameters {}'.format(\n to_transform, params_used(to_transform)))\n # TODO(bjp): Add test coverage for `event_shape` argument of\n # `TransformedDistribution`.\n result_dist = tfd.TransformedDistribution(\n bijector=bijector,\n distribution=to_transform,\n batch_shape=batch_shape_arg,\n validate_args=validate_args)\n if batch_shape != result_dist.batch_shape:\n msg = ('TransformedDistribution strategy generated a bad batch shape '\n 'for {}, should have been {}.').format(result_dist, batch_shape)\n raise AssertionError(msg)\n return result_dist\n\n\[email protected]\ndef quantized_distributions(draw,\n batch_shape=None,\n event_dim=None,\n enable_vars=False,\n eligibility_filter=lambda name: True,\n validate_args=True):\n \"\"\"Strategy for drawing `QuantizedDistribution`s.\n\n The underlying distribution is drawn from the `base_distributions` strategy.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n `QuantizedDistribution`. Hypothesis will pick a `batch_shape` if omitted.\n event_dim: Optional Python int giving the size of each of the underlying\n distribution's parameters' event dimensions. This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all Tensors, never Variables or DeferredTensor.\n eligibility_filter: Optional Python callable. 
Blacklists some Distribution\n class names so they will not be drawn.\n validate_args: Python `bool`; whether to enable runtime assertions.\n\n Returns:\n dists: A strategy for drawing `QuantizedDistribution`s with the specified\n `batch_shape` (or an arbitrary one if omitted).\n \"\"\"\n\n if batch_shape is None:\n batch_shape = draw(tfp_hps.shapes())\n\n low_quantile = draw(\n hps.one_of(\n hps.just(None),\n hps.floats(min_value=0.01, max_value=0.7)))\n high_quantile = draw(\n hps.one_of(\n hps.just(None),\n hps.floats(min_value=0.3, max_value=.99)))\n\n def ok(name):\n return eligibility_filter(name) and name in QUANTIZED_BASE_DISTS\n underlyings = base_distributions(\n batch_shape=batch_shape,\n event_dim=event_dim,\n enable_vars=enable_vars,\n eligibility_filter=ok,\n )\n underlying = draw(underlyings)\n\n if high_quantile is not None:\n high_quantile = tf.convert_to_tensor(high_quantile, dtype=underlying.dtype)\n if low_quantile is not None:\n low_quantile = tf.convert_to_tensor(low_quantile, dtype=underlying.dtype)\n if high_quantile is not None:\n high_quantile = ensure_high_gt_low(low_quantile, high_quantile)\n\n hp.note('Drawing QuantizedDistribution with underlying distribution'\n ' {}'.format(underlying))\n\n try:\n low = None if low_quantile is None else underlying.quantile(low_quantile)\n high = None if high_quantile is None else underlying.quantile(high_quantile)\n except NotImplementedError:\n # The following code makes ReproducibilityTest flaky in graph mode (but not\n # eager). Failures are due either to partial mismatch in the samples in\n # ReproducibilityTest or to `low` and/or `high` being NaN. For now, to avoid\n # this, we set `low` and `high` to `None` for distributions not implementing\n # `quantile`.\n\n # seed = test_util.test_seed(hardcoded_seed=123)\n # low = (None if low_quantile is None\n # else underlying.sample(low_quantile.shape, seed=seed))\n # high = (None if high_quantile is None else\n # underlying.sample(high_quantile.shape, seed=seed))\n low = None\n high = None\n\n # Ensure that `low` and `high` are ints contained in distribution support\n # and span at least a few bins.\n if high is not None:\n high = tf.clip_by_value(high, -2**23, 2**23)\n high = tf.math.ceil(high + 5.)\n\n if low is not None:\n low = tf.clip_by_value(low, -2**23, 2**23)\n low = tf.math.ceil(low)\n\n result_dist = tfd.QuantizedDistribution(\n distribution=underlying,\n low=low,\n high=high,\n validate_args=validate_args)\n\n return result_dist\n\n\[email protected]\ndef mixtures_same_family(draw,\n batch_shape=None,\n event_dim=None,\n enable_vars=False,\n depth=None,\n eligibility_filter=lambda name: True,\n validate_args=True):\n \"\"\"Strategy for drawing `MixtureSameFamily` distributions.\n\n The component distribution is drawn from the `distributions` strategy.\n\n The Categorical mixture distributions are either shared across all batch\n members, or drawn independently for the full batch (as required by\n `MixtureSameFamily`).\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n `MixtureSameFamily` distribution. The component distribution will have a\n batch shape of 1 rank higher (for the components being mixed). Hypothesis\n will pick a batch shape if omitted.\n event_dim: Optional Python int giving the size of each of the component\n distribution's parameters' event dimensions. 
This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}\n depth: Python `int` giving maximum nesting depth of compound Distributions.\n eligibility_filter: Optional Python callable. Blacklists some Distribution\n class names so they will not be drawn.\n validate_args: Python `bool`; whether to enable runtime assertions.\n\n Returns:\n dists: A strategy for drawing `MixtureSameFamily` distributions with the\n specified `batch_shape` (or an arbitrary one if omitted).\n \"\"\"\n if depth is None:\n depth = draw(depths())\n\n if batch_shape is None:\n # Ensure the components dist has at least one batch dim (a component dim).\n batch_shape = draw(tfp_hps.shapes(min_ndims=1, min_lastdimsize=2))\n else: # This mixture adds a batch dim to its underlying components dist.\n batch_shape = tensorshape_util.concatenate(\n batch_shape,\n draw(tfp_hps.shapes(min_ndims=1, max_ndims=1, min_lastdimsize=2)))\n\n component = draw(\n distributions(\n batch_shape=batch_shape,\n event_dim=event_dim,\n enable_vars=enable_vars,\n eligibility_filter=eligibility_filter,\n depth=depth - 1))\n hp.note('Drawing MixtureSameFamily with component {}; parameters {}'.format(\n component, params_used(component)))\n # scalar or same-shaped categorical?\n mixture_batch_shape = draw(\n hps.one_of(hps.just(batch_shape[:-1]), hps.just(tf.TensorShape([]))))\n mixture_dist = draw(base_distributions(\n dist_name='Categorical',\n batch_shape=mixture_batch_shape,\n event_dim=tensorshape_util.as_list(batch_shape)[-1],\n enable_vars=enable_vars,\n validate_args=validate_args))\n hp.note(('Forming MixtureSameFamily with '\n 'mixture distribution {}; parameters {}').format(\n mixture_dist, params_used(mixture_dist)))\n result_dist = tfd.MixtureSameFamily(\n components_distribution=component,\n mixture_distribution=mixture_dist,\n validate_args=validate_args)\n if batch_shape[:-1] != result_dist.batch_shape:\n msg = ('MixtureSameFamily strategy generated a bad batch shape '\n 'for {}, should have been {}.').format(result_dist, batch_shape[:-1])\n raise AssertionError(msg)\n return result_dist\n\n\ndef assert_shapes_unchanged(target_shaped_dict, possibly_bcast_dict):\n for param, target_param_val in six.iteritems(target_shaped_dict):\n np.testing.assert_array_equal(\n tensorshape_util.as_list(target_param_val.shape),\n tensorshape_util.as_list(possibly_bcast_dict[param].shape))\n\n\[email protected]\ndef base_distributions(draw,\n dist_name=None,\n batch_shape=None,\n event_dim=None,\n enable_vars=False,\n eligibility_filter=lambda name: True,\n validate_args=True):\n \"\"\"Strategy for drawing arbitrary base Distributions.\n\n This does not draw compound distributions like `Independent`,\n `MixtureSameFamily`, or `TransformedDistribution`; only base Distributions\n that do not accept other Distributions as arguments.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n dist_name: Optional Python `str`. If given, the produced distributions\n will all have this type.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n Distribution. 
Hypothesis will pick a batch shape if omitted.\n event_dim: Optional Python int giving the size of each of the\n distribution's parameters' event dimensions. This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}.\n eligibility_filter: Optional Python callable. Blacklists some Distribution\n class names so they will not be drawn at the top level.\n validate_args: Python `bool`; whether to enable runtime assertions.\n\n Returns:\n dists: A strategy for drawing Distributions with the specified `batch_shape`\n (or an arbitrary one if omitted).\n \"\"\"\n if dist_name is None:\n names = [k for k in INSTANTIABLE_BASE_DISTS.keys() if eligibility_filter(k)]\n dist_name = draw(hps.sampled_from(sorted(names)))\n\n if dist_name == 'Empirical':\n variants = [k for k in INSTANTIABLE_BASE_DISTS.keys()\n if eligibility_filter(k) and 'Empirical' in k]\n dist_name = draw(hps.sampled_from(sorted(variants)))\n\n if batch_shape is None:\n batch_shape = draw(tfp_hps.shapes())\n\n # Draw raw parameters\n params_kwargs = draw(\n broadcasting_params(\n dist_name, batch_shape, event_dim=event_dim, enable_vars=enable_vars))\n hp.note('Forming dist {} with raw parameters {}'.format(\n dist_name, params_kwargs))\n\n # Constrain them to legal values\n params_constrained = constraint_for(dist_name)(params_kwargs)\n\n # Sometimes the \"distribution constraint\" fn may replace c2t-tracking\n # DeferredTensor params with Tensor params (e.g. fix_triangular). In such\n # cases, we preserve the c2t-tracking DeferredTensors by wrapping them but\n # ignoring the value. 
We similarly reinstate raw tf.Variables, so they\n # appear in the distribution's `variables` list and can be initialized.\n for k in params_constrained:\n if (k in params_kwargs and\n isinstance(params_kwargs[k], (tfp_util.DeferredTensor, tf.Variable)) and\n params_kwargs[k] is not params_constrained[k]):\n\n def constrained_value(v, val=params_constrained[k]):\n # While the gradient to v will be 0, we only care about the c2t counts.\n return v * 0 + val\n\n params_constrained[k] = tfp_util.DeferredTensor(\n params_kwargs[k], constrained_value)\n\n hp.note('Forming dist {} with constrained parameters {}'.format(\n dist_name, params_constrained))\n assert_shapes_unchanged(params_kwargs, params_constrained)\n params_constrained['validate_args'] = validate_args\n\n if dist_name in ['Wishart', 'WishartTriL']:\n # With the default `input_output_cholesky = False`, Wishart occasionally\n # produces samples for which the Cholesky decompositions fail, causing\n # an error in testDistribution when `log_prob` is called on a sample.\n params_constrained['input_output_cholesky'] = True\n\n # Actually construct the distribution\n dist_cls = INSTANTIABLE_BASE_DISTS[dist_name].cls\n result_dist = dist_cls(**params_constrained)\n\n # Check that the batch shape came out as expected\n if batch_shape != result_dist.batch_shape:\n msg = ('Distributions strategy generated a bad batch shape '\n 'for {}, should have been {}.').format(result_dist, batch_shape)\n raise AssertionError(msg)\n return result_dist\n\n\[email protected]\ndef distributions(draw,\n dist_name=None,\n batch_shape=None,\n event_dim=None,\n enable_vars=False,\n depth=None,\n eligibility_filter=lambda name: True,\n validate_args=True):\n \"\"\"Strategy for drawing arbitrary Distributions.\n\n This may draw compound distributions (i.e., `Independent`,\n `MixtureSameFamily`, and/or `TransformedDistribution`), in which case the\n underlying distributions are drawn recursively from this strategy as well.\n\n Args:\n draw: Hypothesis strategy sampler supplied by `@hps.composite`.\n dist_name: Optional Python `str`. If given, the produced distributions\n will all have this type.\n batch_shape: An optional `TensorShape`. The batch shape of the resulting\n Distribution. Hypothesis will pick a batch shape if omitted.\n event_dim: Optional Python int giving the size of each of the\n distribution's parameters' event dimensions. This is shared across all\n parameters, permitting square event matrices, compatible location and\n scale Tensors, etc. If omitted, Hypothesis will choose one.\n enable_vars: TODO(bjp): Make this `True` all the time and put variable\n initialization in slicing_test. If `False`, the returned parameters are\n all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`\n `tfp.util.TransformedVariable`}.\n depth: Python `int` giving maximum nesting depth of compound Distributions.\n If `None`, Hypothesis will bias choose one, with a bias towards shallow\n nests.\n eligibility_filter: Optional Python callable. 
Blacklists some Distribution\n class names so they will not be drawn.\n validate_args: Python `bool`; whether to enable runtime assertions.\n\n Returns:\n dists: A strategy for drawing Distributions with the specified `batch_shape`\n (or an arbitrary one if omitted).\n\n Raises:\n ValueError: If it doesn't know how to instantiate a Distribution of class\n `dist_name`.\n \"\"\"\n if depth is None:\n depth = draw(depths())\n\n if dist_name is None and depth > 0:\n bases = hps.just(None)\n candidates = ['BatchReshape', 'Independent',\n 'MixtureSameFamily', 'TransformedDistribution']\n names = [name for name in candidates if eligibility_filter(name)]\n compounds = hps.one_of(map(hps.just, names))\n dist_name = draw(hps.one_of([bases, compounds]))\n\n if (dist_name is None\n or dist_name in INSTANTIABLE_BASE_DISTS\n or dist_name == 'Empirical'):\n return draw(base_distributions(\n dist_name, batch_shape, event_dim, enable_vars,\n eligibility_filter, validate_args))\n if dist_name == 'BatchReshape':\n return draw(batch_reshapes(\n batch_shape, event_dim, enable_vars, depth,\n eligibility_filter, validate_args))\n if dist_name == 'Independent':\n return draw(independents(\n batch_shape, event_dim, enable_vars, depth,\n eligibility_filter, validate_args))\n if dist_name == 'MixtureSameFamily':\n return draw(mixtures_same_family(\n batch_shape, event_dim, enable_vars, depth,\n eligibility_filter, validate_args))\n if dist_name == 'TransformedDistribution':\n return draw(transformed_distributions(\n batch_shape, event_dim, enable_vars, depth,\n eligibility_filter, validate_args))\n if dist_name == 'QuantizedDistribution':\n return draw(quantized_distributions(\n batch_shape, event_dim, enable_vars,\n eligibility_filter, validate_args))\n raise ValueError('Unknown Distribution name {}'.format(dist_name))\n\n\ndef extra_tensor_conversions_allowed(dist):\n \"\"\"Returns number of extra tensor conversions allowed for the input dist.\"\"\"\n extra_conversions = EXTRA_TENSOR_CONVERSION_DISTS.get(type(dist).__name__)\n if extra_conversions:\n return extra_conversions\n if isinstance(dist, tfd.TransformedDistribution):\n return 1\n if isinstance(dist, tfd.BatchReshape):\n # One for the batch_shape_tensor needed by _call_reshape_input_output.\n # One to cover inability to turn off validate_args for the base\n # distribution (b/143297494).\n return 2\n return 0\n\n\n@test_util.test_all_tf_execution_regimes\nclass DistributionParamsAreVarsTest(test_util.TestCase):\n\n @parameterized.named_parameters(\n {'testcase_name': dname, 'dist_name': dname}\n for dname in TF2_FRIENDLY_DISTS)\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testDistribution(self, dist_name, data):\n seed = test_util.test_seed()\n # Explicitly draw event_dim here to avoid relying on _params_event_ndims\n # later, so this test can support distributions that do not implement the\n # slicing protocol.\n event_dim = data.draw(hps.integers(min_value=2, max_value=6))\n dist = data.draw(distributions(\n dist_name=dist_name, event_dim=event_dim, enable_vars=True))\n batch_shape = dist.batch_shape\n batch_shape2 = data.draw(tfp_hps.broadcast_compatible_shape(batch_shape))\n dist2 = data.draw(\n distributions(\n dist_name=dist_name,\n batch_shape=batch_shape2,\n event_dim=event_dim,\n enable_vars=True))\n self.evaluate([var.initializer for var in dist.variables])\n\n # Check that the distribution passes Variables through to the accessor\n # properties (without converting them to Tensor or anything like that).\n for k, v in 
six.iteritems(dist.parameters):\n if not tensor_util.is_ref(v):\n continue\n self.assertIs(getattr(dist, k), v)\n\n # Check that standard statistics do not read distribution parameters more\n # than twice (once in the stat itself and up to once in any validation\n # assertions).\n max_permissible = 2 + extra_tensor_conversions_allowed(dist)\n for stat in sorted(data.draw(\n hps.sets(\n hps.one_of(\n map(hps.just, [\n 'covariance', 'entropy', 'mean', 'mode', 'stddev',\n 'variance'\n ])),\n min_size=3,\n max_size=3))):\n hp.note('Testing excessive var usage in {}.{}'.format(dist_name, stat))\n try:\n with tfp_hps.assert_no_excessive_var_usage(\n 'statistic `{}` of `{}`'.format(stat, dist),\n max_permissible=max_permissible):\n getattr(dist, stat)()\n\n except NotImplementedError:\n pass\n\n # Check that `sample` doesn't read distribution parameters more than twice,\n # and that it produces non-None gradients (if the distribution is fully\n # reparameterized).\n with tf.GradientTape() as tape:\n # TDs do bijector assertions twice (once by distribution.sample, and once\n # by bijector.forward).\n max_permissible = 2 + extra_tensor_conversions_allowed(dist)\n with tfp_hps.assert_no_excessive_var_usage(\n 'method `sample` of `{}`'.format(dist),\n max_permissible=max_permissible):\n sample = dist.sample(seed=seed)\n if dist.reparameterization_type == tfd.FULLY_REPARAMETERIZED:\n grads = tape.gradient(sample, dist.variables)\n for grad, var in zip(grads, dist.variables):\n var_name = var.name.rstrip('_0123456789:')\n if var_name in NO_SAMPLE_PARAM_GRADS.get(dist_name, ()):\n continue\n if grad is None:\n raise AssertionError(\n 'Missing sample -> {} grad for distribution {}'.format(\n var_name, dist_name))\n\n # Turn off validations, since TODO(b/129271256) log_prob can choke on dist's\n # own samples. 
Also, to relax conversion counts for KL (might do >2 w/\n # validate_args).\n dist = dist.copy(validate_args=False)\n dist2 = dist2.copy(validate_args=False)\n\n # Test that KL divergence reads distribution parameters at most once, and\n # that is produces non-None gradients.\n try:\n for d1, d2 in (dist, dist2), (dist2, dist):\n with tf.GradientTape() as tape:\n with tfp_hps.assert_no_excessive_var_usage(\n '`kl_divergence` of (`{}` (vars {}), `{}` (vars {}))'.format(\n d1, d1.variables, d2, d2.variables),\n max_permissible=1): # No validation => 1 convert per var.\n kl = d1.kl_divergence(d2)\n wrt_vars = list(d1.variables) + list(d2.variables)\n grads = tape.gradient(kl, wrt_vars)\n for grad, var in zip(grads, wrt_vars):\n if grad is None and dist_name not in NO_KL_PARAM_GRADS:\n raise AssertionError('Missing KL({} || {}) -> {} grad:\\n'\n '{} vars: {}\\n{} vars: {}'.format(\n d1, d2, var, d1, d1.variables, d2,\n d2.variables))\n except NotImplementedError:\n pass\n\n # Test that log_prob produces non-None gradients, except for distributions\n # on the NO_LOG_PROB_PARAM_GRADS blacklist.\n if dist_name not in NO_LOG_PROB_PARAM_GRADS:\n with tf.GradientTape() as tape:\n lp = dist.log_prob(tf.stop_gradient(sample))\n grads = tape.gradient(lp, dist.variables)\n for grad, var in zip(grads, dist.variables):\n if grad is None:\n raise AssertionError(\n 'Missing log_prob -> {} grad for distribution {}'.format(\n var, dist_name))\n\n # Test that all forms of probability evaluation avoid reading distribution\n # parameters more than once.\n for evaluative in sorted(data.draw(\n hps.sets(\n hps.one_of(\n map(hps.just, [\n 'log_prob', 'prob', 'log_cdf', 'cdf',\n 'log_survival_function', 'survival_function'\n ])),\n min_size=3,\n max_size=3))):\n hp.note('Testing excessive var usage in {}.{}'.format(\n dist_name, evaluative))\n try:\n # No validation => 1 convert. But for TD we allow 2:\n # dist.log_prob(bijector.inverse(samp)) + bijector.ildj(samp)\n max_permissible = 2 + extra_tensor_conversions_allowed(dist)\n with tfp_hps.assert_no_excessive_var_usage(\n 'evaluative `{}` of `{}`'.format(evaluative, dist),\n max_permissible=max_permissible):\n getattr(dist, evaluative)(sample)\n except NotImplementedError:\n pass\n\n\n@test_util.test_all_tf_execution_regimes\nclass ReproducibilityTest(test_util.TestCase):\n\n @parameterized.named_parameters(\n {'testcase_name': dname, 'dist_name': dname}\n for dname in sorted(list(INSTANTIABLE_BASE_DISTS.keys()) +\n list(INSTANTIABLE_META_DISTS)))\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testDistribution(self, dist_name, data):\n dist = data.draw(distributions(dist_name=dist_name, enable_vars=False))\n seed = test_util.test_seed()\n with tfp_hps.no_tf_rank_errors():\n s1 = self.evaluate(dist.sample(50, seed=seed))\n if tf.executing_eagerly():\n tf.random.set_seed(seed)\n with tfp_hps.no_tf_rank_errors():\n s2 = self.evaluate(dist.sample(50, seed=seed))\n self.assertAllEqual(s1, s2)\n\n\n@test_util.test_all_tf_execution_regimes\nclass EventSpaceBijectorsTest(test_util.TestCase):\n\n def check_bad_loc_scale(self, dist):\n if hasattr(dist, 'loc') and hasattr(dist, 'scale'):\n try:\n loc_ = tf.convert_to_tensor(dist.loc)\n scale_ = tf.convert_to_tensor(dist.scale)\n except (ValueError, TypeError):\n # If they're not Tensor-convertible, don't try to check them. 
This is\n # the case, in, for example, multivariate normal, where the scale is a\n # `LinearOperator`.\n return\n loc, scale = self.evaluate([loc_, scale_])\n hp.assume(np.all(np.abs(loc / scale) < 1e7))\n\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testDistribution(self, data):\n enable_vars = data.draw(hps.booleans())\n\n # TODO(b/146572907): Fix `enable_vars` for metadistributions.\n broken_dists = EVENT_SPACE_BIJECTOR_IS_BROKEN\n if enable_vars:\n broken_dists.extend(INSTANTIABLE_META_DISTS)\n\n dist = data.draw(\n distributions(\n enable_vars=enable_vars,\n eligibility_filter=(lambda name: name not in broken_dists)))\n self.evaluate([var.initializer for var in dist.variables])\n self.check_bad_loc_scale(dist)\n\n event_space_bijector = dist._experimental_default_event_space_bijector()\n if event_space_bijector is None:\n return\n\n total_sample_shape = tensorshape_util.concatenate(\n # Draw a sample shape\n data.draw(tfp_hps.shapes()),\n # Draw a shape that broadcasts with `[batch_shape, inverse_event_shape]`\n # where `inverse_event_shape` is the event shape in the bijector's\n # domain. This is the shape of `y` in R**n, such that\n # x = event_space_bijector(y) has the event shape of the distribution.\n data.draw(tfp_hps.broadcasting_shapes(\n tensorshape_util.concatenate(\n dist.batch_shape,\n event_space_bijector.inverse_event_shape(\n dist.event_shape)), n=1))[0])\n\n y = data.draw(\n tfp_hps.constrained_tensors(\n tfp_hps.identity_fn, total_sample_shape.as_list()))\n x = event_space_bijector(y)\n with tf.control_dependencies(dist._sample_control_dependencies(x)):\n self.evaluate(tf.identity(x))\n\n\n@test_util.test_all_tf_execution_regimes\nclass DistributionSlicingTest(test_util.TestCase):\n\n def _test_slicing(self, data, dist):\n strm = test_util.test_seed_stream()\n batch_shape = dist.batch_shape\n slices = data.draw(valid_slices(batch_shape))\n slice_str = 'dist[{}]'.format(', '.join(stringify_slices(slices)))\n # Make sure the slice string appears in Hypothesis' attempted example log\n hp.note('Using slice ' + slice_str)\n if not slices: # Nothing further to check.\n return\n sliced_zeros = np.zeros(batch_shape)[slices]\n sliced_dist = dist[slices]\n hp.note('Using sliced distribution {}.'.format(sliced_dist))\n\n # Check that slicing modifies batch shape as expected.\n self.assertAllEqual(sliced_zeros.shape, sliced_dist.batch_shape)\n\n if not sliced_zeros.size:\n # TODO(b/128924708): Fix distributions that fail on degenerate empty\n # shapes, e.g. 
Multinomial, DirichletMultinomial, ...\n return\n\n # Check that sampling of sliced distributions executes.\n with tfp_hps.no_tf_rank_errors():\n samples = self.evaluate(dist.sample(seed=strm()))\n sliced_samples = self.evaluate(sliced_dist.sample(seed=strm()))\n\n # Come up with the slices for samples (which must also include event dims).\n sample_slices = (\n tuple(slices) if isinstance(slices, collections.Sequence) else\n (slices,))\n if Ellipsis not in sample_slices:\n sample_slices += (Ellipsis,)\n sample_slices += tuple([slice(None)] *\n tensorshape_util.rank(dist.event_shape))\n\n # Report sub-sliced samples (on which we compare log_prob) to hypothesis.\n hp.note('Sample(s) for testing log_prob ' + str(samples[sample_slices]))\n\n # Check that sampling a sliced distribution produces the same shape as\n # slicing the samples from the original.\n self.assertAllEqual(samples[sample_slices].shape, sliced_samples.shape)\n\n # Check that a sliced distribution can compute the log_prob of its own\n # samples (up to numerical validation errors).\n with tfp_hps.no_tf_rank_errors():\n try:\n lp = self.evaluate(dist.log_prob(samples))\n except tf.errors.InvalidArgumentError:\n # TODO(b/129271256): d.log_prob(d.sample()) should not fail\n # validate_args checks.\n # We only tolerate this case for the non-sliced dist.\n return\n sliced_lp = self.evaluate(sliced_dist.log_prob(samples[sample_slices]))\n\n # Check that the sliced dist's log_prob agrees with slicing the original's\n # log_prob.\n # TODO(b/128708201): Better numerics for Geometric/Beta?\n # Eigen can return quite different results for packet vs non-packet ops.\n # To work around this, we use a much larger rtol for the last 3\n # (assuming packet size 4) elements.\n packetized_lp = lp[slices].reshape(-1)[:-3]\n packetized_sliced_lp = sliced_lp.reshape(-1)[:-3]\n rtol = (0.1 if any(\n x in dist.name for x in ('Geometric', 'Beta', 'Dirichlet')) else 0.05)\n self.assertAllClose(packetized_lp, packetized_sliced_lp, rtol=rtol)\n possibly_nonpacket_lp = lp[slices].reshape(-1)[-3:]\n possibly_nonpacket_sliced_lp = sliced_lp.reshape(-1)[-3:]\n\n # TODO(b/140229057): Resolve nan disagreement between eigen vec/scalar paths\n finite = (np.isfinite(possibly_nonpacket_lp) &\n np.isfinite(possibly_nonpacket_sliced_lp))\n possibly_nonpacket_lp = np.where(finite, possibly_nonpacket_lp, 0)\n possibly_nonpacket_sliced_lp = np.where(\n finite, possibly_nonpacket_sliced_lp, 0)\n self.assertAllClose(\n possibly_nonpacket_lp, possibly_nonpacket_sliced_lp,\n rtol=0.4, atol=1e-4)\n\n def _run_test(self, data):\n def ok(name):\n return name not in INSTANTIABLE_BUT_NOT_SLICABLE\n dist = data.draw(distributions(enable_vars=False, eligibility_filter=ok))\n\n # Check that all distributions still register as non-iterable despite\n # defining __getitem__. (Because __getitem__ magically makes an object\n # iterable for some reason.)\n with self.assertRaisesRegexp(TypeError, 'not iterable'):\n iter(dist)\n\n # Test slicing\n self._test_slicing(data, dist)\n\n # TODO(bjp): Enable sampling and log_prob checks. 
Currently, too many errors\n # from out-of-domain samples.\n # self.evaluate(dist.log_prob(dist.sample(seed=test_util.test_seed())))\n\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testDistributions(self, data):\n self._run_test(data)\n\n def disabled_testFailureCase(self):\n # TODO(b/140229057): This test should pass.\n dist = tfd.Chi(df=np.float32(27.744131))\n dist = tfd.TransformedDistribution(\n bijector=tfb.NormalCDF(), distribution=dist, batch_shape=[4])\n dist = tfb.Expm1()(dist)\n samps = 1.7182817 + tf.zeros_like(dist.sample(seed=test_util.test_seed()))\n self.assertAllClose(dist.log_prob(samps)[0], dist[0].log_prob(samps[0]))\n\n\n@test_util.test_all_tf_execution_regimes\nclass DistributionsWorkWithAutoVectorizationTest(test_util.TestCase):\n\n def _test_vectorization(self, dist_name, dist):\n seed = test_util.test_seed()\n\n num_samples = 3\n if dist_name in SAMPLE_AUTOVECTORIZATION_IS_BROKEN:\n sample = self.evaluate(dist.sample(num_samples, seed=seed))\n else:\n sample = self.evaluate(tf.vectorized_map(\n lambda i: dist.sample(seed=seed), tf.range(num_samples)))\n hp.note('Drew samples {}'.format(sample))\n\n tfp_hps.guitar_skip_if_matches('NegativeBinomial', dist_name, 'b/147743999')\n tfp_hps.guitar_skip_if_matches('Binomial', dist_name, 'b/147743999')\n if dist_name not in LOGPROB_AUTOVECTORIZATION_IS_BROKEN:\n pfor_lp = tf.vectorized_map(dist.log_prob, tf.convert_to_tensor(sample))\n batch_lp = dist.log_prob(sample)\n pfor_lp_, batch_lp_ = self.evaluate((pfor_lp, batch_lp))\n self.assertAllClose(pfor_lp_, batch_lp_,\n atol=VECTORIZED_LOGPROB_ATOL[dist_name])\n\n @parameterized.named_parameters(\n {'testcase_name': dname, 'dist_name': dname}\n for dname in sorted(list(INSTANTIABLE_BASE_DISTS.keys())))\n @hp.given(hps.data())\n @tfp_hps.tfp_hp_settings()\n def testVmap(self, dist_name, data):\n dist = data.draw(distributions(\n dist_name=dist_name, enable_vars=False,\n validate_args=False)) # TODO(b/142826246): Enable validate_args.\n self._test_vectorization(dist_name, dist)\n\n\n# Functions used to constrain randomly sampled parameter ndarrays.\n# TODO(b/128518790): Eliminate / minimize the fudge factors in here.\n\n\ndef constrain_between_eps_and_one_minus_eps(eps=1e-6):\n return lambda x: eps + (1 - 2 * eps) * tf.sigmoid(x)\n\n\ndef ensure_high_gt_low(low, high):\n \"\"\"Returns a value with shape matching `high` and gt broadcastable `low`.\"\"\"\n new_high = tf.maximum(low + tf.abs(low) * .1 + .1, high)\n reduce_dims = []\n if (tensorshape_util.rank(new_high.shape) >\n tensorshape_util.rank(high.shape)):\n reduced_leading_axes = tf.range(\n tensorshape_util.rank(new_high.shape) -\n tensorshape_util.rank(high.shape))\n new_high = tf.math.reduce_max(\n new_high, axis=reduced_leading_axes)\n reduce_dims = [\n d for d in range(tensorshape_util.rank(high.shape))\n if high.shape[d] < new_high.shape[d]\n ]\n if reduce_dims:\n new_high = tf.math.reduce_max(\n new_high, axis=reduce_dims, keepdims=True)\n return new_high\n\n\ndef fix_finite_discrete(d):\n size = d.get('probs', d.get('logits', None)).shape[-1]\n return dict(d, outcomes=tf.linspace(-1.0, 1.0, size))\n\n\ndef fix_lkj(d):\n return dict(d, concentration=d['concentration'] + 1, dimension=3)\n\n\ndef fix_pert(d):\n peak = ensure_high_gt_low(d['low'], d['peak'])\n high = ensure_high_gt_low(peak, d['high'])\n temperature = ensure_high_gt_low(\n np.zeros(d['temperature'].shape, dtype=np.float32), d['temperature'])\n return dict(d, peak=peak, high=high, temperature=temperature)\n\n\ndef fix_triangular(d):\n 
peak = ensure_high_gt_low(d['low'], d['peak'])\n high = ensure_high_gt_low(peak, d['high'])\n return dict(d, peak=peak, high=high)\n\n\ndef fix_wishart(d):\n df = d['df']\n scale = d.get('scale', d.get('scale_tril'))\n return dict(d, df=tf.maximum(df, tf.cast(scale.shape[-1], df.dtype)))\n\n\nCONSTRAINTS = {\n 'atol':\n tf.math.softplus,\n 'rtol':\n tf.math.softplus,\n 'concentration':\n tfp_hps.softplus_plus_eps(),\n 'GeneralizedPareto.concentration': # Permits +ve and -ve concentrations.\n lambda x: tf.math.tanh(x) * 0.24,\n 'concentration0':\n tfp_hps.softplus_plus_eps(),\n 'concentration1':\n tfp_hps.softplus_plus_eps(),\n 'covariance_matrix':\n tfp_hps.positive_definite,\n 'df':\n tfp_hps.softplus_plus_eps(),\n 'InverseGaussian.loc':\n tfp_hps.softplus_plus_eps(),\n 'VonMisesFisher.mean_direction': # max ndims is 3 to avoid instability.\n lambda x: tf.math.l2_normalize(tf.math.sigmoid(x[..., :3]) + 1e-6, -1),\n 'Categorical.probs':\n tf.math.softmax,\n 'ExpRelaxedOneHotCategorical.probs':\n tf.math.softmax,\n 'FiniteDiscrete.probs':\n tf.math.softmax,\n 'Multinomial.probs':\n tf.math.softmax,\n 'OneHotCategorical.probs':\n tf.math.softmax,\n 'RelaxedCategorical.probs':\n tf.math.softmax,\n 'Zipf.power':\n tfp_hps.softplus_plus_eps(1 + 1e-6), # strictly > 1\n 'Geometric.logits': # TODO(b/128410109): re-enable down to -50\n # Capping at 15. so that probability is less than 1, and entropy is\n # defined. b/147394924\n lambda x: tf.minimum(tf.maximum(x, -16.), 15.), # works around the bug\n 'Geometric.probs':\n constrain_between_eps_and_one_minus_eps(),\n 'Binomial.probs':\n tf.sigmoid,\n 'NegativeBinomial.probs':\n tf.sigmoid,\n 'Bernoulli.probs':\n tf.sigmoid,\n 'PlackettLuce.scores':\n tfp_hps.softplus_plus_eps(),\n 'ProbitBernoulli.probs':\n tf.sigmoid,\n 'RelaxedBernoulli.probs':\n tf.sigmoid,\n 'log_rate':\n lambda x: tf.maximum(x, -16.),\n 'mixing_concentration':\n tfp_hps.softplus_plus_eps(),\n 'mixing_rate':\n tfp_hps.softplus_plus_eps(),\n 'rate':\n tfp_hps.softplus_plus_eps(),\n 'scale':\n tfp_hps.softplus_plus_eps(),\n 'Wishart.scale':\n tfp_hps.positive_definite,\n 'scale_diag':\n tfp_hps.softplus_plus_eps(),\n 'scale_identity_multiplier':\n tfp_hps.softplus_plus_eps(),\n 'scale_tril':\n tfp_hps.lower_tril_positive_definite,\n 'temperature':\n tfp_hps.softplus_plus_eps(),\n 'total_count':\n lambda x: tf.floor(tf.sigmoid(x / 100) * 100) + 1,\n 'Bernoulli':\n lambda d: dict(d, dtype=tf.float32),\n 'CholeskyLKJ':\n fix_lkj,\n 'LKJ':\n fix_lkj,\n 'PERT':\n fix_pert,\n 'Triangular':\n fix_triangular,\n 'TruncatedNormal':\n lambda d: dict(d, high=ensure_high_gt_low(d['low'], d['high'])),\n 'Uniform':\n lambda d: dict(d, high=ensure_high_gt_low(d['low'], d['high'])),\n 'Wishart':\n fix_wishart,\n 'WishartTriL':\n fix_wishart,\n 'Zipf':\n lambda d: dict(d, dtype=tf.float32),\n 'FiniteDiscrete':\n fix_finite_discrete,\n}\n\n\ndef constraint_for(dist=None, param=None):\n if param is not None:\n return CONSTRAINTS.get('{}.{}'.format(dist, param),\n CONSTRAINTS.get(param, tfp_hps.identity_fn))\n return CONSTRAINTS.get(dist, tfp_hps.identity_fn)\n\n\nif __name__ == '__main__':\n # Hypothesis often finds numerical near misses. Debugging them is much aided\n # by seeing all the digits of every floating point number, instead of the\n # usual default of truncating the printed representation to 8 digits.\n np.set_printoptions(floatmode='unique', precision=None)\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.v2.math.tanh",
"tensorflow.compat.v2.math.reduce_max",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.clip_by_value",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.TensorShape",
"numpy.where",
"tensorflow.compat.v2.linspace",
"tensorflow.compat.v2.is_tensor",
"tensorflow.compat.v2.math.ceil",
"numpy.float32",
"tensorflow.compat.v2.abs",
"numpy.zeros",
"tensorflow.compat.v2.math.sigmoid",
"tensorflow.compat.v2.test.main",
"numpy.abs",
"numpy.isfinite",
"tensorflow.compat.v2.maximum",
"numpy.set_printoptions",
"tensorflow.compat.v2.GradientTape",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.random.set_seed",
"tensorflow.compat.v2.stop_gradient",
"tensorflow.compat.v2.sigmoid"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
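Editor's note: the test module stored in the row above is built almost entirely out of Hypothesis `@hps.composite` strategies (`valid_slices`, `broadcasting_params`, `distributions`, ...), all of which follow the same `draw(...)` idiom: draw one value, then use it to bound later draws. The short standalone sketch below shows that idiom in miniature. It assumes only that the `hypothesis` package is installed; the strategy and test names are illustrative and are not part of the dumped code.

    import hypothesis.strategies as hps
    from hypothesis import given


    @hps.composite
    def shape_and_valid_index(draw, max_rank=3, max_side=4):
        """Draw a random tensor shape plus an index guaranteed to be in bounds."""
        rank = draw(hps.integers(min_value=1, max_value=max_rank))
        shape = tuple(
            draw(hps.integers(min_value=1, max_value=max_side))
            for _ in range(rank))
        # As in the valid_slices strategy above, later draws are bounded by
        # values drawn earlier (here, the size of the first dimension).
        index = draw(hps.integers(min_value=0, max_value=shape[0] - 1))
        return shape, index


    @given(shape_and_valid_index())
    def test_index_is_in_bounds(shape_and_index):
        shape, index = shape_and_index
        assert 0 <= index < shape[0]

The dumped strategies layer this same pattern more deeply (a drawn batch shape constrains the drawn parameters, which constrain the drawn distribution), but each layer is just another bounded `draw`.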
kessido/Neuroscience-seminar
|
[
"09c137638e49a12f9389fb37ed1a810be3cbb7be"
] |
[
"neuralocalize/prediction.py"
] |
[
"\"\"\"This code simulates the prediction code of the of the connectivity model.\n\"\"\"\nimport gzip\nimport pickle\nimport uuid\n\nimport numpy as np\nimport scipy.linalg as sl\nimport sklearn.preprocessing\n\nfrom .utils import constants\nfrom . import feature_extraction, utils\n\n\nclass FeatureExtractor:\n\t\"\"\"A class warping the scaling and feature extraction methods.\n\t\"\"\"\n\n\tdef __init__(self, pca_result = None, default_brain_map = None):\n\t\t\"\"\" Init the Feature Extractor from subjects and pca.\n\t\tCreate a scaling factor of the cortical and sub cortical parts.\n\n\t\t:param pca_result: the PCA to use.\n\t\t\"\"\"\n\t\tself._is_fitted = False\n\t\tself._semi_dense_connectome_data = None\n\t\tself._left_right_hemisphere_data = None\n\t\tself._uuid = uuid.uuid4()\n\t\tself._pca_result = pca_result\n\t\tself._default_brain_map = default_brain_map\n\t\tself._ctx_indices, self._sub_ctx_indices = utils.cifti_utils.get_cortex_and_sub_cortex_indices(default_brain_map)\n\n\tdef _add_features_to_subjects(self, subjects, features):\n\t\tfor subject, feature in zip(subjects, features):\n\t\t\tsubject.features_extractor_uuid = self._uuid\n\t\t\tsubject.features_before_scaling = feature.copy()\n\n\tdef _get_features_for_scaling_ctx_sub_ctx(self, subjects_features):\n\t\tsubjects_features = np.transpose(subjects_features, [1, 0, 2])\n\t\tctx_features = subjects_features[self._ctx_indices, :, :]\n\t\tsub_ctx_features = subjects_features[self._sub_ctx_indices, :, :]\n\t\treturn ctx_features, sub_ctx_features\n\n\tdef _set_features_for_scaling_ctx_sub_ctx(self, subjects_features, ctx_features, sub_ctx_features):\n\t\tsubjects_features = np.transpose(subjects_features, [1, 0, 2])\n\t\tsubjects_features[self._ctx_indices, :, :] = ctx_features\n\t\tsubjects_features[self._sub_ctx_indices, :, :] = sub_ctx_features\n\t\treturn np.transpose(subjects_features, [1, 0, 2])\n\n\tdef _scale_transform(self, subjects_features):\n\t\tctx_features, sub_ctx_features = self._get_features_for_scaling_ctx_sub_ctx(subjects_features)\n\t\tctx_features = utils.utils.fsl_normalize(ctx_features)\n\t\tsub_ctx_features = utils.utils.fsl_normalize(sub_ctx_features)\n\t\treturn self._set_features_for_scaling_ctx_sub_ctx(subjects_features, ctx_features, sub_ctx_features)\n\n\tdef _get_or_create_semi_dense_connectome_data(self):\n\t\tif self._semi_dense_connectome_data is None:\n\t\t\tif self._pca_result is None:\n\t\t\t\tres, _ = utils.cifti_utils.load_cifti_brain_data_from_file(constants.DEFAULT_STRUCTURE_ICA_RESULT_PATH)\n\t\t\telse:\n\t\t\t\tres =feature_extraction.get_subcortical_parcellation(\n\t\t\t\t\tself._pca_result, self._default_brain_map)\n\t\t\tself._semi_dense_connectome_data = res.transpose()\n\t\treturn self._semi_dense_connectome_data\n\n\tdef _get_or_create_left_right_hemisphere_data(self):\n\t\tif self._left_right_hemisphere_data is None:\n\t\t\tif self._pca_result is None:\n\t\t\t\tres, _ = utils.cifti_utils.load_cifti_brain_data_from_file(constants.DEFAULT_ICA_SEPERATED_RESULT_PATH)\n\t\t\telse:\n\t\t\t\tres = feature_extraction.run_group_ica_separately(\n\t\t\t\t\tself._pca_result, self._default_brain_map)\n\t\t\tself._left_right_hemisphere_data = res.transpose()\n\t\treturn self._left_right_hemisphere_data\n\n\n\tdef _load_cached_subjects_features(self, subjects):\n\t\tres = []\n\t\tsubjects_not_loaded_indices = []\n\t\tfor i, subject in enumerate(subjects):\n\t\t\tif subject.features_extractor_uuid == 
self._uuid:\n\t\t\t\tres.append(subject.features_before_scaling.copy())\n\t\t\telse:\n\t\t\t\tres.append(None)\n\t\t\t\tsubjects_not_loaded_indices.append(i)\n\t\treturn res, subjects_not_loaded_indices\n\n\tdef transform(self, subjects):\n\t\t\"\"\"Extract the subject features.\n\n\t\t:param subjects: The subjects to extract their features [n_subjects, n_data].\n\t\t:return: The subjects' features.\n\t\t\"\"\"\n\t\tprint(\"Extracting features.\")\n\t\tres, subjects_not_loaded_indices = self._load_cached_subjects_features(subjects)\n\t\tif len(subjects_not_loaded_indices) > 0:\n\t\t\tleft_right_hemisphere_data = self._get_or_create_left_right_hemisphere_data()\n\t\t\tsemi_dense_connectome_data = self._get_or_create_semi_dense_connectome_data()\n\t\t\tself._pca_result = None\n\t\t\t\n\t\t\tfeature_extraction.run_dual_regression(left_right_hemisphere_data, self._default_brain_map, subjects)\n\t\t\tsubjects_not_loaded = [subjects[i] for i in subjects_not_loaded_indices]\n\t\t\tfeature_extraction.get_semi_dense_connectome(semi_dense_connectome_data, subjects_not_loaded)\n\t\t\tfeature_extraction_res = [sub.correlation_coefficient.transpose() for sub in subjects_not_loaded]\n\n\t\t\tfor i, subject_result in zip(subjects_not_loaded_indices, feature_extraction_res):\n\t\t\t\tres[i] = subject_result\n\t\tres = np.array(res, dtype=constants.DTYPE)\n\t\tself._add_features_to_subjects(subjects, res)\n\t\tres = self._scale_transform(res)\n\n\t\treturn res\n\n\nclass Predictor:\n\t\"\"\"A class containing all the localizer predictor model data.\n\n\t\tThis allow injecting another model instead, as it uses fit(x,y) and predict(x).\n\t\"\"\"\n\t\n\tclass _DefultPredictorGenerator:\n\t\t\"\"\"Implement a standart predictor generator\"\"\"\n\t\t\n\t\tclass _DefultPredictorModel:\n\t\t\t\"\"\"Implement a standart predictor\"\"\"\n\t\t\t\n\t\t\tdef __init__(self, beta):\n\t\t\t\tself._beta = beta\n\t\t\t\n\t\t\tdef predict(self, X):\n\t\t\t\treturn np.array([utils.utils.add_ones_column_to_matrix(x) for x in X]) @ self._beta\n\n\t\tdef fit(self, X, y):\n\t\t\tbetas = []\n\t\t\tfor subject_feature, task in zip(X, y):\n\t\t\t\tx = utils.utils.add_ones_column_to_matrix(subject_feature)\n\t\t\t\tres = sl.lstsq(x, task)[0]\n\t\t\t\tbetas.append(res)\n\t\t\tbeta = np.mean(np.array(betas), axis=0)\n\t\t\treturn Predictor._DefultPredictorGenerator._DefultPredictorModel(beta)\n\t\t\t\n\tdef __init__(self, pca_result, default_brain_map, predictor_generator=None):\n\t\t\"\"\"Init the predictor.\n\t\t\"\"\"\n\t\tself._is_fitted = False\n\t\tself._betas = None\n\t\tself._pca_result = pca_result\n\t\tself._default_brain_map = default_brain_map\n\t\tself._spatial_filters = None\n\t\tif predictor_generator is None:\n\t\t\tpredictor_generator = Predictor._DefultPredictorGenerator()\n\t\tself._predictor_generator = predictor_generator\n\n\tdef fit(self, subjects_feature, subjects_task):\n\t\t\"\"\"Fit the model from the data.\n\n\t\t:param subjects_feature: X,\n\t\t\t\t[n_samples, n_features] Matrix like object containing the subject features.\n\t\t:param subjects_task: y,\n\t\t\t\t[n_samples, n_results] Matrix like object containing the subject task results.\n\t\t\"\"\"\n\t\tif self._spatial_filters is None:\n\t\t\tif self._pca_result is None:\n\t\t\t\tgroup_ica_together = utils.cifti_utils.load_cifti_brain_data_from_file(constants.DEFAULT_ICA_BOTH_RESULT_PATH)[0].transpose()\n\t\t\telse:\n\t\t\t\tgroup_ica_together = feature_extraction.run_group_ica_together(self._pca_result, self._default_brain_map)\n\t\t\tself._pca_result 
= None\n\t\t\tself._default_brain_map = None\n\t\t\tself._spatial_filters = feature_extraction.get_spatial_filters(group_ica_together)\n\t\tself._predictors = []\n\t\t\n\t\tsubjects_feature = utils.utils.fsl_normalize(subjects_feature)\n\t\tfor j in range(self._spatial_filters.shape[1]):\n\t\t\tind = self._spatial_filters[:, j] > 0\n\t\t\tif np.any(ind):\t\t\t\n\t\t\t\tpartial_subjects_features = utils.utils.fsl_demean(subjects_feature[:, ind], 1)\n\t\t\t\tpartial_task = subjects_task[:,ind]\n\t\t\t\tself._predictors.append(self._predictor_generator.fit(partial_subjects_features, partial_task))\n\t\t\telse:\n\t\t\t\tself._predictors.append(None)\n\t\tself._is_fitted = True\n\n\tdef predict(self, subjects_features):\n\t\t\"\"\"Predict the task results from the subjects features.\n\n\t\t:param subjects_features: X,\n\t\t\t\t\t[n_subjects, n_features] Matrix like object containing the subjects features.\n\t\t:return: y,\n\t\t\t\t\t[n_subjects, n_results] Matrix like object containing the task result prediction.\n\t\t\"\"\"\n\t\tif not self._is_fitted:\n\t\t\traise BrokenPipeError(\"Cannot predict before the model was trained!\")\n\t\tres = np.zeros((subjects_features.shape[0], self._spatial_filters.shape[0]))\n\t\t\t\n\t\tfor j, predicator in zip(range(self._spatial_filters.shape[1]), self._predictors):\n\t\t\tind = self._spatial_filters[:, j] > 0\n\t\t\tif np.any(ind):\t\t\t\n\t\t\t\tpartial_subjects_features = utils.utils.fsl_demean(subjects_features[:, ind], 1)\n\t\t\t\tres[:,ind] = predicator.predict(partial_subjects_features)\n\t\treturn res\n\nclass Localizer:\n\t\"\"\"A class containing the localizer model data.\n\t\"\"\"\n\n\tdef __init__(self, subjects=None, pca_result=None, compute_pca = False, number_of_pca_component = 1000, predictor_generator = None,\n\t\t\t\t sample_file_path=constants.EXAMPLE_FILE_PATH):\n\t\t\"\"\"Initialize a localizer object\n\t\t\"\"\"\n\t\t_, brain_maps = utils.cifti_utils.load_cifti_brain_data_from_file(sample_file_path)\n\t\t\n\t\tif compute_pca and subjects is None:\n\t\t\traise ValueError(\"Cannot run pca if no subjects were provided.\")\n\t\t\n\t\tif compute_pca:\n\t\t\tpca_result = Localizer._get_pca(subjects, number_of_pca_component) # iterative_pca.iterative_pca(subjects)\n\n\t\tself._feature_extractor = FeatureExtractor(pca_result=pca_result, default_brain_map=brain_maps)\t\t\t\t\t\t\t\t\t\t\t\t \n\t\tself._predictor = Predictor(pca_result=pca_result, default_brain_map=brain_maps, predictor_generator=predictor_generator)\n\n\t@staticmethod\n\tdef _get_pca(subjects, number_of_pca_component):\n\t\tincPCA = sklearn.decomposition.IncrementalPCA(number_of_pca_component)\n\t\tfor subject in subjects:\n\t\t\tses = np.concatenate([ses.cifti.transpose() for ses in subject.sessions], axis=1)\n\t\t\tincPCA.partial_fit(ses)\n\t\tprint(incPCA.n_components.shape)\n\t\treturn incPCA.n_components\n\n\tdef fit(self, subjects, subjects_task):\n\t\t\"\"\"Fit the current loaded model on the given data.\n\t\t\"\"\"\n\t\tsubjects_feature = self._feature_extractor.transform(subjects)\n\t\tself._predictor.fit(subjects_feature, subjects_task)\n\n\tdef predict(self, subjects):\n\t\t\"\"\"Predict the task results from the subjects features.\n\t\t:return: The task result prediction.\n\t\t\"\"\"\n\t\tfeatures = self._feature_extractor.transform(subjects)\n\t\treturn self._predictor.predict(features)\n\n\tdef save_to_file(self, file_path):\n\t\t\"\"\"Save localizer to file.\n\t\t\"\"\"\n\t\tprint(\"Saving model to\", file_path)\n\t\treturn pickle.dump(self, 
gzip.open(file_path, 'wb'))\n\n\t@staticmethod\n\tdef load_from_file(file_path):\n\t\t\"\"\"Load a localizer from file.\n\t\t:return: The localizer object loaded.\n\t\t\"\"\"\n\t\tprint(\"Loading model from\", file_path)\n\t\tres = pickle.load(gzip.open(file_path, 'rb'))\n\t\tif not isinstance(res, Localizer):\n\t\t\traise TypeError(\"Content of file is either an old type and deprecated Localizer model, \"\n\t\t\t\t\t\t\t\"a corrupted file or in a wrong file format.\")\n\t\treturn res\n"
] |
[
[
"scipy.linalg.lstsq",
"numpy.any",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
karthik20122001/docker-python
|
[
"bdf14ce8f0e848773480084a0e55b34a23be5abe",
"bdf14ce8f0e848773480084a0e55b34a23be5abe",
"bdf14ce8f0e848773480084a0e55b34a23be5abe"
] |
[
"tests/test_lightgbm.py",
"tests/test_nnabla.py",
"tests/test_matplotlib.py"
] |
[
"import unittest\n\nimport lightgbm as lgb\nimport pandas as pd\n\nfrom common import gpu_test\n\nclass TestLightgbm(unittest.TestCase):\n # Based on the \"simple_example\" from their documentation:\n # https://github.com/Microsoft/LightGBM/blob/master/examples/python-guide/simple_example.py\n def test_cpu(self):\n lgb_train, lgb_eval = self.load_datasets()\n\n params = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'metric': {'l2', 'auc'},\n 'num_leaves': 31,\n 'learning_rate': 0.05,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'force_row_wise': True,\n 'verbose': 0\n }\n\n # Run only one round for faster test\n gbm = lgb.train(params,\n lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n early_stopping_rounds=1)\n\n self.assertEqual(1, gbm.best_iteration)\n\n @gpu_test\n def test_gpu(self):\n lgb_train, lgb_eval = self.load_datasets()\n \n params = {\n 'boosting_type': 'gbdt',\n 'objective': 'regression',\n 'metric': 'auc',\n 'num_leaves': 31,\n 'learning_rate': 0.05,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'force_row_wise': True,\n 'verbose': 1,\n 'device': 'gpu'\n }\n \n # Run only one round for faster test\n gbm = lgb.train(params,\n lgb_train,\n num_boost_round=1,\n valid_sets=lgb_eval,\n early_stopping_rounds=1)\n\n self.assertEqual(1, gbm.best_iteration)\n \n def load_datasets(self):\n df_train = pd.read_csv('/input/tests/data/lgb_train.csv', header=None, sep='\\t')\n df_test = pd.read_csv('/input/tests/data/lgb_test.csv', header=None, sep='\\t')\n \n y_train = df_train[0]\n y_test = df_test[0]\n X_train = df_train.drop(0, axis=1)\n X_test = df_test.drop(0, axis=1)\n\n lgb_train = lgb.Dataset(X_train, y_train)\n lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)\n\n return (lgb_train, lgb_eval)\n",
"import unittest\n\nimport numpy as np\nimport nnabla as nn\nimport nnabla.functions as F\nfrom nnabla.ext_utils import get_extension_context\n\nfrom common import gpu_test\n\n\nclass TestNNabla(unittest.TestCase):\n def test_addition(self):\n # entry variables\n a = nn.Variable.from_numpy_array(np.random.random())\n b = nn.Variable.from_numpy_array(np.random.random())\n\n # add operation\n c = a + b\n\n # forward\n c.forward()\n\n self.assertAlmostEqual(c.d, a.d + b.d, places=3)\n\n @gpu_test\n def test_cuda_ext(self):\n ctx = get_extension_context('cudnn', device_id='0')\n nn.set_default_context(ctx)\n",
"import unittest\nimport os.path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass TestMatplotlib(unittest.TestCase):\n def test_plot(self):\n plt.plot(np.linspace(0,1,50), np.random.rand(50))\n plt.savefig(\"plot1.png\")\n\n self.assertTrue(os.path.isfile(\"plot1.png\"))\n"
] |
[
[
"pandas.read_csv"
],
[
"numpy.random.random"
],
[
"numpy.random.rand",
"matplotlib.pyplot.savefig",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andrewheusser/hddm
|
[
"ce335e7969e9ed1e56243acc1e7356730a27daf1"
] |
[
"hddm/simulators/hddm_dataset_generators.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom scipy.stats import truncnorm\nfrom patsy import dmatrix\nfrom collections import OrderedDict\nfrom hddm.simulators.basic_simulator import *\nfrom hddm.model_config import model_config\nfrom functools import partial\n\n# Helper\ndef hddm_preprocess(\n simulator_data=None,\n subj_id=\"none\",\n keep_negative_responses=False,\n add_model_parameters=False,\n keep_subj_idx=True,\n):\n\n \"\"\"Takes simulator data and turns it into HDDM ready format.\n\n :Arguments:\n simulator_data: tuple\n Output of e.g. the hddm.simulators.basic_simulator function.\n subj_id: str <default='none'>\n Subject id to attach to returned dataset\n keep_negative_responses: bool <default=False>\n Whether or not to turn negative responses into 0\n add_model_parameters: bool <default=False>\n Whether or not to add trial by trial model parameters to returned dataset\n keep_subj_idx: bool <default=True>\n Whether to keep subject id in the returned dataset\n\n \"\"\"\n # Define dataframe if simulator output is normal (comes out as list tuple [rts, choices, metadata])\n if len(simulator_data) == 3:\n df = pd.DataFrame(simulator_data[0].astype(np.double), columns=[\"rt\"])\n df[\"response\"] = simulator_data[1].astype(int)\n\n if not keep_negative_responses:\n df.loc[df[\"response\"] == -1.0, \"response\"] = 0.0\n if keep_subj_idx:\n df[\"subj_idx\"] = subj_id\n\n # Add ground truth parameters to dataframe\n if add_model_parameters:\n for param in model_config[simulator_data[2][\"model\"]][\"params\"]:\n if len(simulator_data[2][param]) > 1:\n df[param] = simulator_data[2][param]\n else:\n # print(param)\n # print(simulator_data[2][param][0])\n df[param] = simulator_data[2][param][0]\n return df\n\n\ndef _add_outliers(\n sim_out=None,\n p_outlier=None, # AF-comment: Redundant argument, can compute from sim_out !\n max_rt_outlier=10.0,\n):\n \"\"\"Add outliers to simulated data\n\n :Arguments:\n sim_out: tuple <default=None>\n Output of hddm.simulators.basic_simulator\n p_outlier: float <default=None>\n Probability of outliers\n max_rt_outlier: float\n Maximum reaction time that an outlier can take\n\n :Return:\n sim_out data with the appropriate number of samples exchanged by the samples\n from the outlier distribution.\n \"\"\"\n\n if p_outlier == 0:\n return sim_out\n else:\n # Sample number of outliers from appropriate binomial\n n_outliers = np.random.binomial(n=sim_out[0].shape[0], p=p_outlier)\n\n # Only if the sampled number of outliers is above 0,\n # do we bother generating and storing them\n if n_outliers > 0:\n # Initialize the outlier data\n outlier_data = np.zeros((n_outliers, 2))\n\n # Generate outliers\n # Reaction times are uniform between 0 and 1/max_rt_outlier (default 1 / 0.1)\n # Choice are random with equal probability among the valid choice options\n outlier_data[:, 0] = np.random.uniform(\n low=0.0, high=max_rt_outlier, size=n_outliers\n )\n outlier_data[:, 1] = np.random.choice(\n sim_out[2][\"possible_choices\"], size=n_outliers\n )\n\n # Exchange the last parts of the simulator data for the outliers\n sim_out[0][-n_outliers:, 0] = outlier_data[:, 0]\n sim_out[1][-n_outliers:, 0] = outlier_data[:, 1]\n return sim_out\n\n\n# -------------------------------------------------------------------------------------\n# Parameter set generator\ndef make_parameter_vectors_nn(model=\"angle\", param_dict=None, n_parameter_vectors=10):\n \"\"\"Generates a (number of) parameter vector(s) for a given model.\n\n :Arguments:\n\n model: str <default='angle'>\n String that 
specifies the model to be simulated.\n Current options include, 'angle', 'ornstein', 'levy', 'weibull', 'full_ddm'\n param_dict: dict <default=None>\n Dictionary of parameter values that you would like to pre-specify. The dictionary takes the form (for the simple examples of the ddm),\n {'v': [0], 'a': [1.5]} etc.. For a given key supply either a list of length 1, or a list of\n length equal to the n_parameter_vectors argument supplied.\n n_parameter_vectors: int <default=10>\n Nuber of parameter vectors you want to generate\n\n Return: pandas.DataFrame\n Columns are parameter names and rows fill the parameter values.\n \"\"\"\n\n parameter_data = np.zeros((n_parameter_vectors, len(model_config[model][\"params\"])))\n\n if param_dict is not None:\n cnt = 0\n for param in model_config[model][\"params\"]:\n\n if param in param_dict.keys():\n\n if (len(param_dict[param]) == n_parameter_vectors) or (\n len(param_dict[param]) == 1\n ):\n # Check if parameters are properly in bounds\n if (\n np.sum(\n np.array(param_dict[param])\n < model_config[model][\"param_bounds\"][0][cnt]\n )\n > 0\n or np.sum(\n np.array(param_dict[param])\n > model_config[model][\"param_bounds\"][1][cnt]\n )\n > 0\n ):\n\n print(\n \"The parameter: \",\n param,\n \", is out of the accepted bounds [\",\n model_config[model][\"param_bounds\"][0][cnt],\n \",\",\n model_config[model][\"param_bounds\"][1][cnt],\n \"]\",\n )\n return\n else:\n parameter_data[:, cnt] = param_dict[param]\n else:\n print(\n \"Param dict not specified correctly. Lengths of parameter lists needs to be 1 or equal to n_param_sets\"\n )\n\n else:\n parameter_data[:, cnt] = np.random.uniform(\n low=model_config[model][\"param_bounds\"][0][cnt],\n high=model_config[model][\"param_bounds\"][1][cnt],\n size=n_parameter_vectors,\n )\n cnt += 1\n else:\n parameter_data = np.random.uniform(\n low=model_config[model][\"param_bounds\"][0],\n high=model_config[model][\"param_bounds\"][1],\n size=(n_parameter_vectors, len(model_config[model][\"params\"])),\n )\n\n return pd.DataFrame(parameter_data, columns=model_config[model][\"params\"])\n\n\n# Dataset generators\ndef simulator_single_subject(\n parameters=(0, 0, 0),\n p_outlier=0.0,\n max_rt_outlier=10.0,\n model=\"angle\",\n n_samples=1000,\n delta_t=0.001,\n max_t=20,\n bin_dim=None,\n bin_pointwise=False,\n verbose=0,\n):\n \"\"\"Generate a hddm-ready dataset from a single set of parameters\n\n :Arguments:\n parameters: dict, list or numpy array\n Model parameters with which to simulate. Dict is preferable for informative error messages.\n If you know the order of parameters for your model of choice, you can also directly supply a\n list or nump.array which needs to have the parameters in the correct order.\n p_outlier: float between 0 and 1 <default=0>\n Probability of generating outlier datapoints. An outlier is defined\n as a random choice from a uniform RT distribution\n max_rt_outlier: float > 0 <default=10.0>\n Using max_rt_outlier (which is commonly defined for hddm models) here as an imlicit maximum\n on the RT of outliers. 
Outlier RTs are sampled uniformly from [0, max_rt_outlier]\n model: str <default='angle'>\n String that specifies the model to be simulated.\n Current options include, 'angle', 'ornstein', 'levy', 'weibull', 'full_ddm'\n n_samples: int <default=1000>\n Number of samples to simulate.\n delta_t: float <default=0.001>\n Size fo timesteps in simulator (conceptually measured in seconds)\n max_t: float <default=20>\n Maximum reaction the simulator can reach\n bin_dim: int <default=None>\n If simulator output should be binned, this specifies the number of bins to use\n bin_pointwise: bool <default=False>\n Determines whether to bin simulator output pointwise. Pointwise here is in contrast to producing binned output\n in the form of a histogram. Binning pointwise gives each trial's RT and index which is the respective bin-number.\n This is expected when you are using the 'cnn' network to fit the dataset later. If pointwise is not chosen,\n then the takes the form of a histogram, with bin-wise frequencies.\n\n Return: tuple of (pandas.DataFrame, dict, list)\n The first part of the tuple holds a DataFrame with a 'reaction time' column and a 'response' column. Ready to be fit with hddm.\n The second part of the tuple hold a dict with parameter names as keys and parameter values as values.\n The third part gives back the parameters supplied in array form.\n This return is consistent with the returned objects in other data generators under hddm.simulators\n \"\"\"\n\n # Sanity checks\n assert p_outlier >= 0 and p_outlier <= 1, \"p_outlier is not between 0 and 1\"\n assert max_rt_outlier > 0, \"max_rt__outlier needs to be > 0\"\n\n if verbose:\n print(\"Model: \", model)\n print(\"Parameters needed: \", model_config[model][\"params\"])\n\n if parameters is None:\n print(\"Proposing parameters and checking if in bounds\")\n params_ok = 0\n while not params_ok:\n parameters = np.random.normal(\n loc=model_config[model][\"param_bounds\"][0]\n + (\n (1 / 2)\n * (\n model_config[model][\"param_bounds\"][1]\n - model_config[model][\"param_bounds\"][0]\n )\n ),\n scale=(\n (1 / 4)\n * (\n model_config[model][\"param_bounds\"][1]\n - model_config[model][\"param_bounds\"][0]\n )\n ),\n size=1,\n )\n if not bool(\n int(\n np.sum(\n parameters < np.array(model_config[model][\"param_bounds\"][0])\n )\n + np.sum(\n parameters > np.array(model_config[model][\"param_bounds\"][1])\n )\n )\n ):\n params_ok = 1\n\n gt = {}\n for param in model_config[model][\"params\"]:\n id_tmp = model_config[model][\"params\"].index(param)\n gt[param] = parameters[id_tmp]\n\n elif type(parameters) == list or type(parameters) == np.ndarray:\n gt = {}\n for param in model_config[model][\"params\"]:\n id_tmp = model_config[model][\"params\"].index(param)\n gt[param] = parameters[id_tmp]\n\n elif type(parameters) == dict:\n gt = parameters.copy()\n\n # Get max shape of parameter (in case it is supplied as part length-n vector, part length-1 vector)\n tmp_max = 0\n for key_ in gt.keys():\n tmp_ = len(gt[key_])\n if tmp_ > tmp_max:\n tmp_max = tmp_\n\n parameters = np.zeros((tmp_max, len(model_config[model][\"params\"])))\n\n for param in model_config[model][\"params\"]:\n idx = model_config[model][\"params\"].index(param)\n if param in gt.keys():\n parameters[:, idx] = gt[param]\n else:\n print(\"The parameter \", param, \" was not supplied to the function.\")\n print(\n \"Taking default \",\n param,\n \" from hddm.model_config as\",\n model_config.model_config[model][\"params_default\"],\n )\n parameters[:, idx] = 
model_config[model][\"params_default\"][idx]\n else:\n return \"parameters argument is not of type list, np.ndarray, dict\"\n\n if verbose:\n print(parameters)\n\n x = simulator(\n theta=parameters,\n model=model,\n n_samples=n_samples,\n delta_t=delta_t,\n max_t=max_t,\n bin_dim=bin_dim,\n bin_pointwise=bin_pointwise,\n )\n\n # Add outliers\n # (Potentially 0 outliers)\n x = _add_outliers(\n sim_out=x,\n p_outlier=p_outlier,\n max_rt_outlier=max_rt_outlier,\n )\n\n data_out = hddm_preprocess(x, add_model_parameters=True)\n\n return (data_out, gt)\n\n\ndef simulator_stimcoding(\n model=\"angle\",\n split_by=\"v\",\n p_outlier=0.0,\n max_rt_outlier=10.0,\n drift_criterion=0.0,\n n_trials_per_condition=1000,\n delta_t=0.001,\n prespecified_params={},\n bin_pointwise=False,\n bin_dim=None,\n max_t=20.0,\n):\n\n \"\"\"Generate a dataset as expected by Hddmstimcoding. Essentially it is a specific way to parameterize two condition data.\n\n :Arguments:\n parameters: list or numpy array\n Model parameters with which to simulate.\n model: str <default='angle'>\n String that specifies the model to be simulated.\n Current options include, 'angle', 'ornstein', 'levy', 'weibull', 'full_ddm'\n split_by: str <default='v'>\n You can split by 'v' or 'z'. If splitting by 'v' one condition's v_0 = drift_criterion + 'v', the other\n condition's v_1 = drift_criterion - 'v'.\n Respectively for 'z', 'z_0' = 'z' and 'z_1' = 1 - 'z'.\n p_outlier: float between 0 and 1 <default=0>\n Probability of generating outlier datapoints. An outlier is defined\n as a random choice from a uniform RT distribution\n max_rt_outlier: float > 0 <default=10.0>\n Using max_rt_outlier (which is commonly defined for hddm models) here as an imlicit maximum\n on the RT of outliers. Outlier RTs are sampled uniformly from [0, max_rt_outlier]\n drift_criterion: float <default=0.0>\n Parameter that can be treated as the 'bias part' of the slope, in case we split_by 'v'.\n n_samples_by_condition: int <default=1000>\n Number of samples to simulate per condition (here 2 condition by design).\n delta_t: float <default=0.001>\n Size fo timesteps in simulator (conceptually measured in seconds)\n prespecified_params: dict <default = {}>\n A dictionary with parameter names keys. Values are list of either length 1, or length equal to the number of conditions (here 2).\n max_t: float <default=20>\n Maximum reaction the simulator can reach\n bin_dim: int <default=None>\n If simulator output should be binned, this specifies the number of bins to use\n bin_pointwise: bool <default=False>\n Determines whether to bin simulator output pointwise. Pointwise here is in contrast to producing binned output\n in the form of a histogram. Binning pointwise gives each trial's RT and index which is the respective bin-number.\n This is expected when you are using the 'cnn' network to fit the dataset later. If pointwise is not chosen,\n then the takes the form of a histogram, with bin-wise frequencies.\n\n Return: pandas.DataFrame holding a 'reaction time' column and a 'response' column. 
Ready to be fit with hddm.\n \"\"\"\n\n param_base = np.tile(\n np.random.uniform(\n low=model_config[model][\"param_bounds\"][0],\n high=model_config[model][\"param_bounds\"][1],\n size=(1, len(model_config[model][\"params\"])),\n ),\n (2, 1),\n )\n\n # Fill in prespecified parameters if supplied\n if prespecified_params is not None:\n if type(prespecified_params) == dict:\n for param in prespecified_params:\n id_tmp = model_config[model][\"params\"].index(param)\n param_base[:, id_tmp] = prespecified_params[param]\n else:\n print(\n \"prespecified_params is not supplied as a dictionary, please reformat the input\"\n )\n return\n\n if type(split_by) == list:\n pass\n elif type(split_by) == str:\n split_by = [split_by]\n else:\n print(\n \"Can not recognize data-type of argument: split_by, provided neither a list nor a string\"\n )\n return\n\n gt = {}\n for i in range(len(model_config[model][\"params\"])):\n gt[model_config[model][\"params\"][i]] = param_base[0, i]\n\n for i in range(2):\n if i == 0:\n if \"v\" in split_by:\n id_tmp = model_config[model][\"params\"].index(\"v\")\n param_base[i, id_tmp] = drift_criterion - param_base[i, id_tmp]\n gt[\"dc\"] = drift_criterion\n\n if i == 1:\n if \"v\" in split_by:\n id_tmp = model_config[model][\"params\"].index(\"v\")\n param_base[i, id_tmp] = drift_criterion + param_base[i, id_tmp]\n if \"z\" in split_by:\n id_tmp = model_config[model][\"params\"].index(\"z\")\n param_base[i, id_tmp] = 1 - param_base[i, id_tmp]\n\n dataframes = []\n for i in range(2):\n\n sim_out = simulator(\n param_base[i, :],\n model=model,\n n_samples=n_trials_per_condition,\n bin_dim=bin_dim,\n bin_pointwise=bin_pointwise,\n max_t=max_t,\n delta_t=delta_t,\n )\n\n sim_out = _add_outliers(\n sim_out=sim_out,\n p_outlier=p_outlier,\n max_rt_outlier=max_rt_outlier,\n )\n\n dataframes.append(\n hddm_preprocess(\n simulator_data=sim_out, subj_id=i + 1, add_model_parameters=True\n )\n )\n\n data_out = pd.concat(dataframes)\n data_out = data_out.rename(columns={\"subj_idx\": \"stim\"})\n data_out[\"subj_idx\"] = \"none\"\n return (data_out, gt)\n\n\ndef simulator_h_c(\n data=None,\n n_subjects=10,\n n_trials_per_subject=100,\n model=\"ddm_vanilla\",\n conditions=None,\n depends_on=None,\n regression_models=None,\n regression_covariates=None, # need this to make initial covariate matrix from which to use dmatrix (patsy)\n group_only_regressors=True,\n group_only=[\"z\"],\n fixed_at_default=None,\n p_outlier=0.0,\n outlier_max_t=10.0,\n **kwargs,\n):\n\n \"\"\"Flexible simulator that allows specification of models very similar to the hddm model classes. Has two major modes. When data \\n\n is supplied the function generates synthetic versions of the provided data. If no data is provided, you can supply\n a varied of options to create complicated synthetic datasets from scratch.\n\n :Arguments:\n data: pd.DataFrame <default=None>\n Actual covariate dataset. If data is supplied its covariates are used instead of generated.\n n_subjects: int <default=5>\n Number of subjects in the datasets\n n_trials_per_subject: int <default=500>\n Number of trials for each subject\n model: str <default = 'ddm_vanilla'>\n Model to sample from. For traditional hddm supported models, append '_vanilla' to the model. 
Omitting 'vanilla'\n imposes constraints on the parameter sets to not violate the trained parameter space of our LANs.\n conditions: dict <default=None>\n Keys represent condition relevant columns, and values are lists of unique items for each condition relevant column.\n Example: {\"c_one\": [\"high\", \"low\"], \"c_two\": [\"high\", \"low\"], \"c_three\": [\"high\", \"medium\", \"low\"]}\n depends_on: dict <default=None>\n Keys specify model parameters that depend on the values --> lists of condition relevant columns.\n Follows the syntax in the HDDM model classes. Example: {\"v\": [\"c_one\", \"c_two\"]}\n regression_models: list or strings <default=None>\n Specify regression model formulas for one or more dependent parameters in a list.\n Follows syntax of HDDM model classes.\n Example: [\"z ~ covariate_name\"]\n regression_covariates: dict <default={'covariate_name': {'type': 'categorical', 'range': (0, 4)}}>\n Dictionary in dictionary. Specify the name of the covariate column as keys, and for each key supply the 'type' (categorical, continuous) and\n 'range' ((lower bound, upper bound)) of the covariate.\n Example: {\"covariate_name\": {\"type\": \"categorical\", \"range\": (0, 4)}}\n group_only_regressors: bin <default=True>\n Should regressors only be specified at the group level? If true then only intercepts are specified subject wise.\n Other covariates act globally.\n group_only: list <default = ['z']>\n List of parameters that are specified only at the group level.\n fixed_at_default: list <default=None>\n List of parameters for which defaults are to be used.\n These defaults are specified in the model_config dictionary,\n which you can access via: hddm.simulators.model_config.\n Example: ['t']\n p_outlier: float <default = 0.0>\n Specifies the proportion of outliers in the data.\n outlier_max_t: float <default = 10.0>\n Outliers are generated from np.random.uniform(low = 0, high = outlier_max_t) with random choices.\n Returns:\n (pandas.DataFrame, dict): The Dataframe holds the generated dataset, ready for constuction of an hddm model. The dictionary holds the groundtruth parameter (values) and parameter names (keys). Keys match\n the names of traces when fitting the equivalent hddm model. 
The parameter dictionary is useful for some graphs, otherwise not neccessary.\n \"\"\"\n\n # print('starting data generation')\n meta_params = {\n \"group_param_dist\": \"normal\",\n \"gen_norm_std\": 1 / 3,\n \"uniform_buffer\": 1 / 5,\n \"gen_std_std\": 1 / 8,\n \"covariate_range\": 1 / 4,\n }\n\n for key_ in kwargs.keys():\n meta_params[key_] = kwargs[key_]\n\n def check_params(data=None, model=None, is_nn=True):\n \"\"\"\n Function checks if parameters are within legal bounds\n \"\"\"\n for key in data.keys():\n if key in model_config[model][\"params\"]:\n if (\n np.sum(\n data[key]\n < model_config[model][\"param_bounds\"][0][\n model_config[model][\"params\"].index(key)\n ]\n )\n > 0\n ):\n return 0\n elif (\n np.sum(\n data[key]\n > model_config[model][\"param_bounds\"][1][\n model_config[model][\"params\"].index(key)\n ]\n )\n > 0\n ):\n return 0\n return 1\n\n def get_parameter_remainder(\n regression_models=None, group_only=None, depends_on=None, fixed_at_default=None\n ):\n\n \"\"\"\n The arguments supplied to the simulator implicitly specify how we should handle a bunch of model parameters.\n If there remain model parameters that did not receive implicit instructions, we call these 'remainder' parameters\n and sample them randomly for our simulations.\n \"\"\"\n\n # Add subject parameters to full_parameter_dict\n total_param_list = model_config[model][\"params\"]\n params_utilized = []\n\n # Regression Part\n # reg_df = make_covariate_df(regression_covariates, n_trials_per_subject)\n if regression_models is not None:\n for regression_model in regression_models:\n separator = regression_model.find(\"~\")\n assert separator != -1, \"No outcome variable specified.\"\n params_utilized += regression_model[:separator].strip(\" \")\n\n # Group only Part\n if group_only is not None:\n params_utilized += group_only\n\n # Fixed Part\n if fixed_at_default is not None:\n params_utilized += fixed_at_default\n\n # Depends on Part\n if depends_on is not None:\n for depends_on_key in depends_on.keys():\n params_utilized += [depends_on_key]\n\n params_utilized = list(set(params_utilized))\n\n # Rest of Params\n remainder = set(total_param_list) - set(params_utilized)\n\n return remainder\n\n def make_covariate_df(regression_covariates, n_trials_per_subject):\n \"\"\"\n Goes through the supplied covariate data, and turns it into a dataframe, with randomly generated covariate values.\n Each column refers to one covariate.\n \"\"\"\n\n cov_df = pd.DataFrame(\n np.zeros((n_trials_per_subject, len(list(regression_covariates.keys())))),\n columns=[key for key in regression_covariates.keys()],\n )\n\n for covariate in regression_covariates.keys():\n tmp = regression_covariates[covariate]\n if tmp[\"type\"] == \"categorical\":\n cov_df[covariate] = (\n np.random.choice(\n np.arange(tmp[\"range\"][0], tmp[\"range\"][1] + 1, 1),\n replace=True,\n size=n_trials_per_subject,\n )\n / (tmp[\"range\"][1])\n )\n else:\n cov_df[covariate] = np.random.uniform(\n low=tmp[\"range\"][0], high=tmp[\"range\"][1], size=n_trials_per_subject\n ) / (tmp[\"range\"][1] - tmp[\"range\"][0])\n\n return cov_df\n\n def make_conditions_df(conditions=None):\n \"\"\"\n Makes a dataframe out of the supplied condition dictionary, that stores each combination as a row.\n \"\"\"\n arg_tuple = tuple([conditions[key] for key in conditions.keys()])\n condition_rows = np.meshgrid(*arg_tuple)\n return pd.DataFrame(\n np.column_stack([x_tmp.flatten() for x_tmp in condition_rows]),\n columns=[key for key in conditions.keys()],\n )\n\n def 
make_single_sub_cond_df_gen(\n conditions_df,\n depends_on,\n regression_models,\n regression_covariates,\n group_only_regressors,\n group_only,\n fixed_at_default,\n remainder,\n model,\n group_level_parameter_dict,\n n_subjects,\n n_trials_per_subject,\n ):\n\n # Construct subject data\n full_parameter_dict = group_level_parameter_dict.copy()\n\n # Subject part -----------------------\n full_data = []\n # Condition --------------------------\n if conditions_df is None:\n n_conditions = 1\n else:\n n_conditions = conditions_df.shape[0]\n\n for condition_id in range(n_conditions):\n # remainder_set = 0\n regressor_set = 0\n\n for subj_idx in range(n_subjects):\n # Parameter vector\n subj_data = pd.DataFrame(index=np.arange(0, n_trials_per_subject, 1))\n subj_data[\"subj_idx\"] = str(subj_idx)\n\n # Fixed part\n if fixed_at_default is not None:\n for fixed_tmp in fixed_at_default:\n subj_data[fixed_tmp] = group_level_parameter_dict[fixed_tmp]\n\n # Group only part\n if group_only is not None:\n for group_only_tmp in group_only:\n if group_only_tmp in list(depends_on.keys()):\n pass\n else:\n subj_data[group_only_tmp] = group_level_parameter_dict[\n group_only_tmp\n ]\n\n # Remainder part\n if remainder is not None:\n for remainder_tmp in remainder:\n tmp_mean = group_level_parameter_dict[remainder_tmp]\n tmp_std = group_level_parameter_dict[remainder_tmp + \"_std\"]\n\n # If the subject has been seen before, we use the parameters from\n # the previous condition, since the remainder parameters do not change\n # across conditions\n if (\n remainder_tmp + \"_subj.\" + str(subj_idx)\n in full_parameter_dict.keys()\n ):\n pass\n else:\n # Otherwise, generate new parameter for this subject (really only relevant first condition for remainder parameters)\n full_parameter_dict[\n remainder_tmp + \"_subj.\" + str(subj_idx)\n ] = np.random.normal(loc=tmp_mean, scale=tmp_std)\n\n subj_data[remainder_tmp] = full_parameter_dict[\n remainder_tmp + \"_subj.\" + str(subj_idx)\n ]\n\n # Depends on part\n if depends_on is not None:\n # conditions_tmp = conditions_df.iloc[condition_id]\n for depends_tmp in depends_on.keys():\n conditions_df_tmp = conditions_df[depends_on[depends_tmp]].iloc[\n condition_id\n ]\n condition_elem = \".\".join(conditions_df_tmp)\n\n # Add parameters to subject dataframe\n if depends_tmp not in group_only:\n tmp_mean = group_level_parameter_dict[\n depends_tmp + \"(\" + condition_elem + \")\"\n ]\n tmp_std = group_level_parameter_dict[depends_tmp + \"_std\"]\n tmp_param_name = (\n depends_tmp\n + \"_subj(\"\n + condition_elem\n + \").\"\n + str(subj_idx)\n )\n\n # If the subject / condition combination has been see before\n # we do not reassign a new parameter here !\n\n if tmp_param_name in full_parameter_dict.keys():\n pass\n else: # Otherwise assign new parameter\n full_parameter_dict[tmp_param_name] = np.random.normal(\n loc=tmp_mean, scale=tmp_std\n )\n\n # Assign the parameter to subject data\n subj_data[depends_tmp] = full_parameter_dict[\n depends_tmp\n + \"_subj(\"\n + condition_elem\n + \").\"\n + str(subj_idx)\n ]\n else:\n subj_data[depends_tmp] = full_parameter_dict[\n depends_tmp + \"(\" + condition_elem + \")\"\n ]\n\n # Add the respective stimulus columns\n for condition_key_tmp in conditions_df_tmp.keys():\n subj_data[condition_key_tmp] = conditions_df_tmp[\n condition_key_tmp\n ]\n\n # Regressor part\n if regression_covariates is not None:\n cov_df = make_covariate_df(\n regression_covariates, n_trials_per_subject\n )\n\n # Add cov_df to subject data\n for key_tmp 
in cov_df.keys():\n subj_data[key_tmp] = cov_df[key_tmp].copy()\n\n if regression_models is not None:\n for reg_model in regression_models:\n\n # Make Design Matrix\n separator = reg_model.find(\"~\")\n outcome = reg_model[:separator].strip(\" \")\n reg_model_stripped = reg_model[(separator + 1) :]\n design_matrix = dmatrix(reg_model_stripped, cov_df)\n\n reg_params_tmp = []\n reg_param_names_tmp = []\n for reg_param_key in group_level_parameter_dict[\n outcome + \"_reg\"\n ].keys():\n if (\n group_only_regressors and \"Intercept\" in reg_param_key\n ) or (not group_only_regressors):\n reg_params_tmp.append(\n np.random.normal(\n loc=group_level_parameter_dict[\n outcome + \"_reg\"\n ][reg_param_key],\n scale=group_level_parameter_dict[\n outcome + \"_reg_std\"\n ][reg_param_key + \"_std\"],\n )\n )\n\n reg_param_names_tmp.append(\n reg_param_key + \"_subj.\" + str(subj_idx)\n )\n else:\n reg_params_tmp.append(\n group_level_parameter_dict[outcome + \"_reg\"][\n reg_param_key\n ]\n )\n reg_param_names_tmp.append(reg_param_key)\n\n reg_params_tmp = np.array(reg_params_tmp)\n\n for key in group_level_parameter_dict[outcome + \"_reg\"].keys():\n full_parameter_dict[key] = group_level_parameter_dict[\n outcome + \"_reg\"\n ][key]\n for key in group_level_parameter_dict[\n outcome + \"_reg_std\"\n ].keys():\n full_parameter_dict[key] = group_level_parameter_dict[\n outcome + \"_reg_std\"\n ][key]\n\n if not regressor_set:\n for k in range(len(reg_param_names_tmp)):\n full_parameter_dict[\n reg_param_names_tmp[k]\n ] = reg_params_tmp[k]\n\n subj_data[outcome] = (design_matrix * reg_params_tmp).sum(\n axis=1\n ) # AF-TD: This should probably include a noise term here (parameter really defined as coming from a linear model + noise)\n\n # Append full data:\n full_data.append(subj_data.copy())\n\n remainder_set = 1\n regressor_set = 1\n\n full_data = pd.concat(full_data)\n parameters = full_data[model_config[model][\"params\"]]\n\n # Run the actual simulations\n # print(parameters)\n\n sim_data = simulator(\n theta=parameters.values,\n model=model,\n n_samples=1,\n delta_t=0.001,\n max_t=20,\n no_noise=False,\n bin_dim=None,\n bin_pointwise=False,\n )\n\n # Post-processing\n full_data[\"rt\"] = sim_data[0].astype(np.float64)\n full_data[\"response\"] = sim_data[1].astype(np.float64)\n full_data.loc[full_data[\"response\"] < 0, [\"response\"]] = 0.0\n\n # Add in outliers\n if p_outlier > 0:\n # print('passing through outlier creation')\n outlier_idx = np.random.choice(\n list(data.index),\n replace=False,\n size=int(p_outlier * len(list(data.index))),\n )\n outlier_data = np.zeros((outlier_idx.shape[0], 2))\n\n # Outlier rts\n outlier_data[:, 0] = np.random.uniform(\n low=0.0, high=outlier_max_t, size=outlier_data.shape[0]\n )\n\n # Outlier choices\n outlier_data[:, 1] = np.random.choice(\n sim_data[2][\"possible_choices\"], size=outlier_data.shape[0]\n )\n\n # Exchange data for outliers\n full_data.iloc[\n outlier_idx,\n [\n list(full_data.keys()).index(\"rt\"),\n list(full_data.keys()).index(\"response\"),\n ],\n ] = outlier_data\n\n # Identify outliers in dataframe\n full_data[\"outlier\"] = 0\n full_data[outlier_idx, [list(full_data.keys()).index(\"outlier\")]] = 1\n\n full_data_cols = [\"rt\", \"response\", \"subj_idx\"]\n\n if regression_covariates is not None:\n full_data_cols += [key for key in regression_covariates.keys()]\n if conditions is not None:\n full_data_cols += [key for key in conditions.keys()]\n\n full_data_cols += model_config[model][\"params\"]\n full_data = 
full_data[full_data_cols]\n full_data.reset_index(drop=True, inplace=True)\n\n # AF-Comment: Does this cover all corner cases?\n # If n_subjects is 1 --> we overwrite the group parameters with the subj.0 parameters\n if n_subjects == 1:\n new_param_dict = {}\n for key, value in full_parameter_dict.items():\n if \"subj\" in key:\n new_key = key\n new_key = new_key.replace(\"_subj\", \"\")\n new_key = new_key[: new_key.find(\".\")]\n new_param_dict[new_key] = value\n elif \"_std\" in key:\n pass\n else:\n new_param_dict[key] = value\n full_parameter_dict = new_param_dict\n\n return full_data, full_parameter_dict\n\n def make_single_sub_cond_df_from_gt(\n data,\n conditions_df,\n depends_on,\n regression_models,\n regression_covariates,\n group_only_regressors,\n group_only,\n fixed_at_default,\n remainder,\n model,\n group_level_parameter_dict,\n ):\n # Construct subject data\n full_parameter_dict = group_level_parameter_dict.copy()\n\n # Subject part -----------------------\n full_data = []\n # Condition --------------------------\n\n # Initialize parameter columns in data\n for param in model_config[model][\"params\"]:\n data[param] = 0\n\n for subj_idx in data[\"subj_idx\"].unique(): # range(n_subjects):\n\n # Fixed part\n if fixed_at_default is not None:\n for fixed_tmp in fixed_at_default:\n data.loc[\n data[\"subj_idx\"] == int(subj_idx), [fixed_tmp]\n ] = group_level_parameter_dict[fixed_tmp]\n # subj_data.loc[fixed_tmp] = group_level_parameter_dict[fixed_tmp]\n\n # Group only part\n if group_only is not None:\n for group_only_tmp in group_only:\n if group_only_tmp in list(depends_on.keys()):\n pass\n else:\n data.loc[\n data[\"subj_idx\"] == int(subj_idx), [group_only_tmp]\n ] = group_level_parameter_dict[group_only_tmp]\n\n # Remainder part\n if remainder is not None:\n for remainder_tmp in remainder:\n # print('group_level_parameter_dict')\n # print(group_level_parameter_dict)\n tmp_mean = group_level_parameter_dict[remainder_tmp]\n tmp_std = group_level_parameter_dict[remainder_tmp + \"_std\"]\n full_parameter_dict[\n remainder_tmp + \"_subj.\" + str(subj_idx)\n ] = np.random.normal(loc=tmp_mean, scale=tmp_std)\n\n data.loc[\n data[\"subj_idx\"] == int(subj_idx), [remainder_tmp]\n ] = full_parameter_dict[remainder_tmp + \"_subj.\" + str(subj_idx)]\n\n # Depends on part\n if depends_on is not None:\n # Go through depends_on variables:\n for depends_tmp in depends_on.keys():\n conditions_df_tmp = conditions_df[\n depends_on[depends_tmp]\n ].drop_duplicates()\n\n for condition_id in range(conditions_df_tmp.shape[0]):\n\n condition_elem = \".\".join(conditions_df_tmp.iloc[condition_id])\n bool_ = data[\"subj_idx\"] == int(subj_idx)\n\n for key_ in conditions_df_tmp.keys():\n bool_ = (bool_) & (\n data[key_].astype(str)\n == conditions_df_tmp.iloc[condition_id][key_]\n )\n\n # Check if there is data which adheres to the condition currently active\n # Otherwise there is nothing to update\n # AF COMMENT: This check should already be applied at the point of generating the condition_df dataframe\n if np.sum(bool_) > 0:\n if depends_tmp not in group_only:\n tmp_mean = group_level_parameter_dict[\n depends_tmp + \"(\" + condition_elem + \")\"\n ]\n tmp_std = group_level_parameter_dict[\n depends_tmp + \"_std\"\n ]\n\n full_parameter_dict[\n depends_tmp\n + \"_subj(\"\n + condition_elem\n + \").\"\n + str(subj_idx)\n ] = np.random.normal(loc=tmp_mean, scale=tmp_std)\n\n data.loc[bool_, depends_tmp] = full_parameter_dict[\n depends_tmp\n + \"_subj(\"\n + condition_elem\n + \").\"\n + 
str(subj_idx)\n ]\n\n else:\n # print('passed here (group_only) with depends_tmp: ', depends_tmp)\n data.loc[bool_, depends_tmp] = full_parameter_dict[\n depends_tmp + \"(\" + condition_elem + \")\"\n ]\n\n # Regressor part\n # if regression_covariates is not None:\n # cov_df = make_covariate_df(regression_covariates, n_trials_per_subject)\n\n # # Add cov_df to subject data\n # # AF COMMENT: Not necessary if\n # # for key_tmp in cov_df.keys():\n # # subj_data[key_tmp] = cov_df[key_tmp].copy()\n\n if regression_models is not None:\n for reg_model in regression_models:\n # Make Design Matrix\n separator = reg_model.find(\"~\")\n outcome = reg_model[:separator].strip(\" \")\n reg_model_stripped = reg_model[(separator + 1) :]\n design_matrix = dmatrix(\n reg_model_stripped,\n data.loc[data[\"subj_idx\"] == int(subj_idx), :],\n )\n\n reg_params_tmp = []\n reg_param_names_tmp = []\n for reg_param_key in group_level_parameter_dict[\n outcome + \"_reg\"\n ].keys():\n if (group_only_regressors and \"Intercept\" in reg_param_key) or (\n not group_only_regressors\n ):\n reg_params_tmp.append(\n np.random.normal(\n loc=group_level_parameter_dict[outcome + \"_reg\"][\n reg_param_key\n ],\n scale=group_level_parameter_dict[\n outcome + \"_reg_std\"\n ][reg_param_key + \"_std\"],\n )\n )\n\n reg_param_names_tmp.append(\n reg_param_key + \"_subj.\" + str(subj_idx)\n )\n else:\n reg_params_tmp.append(\n group_level_parameter_dict[outcome + \"_reg\"][\n reg_param_key\n ]\n )\n reg_param_names_tmp.append(reg_param_key)\n\n reg_params_tmp = np.array(reg_params_tmp)\n\n for key in group_level_parameter_dict[outcome + \"_reg\"].keys():\n full_parameter_dict[key] = group_level_parameter_dict[\n outcome + \"_reg\"\n ][key]\n\n for key in group_level_parameter_dict[outcome + \"_reg_std\"].keys():\n full_parameter_dict[key] = group_level_parameter_dict[\n outcome + \"_reg_std\"\n ][key]\n\n if not regressor_set:\n for k in range(len(reg_param_names_tmp)):\n full_parameter_dict[\n reg_param_names_tmp[k]\n ] = reg_params_tmp[k]\n\n data.loc[data[\"subj_idx\"] == int(subj_idx), [outcome]] = (\n design_matrix * reg_params_tmp\n ).sum(\n axis=1\n ) # AF-TD: This should probably include a noise term here (parameter really defined as coming from a linear model + noise)\n\n regressor_set = 1\n\n parameters = data[model_config[model][\"params\"]]\n\n sim_data = simulator(\n theta=parameters.values,\n model=model,\n n_samples=1,\n delta_t=0.001,\n max_t=20,\n no_noise=False,\n bin_dim=None,\n bin_pointwise=False,\n )\n\n # Post-processing\n data[\"rt\"] = sim_data[0].astype(np.float64)\n data[\"response\"] = sim_data[1].astype(np.float64)\n data.loc[data[\"response\"] < 0, [\"response\"]] = 0.0\n\n # Add in outliers\n if p_outlier > 0:\n outlier_idx = np.random.choice(\n list(data.index),\n replace=False,\n size=int(p_outlier * len(list(data.index))),\n )\n outlier_data = np.zeros((outlier_idx.shape[0], 2))\n\n # Outlier rts\n outlier_data[:, 0] = np.random.uniform(\n low=0.0, high=outlier_max_t, size=outlier_data.shape[0]\n )\n\n # Outlier choices\n outlier_data[:, 1] = np.random.choice(\n sim_data[2][\"possible_choices\"], size=outlier_data.shape[0]\n )\n\n # Exchange data for outliers\n data.loc[outlier_idx, [\"rt\", \"response\"]] = outlier_data\n\n # Identify outliers in dataframe\n data[\"outlier\"] = 0\n data.loc[outlier_idx, [list(full_data.keys()).index(\"outlier\")]] = 1\n\n # AF-Comment: Does this cover all corner cases?\n # If n_subjects is 1 --> we overwrite the group parameters with the subj.0 parameters\n if 
len(list(data[\"subj_idx\"].unique())) == 1:\n new_param_dict = {}\n for key, value in full_parameter_dict.items():\n if \"subj\" in key:\n new_key = key\n new_key = new_key.replace(\"_subj\", \"\")\n new_key = new_key[: new_key.find(\".\")]\n new_param_dict[new_key] = value\n elif \"_std\" in key:\n pass\n else:\n new_param_dict[key] = value\n full_parameter_dict = new_param_dict\n\n return data, full_parameter_dict\n\n def make_group_level_params(\n data,\n conditions_df,\n group_only,\n depends_on,\n model,\n fixed_at_default,\n remainder,\n group_only_regressors,\n regression_models,\n regression_covariates,\n group_param_dist=\"normal\",\n gen_norm_std=1 / 4,\n uniform_buffer=1 / 5,\n gen_std_std=1 / 8,\n covariate_range=1\n / 4, # multiplied by range of parameter bounds to give size of covariate\n ):\n \"\"\"\n Make group level parameters from the information supplied.\n \"\"\"\n\n # Some comments\n\n group_level_parameter_dict = {}\n\n # COLLECT PARAMETER WISE DATA AND ON CONSTRAINTS AND RV-GENERATORS ------\n param_gen_info = {}\n for param_name in model_config[model][\"params\"]:\n idx = model_config[model][\"params\"].index(param_name)\n\n param_gen_info[param_name] = {}\n # print(idx)\n # print(model_config[model][\"param_bounds\"])\n param_gen_info[param_name][\"range\"] = (\n model_config[model][\"param_bounds\"][1][idx]\n - model_config[model][\"param_bounds\"][0][idx]\n )\n\n param_gen_info[param_name][\"mid\"] = model_config[model][\"param_bounds\"][0][\n idx\n ] + (param_gen_info[param_name][\"range\"] / 2)\n param_gen_info[param_name][\"gen_norm_std\"] = gen_norm_std * (\n param_gen_info[param_name][\"range\"] / 2\n )\n param_gen_info[param_name][\"uniform_buffer\"] = uniform_buffer * (\n param_gen_info[param_name][\"range\"] / 2\n )\n param_gen_info[param_name][\"std_gen_std\"] = (\n gen_std_std * param_gen_info[param_name][\"range\"]\n )\n param_gen_info[param_name][\"covariate_range\"] = (\n covariate_range * param_gen_info[param_name][\"range\"]\n )\n\n if group_param_dist == \"normal\":\n param_gen_info[param_name][\"rv\"] = partial(\n np.random.normal,\n loc=param_gen_info[param_name][\"mid\"],\n scale=param_gen_info[param_name][\"gen_norm_std\"],\n )\n elif group_param_dist == \"uniform\":\n param_gen_info[param_name][\"rv\"] = partial(\n np.random.uniform,\n low=model_config[model][\"param_bounds\"][0][param_name]\n + param_gen_info[param_name][\"uniform_buffer\"],\n high=model_config[model][\"param_bounds\"][1][param_name]\n - param_gen_info[param_name][\"uniform_buffer\"],\n )\n\n param_gen_info[param_name][\"std_rv\"] = partial(\n np.random.uniform, low=0, high=param_gen_info[param_name][\"std_gen_std\"]\n )\n\n param_gen_info[param_name][\"covariate_rv\"] = partial(\n np.random.uniform,\n low=-param_gen_info[param_name][\"covariate_range\"],\n high=param_gen_info[param_name][\"covariate_range\"],\n )\n # -----------------------------------------------\n\n # Fixed part --------------------------------------------------------\n if fixed_at_default is not None:\n for fixed_tmp in fixed_at_default:\n group_level_parameter_dict[fixed_tmp] = model_config[model][\n \"params_default\"\n ][model_config[model][\"params\"].index(fixed_tmp)]\n\n # Group only part (excluding depends on) ----------------------------\n if len(group_only) > 0:\n for group_only_tmp in group_only:\n if group_only_tmp in list(depends_on.keys()):\n pass\n else:\n group_level_parameter_dict[group_only_tmp] = param_gen_info[\n group_only_tmp\n ][\"rv\"]()\n\n # Remainder part 
-----------------------------------------------------\n if remainder is not None:\n for remainder_tmp in remainder:\n group_level_parameter_dict[remainder_tmp] = param_gen_info[\n remainder_tmp\n ][\"rv\"]()\n group_level_parameter_dict[remainder_tmp + \"_std\"] = param_gen_info[\n remainder_tmp\n ][\"std_rv\"]()\n\n # Depends on part ----------------------------------------------------\n if depends_on is not None:\n for depends_tmp in depends_on.keys():\n conditions_df_tmp = conditions_df[depends_on[depends_tmp]]\n\n # Get unique elements:\n unique_elems = []\n for i in range(conditions_df_tmp.shape[0]):\n unique_elems.append(\".\".join(conditions_df_tmp.iloc[i]))\n unique_elems = np.unique(np.array(unique_elems))\n\n for unique_elem in unique_elems:\n group_level_parameter_dict[\n depends_tmp + \"(\" + unique_elem + \")\"\n ] = param_gen_info[depends_tmp][\"rv\"]()\n\n if depends_tmp not in group_only:\n group_level_parameter_dict[depends_tmp + \"_std\"] = param_gen_info[\n remainder_tmp\n ][\"std_rv\"]()\n\n # Regressor part ------------------------------------------------------\n if regression_covariates is not None:\n # AF ADDED:\n # IF covariates supplied: skip generation\n if data is None:\n cov_df = make_covariate_df(regression_covariates, n_trials_per_subject)\n else:\n cov_df = data\n\n if regression_models is not None:\n for reg_model in regression_models:\n separator = reg_model.find(\"~\")\n outcome = reg_model[:separator].strip(\" \")\n reg_model_stripped = reg_model[(separator + 1) :]\n\n # Run through patsy dmatrix to get the covariate names\n # that patsy assigns !\n covariate_names = dmatrix(\n reg_model_stripped, cov_df\n ).design_info.column_names\n\n reg_trace_dict = OrderedDict()\n reg_std_trace_dict = OrderedDict()\n\n for covariate in covariate_names:\n if (\"Intercept\" in covariate) or (covariate == \"1\"):\n\n # AF-COMMENT: Here instead of covariate_rv --> just use\n reg_trace_dict[outcome + \"_\" + covariate] = param_gen_info[\n outcome\n ][\"rv\"]()\n\n # Intercept is always fit subject wise\n reg_std_trace_dict[\n outcome + \"_\" + covariate + \"_\" + \"std\"\n ] = param_gen_info[outcome][\"std_rv\"]()\n\n else:\n reg_trace_dict[outcome + \"_\" + covariate] = param_gen_info[\n outcome\n ][\"covariate_rv\"]()\n\n if not group_only_regressors:\n reg_std_trace_dict[\n outcome + \"_\" + covariate + \"_\" + \"std\"\n ] = param_gen_info[outcome][\"std_rv\"]()\n\n group_level_parameter_dict[outcome + \"_reg\"] = reg_trace_dict.copy()\n\n # AF-COMMENT: Is this necessary ?\n # if not group_only_regressors:\n group_level_parameter_dict[\n outcome + \"_reg\" + \"_std\"\n ] = reg_std_trace_dict.copy()\n\n return group_level_parameter_dict\n\n # MAIN PART OF THE FUNCTION -----------------------------------------------------------------\n\n # Some checks\n if group_only is None:\n group_only = []\n\n # Specify 'remainder' parameters --> will be sampled randomly from the allowed range\n remainder = get_parameter_remainder(\n regression_models=regression_models,\n group_only=group_only,\n depends_on=depends_on,\n fixed_at_default=fixed_at_default,\n )\n\n # Make conditions df\n if depends_on is not None:\n print(\"depends_on is: \", depends_on)\n if type(depends_on) == dict:\n if len(list(depends_on.keys())) > 0:\n # If data is None then conditions were supplied as an argument\n if data is None:\n conditions_df = make_conditions_df(conditions=conditions)\n else: # Otherwise we have covariate data, so we can deduce conditions\n conditions = dict()\n for key_ in 
depends_on.keys():\n for col in depends_on[key_]:\n conditions[col] = np.sort(data[col].unique()).astype(str)\n conditions_df = make_conditions_df(conditions=conditions)\n else:\n conditions_df = None\n else:\n conditions_df = None\n else:\n conditions_df = None\n\n params_ok_all = 0\n cnt = 0\n while params_ok_all == 0:\n if cnt > 0:\n print(\n \"new round of data simulation because parameter bounds where violated\"\n )\n\n group_level_param_dict = make_group_level_params(\n data=data,\n conditions_df=conditions_df,\n group_only=group_only,\n depends_on=depends_on,\n model=model,\n fixed_at_default=fixed_at_default,\n remainder=remainder,\n group_only_regressors=group_only_regressors,\n regression_models=regression_models,\n regression_covariates=regression_covariates,\n group_param_dist=meta_params[\"group_param_dist\"],\n gen_norm_std=meta_params[\"gen_norm_std\"],\n uniform_buffer=meta_params[\"uniform_buffer\"],\n gen_std_std=meta_params[\"gen_std_std\"],\n covariate_range=meta_params[\"covariate_range\"],\n )\n\n if data is None:\n data_, full_parameter_dict = make_single_sub_cond_df_gen(\n conditions_df=conditions_df,\n group_only=group_only,\n depends_on=depends_on,\n model=model,\n fixed_at_default=fixed_at_default,\n remainder=remainder,\n regression_models=regression_models,\n regression_covariates=regression_covariates,\n group_only_regressors=group_only_regressors,\n group_level_parameter_dict=group_level_param_dict,\n n_trials_per_subject=n_trials_per_subject,\n n_subjects=n_subjects,\n )\n else:\n data_, full_parameter_dict = make_single_sub_cond_df_from_gt(\n data=data,\n conditions_df=conditions_df,\n group_only=group_only,\n depends_on=depends_on,\n model=model,\n fixed_at_default=fixed_at_default,\n remainder=remainder,\n regression_models=regression_models,\n regression_covariates=regression_covariates,\n group_only_regressors=group_only_regressors,\n group_level_parameter_dict=group_level_param_dict,\n )\n # params_ok_all = 1\n params_ok_all = check_params(data=data_, model=model)\n cnt += 1\n\n return data_, full_parameter_dict\n"
] |
[
[
"pandas.concat",
"numpy.random.choice",
"numpy.arange",
"pandas.DataFrame",
"numpy.random.normal",
"numpy.random.binomial",
"numpy.random.uniform",
"numpy.array",
"numpy.meshgrid",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
yaozengwei/icefall
|
[
"9c39d8b009917834f0f2abc57f8c26bc7bb637e6",
"9c39d8b009917834f0f2abc57f8c26bc7bb637e6",
"9c39d8b009917834f0f2abc57f8c26bc7bb637e6"
] |
[
"egs/aishell/ASR/transducer_stateless_modified/pretrained.py",
"egs/librispeech/ASR/transducer_stateless2/decode.py",
"egs/tedlium3/ASR/pruned_transducer_stateless/pretrained.py"
] |
[
"#!/usr/bin/env python3\n# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,\n# Wei Kang)\n#\n# See ../../../../LICENSE for clarification regarding multiple authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsage:\n\n# greedy search\n./transducer_stateless_modified/pretrained.py \\\n --checkpoint /path/to/pretrained.pt \\\n --lang-dir /path/to/lang_char \\\n --method greedy_search \\\n /path/to/foo.wav \\\n /path/to/bar.wav\n\n# beam search\n./transducer_stateless_modified/pretrained.py \\\n --checkpoint /path/to/pretrained.pt \\\n --lang-dir /path/to/lang_char \\\n --method beam_search \\\n --beam-size 4 \\\n /path/to/foo.wav \\\n /path/to/bar.wav\n\n# modified beam search\n./transducer_stateless_modified/pretrained.py \\\n --checkpoint /path/to/pretrained.pt \\\n --lang-dir /path/to/lang_char \\\n --method modified_beam_search \\\n --beam-size 4 \\\n /path/to/foo.wav \\\n /path/to/bar.wav\n\n\"\"\"\n\nimport argparse\nimport logging\nimport math\nfrom pathlib import Path\nfrom typing import List\n\nimport kaldifeat\nimport torch\nimport torchaudio\nfrom beam_search import (\n beam_search,\n greedy_search,\n greedy_search_batch,\n modified_beam_search,\n)\nfrom torch.nn.utils.rnn import pad_sequence\nfrom train import get_params, get_transducer_model\n\nfrom icefall.lexicon import Lexicon\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\n \"--checkpoint\",\n type=str,\n required=True,\n help=\"Path to the checkpoint. \"\n \"The checkpoint is assumed to be saved by \"\n \"icefall.checkpoint.save_checkpoint().\",\n )\n\n parser.add_argument(\n \"--lang-dir\",\n type=Path,\n default=Path(\"data/lang_char\"),\n help=\"The lang dir\",\n )\n\n parser.add_argument(\n \"--method\",\n type=str,\n default=\"greedy_search\",\n help=\"\"\"Possible values are:\n - greedy_search\n - beam_search\n - modified_beam_search\n \"\"\",\n )\n\n parser.add_argument(\n \"sound_files\",\n type=str,\n nargs=\"+\",\n help=\"The input sound file(s) to transcribe. \"\n \"Supported formats are those supported by torchaudio.load(). \"\n \"For example, wav and flac are supported. \"\n \"The sample rate has to be 16kHz.\",\n )\n\n parser.add_argument(\n \"--sample-rate\",\n type=int,\n default=16000,\n help=\"The sample rate of the input sound file\",\n )\n\n parser.add_argument(\n \"--beam-size\",\n type=int,\n default=4,\n help=\"Used only when --method is beam_search and modified_beam_search\",\n )\n\n parser.add_argument(\n \"--context-size\",\n type=int,\n default=2,\n help=\"The context size in the decoder. 1 means bigram; \"\n \"2 means tri-gram\",\n )\n parser.add_argument(\n \"--max-sym-per-frame\",\n type=int,\n default=3,\n help=\"Maximum number of symbols per frame. 
\"\n \"Use only when --method is greedy_search\",\n )\n return parser\n\n return parser\n\n\ndef read_sound_files(\n filenames: List[str], expected_sample_rate: float\n) -> List[torch.Tensor]:\n \"\"\"Read a list of sound files into a list 1-D float32 torch tensors.\n Args:\n filenames:\n A list of sound filenames.\n expected_sample_rate:\n The expected sample rate of the sound files.\n Returns:\n Return a list of 1-D float32 torch tensors.\n \"\"\"\n ans = []\n for f in filenames:\n wave, sample_rate = torchaudio.load(f)\n assert sample_rate == expected_sample_rate, (\n f\"expected sample rate: {expected_sample_rate}. \"\n f\"Given: {sample_rate}\"\n )\n # We use only the first channel\n ans.append(wave[0])\n return ans\n\n\[email protected]_grad()\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n\n params = get_params()\n params.update(vars(args))\n\n device = torch.device(\"cpu\")\n if torch.cuda.is_available():\n device = torch.device(\"cuda\", 0)\n\n logging.info(f\"device: {device}\")\n\n lexicon = Lexicon(params.lang_dir)\n\n params.blank_id = 0\n params.vocab_size = max(lexicon.tokens) + 1\n\n logging.info(params)\n\n logging.info(\"About to create model\")\n model = get_transducer_model(params)\n\n checkpoint = torch.load(args.checkpoint, map_location=\"cpu\")\n model.load_state_dict(checkpoint[\"model\"])\n model.to(device)\n model.eval()\n model.device = device\n\n logging.info(\"Constructing Fbank computer\")\n opts = kaldifeat.FbankOptions()\n opts.device = device\n opts.frame_opts.dither = 0\n opts.frame_opts.snip_edges = False\n opts.frame_opts.samp_freq = params.sample_rate\n opts.mel_opts.num_bins = params.feature_dim\n\n fbank = kaldifeat.Fbank(opts)\n\n logging.info(f\"Reading sound files: {params.sound_files}\")\n waves = read_sound_files(\n filenames=params.sound_files, expected_sample_rate=params.sample_rate\n )\n waves = [w.to(device) for w in waves]\n\n logging.info(\"Decoding started\")\n features = fbank(waves)\n feature_lens = [f.size(0) for f in features]\n feature_lens = torch.tensor(feature_lens, device=device)\n\n features = pad_sequence(\n features, batch_first=True, padding_value=math.log(1e-10)\n )\n\n encoder_out, encoder_out_lens = model.encoder(\n x=features, x_lens=feature_lens\n )\n hyp_list = []\n if params.method == \"greedy_search\" and params.max_sym_per_frame == 1:\n hyp_list = greedy_search_batch(\n model=model,\n encoder_out=encoder_out,\n )\n elif params.method == \"modified_beam_search\":\n hyp_list = modified_beam_search(\n model=model,\n encoder_out=encoder_out,\n beam=params.beam_size,\n )\n else:\n for i in range(encoder_out.size(0)):\n # fmt: off\n encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]\n # fmt: on\n if params.method == \"greedy_search\":\n hyp = greedy_search(\n model=model,\n encoder_out=encoder_out_i,\n max_sym_per_frame=params.max_sym_per_frame,\n )\n elif params.method == \"beam_search\":\n hyp = beam_search(\n model=model,\n encoder_out=encoder_out_i,\n beam=params.beam_size,\n )\n else:\n raise ValueError(\n f\"Unsupported decoding method: {params.method}\"\n )\n hyp_list.append(hyp)\n\n hyps = []\n for hyp in hyp_list:\n hyps.append([lexicon.token_table[i] for i in hyp])\n\n s = \"\\n\"\n for filename, hyp in zip(params.sound_files, hyps):\n words = \" \".join(hyp)\n s += f\"{filename}:\\n{words}\\n\\n\"\n logging.info(s)\n\n logging.info(\"Decoding Done\")\n\n\nif __name__ == \"__main__\":\n formatter = (\n \"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s\"\n )\n\n 
logging.basicConfig(format=formatter, level=logging.INFO)\n main()\n",
"#!/usr/bin/env python3\n#\n# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)\n#\n# See ../../../../LICENSE for clarification regarding multiple authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUsage:\n(1) greedy search\n./transducer_stateless2/decode.py \\\n --epoch 14 \\\n --avg 7 \\\n --exp-dir ./transducer_stateless2/exp \\\n --max-duration 100 \\\n --decoding-method greedy_search\n\n(2) beam search\n./transducer_stateless2/decode.py \\\n --epoch 14 \\\n --avg 7 \\\n --exp-dir ./transducer_stateless2/exp \\\n --max-duration 100 \\\n --decoding-method beam_search \\\n --beam-size 4\n\n(3) modified beam search\n./transducer_stateless2/decode.py \\\n --epoch 14 \\\n --avg 7 \\\n --exp-dir ./transducer_stateless2/exp \\\n --max-duration 100 \\\n --decoding-method modified_beam_search \\\n --beam-size 4\n\"\"\"\n\n\nimport argparse\nimport logging\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, List, Tuple\n\nimport sentencepiece as spm\nimport torch\nimport torch.nn as nn\nfrom asr_datamodule import LibriSpeechAsrDataModule\nfrom beam_search import (\n beam_search,\n greedy_search,\n greedy_search_batch,\n modified_beam_search,\n)\nfrom train import get_params, get_transducer_model\n\nfrom icefall.checkpoint import average_checkpoints, load_checkpoint\nfrom icefall.utils import (\n AttributeDict,\n setup_logger,\n store_transcripts,\n write_error_stats,\n)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\n \"--epoch\",\n type=int,\n default=29,\n help=\"It specifies the checkpoint to use for decoding.\"\n \"Note: Epoch counts from 0.\",\n )\n parser.add_argument(\n \"--avg\",\n type=int,\n default=13,\n help=\"Number of checkpoints to average. Automatically select \"\n \"consecutive checkpoints before the checkpoint specified by \"\n \"'--epoch'. \",\n )\n\n parser.add_argument(\n \"--exp-dir\",\n type=str,\n default=\"transducer_stateless2/exp\",\n help=\"The experiment dir\",\n )\n\n parser.add_argument(\n \"--bpe-model\",\n type=str,\n default=\"data/lang_bpe_500/bpe.model\",\n help=\"Path to the BPE model\",\n )\n\n parser.add_argument(\n \"--decoding-method\",\n type=str,\n default=\"greedy_search\",\n help=\"\"\"Possible values are:\n - greedy_search\n - beam_search\n - modified_beam_search\n \"\"\",\n )\n\n parser.add_argument(\n \"--beam-size\",\n type=int,\n default=4,\n help=\"\"\"Used only when --decoding-method is\n beam_search or modified_beam_search\"\"\",\n )\n\n parser.add_argument(\n \"--context-size\",\n type=int,\n default=2,\n help=\"The context size in the decoder. 
1 means bigram; \"\n \"2 means tri-gram\",\n )\n parser.add_argument(\n \"--max-sym-per-frame\",\n type=int,\n default=1,\n help=\"\"\"Maximum number of symbols per frame.\n Used only when --decoding_method is greedy_search\"\"\",\n )\n\n return parser\n\n\ndef decode_one_batch(\n params: AttributeDict,\n model: nn.Module,\n sp: spm.SentencePieceProcessor,\n batch: dict,\n) -> Dict[str, List[List[str]]]:\n \"\"\"Decode one batch and return the result in a dict. The dict has the\n following format:\n\n - key: It indicates the setting used for decoding. For example,\n if greedy_search is used, it would be \"greedy_search\"\n If beam search with a beam size of 7 is used, it would be\n \"beam_7\"\n - value: It contains the decoding result. `len(value)` equals to\n batch size. `value[i]` is the decoding result for the i-th\n utterance in the given batch.\n Args:\n params:\n It's the return value of :func:`get_params`.\n model:\n The neural model.\n sp:\n The BPE model.\n batch:\n It is the return value from iterating\n `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation\n for the format of the `batch`.\n Returns:\n Return the decoding result. See above description for the format of\n the returned dict.\n \"\"\"\n device = model.device\n feature = batch[\"inputs\"]\n assert feature.ndim == 3\n\n feature = feature.to(device)\n # at entry, feature is (N, T, C)\n\n supervisions = batch[\"supervisions\"]\n feature_lens = supervisions[\"num_frames\"].to(device)\n\n encoder_out, encoder_out_lens = model.encoder(\n x=feature, x_lens=feature_lens\n )\n hyp_list: List[List[int]] = []\n\n if (\n params.decoding_method == \"greedy_search\"\n and params.max_sym_per_frame == 1\n ):\n hyp_list = greedy_search_batch(\n model=model,\n encoder_out=encoder_out,\n )\n elif params.decoding_method == \"modified_beam_search\":\n hyp_list = modified_beam_search(\n model=model,\n encoder_out=encoder_out,\n beam=params.beam_size,\n )\n else:\n batch_size = encoder_out.size(0)\n for i in range(batch_size):\n # fmt: off\n encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]\n # fmt: on\n if params.decoding_method == \"greedy_search\":\n hyp = greedy_search(\n model=model,\n encoder_out=encoder_out_i,\n max_sym_per_frame=params.max_sym_per_frame,\n )\n elif params.decoding_method == \"beam_search\":\n hyp = beam_search(\n model=model,\n encoder_out=encoder_out_i,\n beam=params.beam_size,\n )\n else:\n raise ValueError(\n f\"Unsupported decoding method: {params.decoding_method}\"\n )\n hyp_list.append(hyp)\n\n hyps = [sp.decode(hyp).split() for hyp in hyp_list]\n\n if params.decoding_method == \"greedy_search\":\n return {\"greedy_search\": hyps}\n else:\n return {f\"beam_{params.beam_size}\": hyps}\n\n\ndef decode_dataset(\n dl: torch.utils.data.DataLoader,\n params: AttributeDict,\n model: nn.Module,\n sp: spm.SentencePieceProcessor,\n) -> Dict[str, List[Tuple[List[str], List[str]]]]:\n \"\"\"Decode dataset.\n\n Args:\n dl:\n PyTorch's dataloader containing the dataset to decode.\n params:\n It is returned by :func:`get_params`.\n model:\n The neural model.\n sp:\n The BPE model.\n Returns:\n Return a dict, whose key may be \"greedy_search\" if greedy search\n is used, or it may be \"beam_7\" if beam size of 7 is used.\n Its value is a list of tuples. 
Each tuple contains two elements:\n        The first is the reference transcript, and the second is the\n        predicted result.\n    \"\"\"\n    num_cuts = 0\n\n    try:\n        num_batches = len(dl)\n    except TypeError:\n        num_batches = \"?\"\n\n    if params.decoding_method == \"greedy_search\":\n        log_interval = 100\n    else:\n        log_interval = 2\n\n    results = defaultdict(list)\n    for batch_idx, batch in enumerate(dl):\n        texts = batch[\"supervisions\"][\"text\"]\n\n        hyps_dict = decode_one_batch(\n            params=params,\n            model=model,\n            sp=sp,\n            batch=batch,\n        )\n\n        for name, hyps in hyps_dict.items():\n            this_batch = []\n            assert len(hyps) == len(texts)\n            for hyp_words, ref_text in zip(hyps, texts):\n                ref_words = ref_text.split()\n                this_batch.append((ref_words, hyp_words))\n\n            results[name].extend(this_batch)\n\n        num_cuts += len(texts)\n\n        if batch_idx % log_interval == 0:\n            batch_str = f\"{batch_idx}/{num_batches}\"\n\n            logging.info(\n                f\"batch {batch_str}, cuts processed until now is {num_cuts}\"\n            )\n    return results\n\n\ndef save_results(\n    params: AttributeDict,\n    test_set_name: str,\n    results_dict: Dict[str, List[Tuple[List[int], List[int]]]],\n):\n    test_set_wers = dict()\n    for key, results in results_dict.items():\n        recog_path = (\n            params.res_dir / f\"recogs-{test_set_name}-{key}-{params.suffix}.txt\"\n        )\n        store_transcripts(filename=recog_path, texts=results)\n        logging.info(f\"The transcripts are stored in {recog_path}\")\n\n        # The following prints out WERs, per-word error statistics and aligned\n        # ref/hyp pairs.\n        errs_filename = (\n            params.res_dir / f\"errs-{test_set_name}-{key}-{params.suffix}.txt\"\n        )\n        with open(errs_filename, \"w\") as f:\n            wer = write_error_stats(\n                f, f\"{test_set_name}-{key}\", results, enable_log=True\n            )\n            test_set_wers[key] = wer\n\n        logging.info(\"Wrote detailed error stats to {}\".format(errs_filename))\n\n    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])\n    errs_info = (\n        params.res_dir\n        / f\"wer-summary-{test_set_name}-{key}-{params.suffix}.txt\"\n    )\n    with open(errs_info, \"w\") as f:\n        print(\"settings\\tWER\", file=f)\n        for key, val in test_set_wers:\n            print(\"{}\\t{}\".format(key, val), file=f)\n\n    s = \"\\nFor {}, WER of different settings are:\\n\".format(test_set_name)\n    note = \"\\tbest for {}\".format(test_set_name)\n    for key, val in test_set_wers:\n        s += \"{}\\t{}{}\\n\".format(key, val, note)\n        note = \"\"\n    logging.info(s)\n\n\n@torch.no_grad()\ndef main():\n    parser = get_parser()\n    LibriSpeechAsrDataModule.add_arguments(parser)\n    args = parser.parse_args()\n    args.exp_dir = Path(args.exp_dir)\n\n    params = get_params()\n    params.update(vars(args))\n\n    assert params.decoding_method in (\n        \"greedy_search\",\n        \"beam_search\",\n        \"modified_beam_search\",\n    )\n    params.res_dir = params.exp_dir / params.decoding_method\n\n    params.suffix = f\"epoch-{params.epoch}-avg-{params.avg}\"\n    if \"beam_search\" in params.decoding_method:\n        params.suffix += f\"-beam-{params.beam_size}\"\n    else:\n        params.suffix += f\"-context-{params.context_size}\"\n        params.suffix += f\"-max-sym-per-frame-{params.max_sym_per_frame}\"\n\n    setup_logger(f\"{params.res_dir}/log-decode-{params.suffix}\")\n    logging.info(\"Decoding started\")\n\n    device = torch.device(\"cpu\")\n    if torch.cuda.is_available():\n        device = torch.device(\"cuda\", 0)\n\n    logging.info(f\"Device: {device}\")\n\n    sp = spm.SentencePieceProcessor()\n    sp.load(params.bpe_model)\n\n    # <blk> is defined in local/train_bpe_model.py\n    params.blank_id = sp.piece_to_id(\"<blk>\")\n    params.vocab_size = sp.get_piece_size()\n\n    
logging.info(params)\n\n logging.info(\"About to create model\")\n model = get_transducer_model(params)\n\n if params.avg == 1:\n load_checkpoint(f\"{params.exp_dir}/epoch-{params.epoch}.pt\", model)\n else:\n start = params.epoch - params.avg + 1\n filenames = []\n for i in range(start, params.epoch + 1):\n if start >= 0:\n filenames.append(f\"{params.exp_dir}/epoch-{i}.pt\")\n logging.info(f\"averaging {filenames}\")\n model.to(device)\n model.load_state_dict(average_checkpoints(filenames, device=device))\n\n model.to(device)\n model.eval()\n model.device = device\n\n num_param = sum([p.numel() for p in model.parameters()])\n logging.info(f\"Number of model parameters: {num_param}\")\n\n librispeech = LibriSpeechAsrDataModule(args)\n\n test_clean_cuts = librispeech.test_clean_cuts()\n test_other_cuts = librispeech.test_other_cuts()\n\n test_clean_dl = librispeech.test_dataloaders(test_clean_cuts)\n test_other_dl = librispeech.test_dataloaders(test_other_cuts)\n\n test_sets = [\"test-clean\", \"test-other\"]\n test_dl = [test_clean_dl, test_other_dl]\n\n for test_set, test_dl in zip(test_sets, test_dl):\n results_dict = decode_dataset(\n dl=test_dl,\n params=params,\n model=model,\n sp=sp,\n )\n\n save_results(\n params=params,\n test_set_name=test_set,\n results_dict=results_dict,\n )\n\n logging.info(\"Done!\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python3\n# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)\n# \t\t 2022 Xiaomi Crop. (authors: Mingshuang Luo)\n#\n# See ../../../../LICENSE for clarification regarding multiple authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUsage:\n\n(1) greedy search\n./pruned_transducer_stateless/pretrained.py \\\n --checkpoint ./pruned_transducer_stateless/exp/pretrained.pt \\\n --bpe-model ./data/lang_bpe_500/bpe.model \\\n --method greedy_search \\\n --max-sym-per-frame 1 \\\n /path/to/foo.wav \\\n /path/to/bar.wav\n\n(2) beam search\n./pruned_transducer_stateless/pretrained.py \\\n --checkpoint ./pruned_transducer_stateless/exp/pretrained.pt \\\n --bpe-model ./data/lang_bpe_500/bpe.model \\\n --method beam_search \\\n --beam-size 4 \\\n /path/to/foo.wav \\\n /path/to/bar.wav\n\n(3) modified beam search\n./pruned_transducer_stateless/pretrained.py \\\n --checkpoint ./pruned_transducer_stateless/exp/pretrained.pt \\\n --bpe-model ./data/lang_bpe_500/bpe.model \\\n --method modified_beam_search \\\n --beam-size 4 \\\n /path/to/foo.wav \\\n /path/to/bar.wav\n\n(4) fast beam search\n./pruned_transducer_stateless/pretrained.py \\\n --checkpoint ./pruned_transducer_stateless/exp/pretrained.pt \\\n --bpe-model ./data/lang_bpe_500/bpe.model \\\n --method fast_beam_search \\\n --beam 4 \\\n --max-contexts 4 \\\n --max-states 8 \\\n /path/to/foo.wav \\\n /path/to/bar.wav\n\nYou can also use `./pruned_transducer_stateless/exp/epoch-xx.pt`.\n\nNote: ./pruned_transducer_stateless/exp/pretrained.pt is generated by\n./pruned_transducer_stateless/export.py\n\"\"\"\n\n\nimport argparse\nimport logging\nimport math\nfrom typing import List\n\nimport k2\nimport kaldifeat\nimport sentencepiece as spm\nimport torch\nimport torch.nn as nn\nimport torchaudio\nfrom beam_search import (\n beam_search,\n fast_beam_search,\n greedy_search,\n greedy_search_batch,\n modified_beam_search,\n)\nfrom conformer import Conformer\nfrom decoder import Decoder\nfrom joiner import Joiner\nfrom model import Transducer\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom icefall.env import get_env_info\nfrom icefall.utils import AttributeDict\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\n \"--checkpoint\",\n type=str,\n required=True,\n help=\"Path to the checkpoint. \"\n \"The checkpoint is assumed to be saved by \"\n \"icefall.checkpoint.save_checkpoint().\",\n )\n\n parser.add_argument(\n \"--bpe-model\",\n type=str,\n help=\"\"\"Path to bpe.model.\n Used only when method is ctc-decoding.\n \"\"\",\n )\n\n parser.add_argument(\n \"--decoding-method\",\n type=str,\n default=\"greedy_search\",\n help=\"\"\"Possible values are:\n - greedy_search\n - beam_search\n - modified_beam_search\n - fast_beam_search\n \"\"\",\n )\n\n parser.add_argument(\n \"sound_files\",\n type=str,\n nargs=\"+\",\n help=\"The input sound file(s) to transcribe. \"\n \"Supported formats are those supported by torchaudio.load(). 
\"\n \"For example, wav and flac are supported. \"\n \"The sample rate has to be 16kHz.\",\n )\n\n parser.add_argument(\n \"--beam-size\",\n type=int,\n default=4,\n help=\"Used only when --method is beam_search and modified_beam_search \",\n )\n\n parser.add_argument(\n \"--beam\",\n type=float,\n default=4,\n help=\"\"\"A floating point value to calculate the cutoff score during beam\n search (i.e., `cutoff = max-score - beam`), which is the same as the\n `beam` in Kaldi.\n Used only when --decoding-method is fast_beam_search\"\"\",\n )\n\n parser.add_argument(\n \"--max-contexts\",\n type=int,\n default=4,\n help=\"\"\"Used only when --decoding-method is\n fast_beam_search\"\"\",\n )\n\n parser.add_argument(\n \"--max-states\",\n type=int,\n default=8,\n help=\"\"\"Used only when --decoding-method is\n fast_beam_search\"\"\",\n )\n\n parser.add_argument(\n \"--context-size\",\n type=int,\n default=2,\n help=\"The context size in the decoder. 1 means bigram; \"\n \"2 means tri-gram\",\n )\n\n parser.add_argument(\n \"--max-sym-per-frame\",\n type=int,\n default=1,\n help=\"\"\"Maximum number of symbols per frame. Used only when\n --method is greedy_search.\n \"\"\",\n )\n\n return parser\n\n\ndef get_params() -> AttributeDict:\n params = AttributeDict(\n {\n \"sample_rate\": 16000,\n # parameters for conformer\n \"feature_dim\": 80,\n \"subsampling_factor\": 4,\n \"attention_dim\": 512,\n \"nhead\": 8,\n \"dim_feedforward\": 2048,\n \"num_encoder_layers\": 12,\n \"vgg_frontend\": False,\n # parameters for decoder\n \"embedding_dim\": 512,\n \"env_info\": get_env_info(),\n }\n )\n return params\n\n\ndef get_encoder_model(params: AttributeDict) -> nn.Module:\n encoder = Conformer(\n num_features=params.feature_dim,\n output_dim=params.vocab_size,\n subsampling_factor=params.subsampling_factor,\n d_model=params.attention_dim,\n nhead=params.nhead,\n dim_feedforward=params.dim_feedforward,\n num_encoder_layers=params.num_encoder_layers,\n vgg_frontend=params.vgg_frontend,\n )\n return encoder\n\n\ndef get_decoder_model(params: AttributeDict) -> nn.Module:\n decoder = Decoder(\n vocab_size=params.vocab_size,\n embedding_dim=params.embedding_dim,\n blank_id=params.blank_id,\n unk_id=params.unk_id,\n context_size=params.context_size,\n )\n return decoder\n\n\ndef get_joiner_model(params: AttributeDict) -> nn.Module:\n joiner = Joiner(\n input_dim=params.vocab_size,\n inner_dim=params.embedding_dim,\n output_dim=params.vocab_size,\n )\n return joiner\n\n\ndef get_transducer_model(params: AttributeDict) -> nn.Module:\n encoder = get_encoder_model(params)\n decoder = get_decoder_model(params)\n joiner = get_joiner_model(params)\n\n model = Transducer(\n encoder=encoder,\n decoder=decoder,\n joiner=joiner,\n )\n return model\n\n\ndef read_sound_files(\n filenames: List[str], expected_sample_rate: float\n) -> List[torch.Tensor]:\n \"\"\"Read a list of sound files into a list 1-D float32 torch tensors.\n Args:\n filenames:\n A list of sound filenames.\n expected_sample_rate:\n The expected sample rate of the sound files.\n Returns:\n Return a list of 1-D float32 torch tensors.\n \"\"\"\n ans = []\n for f in filenames:\n wave, sample_rate = torchaudio.load(f)\n assert sample_rate == expected_sample_rate, (\n f\"expected sample rate: {expected_sample_rate}. 
\"\n f\"Given: {sample_rate}\"\n )\n # We use only the first channel\n ans.append(wave[0])\n return ans\n\n\[email protected]_grad()\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n\n params = get_params()\n\n params.update(vars(args))\n\n sp = spm.SentencePieceProcessor()\n sp.load(params.bpe_model)\n\n # <blk> and <unk> are defined in local/train_bpe_model.py\n params.blank_id = sp.piece_to_id(\"<blk>\")\n params.unk_id = sp.piece_to_id(\"<unk>\")\n params.vocab_size = sp.get_piece_size()\n\n logging.info(f\"{params}\")\n\n device = torch.device(\"cpu\")\n if torch.cuda.is_available():\n device = torch.device(\"cuda\", 0)\n\n logging.info(f\"device: {device}\")\n\n logging.info(\"Creating model\")\n model = get_transducer_model(params)\n\n checkpoint = torch.load(args.checkpoint, map_location=\"cpu\")\n model.load_state_dict(checkpoint[\"model\"], strict=False)\n model.to(device)\n model.eval()\n model.device = device\n\n if params.decoding_method == \"fast_beam_search\":\n decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)\n else:\n decoding_graph = None\n\n logging.info(\"Constructing Fbank computer\")\n opts = kaldifeat.FbankOptions()\n opts.device = device\n opts.frame_opts.dither = 0\n opts.frame_opts.snip_edges = False\n opts.frame_opts.samp_freq = params.sample_rate\n opts.mel_opts.num_bins = params.feature_dim\n\n fbank = kaldifeat.Fbank(opts)\n\n logging.info(f\"Reading sound files: {params.sound_files}\")\n waves = read_sound_files(\n filenames=params.sound_files, expected_sample_rate=params.sample_rate\n )\n waves = [w.to(device) for w in waves]\n\n logging.info(\"Decoding started\")\n features = fbank(waves)\n feature_lengths = [f.size(0) for f in features]\n\n features = pad_sequence(\n features, batch_first=True, padding_value=math.log(1e-10)\n )\n\n feature_lengths = torch.tensor(feature_lengths, device=device)\n\n with torch.no_grad():\n encoder_out, encoder_out_lens = model.encoder(\n x=features, x_lens=feature_lengths\n )\n\n hyps = []\n msg = f\"Using {params.decoding_method}\"\n logging.info(msg)\n\n if params.decoding_method == \"fast_beam_search\":\n hyp_tokens = fast_beam_search(\n model=model,\n decoding_graph=decoding_graph,\n encoder_out=encoder_out,\n encoder_out_lens=encoder_out_lens,\n beam=params.beam,\n max_contexts=params.max_contexts,\n max_states=params.max_states,\n )\n for hyp in sp.decode(hyp_tokens):\n hyps.append(hyp.split())\n elif (\n params.decoding_method == \"greedy_search\"\n and params.max_sym_per_frame == 1\n ):\n hyp_tokens = greedy_search_batch(\n model=model,\n encoder_out=encoder_out,\n )\n for hyp in sp.decode(hyp_tokens):\n hyps.append(hyp.split())\n elif params.decoding_method == \"modified_beam_search\":\n hyp_tokens = modified_beam_search(\n model=model,\n encoder_out=encoder_out,\n beam=params.beam_size,\n )\n for hyp in sp.decode(hyp_tokens):\n hyps.append(hyp.split())\n else:\n batch_size = encoder_out.size(0)\n\n for i in range(batch_size):\n # fmt: off\n encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]\n # fmt: on\n if params.decoding_method == \"greedy_search\":\n hyp = greedy_search(\n model=model,\n encoder_out=encoder_out_i,\n max_sym_per_frame=params.max_sym_per_frame,\n )\n elif params.decoding_method == \"beam_search\":\n hyp = beam_search(\n model=model,\n encoder_out=encoder_out_i,\n beam=params.beam_size,\n )\n else:\n raise ValueError(\n f\"Unsupported decoding method: {params.decoding_method}\"\n )\n hyps.append(sp.decode(hyp).split())\n\n s = \"\\n\"\n for filename, 
hyp in zip(params.sound_files, hyps):\n words = \" \".join(hyp)\n s += f\"{filename}:\\n{words}\\n\\n\"\n logging.info(s)\n\n logging.info(\"Decoding Done\")\n\n\nif __name__ == \"__main__\":\n formatter = (\n \"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s\"\n )\n\n logging.basicConfig(format=formatter, level=logging.INFO)\n main()\n"
] |
[
[
"torch.load",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
],
[
"torch.device",
"torch.no_grad",
"torch.cuda.is_available"
],
[
"torch.load",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ancrilin/Chinese-Text-Classification-Pytorch
|
[
"ea37f6e209e0598ced872c26192730fc84f5b16c"
] |
[
"utils_fasttext.py"
] |
[
"# coding: UTF-8\nimport os\nimport torch\nimport numpy as np\nimport pickle as pkl\nfrom tqdm import tqdm\nimport time\nfrom datetime import timedelta\n\n\nMAX_VOCAB_SIZE = 10000\nUNK, PAD = '<UNK>', '<PAD>'\n\n\ndef build_vocab(file_path, tokenizer, max_size, min_freq):\n vocab_dic = {}\n with open(file_path, 'r', encoding='UTF-8') as f:\n for line in tqdm(f):\n lin = line.strip()\n if not lin:\n continue\n content = lin.split('\\t')[0]\n for word in tokenizer(content):\n vocab_dic[word] = vocab_dic.get(word, 0) + 1\n vocab_list = sorted([_ for _ in vocab_dic.items() if _[1] >= min_freq], key=lambda x: x[1], reverse=True)[:max_size]\n vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}\n vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})\n return vocab_dic\n\n\ndef build_dataset(config, ues_word):\n if ues_word:\n tokenizer = lambda x: x.split(' ') # 以空格隔开,word-level\n else:\n tokenizer = lambda x: [y for y in x] # char-level\n if os.path.exists(config.vocab_path):\n vocab = pkl.load(open(config.vocab_path, 'rb'))\n else:\n vocab = build_vocab(config.train_path, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)\n pkl.dump(vocab, open(config.vocab_path, 'wb'))\n print(f\"Vocab size: {len(vocab)}\")\n\n def biGramHash(sequence, t, buckets):\n t1 = sequence[t - 1] if t - 1 >= 0 else 0\n return (t1 * 14918087) % buckets\n\n def triGramHash(sequence, t, buckets):\n t1 = sequence[t - 1] if t - 1 >= 0 else 0\n t2 = sequence[t - 2] if t - 2 >= 0 else 0\n return (t2 * 14918087 * 18408749 + t1 * 14918087) % buckets\n\n def load_dataset(path, pad_size=32):\n contents = []\n with open(path, 'r', encoding='UTF-8') as f:\n for line in tqdm(f):\n lin = line.strip()\n if not lin:\n continue\n content, label = lin.split('\\t')\n words_line = []\n token = tokenizer(content)\n seq_len = len(token)\n if pad_size:\n if len(token) < pad_size:\n token.extend([vocab.get(PAD)] * (pad_size - len(token)))\n else:\n token = token[:pad_size]\n seq_len = pad_size\n # word to id\n for word in token:\n words_line.append(vocab.get(word, vocab.get(UNK)))\n\n # fasttext ngram\n buckets = config.n_gram_vocab\n bigram = []\n trigram = []\n # ------ngram------\n for i in range(pad_size):\n bigram.append(biGramHash(words_line, i, buckets))\n trigram.append(triGramHash(words_line, i, buckets))\n # -----------------\n contents.append((words_line, int(label), seq_len, bigram, trigram))\n return contents # [([...], 0), ([...], 1), ...]\n train = load_dataset(config.train_path, config.pad_size)\n dev = load_dataset(config.dev_path, config.pad_size)\n test = load_dataset(config.test_path, config.pad_size)\n return vocab, train, dev, test\n\n\nclass DatasetIterater(object):\n def __init__(self, batches, batch_size, device):\n self.batch_size = batch_size\n self.batches = batches\n self.n_batches = len(batches) // batch_size\n self.residue = False # 记录batch数量是否为整数 \n if len(batches) % self.n_batches != 0:\n self.residue = True\n self.index = 0\n self.device = device\n\n def _to_tensor(self, datas):\n # xx = [xxx[2] for xxx in datas]\n # indexx = np.argsort(xx)[::-1]\n # datas = np.array(datas)[indexx]\n x = torch.LongTensor([_[0] for _ in datas]).to(self.device)\n y = torch.LongTensor([_[1] for _ in datas]).to(self.device)\n bigram = torch.LongTensor([_[3] for _ in datas]).to(self.device)\n trigram = torch.LongTensor([_[4] for _ in datas]).to(self.device)\n\n # pad前的长度(超过pad_size的设为pad_size)\n seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)\n return (x, seq_len, 
bigram, trigram), y\n\n def __next__(self):\n if self.residue and self.index == self.n_batches:\n batches = self.batches[self.index * self.batch_size: len(self.batches)]\n self.index += 1\n batches = self._to_tensor(batches)\n return batches\n\n elif self.index >= self.n_batches:\n self.index = 0\n raise StopIteration\n else:\n batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]\n self.index += 1\n batches = self._to_tensor(batches)\n return batches\n\n def __iter__(self):\n return self\n\n def __len__(self):\n if self.residue:\n return self.n_batches + 1\n else:\n return self.n_batches\n\n\ndef build_iterator(dataset, config):\n iter = DatasetIterater(dataset, config.batch_size, config.device)\n return iter\n\n\ndef get_time_dif(start_time):\n \"\"\"获取已使用时间\"\"\"\n end_time = time.time()\n time_dif = end_time - start_time\n return timedelta(seconds=int(round(time_dif)))\n\nif __name__ == \"__main__\":\n '''提取预训练词向量'''\n vocab_dir = \"./THUCNews/data/vocab.pkl\"\n pretrain_dir = \"./THUCNews/data/sgns.sogou.char\"\n emb_dim = 300\n filename_trimmed_dir = \"./THUCNews/data/vocab.embedding.sougou\"\n word_to_id = pkl.load(open(vocab_dir, 'rb'))\n embeddings = np.random.rand(len(word_to_id), emb_dim)\n f = open(pretrain_dir, \"r\", encoding='UTF-8')\n for i, line in enumerate(f.readlines()):\n # if i == 0: # 若第一行是标题,则跳过\n # continue\n lin = line.strip().split(\" \")\n if lin[0] in word_to_id:\n idx = word_to_id[lin[0]]\n emb = [float(x) for x in lin[1:301]]\n embeddings[idx] = np.asarray(emb, dtype='float32')\n f.close()\n np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)\n"
] |
[
[
"numpy.asarray",
"torch.LongTensor",
"numpy.savez_compressed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ess-dmsc/hdf5ToRoot
|
[
"807400419044ea0526f7d1f50345464b19bb3dd1"
] |
[
"analysis/tree-cutter.py"
] |
[
"#!/usr/bin/python3\n\n# VMM Analysis\n# --------------------------------------------------------------------\n# This script is a simple example, showing how to read the data from a\n# ROOT tree, generated with vmm-sdat. In addition, some cuts are\n# applied to the data using pandas. In this specific example, this\n# means that only the position of the clusters is plotted, if the\n# ADC value of the cluster is larger than a specific value.\n# --------------------------------------------------------------------\n# Lucian Scharenberg\n# [email protected]\n# 18 November 2019 and 03 March 2022\n\n\n# --------------------------------------------------------------------\n# PACKAGE HANDLING\n# --------------------------------------------------------------------\n\nimport uproot3 as uproot\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\n\n# --------------------------------------------------------------------\n# DATA HANDLING\n# --------------------------------------------------------------------\n\n# Get the tree in the ROOT file\ntree = uproot.open(sys.argv[1])['clusters_detector']\n\n# Now get the branches of interest\nadc0 = tree.array('adc0')\npos0 = tree.array('pos0')\npos1 = tree.array('pos1')\n\n# Create a pandas data frame, which is used to apply the cuts\ndata = {'adc0': adc0,\n        'pos0': pos0,\n        'pos1': pos1}\ndf = pd.DataFrame(data)\n\n# Get only the events, which 'survive' the cut\nevents = df.query('adc0 > 500')\n\n\n# --------------------------------------------------------------------\n# PLOT THE DATA\n# --------------------------------------------------------------------\n\n# Create the plot of the positions\nplt.scatter(events['pos0'], events['pos1'], s = 0.05)\n\n# Some labels\nplt.xlabel('Position 0 (Strip Numbers)')\nplt.ylabel('Position 1 (Strip Numbers)')\n\n# Save the plot and show it\nplt.savefig('tree-cutter.png', dpi = 500)\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
j-wilson/Ax
|
[
"555489d0fd743e5422ab3a1994f8e4d44275ae21",
"555489d0fd743e5422ab3a1994f8e4d44275ae21",
"555489d0fd743e5422ab3a1994f8e4d44275ae21"
] |
[
"ax/metrics/tests/test_tensorboard.py",
"ax/early_stopping/strategies.py",
"ax/models/tests/test_cbo_lcem.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom unittest import mock\n\nimport numpy as np\nimport pandas as pd\nfrom ax.metrics.tensorboard import TensorboardCurveMetric\nfrom ax.utils.common.testutils import TestCase\n\n\nclass TensorboardCurveMetricTest(TestCase):\n def test_GetCurvesFromIds(self):\n def mock_get_tb_from_posix(path):\n if path == \"None\":\n return None\n return pd.Series([int(path)] * 2)\n\n mock_path = \"ax.metrics.tensorboard.get_tb_from_posix\"\n with mock.patch(mock_path, side_effect=mock_get_tb_from_posix) as mgtbfp:\n out = TensorboardCurveMetric.get_curves_from_ids([\"1\", \"None\", \"2\"])\n mgtbfp.assert_has_calls([mock.call(\"1\"), mock.call(\"None\"), mock.call(\"2\")])\n self.assertEqual(len(out), 2)\n self.assertTrue(np.array_equal(out[\"1\"].values, np.array([1, 1])))\n self.assertTrue(np.array_equal(out[\"2\"].values, np.array([2, 2])))\n",
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom abc import ABC, abstractmethod\nfrom typing import List, Any, Dict, Optional, Set, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom ax.core.base_trial import TrialStatus\nfrom ax.core.experiment import Experiment\nfrom ax.core.map_data import MapData\nfrom ax.early_stopping.utils import align_partial_results\nfrom ax.exceptions.core import UnsupportedError\nfrom ax.utils.common.base import Base\nfrom ax.utils.common.logger import get_logger\nfrom ax.utils.common.typeutils import checked_cast, not_none\n\nlogger = get_logger(__name__)\n\n\nclass BaseEarlyStoppingStrategy(ABC, Base):\n \"\"\"Interface for heuristics that halt trials early, typically based on early\n results from that trial.\"\"\"\n\n def __init__(\n self,\n seconds_between_polls: int = 60,\n true_objective_metric_name: Optional[str] = None,\n ) -> None:\n \"\"\"A BaseEarlyStoppingStrategy class.\n\n Args:\n seconds_between_polls: How often to poll the early stopping metric to\n evaluate whether or not the trial should be early stopped.\n true_objective_metric_name: The actual objective to be optimized; used in\n situations where early stopping uses a proxy objective (such as training\n loss instead of eval loss) for stopping decisions.\n \"\"\"\n self._seconds_between_polls = seconds_between_polls\n self._true_objective_metric_name = true_objective_metric_name\n\n @abstractmethod\n def should_stop_trials_early(\n self,\n trial_indices: Set[int],\n experiment: Experiment,\n **kwargs: Dict[str, Any],\n ) -> Dict[int, Optional[str]]:\n \"\"\"Decide whether to complete trials before evaluation is fully concluded.\n\n Typical examples include stopping a machine learning model's training, or\n halting the gathering of samples before some planned number are collected.\n\n\n Args:\n trial_indices: Indices of candidate trials to stop early.\n experiment: Experiment that contains the trials and other contextual data.\n\n Returns:\n A dictionary mapping trial indices that should be early stopped to\n (optional) messages with the associated reason.\n \"\"\"\n pass # pragma: nocover\n\n @property\n def true_objective_metric_name(self) -> Optional[str]:\n return self._true_objective_metric_name\n\n @true_objective_metric_name.setter\n def true_objective_metric_name(self, true_objective_metric_name: Optional[str]):\n self._true_objective_metric_name = true_objective_metric_name\n\n def _check_validity_and_get_data(self, experiment: Experiment) -> Optional[MapData]:\n \"\"\"Validity checks and returns the `MapData` used for early stopping.\"\"\"\n if experiment.optimization_config is None:\n raise UnsupportedError( # pragma: no cover\n \"Experiment must have an optimization config in order to use an \"\n \"early stopping strategy.\"\n )\n\n optimization_config = not_none(experiment.optimization_config)\n objective_name = optimization_config.objective.metric.name\n\n data = experiment.fetch_data()\n if data.df.empty:\n logger.info(\n f\"{self.__class__.__name__} received empty data. \"\n \"Not stopping any trials.\"\n )\n return None\n if objective_name not in set(data.df[\"metric_name\"]):\n logger.info(\n f\"{self.__class__.__name__} did not receive data \"\n \"from the objective metric. 
Not stopping any trials.\"\n )\n return None\n\n if not isinstance(data, MapData):\n logger.info(\n f\"{self.__class__.__name__} expects MapData, but the \"\n f\"data attached to experiment is of type {type(data)}. \"\n \"Not stopping any trials.\"\n )\n return None\n\n data = checked_cast(MapData, data)\n map_keys = data.map_keys\n if len(list(map_keys)) > 1:\n logger.info(\n f\"{self.__class__.__name__} expects MapData with a single \"\n \"map key, but the data attached to the experiment has multiple: \"\n f\"{data.map_keys}. Not stopping any trials.\"\n )\n return None\n return data\n\n @property\n def seconds_between_polls(self) -> int:\n return self._seconds_between_polls\n\n @seconds_between_polls.setter\n def seconds_between_polls(self, seconds_between_polls: int) -> None:\n if seconds_between_polls < 0:\n raise ValueError(\"`seconds_between_polls may not be less than 0\")\n\n self._seconds_between_polls = seconds_between_polls\n\n\nclass PercentileEarlyStoppingStrategy(BaseEarlyStoppingStrategy):\n \"\"\"Implements the strategy of stopping a trial if its performance\n falls below that of other trials at the same step.\"\"\"\n\n def __init__(\n self,\n seconds_between_polls: int = 60,\n true_objective_metric_name: Optional[str] = None,\n percentile_threshold: float = 50.0,\n min_progression: float = 0.1,\n min_curves: float = 5,\n trial_indices_to_ignore: Optional[List[int]] = None,\n ) -> None:\n \"\"\"Construct a PercentileEarlyStoppingStrategy instance.\n\n Args:\n true_objective_metric_name: The actual objective to be optimized; used in\n situations where early stopping uses a proxy objective (such as training\n loss instead of eval loss) for stopping decisions.\n percentile_threshold: Falling below this threshold compared to other trials\n at the same step will stop the run. Must be between 0.0 and 100.0.\n e.g. if percentile_threshold=25.0, the bottom 25% of trials are stopped.\n Note that \"bottom\" here is determined based on performance, not\n absolute values; if `minimize` is False, then \"bottom\" actually refers\n to the top trials in terms of metric value.\n min_progression: Only stop trials if the latest progression value\n (e.g. timestamp, epochs, training data used) is greater than this\n threshold. Prevents stopping prematurely before enough data is gathered\n to make a decision. 
The default value (10) is reasonable when we want\n early stopping to start after 10 epochs.\n min_curves: There must be `min_curves` number of completed trials and\n `min_curves` number of trials with curve data to make a stopping\n decision (i.e., even if there are enough completed trials but not all\n of them are correctly returning data, then do not apply early stopping).\n trial_indices_to_ignore: Trial indices that should not be early stopped.\n \"\"\"\n super().__init__(\n seconds_between_polls=seconds_between_polls,\n true_objective_metric_name=true_objective_metric_name,\n )\n\n self.percentile_threshold = percentile_threshold\n self.min_progression = min_progression\n self.min_curves = min_curves\n self.trial_indices_to_ignore = trial_indices_to_ignore\n\n def should_stop_trials_early(\n self,\n trial_indices: Set[int],\n experiment: Experiment,\n **kwargs: Dict[str, Any],\n ) -> Dict[int, Optional[str]]:\n \"\"\"Stop a trial if its performance is in the bottom `percentile_threshold`\n of the trials at the same step.\n\n Args:\n trial_indices: Indices of candidate trials to consider for early stopping.\n experiment: Experiment that contains the trials and other contextual data.\n\n Returns:\n A dictionary mapping trial indices that should be early stopped to\n (optional) messages with the associated reason. An empty dictionary\n means no suggested updates to any trial's status.\n \"\"\"\n data = self._check_validity_and_get_data(experiment=experiment)\n if data is None:\n # don't stop any trials if we don't get data back\n return {}\n\n optimization_config = not_none(experiment.optimization_config)\n objective_name = optimization_config.objective.metric.name\n\n map_key = next(iter(data.map_keys))\n minimize = optimization_config.objective.minimize\n df = data.map_df\n try:\n metric_to_aligned_means, _ = align_partial_results(\n df=df,\n progr_key=map_key,\n metrics=[objective_name],\n )\n except Exception as e:\n logger.warning(\n f\"Encountered exception while aligning data: {e}. 
\"\n \"Not early stopping any trials.\"\n )\n return {}\n\n aligned_means = metric_to_aligned_means[objective_name]\n decisions = {\n trial_index: self.should_stop_trial_early(\n trial_index=trial_index,\n experiment=experiment,\n df=aligned_means,\n minimize=minimize,\n )\n for trial_index in trial_indices\n }\n return {\n trial_index: reason\n for trial_index, (should_stop, reason) in decisions.items()\n if should_stop\n }\n\n def should_stop_trial_early(\n self,\n trial_index: int,\n experiment: Experiment,\n df: pd.DataFrame,\n minimize: bool,\n ) -> Tuple[bool, Optional[str]]:\n \"\"\"Stop a trial if its performance is in the bottom `percentile_threshold`\n of the trials at the same step.\n\n Args:\n trial_index: Indices of candidate trial to stop early.\n experiment: Experiment that contains the trials and other contextual data.\n df: Dataframe of partial results after applying interpolation,\n filtered to objective metric.\n minimize: Whether objective value is being minimized.\n\n Returns:\n A tuple `(should_stop, reason)`, where `should_stop` is `True` iff the\n trial should be stopped, and `reason` is an (optional) string providing\n information on why the trial should or should not be stopped.\n \"\"\"\n logger.info(f\"Considering trial {trial_index} for early stopping.\")\n\n # check for ignored indices\n if self.trial_indices_to_ignore is not None:\n if trial_index in self.trial_indices_to_ignore:\n return _log_and_return_trial_ignored(trial_index=trial_index)\n\n # check for no data\n if trial_index not in df or len(not_none(df[trial_index].dropna())) == 0:\n return _log_and_return_no_data(trial_index=trial_index)\n\n # check for min progression\n trial_last_progression = not_none(df[trial_index].dropna()).index.max()\n logger.info(\n f\"Last progression of Trial {trial_index} is {trial_last_progression}.\"\n )\n if trial_last_progression < self.min_progression:\n return _log_and_return_min_progression(\n trial_index=trial_index,\n trial_last_progression=trial_last_progression,\n min_progression=self.min_progression,\n )\n\n # dropna() here will exclude trials that have not made it to the\n # last progression of the trial under consideration, and therefore\n # can't be included in the comparison\n data_at_last_progression = df.loc[trial_last_progression].dropna()\n logger.info(\n \"Early stopping objective at last progression is:\\n\"\n f\"{data_at_last_progression}.\"\n )\n\n # check for enough completed trials\n num_completed = len(experiment.trial_indices_by_status[TrialStatus.COMPLETED])\n if num_completed < self.min_curves:\n return _log_and_return_completed_trials(\n num_completed=num_completed, min_curves=self.min_curves\n )\n\n # check for enough number of trials with data\n if len(data_at_last_progression) < self.min_curves:\n return _log_and_return_num_trials_with_data(\n trial_index=trial_index,\n trial_last_progression=trial_last_progression,\n num_trials_with_data=len(data_at_last_progression),\n min_curves=self.min_curves,\n )\n\n # percentile early stopping logic\n percentile_threshold = (\n 100.0 - self.percentile_threshold if minimize else self.percentile_threshold\n )\n percentile_value = np.percentile(data_at_last_progression, percentile_threshold)\n trial_objective_value = data_at_last_progression[trial_index]\n should_early_stop = (\n trial_objective_value > percentile_value\n if minimize\n else trial_objective_value < percentile_value\n )\n comp = \"worse\" if should_early_stop else \"better\"\n reason = (\n f\"Trial objective value 
{trial_objective_value} is {comp} than \"\n f\"{percentile_threshold:.1f}-th percentile ({percentile_value}) \"\n \"across comparable trials.\"\n )\n logger.info(\n f\"Early stopping decision for {trial_index}: {should_early_stop}. \"\n f\"Reason: {reason}\"\n )\n return should_early_stop, reason\n\n\nclass ThresholdEarlyStoppingStrategy(BaseEarlyStoppingStrategy):\n \"\"\"Implements the strategy of stopping a trial if its performance\n doesn't reach a pre-specified threshold by a certain progression.\"\"\"\n\n def __init__(\n self,\n true_objective_metric_name: Optional[str] = None,\n metric_threshold: float = 0.2,\n min_progression: float = 10,\n trial_indices_to_ignore: Optional[List[int]] = None,\n ) -> None:\n \"\"\"Construct a ThresholdEarlyStoppingStrategy instance.\n\n Args:\n true_objective_metric_name: The actual objective to be optimized; used in\n situations where early stopping uses a proxy objective (such as training\n loss instead of eval loss) for stopping decisions.\n metric_threshold: The metric threshold that a trial needs to reach by\n min_progression in order not to be stopped.\n min_progression: Only stop trials if the latest progression value\n (e.g. timestamp) is greater than this threshold.\n trial_indices_to_ignore: Trial indices that should not be early stopped.\n \"\"\"\n super().__init__(true_objective_metric_name=true_objective_metric_name)\n\n self.metric_threshold = metric_threshold\n self.min_progression = min_progression\n self.trial_indices_to_ignore = trial_indices_to_ignore\n\n def should_stop_trials_early(\n self,\n trial_indices: Set[int],\n experiment: Experiment,\n **kwargs: Dict[str, Any],\n ) -> Dict[int, Optional[str]]:\n \"\"\"Stop a trial if its performance doesn't reach a pre-specified threshold\n by `min_progression`.\n\n Args:\n trial_indices: Indices of candidate trials to consider for early stopping.\n experiment: Experiment that contains the trials and other contextual data.\n\n Returns:\n A dictionary mapping trial indices that should be early stopped to\n (optional) messages with the associated reason. 
An empty dictionary\n means no suggested updates to any trial's status.\n \"\"\"\n data = self._check_validity_and_get_data(experiment=experiment)\n if data is None:\n # don't stop any trials if we don't get data back\n return {}\n\n optimization_config = not_none(experiment.optimization_config)\n objective_name = optimization_config.objective.metric.name\n\n map_key = next(iter(data.map_keys))\n minimize = optimization_config.objective.minimize\n df = data.map_df\n df_objective = df[df[\"metric_name\"] == objective_name]\n decisions = {\n trial_index: self.should_stop_trial_early(\n trial_index=trial_index,\n experiment=experiment,\n df=df_objective,\n map_key=map_key,\n minimize=minimize,\n )\n for trial_index in trial_indices\n }\n return {\n trial_index: reason\n for trial_index, (should_stop, reason) in decisions.items()\n if should_stop\n }\n\n def should_stop_trial_early(\n self,\n trial_index: int,\n experiment: Experiment,\n df: pd.DataFrame,\n map_key: str,\n minimize: bool,\n ) -> Tuple[bool, Optional[str]]:\n \"\"\"Stop a trial if its performance doesn't reach a pre-specified threshold\n by `min_progression`.\n\n Args:\n trial_index: Indices of candidate trial to stop early.\n experiment: Experiment that contains the trials and other contextual data.\n df: Dataframe of partial results for the objective metric.\n map_key: Name of the column of the dataset that indicates progression.\n minimize: Whether objective value is being minimized.\n\n Returns:\n A tuple `(should_stop, reason)`, where `should_stop` is `True` iff the\n trial should be stopped, and `reason` is an (optional) string providing\n information on why the trial should or should not be stopped.\n \"\"\"\n logger.info(f\"Considering trial {trial_index} for early stopping.\")\n\n # check for ignored indices\n if self.trial_indices_to_ignore is not None:\n if trial_index in self.trial_indices_to_ignore:\n return _log_and_return_trial_ignored(trial_index=trial_index)\n\n # check for no data\n df_trial = df[df[\"trial_index\"] == trial_index].dropna(subset=[\"mean\"])\n if df_trial.empty:\n return _log_and_return_no_data(trial_index=trial_index)\n\n # check for min progression\n trial_last_progression = df_trial[map_key].max()\n logger.info(\n f\"Last progression of Trial {trial_index} is {trial_last_progression}.\"\n )\n if trial_last_progression < self.min_progression:\n return _log_and_return_min_progression(\n trial_index=trial_index,\n trial_last_progression=trial_last_progression,\n min_progression=self.min_progression,\n )\n\n # threshold early stopping logic\n data_at_last_progression = df_trial[\n df_trial[map_key] == trial_last_progression\n ][\"mean\"].iloc[0]\n logger.info(\n \"Early stopping objective at last progression is:\\n\"\n f\"{data_at_last_progression}.\"\n )\n should_early_stop = (\n data_at_last_progression > self.metric_threshold\n if minimize\n else data_at_last_progression < self.metric_threshold\n )\n comp = \"worse\" if should_early_stop else \"better\"\n reason = (\n f\"Trial objective value {data_at_last_progression} is {comp} than \"\n f\"the metric threshold {self.metric_threshold:}.\"\n )\n logger.info(\n f\"Early stopping decision for {trial_index}: {should_early_stop}. 
\"\n f\"Reason: {reason}\"\n )\n return should_early_stop, reason\n\n\ndef _log_and_return_trial_ignored(trial_index: int) -> Tuple[bool, str]:\n \"\"\"Helper function for logging/constructing a reason when a trial\n should be ignored.\"\"\"\n logger.info(\n f\"Trial {trial_index} should be ignored and not considered \"\n \"for early stopping.\"\n )\n return False, \"Specified as a trial to be ignored for early stopping.\"\n\n\ndef _log_and_return_no_data(trial_index: int) -> Tuple[bool, str]:\n \"\"\"Helper function for logging/constructing a reason when there is no data.\"\"\"\n logger.info(\n f\"There is not yet any data associated with trial {trial_index}. \"\n \"Not early stopping this trial.\"\n )\n return False, \"No data available to make an early stopping decision.\"\n\n\ndef _log_and_return_min_progression(\n trial_index: int, trial_last_progression: float, min_progression: float\n) -> Tuple[bool, str]:\n \"\"\"Helper function for logging/constructing a reason when min progression\n is not yet reached.\"\"\"\n reason = (\n f\"Most recent progression ({trial_last_progression}) is less than \"\n \"the specified minimum progression for early stopping \"\n f\"({min_progression}). \"\n )\n logger.info(f\"Trial {trial_index}'s m{reason[1:]} Not early stopping this trial.\")\n return False, reason\n\n\ndef _log_and_return_completed_trials(\n num_completed: int, min_curves: float\n) -> Tuple[bool, str]:\n \"\"\"Helper function for logging/constructing a reason when min number of\n completed trials is not yet reached.\"\"\"\n logger.info(\n f\"The number of completed trials ({num_completed}) is less than \"\n \"the minimum number of curves needed for early stopping \"\n f\"({min_curves}). Not early stopping this trial.\"\n )\n reason = (\n f\"Need {min_curves} completed trials, but only {num_completed} \"\n \"completed trials so far.\"\n )\n return False, reason\n\n\ndef _log_and_return_num_trials_with_data(\n trial_index: int,\n trial_last_progression: float,\n num_trials_with_data: int,\n min_curves: float,\n) -> Tuple[bool, str]:\n \"\"\"Helper function for logging/constructing a reason when min number of\n trials with data is not yet reached.\"\"\"\n logger.info(\n f\"The number of trials with data ({num_trials_with_data}) \"\n f\"at trial {trial_index}'s last progression ({trial_last_progression}) \"\n \"is less than the specified minimum number for early stopping \"\n f\"({min_curves}). Not early stopping this trial.\"\n )\n reason = (\n f\"Number of trials with data ({num_trials_with_data}) at \"\n f\"last progression ({trial_last_progression}) is less than the \"\n f\"specified minimum number for early stopping ({min_curves}).\"\n )\n return False, reason\n",
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\nfrom ax.models.torch.cbo_lcem import LCEMBO\nfrom ax.utils.common.testutils import TestCase\nfrom botorch.models.contextual_multioutput import LCEMGP, FixedNoiseLCEMGP\nfrom botorch.models.model_list_gp_regression import ModelListGP\n\n\nclass LCEMBOTest(TestCase):\n def testLCEMBO(self):\n d = 1\n train_x = torch.rand(10, d)\n train_y = torch.cos(train_x)\n task_indices = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n train_x = torch.cat([train_x, task_indices.unsqueeze(-1)], axis=1)\n\n # Test setting attributes\n m = LCEMBO()\n self.assertIsNone(m.context_cat_feature)\n self.assertIsNone(m.context_emb_feature)\n self.assertIsNone(m.embs_dim_list)\n\n # Test get_and_fit_model\n train_yvar = np.nan * torch.ones(train_y.shape)\n gp = m.get_and_fit_model(\n Xs=[train_x],\n Ys=[train_y],\n Yvars=[train_yvar],\n task_features=[d],\n fidelity_features=[],\n metric_names=[],\n )\n self.assertIsInstance(gp, ModelListGP)\n self.assertIsInstance(gp.models[0], LCEMGP)\n\n train_yvar = 0.05 * torch.ones(train_y.shape)\n gp = m.get_and_fit_model(\n Xs=[train_x],\n Ys=[train_y],\n Yvars=[train_yvar],\n task_features=[d],\n fidelity_features=[],\n metric_names=[],\n )\n self.assertIsInstance(gp, ModelListGP)\n self.assertIsInstance(gp.models[0], FixedNoiseLCEMGP)\n\n # Verify errors are raised in get_and_fit_model\n train_yvar = np.nan * torch.ones(train_y.shape)\n with self.assertRaises(NotImplementedError):\n gp = m.get_and_fit_model(\n Xs=[train_x],\n Ys=[train_y],\n Yvars=[train_yvar],\n task_features=[d, 2],\n fidelity_features=[],\n metric_names=[],\n )\n with self.assertRaises(ValueError):\n gp = m.get_and_fit_model(\n Xs=[train_x],\n Ys=[train_y],\n Yvars=[train_yvar],\n task_features=[],\n fidelity_features=[],\n metric_names=[],\n )\n"
] |
[
[
"numpy.array"
],
[
"numpy.percentile"
],
[
"torch.ones",
"torch.rand",
"torch.cos",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BIYTC/mobilenet_maskrcnn
|
[
"ee44ac18a5efa91da63ed88e479e645f2fb6d770"
] |
[
"maskrcnn_benchmark/modeling/backbone/bottom2up.py"
] |
[
"import torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom maskrcnn_benchmark.modeling.make_layers import group_norm\n\n\nclass Bottom2UP(nn.Module):\n \"\"\"\n Module that adds PANet on a list of feature maps from FPN.\n The feature maps are currently supposed to be in increasing depth\n order, and must be consecutive\n \"\"\"\n\n def __init__(\n self, cfg, in_channels, num_backbone_stages, top_blocks,\n ):\n \"\"\"\n Arguments:\n in_channels_list (list[int]): number of channels for each feature map that\n will be fed\n out_channels (int): number of channels of the FPN representation\n top_blocks (nn.Module or None): if provided, an extra operation will\n be performed on the output of the last (smallest resolution)\n FPN output, and the result will extend the result list\n \"\"\"\n super(Bottom2UP, self).__init__()\n # self.inner_blocks = []\n # self.layer_blocks = []\n # for idx, in_channels in enumerate(in_channels_list, 1): # 起始索引为1\n # inner_block = \"fpn_inner{}\".format(idx) # 用下表起名: fpn_inner1, fpn_inner2, fpn_inner3, fpn_inner4\n # layer_block = \"fpn_layer{}\".format(idx) # 用下表起名: fpn_layer1, fpn_layer2, fpn_layer3, fpn_layer4\n #\n # if in_channels == 0:\n # continue\n # inner_block_module = conv_block(in_channels, out_channels, 1) # 该1*1卷积层主要作用为改变通道数为out_channels\n # layer_block_module = conv_block(out_channels, out_channels, 3,\n # 1) # 用3*3卷积对融合结果卷积,消除上采样的混叠效(aliasing effect)\n # self.add_module(inner_block, inner_block_module)\n # self.add_module(layer_block, layer_block_module)\n # self.inner_blocks.append(inner_block)\n # self.layer_blocks.append(layer_block)\n # self.top_blocks = top_blocks # 将top_blocks作为FPN类成员变量,指定最后一层的输出是否需要再经过池化等操作,这里是最大值池化\n self.panet_buttomup_conv1_modules = nn.ModuleList()\n self.panet_buttomup_conv2_modules = nn.ModuleList()\n for i in range(num_backbone_stages):\n if cfg.MODEL.FPN.PANET.USE_GN:\n self.panet_buttomup_conv1_modules.append(nn.Sequential(\n nn.Conv2d(in_channels, in_channels, 3, 2, 1, bias=True), # 下采样\n group_norm(in_channels),\n nn.ReLU(inplace=True)\n ))\n self.panet_buttomup_conv2_modules.append(nn.Sequential(\n nn.Conv2d(in_channels, in_channels, 3, 1, 1, bias=True), # 像素相加后使用\n group_norm(in_channels),\n nn.ReLU(inplace=True)\n ))\n else:\n self.panet_buttomup_conv1_modules.append(\n nn.Conv2d(in_channels, in_channels, 3, 2, 1)\n )\n self.panet_buttomup_conv2_modules.append(\n nn.Conv2d(in_channels, in_channels, 3, 1, 1)\n )\n self.top_blocks = top_blocks\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x (list[Tensor]): feature maps for each feature level.输入分辨率最高的位于第一个,先取第一个特征图最大的来运算,\n 然后用for循环做后面的运算。\n Returns:\n results (tuple[Tensor]): feature maps after FPN layers.\n They are ordered from highest resolution first. 分辨率最高的位于第一\n \"\"\"\n # getattr() 函数用于返回一个对象属性值\n last_feature = x[0] # 第一层不做处理\n results = []\n results.append(last_feature)\n\n for feature, buttomup_conv1, buttomup_conv2 in zip(\n x[1:], self.panet_buttomup_conv1_modules, self.panet_buttomup_conv2_modules\n ):\n inner_feature = buttomup_conv1(last_feature)\n last_feature = feature + inner_feature\n last_feature = buttomup_conv2(last_feature)\n results.append(last_feature)\n\n if isinstance(self.top_blocks, LastLevelMaxPool):\n last_results = self.top_blocks(results[-1]) # 这还加入了一张特征图,对最小的特征图进行了池化\n results.extend(last_results)\n\n return tuple(results) # results不是一个特征图,是5张\n\n\nclass LastLevelMaxPool(nn.Module):\n def forward(self, x):\n return [F.max_pool2d(x, 1, 2, 0)]\n"
] |
[
[
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool2d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
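The Bottom2UP module in the row above adds a PANet-style bottom-up path on FPN features: each level is downsampled with a stride-2 3x3 convolution, added element-wise to the next coarser feature map, and smoothed with a 3x3 convolution plus ReLU. Below is a minimal self-contained sketch of that fusion step, assuming equal channel counts across levels; the class and tensor names are illustrative and not part of maskrcnn_benchmark.

import torch
from torch import nn

class BottomUpStep(nn.Module):
    """One PANet bottom-up fusion step: downsample the previous (finer) level,
    add it to the current FPN feature, then smooth with a 3x3 conv + ReLU."""

    def __init__(self, channels):
        super().__init__()
        self.down = nn.Conv2d(channels, channels, 3, stride=2, padding=1)
        self.fuse = nn.Sequential(
            nn.Conv2d(channels, channels, 3, stride=1, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, finer, coarser):
        return self.fuse(coarser + self.down(finer))

step = BottomUpStep(channels=8)
p2 = torch.randn(1, 8, 64, 64)   # finer FPN level
p3 = torch.randn(1, 8, 32, 32)   # next coarser FPN level
print(step(p2, p3).shape)        # torch.Size([1, 8, 32, 32])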
andrecianflone/wolf
|
[
"826bbedc58d4d29871110349356868066a3108e6",
"826bbedc58d4d29871110349356868066a3108e6"
] |
[
"wolf/data/image.py",
"wolf/flows/couplings/transform.py"
] |
[
"import os\nimport scipy.io\nimport numpy as np\n\nimport torch\nfrom torchvision import datasets, transforms\n\n\ndef load_datasets(dataset, image_size, data_path):\n if dataset == 'omniglot':\n return load_omniglot()\n elif dataset == 'mnist':\n return load_mnist()\n elif dataset.startswith('lsun'):\n category = None if dataset == 'lsun' else dataset[5:]\n return load_lsun(data_path, category, image_size)\n elif dataset == 'cifar10':\n return load_cifar10(data_path)\n elif dataset == 'imagenet':\n return load_imagenet(data_path, image_size)\n elif dataset == 'celeba':\n return load_celeba(data_path, image_size)\n else:\n raise ValueError('unknown data set %s' % dataset)\n\n\ndef load_omniglot():\n def reshape_data(data):\n return data.T.reshape((-1, 1, 28, 28))\n\n omni_raw = scipy.io.loadmat('data/omniglot/chardata.mat')\n\n train_data = reshape_data(omni_raw['data']).astype(np.float32)\n train_label = omni_raw['target'].argmax(axis=0)\n test_data = reshape_data(omni_raw['testdata']).astype(np.float32)\n test_label = omni_raw['testtarget'].argmax(axis=0)\n\n train_data = torch.from_numpy(train_data).float()\n train_label = torch.from_numpy(train_label).long()\n test_data = torch.from_numpy(test_data).float()\n test_label = torch.from_numpy(test_label).long()\n\n return [(train_data[i], train_label[i]) for i in range(len(train_data))], \\\n [(test_data[i], test_label[i]) for i in range(len(test_data))]\n\n\ndef load_mnist():\n train_data, train_label = torch.load('data/mnist/processed/training.pt')\n test_data, test_label = torch.load('data/mnist/processed/test.pt')\n\n train_data = train_data.float().div(256).unsqueeze(1)\n test_data = test_data.float().div(256).unsqueeze(1)\n\n return [(train_data[i], train_label[i]) for i in range(len(train_data))], \\\n [(test_data[i], test_label[i]) for i in range(len(test_data))]\n\n\ndef load_lsun(data_path, category, image_size):\n if category is None:\n classes_train = 'train'\n classes_val = 'val'\n else:\n classes_train = [category + '_train']\n classes_val = [category + '_val']\n train_data = datasets.LSUN(data_path, classes=classes_train,\n transform=transforms.Compose([\n transforms.CenterCrop(256),\n transforms.Resize(image_size),\n transforms.ToTensor(),\n ]))\n\n val_data = datasets.LSUN(data_path, classes=classes_val,\n transform=transforms.Compose([\n transforms.CenterCrop(256),\n transforms.Resize(image_size),\n transforms.ToTensor(),\n ]))\n return train_data, val_data\n\n\ndef load_cifar10(data_path):\n imageSize = 32\n train_data = datasets.CIFAR10(data_path, train=True,\n download=True,\n transform=transforms.Compose([\n transforms.Pad(4, padding_mode='reflect'),\n transforms.RandomCrop(imageSize),\n transforms.RandomHorizontalFlip(0.5),\n transforms.ToTensor()\n ]))\n test_data = datasets.CIFAR10(data_path, train=False,\n transform=transforms.Compose([\n transforms.ToTensor()\n ]))\n return train_data, test_data\n\n\ndef load_imagenet(data_path, image_size):\n data_path = os.path.join(data_path, 'imagenet{}x{}'.format(image_size, image_size))\n train_data = datasets.ImageFolder(os.path.join(data_path, 'train'),\n transform=transforms.Compose([\n transforms.ToTensor()\n ]))\n val_data = datasets.ImageFolder(os.path.join(data_path, 'val'),\n transform=transforms.Compose([\n transforms.ToTensor()\n ]))\n return train_data, val_data\n\n\ndef load_celeba(data_path, image_size):\n train_data = datasets.ImageFolder(os.path.join(data_path, 'train'),\n transform=transforms.Compose([\n transforms.Resize(image_size),\n 
transforms.RandomHorizontalFlip(0.5),\n transforms.ToTensor()\n ]))\n val_data = datasets.ImageFolder(os.path.join(data_path, 'val'),\n transform=transforms.Compose([\n transforms.Resize(image_size),\n transforms.ToTensor()\n ]))\n return train_data, val_data\n\n\ndef get_batch(data, indices):\n imgs = []\n labels = []\n for index in indices:\n img, label = data[index]\n imgs.append(img)\n labels.append(label)\n return torch.stack(imgs, dim=0), torch.LongTensor(labels)\n\n\ndef iterate_minibatches(data, indices, batch_size, shuffle):\n if shuffle:\n np.random.shuffle(indices)\n\n for start_idx in range(0, len(indices), batch_size):\n excerpt = indices[start_idx:start_idx + batch_size]\n yield get_batch(data, excerpt)\n\n\ndef binarize_image(img):\n return torch.rand(img.size()).type_as(img).le(img).float()\n\n\ndef binarize_data(data):\n return [(binarize_image(img), label) for img, label in data]\n\n\ndef preprocess(img, n_bits, noise=None):\n n_bins = 2. ** n_bits\n # rescale to 255\n img = img.mul(255)\n if n_bits < 8:\n img = torch.floor(img.div(256. / n_bins))\n\n if noise is not None:\n # [batch, nsamples, channels, H, W]\n img = img.unsqueeze(1) + noise\n # normalize\n img = img.div(n_bins)\n img = (img - 0.5).div(0.5)\n return img\n\n\ndef postprocess(img, n_bits):\n n_bins = 2. ** n_bits\n # re-normalize\n img = img.mul(0.5) + 0.5\n img = img.mul(n_bins)\n # scale\n img = torch.floor(img) * (256. / n_bins)\n img = img.clamp(0, 255).div(255)\n return img\n",
"__author__ = 'max'\n\nimport math\nfrom overrides import overrides\nfrom typing import Tuple\nimport torch\n\n\nclass Transform():\n def calc_params(self, params):\n return params\n\n @staticmethod\n def fwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n raise NotImplementedError\n\n @staticmethod\n def bwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n raise NotImplementedError\n\n def __str__(self):\n return self.__class__.__name__ + '(' + self.extra_repr() + ')'\n\n def extra_repr(self):\n return ''\n\n\nclass Additive(Transform):\n def __init__(self):\n super(Additive, self).__init__()\n\n @staticmethod\n @overrides\n def fwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu = params\n z = z + mu\n logdet = z.new_zeros(z.size(0))\n return z, logdet\n\n @staticmethod\n @overrides\n def bwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu = params\n z = z - mu\n logdet = z.new_zeros(z.size(0))\n return z, logdet\n\n\nclass Affine(Transform):\n def __init__(self, dim, alpha):\n super(Affine, self).__init__()\n self.dim = dim\n self.alpha = alpha\n\n @overrides\n def calc_params(self, params):\n mu, log_scale = params.chunk(2, dim=self.dim)\n scale = log_scale.mul_(0.5).tanh_().mul(self.alpha).add(1.0)\n return mu, scale\n\n @staticmethod\n @overrides\n def fwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu, scale = params\n z = scale * z + mu\n logdet = scale.log().view(z.size(0), -1).sum(dim=1)\n return z, logdet\n\n @staticmethod\n @overrides\n def bwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu, scale = params\n z = (z - mu).div(scale + 1e-12)\n logdet = scale.log().view(z.size(0), -1).sum(dim=1) * -1.0\n return z, logdet\n\n @overrides\n def extra_repr(self):\n return 'dim={}, alpha={}'.format(self.dim, self.alpha)\n\n\nclass ReLU(Transform):\n def __init__(self, dim):\n super(ReLU, self).__init__()\n self.dim = dim\n\n @overrides\n def calc_params(self, params):\n mu, log_scale = params.chunk(2, dim=self.dim)\n scale = log_scale.tanh_()\n return mu, scale\n\n @staticmethod\n @overrides\n def fwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu, scale = params\n scale = scale * z.gt(0.0).type_as(z) + 1\n z = scale * z + mu\n logdet = scale.log().view(z.size(0), -1).sum(dim=1)\n return z, logdet\n\n @staticmethod\n @overrides\n def bwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu, scale = params\n z = z - mu\n scale = scale * z.gt(0.0).type_as(z) + 1\n z = z.div(scale + 1e-12)\n logdet = scale.log().view(z.size(0), -1).sum(dim=1) * -1.0\n return z, logdet\n\n\ndef arccosh(x):\n return torch.log(x + torch.sqrt(x.pow(2) - 1))\n\n\ndef arcsinh(x):\n return torch.log(x + torch.sqrt(x.pow(2) + 1))\n\n\nclass NLSQ(Transform):\n # A = 8 * math.sqrt(3) / 9 - 0.05 # 0.05 is a small number to prevent exactly 0 slope\n logA = math.log(8 * math.sqrt(3) / 9 - 0.05) # 0.05 is a small number to prevent exactly 0 slope\n\n def __init__(self, dim):\n super(NLSQ, self).__init__()\n self.dim = dim\n\n @overrides\n def calc_params(self, params):\n a, logb, cprime, logd, g = params.chunk(5, dim=self.dim)\n\n # for stability\n logb = logb.mul_(0.4)\n cprime = cprime.mul_(0.3)\n logd = logd.mul_(0.4)\n\n # b = logb.add_(2.0).sigmoid_()\n # d = logd.add_(2.0).sigmoid_()\n # c = (NLSQ.A * b / d).mul(cprime.tanh_())\n\n c = (NLSQ.logA + logb - logd).exp_().mul(cprime.tanh_())\n b = logb.exp_()\n d = logd.exp_()\n return a, b, c, d, g\n\n @staticmethod\n 
@overrides\n def fwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n a, b, c, d, g = params\n\n arg = (d * z).add_(g)\n denom = arg.pow(2).add_(1)\n c = c / denom\n z = b * z + a + c\n logdet = torch.log(b - 2 * c * d * arg / denom)\n logdet = logdet.view(z.size(0), -1).sum(dim=1)\n return z, logdet\n\n @staticmethod\n @overrides\n def bwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n a, b, c, d, g = params\n\n # double needed for stability. No effect on overall speed\n a = a.double()\n b = b.double()\n c = c.double()\n d = d.double()\n g = g.double()\n z = z.double()\n\n aa = -b * d.pow(2)\n bb = (z - a) * d.pow(2) - 2 * b * d * g\n cc = (z - a) * 2 * d * g - b * (1 + g.pow(2))\n dd = (z - a) * (1 + g.pow(2)) - c\n\n p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))\n q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))\n\n t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)\n inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))\n inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)\n t = t * torch.cosh(inter_term2)\n\n tpos = -2 * torch.sqrt(torch.abs(p) / 3)\n inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))\n inter_term2 = 1 / 3 * arcsinh(inter_term1)\n tpos = tpos * torch.sinh(inter_term2)\n\n t[p > 0] = tpos[p > 0]\n z = t - bb / (3 * aa)\n arg = d * z + g\n denom = arg.pow(2) + 1\n logdet = torch.log(b - 2 * c * d * arg / denom.pow(2))\n\n z = z.float()\n logdet = logdet.float().view(z.size(0), -1).sum(dim=1) * -1.0\n return z, logdet\n\n\nclass SymmELU(Transform):\n def __init__(self, dim):\n super(SymmELU, self).__init__()\n self.dim = dim\n\n @overrides\n def calc_params(self, params):\n mu, log_scale = params.chunk(2, dim=self.dim)\n scale = log_scale.mul_(0.5).tanh_()\n return mu, scale\n\n @staticmethod\n @overrides\n def fwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu, scale = params\n sgn = torch.sign(z)\n tmp = torch.exp(-torch.abs(z))\n z = z - sgn * scale * (tmp - 1.0) + mu\n logdet = (scale * tmp + 1).log().view(z.size(0), -1).sum(dim=1)\n return z, logdet\n\n @staticmethod\n @overrides\n def bwd(z: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:\n mu, scale = params\n z = -torch.sign(z) * scale * (torch.exp(-torch.abs(z)) - 1.0) + mu\n return z, None\n\n @overrides\n def extra_repr(self):\n return 'dim={}'.format(self.dim)\n"
] |
[
[
"torch.LongTensor",
"torch.floor",
"torch.load",
"torch.from_numpy",
"numpy.random.shuffle",
"torch.stack"
],
[
"torch.abs",
"torch.sign",
"torch.cosh",
"torch.log",
"torch.sinh"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
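The preprocess/postprocess pair in wolf/data/image.py quantises pixel values to 2**n_bits bins and rescales them to roughly [-1, 1] for the flow, then inverts that mapping when sampling. The following is a hedged standalone sketch of the same round-trip; the function bodies mirror the logic shown above but are not the package's API.

import torch

def preprocess(img, n_bits):
    # img in [0, 1]; quantise to 2**n_bits levels, then map to roughly [-1, 1)
    n_bins = 2. ** n_bits
    img = img.mul(255)
    if n_bits < 8:
        img = torch.floor(img.div(256. / n_bins))
    return (img.div(n_bins) - 0.5).div(0.5)

def postprocess(img, n_bits):
    # invert: back to bin indices, rescale to [0, 255], then to [0, 1]
    n_bins = 2. ** n_bits
    img = (img.mul(0.5) + 0.5).mul(n_bins)
    return (torch.floor(img) * (256. / n_bins)).clamp(0, 255).div(255)

x = torch.rand(2, 3, 8, 8)
err = (x - postprocess(preprocess(x, n_bits=5), n_bits=5)).abs().max()
print(err)  # bounded by the 5-bit quantisation step, roughly 8/255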
varikakasandor/dissertation-balls-into-bins
|
[
"fba69dd5ffd0b4984795c9a5ec119bf8c6f47d9e",
"fba69dd5ffd0b4984795c9a5ec119bf8c6f47d9e"
] |
[
"k_thinning/full_knowledge/RL/DQN/train.py",
"two_thinning/strategies/strategy_base.py"
] |
[
"import copy\nimport random\nimport time\nfrom math import exp\nfrom os import mkdir\n\nimport torch.optim as optim\nimport wandb\nfrom matplotlib import pyplot as plt\n\nfrom helper.replay_memory import ReplayMemory, Transition\nfrom k_choice.simulation import sample_one_choice\nfrom k_thinning.full_knowledge.RL.DQN.constants import *\n\n\n# from pytimedinput import timedInput # Works only with interactive interpreter\n\n\ndef epsilon_greedy(policy_net, loads, choices_left, max_threshold, steps_done, eps_start, eps_end, eps_decay, device):\n sample = random.random()\n eps_threshold = eps_end + (eps_start - eps_end) * exp(-1. * steps_done / eps_decay)\n if sample > eps_threshold:\n with torch.no_grad():\n options = policy_net(torch.tensor(loads + [choices_left]).unsqueeze(0)).squeeze(0)\n return options.max(0)[1].type(dtype=torch.int64)\n else:\n return torch.as_tensor(random.randrange(max_threshold + 1), dtype=torch.int64).to(device)\n\n\ndef greedy(policy_net, loads, choices_left):\n with torch.no_grad():\n options = policy_net(torch.tensor(loads + [choices_left]).unsqueeze(0)).squeeze(0)\n return options.max(0)[1].type(dtype=torch.int64).item() # TODO: instead torch.argmax (?)\n\n\ndef evaluate_q_values(model, n=N, m=M, k=K, reward=REWARD_FUN, eval_runs=EVAL_RUNS_TRAIN,\n max_threshold=MAX_THRESHOLD, use_normalised=USE_NORMALISED,\n print_behaviour=PRINT_BEHAVIOUR): # TODO: do fast version as for two_choice\n with torch.no_grad():\n sum_loads = 0\n for _ in range(eval_runs):\n loads = [0] * n\n for i in range(m):\n choices_left = k\n to_increase = None\n while choices_left > 1:\n a = greedy(model, loads, choices_left)\n a = i / n + a - max_threshold if use_normalised else a\n if print_behaviour:\n print(f\"With loads {loads}, having {choices_left} choices left, the trained model chose {a}\")\n to_increase = random.randrange(n)\n if loads[to_increase] <= a:\n break\n else:\n choices_left -= 1\n\n if choices_left == 1:\n to_increase = random.randrange(n)\n loads[to_increase] += 1\n\n sum_loads += reward(loads)\n avg_score = sum_loads / eval_runs\n return avg_score\n\n\ndef optimize_model(memory, policy_net, target_net, optimizer, batch_size, criterion, device):\n if len(memory) < batch_size:\n return\n transitions = memory.sample(batch_size)\n batch = Transition(*zip(*transitions))\n\n non_final_mask = torch.tensor(tuple(map(lambda s: not s, batch.done)), dtype=torch.bool).to(device) # flip\n non_final_next_states = torch.tensor(\n [next_state for (done, next_state) in zip(batch.done, batch.next_state) if not done])\n\n state_action_values = policy_net(torch.tensor([x for x in batch.state]))\n state_action_values = state_action_values.gather(1,\n torch.as_tensor([[a] for a in batch.action]).to(device)).squeeze()\n\n next_state_values = torch.zeros(batch_size).double().to(device)\n next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()\n # argmax = target_net(non_final_next_states).max(1)[1].detach() # TODO: double Q learning\n # next_state_values[non_final_mask] = policy(non_final_next_states)[argmax].detach() # TODO: double Q learning\n expected_state_action_values = next_state_values + torch.as_tensor(batch.reward).to(device)\n\n loss = criterion(state_action_values, expected_state_action_values) # .unsqueeze(1))\n\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n for param in policy_net.parameters():\n param.grad.data.clamp_(-1, 1) # Gradient clipping\n optimizer.step()\n\n\ndef train(n=N, m=M, k=K, memory_capacity=MEMORY_CAPACITY, 
num_episodes=TRAIN_EPISODES, reward_fun=REWARD_FUN,\n batch_size=BATCH_SIZE, eps_start=EPS_START, eps_end=EPS_END, report_wandb=False, lr=LR, pacing_fun=PACING_FUN,\n eps_decay=EPS_DECAY, optimise_freq=OPTIMISE_FREQ, target_update_freq=TARGET_UPDATE_FREQ,\n pre_train_episodes=PRE_TRAIN_EPISODES, use_normalised=USE_NORMALISED,\n nn_hidden_size=NN_HIDDEN_SIZE, nn_rnn_num_layers=NN_RNN_NUM_LAYERS, nn_num_lin_layers=NN_NUM_LIN_LAYERS,\n eval_runs=EVAL_RUNS_TRAIN, patience=PATIENCE, potential_fun=POTENTIAL_FUN, loss_function=LOSS_FUCNTION,\n max_threshold=MAX_THRESHOLD, eval_parallel_batch_size=EVAL_PARALLEL_BATCH_SIZE, save_path=SAVE_PATH,\n print_progress=PRINT_PROGRESS, nn_model=NN_MODEL, optimizer_method=OPTIMIZER_METHOD, device=DEVICE):\n start_time = time.time()\n mkdir(save_path)\n\n max_possible_load = m\n max_threshold = max_threshold - m // n if use_normalised else max_threshold # !!!\n nn_max_threshold = 2 * max_threshold if use_normalised else max_threshold\n\n policy_net = nn_model(n=n, max_threshold=nn_max_threshold, k=k, max_possible_load=max_possible_load,\n hidden_size=nn_hidden_size, rnn_num_layers=nn_rnn_num_layers,\n num_lin_layers=nn_num_lin_layers,\n device=device)\n target_net = nn_model(n=n, max_threshold=nn_max_threshold, k=k, max_possible_load=max_possible_load,\n hidden_size=nn_hidden_size, rnn_num_layers=nn_rnn_num_layers,\n num_lin_layers=nn_num_lin_layers,\n device=device)\n best_net = nn_model(n=n, max_threshold=nn_max_threshold, k=k, max_possible_load=max_possible_load,\n hidden_size=nn_hidden_size, rnn_num_layers=nn_rnn_num_layers, num_lin_layers=nn_num_lin_layers,\n device=device)\n target_net.load_state_dict(policy_net.state_dict())\n target_net.eval()\n\n optimizer = optimizer_method(policy_net.parameters(), lr=lr)\n memory = ReplayMemory(memory_capacity)\n\n steps_done = 0\n best_eval_score = None\n not_improved = 0\n threshold_jumps = []\n eval_scores = []\n\n start_loads = []\n for start_size in reversed(range(m)): # pretraining (i.e. 
curriculum learning)\n for _ in range(pacing_fun(start_size=start_size, n=n, m=m, all_episodes=pre_train_episodes)):\n start_loads.append(sample_one_choice(n=n, m=start_size))\n for _ in range(num_episodes): # training\n start_loads.append([0] * n)\n\n for ep, loads in enumerate(start_loads):\n for i in range(m):\n to_place = None\n threshold = None\n randomly_selected = None\n choices_left = k\n while choices_left > 1:\n threshold = epsilon_greedy(policy_net=policy_net, loads=loads, choices_left=choices_left,\n max_threshold=max_threshold, steps_done=steps_done,\n eps_start=eps_start, eps_end=eps_end, eps_decay=eps_decay, device=device)\n randomly_selected = random.randrange(n)\n if loads[randomly_selected] <= threshold.item():\n to_place = randomly_selected\n break\n elif choices_left > 2:\n curr_state = loads + [choices_left]\n next_state = loads + [choices_left - 1]\n reward = 0\n reward = torch.DoubleTensor([reward]).to(device)\n memory.push(curr_state, threshold, next_state, reward, False)\n steps_done += 1\n\n choices_left -= 1\n\n if choices_left == 1: # nothing was good for the model\n to_place = random.randrange(n)\n choices_left += 1\n\n curr_state = loads + [choices_left] # in a format that can directly go into the neural network\n loads[to_place] += 1\n next_state = (loads + [k])\n\n reward = reward_fun(next_state[:-1]) if i == m - 1 else 0 # \"real\" reward\n reward += potential_fun(next_state[:-1]) - potential_fun(curr_state[:-1])\n reward = torch.DoubleTensor([reward]).to(device)\n memory.push(curr_state, threshold, next_state, reward, i == m - 1)\n\n steps_done += 1\n\n if steps_done % optimise_freq == 0:\n optimize_model(memory=memory, policy_net=policy_net, target_net=target_net, optimizer=optimizer,\n batch_size=batch_size, criterion=loss_function, device=device)\n\n curr_eval_score = evaluate_q_values(policy_net, n=n, m=m, k=k, max_threshold=max_threshold, reward=reward_fun,\n eval_runs=eval_runs, use_normalised=use_normalised)\n if best_eval_score is None or curr_eval_score > best_eval_score:\n curr_eval_score = evaluate_q_values(policy_net, n=n, m=m, k=k, max_threshold=max_threshold, reward=reward_fun,\n eval_runs=5 * eval_runs, use_normalised=use_normalised)\n if report_wandb:\n wandb.log({\"score\": curr_eval_score})\n\n eval_scores.append(curr_eval_score)\n if best_eval_score is None or curr_eval_score > best_eval_score:\n best_eval_score = curr_eval_score\n best_net.load_state_dict(policy_net.state_dict())\n not_improved = 0\n if print_progress:\n print(f\"At episode {ep} the best eval score has improved to {curr_eval_score}.\")\n elif not_improved < patience:\n not_improved += 1\n if print_progress:\n print(f\"At episode {ep} no improvement has happened ({curr_eval_score}).\")\n else:\n if print_progress:\n print(f\"Training has stopped after episode {ep} as the eval score didn't improve anymore.\")\n break\n\n if ep % target_update_freq == 0:\n target_net.load_state_dict(policy_net.state_dict())\n\n plt.rcParams['font.size'] = '14'\n\n final_max_loads = [-x for x in eval_scores]\n file_name = f\"training_progression_{n}_{m}_{k}.pdf\"\n training_save_path = join(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))), \"evaluation\",\n \"k_thinning\", \"data\", file_name)\n\n plt.plot(final_max_loads)\n plt.xlabel(\"episode\")\n plt.ylabel(\"average maximum load over 25 runs\")\n plt.savefig(training_save_path)\n\n plt.clf()\n\n rolling_window_max_loads = [sum([final_max_loads[j] for j in range(i, i + 10)]) / 10 for i in\n range(len(final_max_loads) - 
10)]\n file_name = f\"training_progression_rolling_window_{n}_{m}_{k}.pdf\"\n training_save_path = join(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))), \"evaluation\",\n \"k_thinning\", \"data\", file_name)\n\n plt.plot(rolling_window_max_loads)\n plt.xlabel(\"episode\")\n plt.ylabel(\"average maximum load over 25 runs\")\n plt.savefig(training_save_path)\n print(f\"--- {(time.time() - start_time)} seconds ---\")\n return best_net\n\n\nif __name__ == \"__main__\":\n train()\n",
"from abc import ABCMeta, abstractmethod\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass StrategyBase(metaclass=ABCMeta):\n\n def __init__(self, n, m):\n self.loads = [0] * n\n self.n = n\n self.m = m\n self.curr_thresholds = []\n self.max_loads = []\n self.offered_loads = []\n self.thresholds = []\n plt.rcParams['font.size'] = '14'\n\n @abstractmethod\n def decide(self, bin):\n pass\n\n @abstractmethod\n def note(self, bin):\n pass\n\n @abstractmethod\n def reset(self):\n pass\n\n def create_analyses(self, save_path): # Does not necessarily need to be overridden\n pass\n\n def create_summary(self, save_path): # Does not necessarily need to be overridden\n pass\n\n def note_(self, bin):\n self.loads[bin] += 1\n self.max_loads.append(max(self.loads))\n self.note(bin)\n\n def reset_(self):\n self.loads = [0] * self.n\n self.thresholds.append(self.curr_thresholds)\n self.curr_thresholds = []\n self.max_loads = []\n self.offered_loads = []\n self.reset()\n\n def decide_(self, bin):\n self.offered_loads.append(self.loads[bin])\n return self.decide(bin)\n\n def create_analyses_(self, save_path):\n self.create_analyses(save_path)\n plt.clf()\n\n def create_summary_(self, save_path):\n self.create_summary(save_path)\n plt.clf()\n\n def create_plot(self, save_path): # Helper function for those strategies which decide based on a\n # threshold\n x = np.arange(self.m)\n plt.plot(x, np.array(self.curr_thresholds), label=\"threshold\")\n # plt.plot(x, np.array(self.max_loads), label=\"max load\")\n # plt.plot(x, np.array(self.offered_loads), label=\"offered load\")\n plt.title(\"Threshold progression\")\n plt.xlabel(\"Ball\")\n plt.ylabel(\"Chosen threshold\")\n plt.legend(loc='upper left')\n plt.savefig(save_path)\n\n def create_summary_plot(self, save_path): # Helper function for those strategies which decide based on a\n # threshold\n plt.plot(np.array(self.thresholds).T)\n plt.title(\"Threshold progression (multiple runs)\")\n plt.xlabel(\"Ball\")\n plt.ylabel(\"Chosen threshold\")\n plt.savefig(save_path)\n"
] |
[
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
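The DQN trainer above decays exploration with eps = eps_end + (eps_start - eps_end) * exp(-steps_done / eps_decay) and logs training curves with matplotlib. A short sketch of that schedule follows; the constants are illustrative placeholders, not the values defined in k_thinning's constants.py.

from math import exp

import matplotlib
matplotlib.use("Agg")  # write to file, no display needed
import matplotlib.pyplot as plt

EPS_START, EPS_END, EPS_DECAY = 0.9, 0.05, 2000  # illustrative values only

def epsilon(steps_done):
    """Exploration probability after steps_done environment steps."""
    return EPS_END + (EPS_START - EPS_END) * exp(-1. * steps_done / EPS_DECAY)

steps = list(range(0, 10000, 100))
plt.plot(steps, [epsilon(s) for s in steps])
plt.xlabel("steps done")
plt.ylabel("epsilon")
plt.savefig("epsilon_schedule.pdf")
plt.clf()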
csaybar/raster-vision
|
[
"ec6c8309f89c404513862369bb93dd9e6a70b455"
] |
[
"rastervision2/core/utils/zxy2geotiff.py"
] |
[
"import tempfile\n\nfrom PIL import Image\nimport numpy as np\nimport click\nimport mercantile\nimport rasterio\nfrom rasterio.windows import Window\nimport pyproj\n\nfrom rastervision2.pipeline.filesystem import (download_if_needed,\n get_local_path, upload_or_copy)\nfrom rastervision.command.aux.cogify_command import create_cog\n\n\ndef lnglat2merc(lng, lat):\n \"\"\"Convert lng, lat point to x/y Web Mercator tuple.\"\"\"\n return pyproj.transform(\n pyproj.Proj(init='epsg:4326'), pyproj.Proj(init='epsg:3857'), lng, lat)\n\n\ndef merc2lnglat(x, y):\n \"\"\"Convert x, y Web Mercator point to lng/lat tuple.\"\"\"\n return pyproj.transform(\n pyproj.Proj(init='epsg:3857'), pyproj.Proj(init='epsg:4326'), x, y)\n\n\ndef merc2pixel(tile_x, tile_y, zoom, merc_x, merc_y, tile_sz=256):\n \"\"\"Convert Web Mercator point to pixel coordinates.\n\n This is within the coordinate frame of a single ZXY tile.\n\n Args:\n tile_x: (int) x coordinate of ZXY tile\n tile_y: (int) y coordinate of ZXY tile\n zoom: (int) zoom level of ZXY tile\n merc_x: (float) Web Mercator x axis of point\n merc_y: (float) Web Mercator y axis of point\n tile_sz: (int) size of ZXY tile\n \"\"\"\n tile_merc_bounds = mercantile.xy_bounds(tile_x, tile_y, zoom)\n pix_y = int(\n round(tile_sz * ((tile_merc_bounds.top - merc_y) /\n (tile_merc_bounds.top - tile_merc_bounds.bottom))))\n pix_x = int(\n round(tile_sz * ((merc_x - tile_merc_bounds.left) /\n (tile_merc_bounds.right - tile_merc_bounds.left))))\n return (pix_x, pix_y)\n\n\ndef _zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog=False):\n \"\"\"Generates a GeoTIFF of a bounded region from a ZXY tile server.\n\n Args:\n tile_schema: (str) the URI schema for zxy tiles (ie. a slippy map tile server)\n of the form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles\n are assumed to be indexed using TMS coordinates, where the y axis starts\n at the southernmost point. The URI can be for http, S3, or the local\n file system.\n zoom: (int) the zoom level to use when retrieving tiles\n bounds: (list) a list of length 4 containing min_lat, min_lng,\n max_lat, max_lng\n output_uri: (str) where to save the GeoTIFF. The URI can be for http, S3, or the\n local file system\n \"\"\"\n min_lat, min_lng, max_lat, max_lng = bounds\n if min_lat >= max_lat:\n raise ValueError('min_lat must be < max_lat')\n if min_lng >= max_lng:\n raise ValueError('min_lng must be < max_lng')\n\n is_tms = False\n if '{-y}' in tile_schema:\n tile_schema = tile_schema.replace('{-y}', '{y}')\n is_tms = True\n\n tmp_dir_obj = tempfile.TemporaryDirectory()\n tmp_dir = tmp_dir_obj.name\n\n # Get range of tiles that cover bounds.\n output_path = get_local_path(output_uri, tmp_dir)\n tile_sz = 256\n t = mercantile.tile(min_lng, max_lat, zoom)\n xmin, ymin = t.x, t.y\n t = mercantile.tile(max_lng, min_lat, zoom)\n xmax, ymax = t.x, t.y\n\n # The supplied bounds are contained within the \"tile bounds\" -- ie. the\n # bounds of the set of tiles that covers the supplied bounds. Therefore,\n # we need to crop out the imagery that lies within the supplied bounds.\n # We do this by computing a top, bottom, left, and right offset in pixel\n # units of the supplied bounds against the tile bounds. Getting the offsets\n # in pixel units involves converting lng/lat to web mercator units since we\n # assume that is the CRS of the tiles. 
These offsets are then used to crop\n # individual tiles and place them correctly into the output raster.\n nw_merc_x, nw_merc_y = lnglat2merc(min_lng, max_lat)\n left_pix_offset, top_pix_offset = merc2pixel(xmin, ymin, zoom, nw_merc_x,\n nw_merc_y)\n\n se_merc_x, se_merc_y = lnglat2merc(max_lng, min_lat)\n se_left_pix_offset, se_top_pix_offset = merc2pixel(xmax, ymax, zoom,\n se_merc_x, se_merc_y)\n right_pix_offset = tile_sz - se_left_pix_offset\n bottom_pix_offset = tile_sz - se_top_pix_offset\n\n uncropped_height = tile_sz * (ymax - ymin + 1)\n uncropped_width = tile_sz * (xmax - xmin + 1)\n height = uncropped_height - top_pix_offset - bottom_pix_offset\n width = uncropped_width - left_pix_offset - right_pix_offset\n\n transform = rasterio.transform.from_bounds(nw_merc_x, se_merc_y, se_merc_x,\n nw_merc_y, width, height)\n with rasterio.open(\n output_path,\n 'w',\n driver='GTiff',\n height=height,\n width=width,\n count=3,\n crs='epsg:3857',\n transform=transform,\n dtype=rasterio.uint8) as dataset:\n out_x = 0\n for xi, x in enumerate(range(xmin, xmax + 1)):\n tile_xmin, tile_xmax = 0, tile_sz - 1\n if x == xmin:\n tile_xmin += left_pix_offset\n if x == xmax:\n tile_xmax -= right_pix_offset\n window_width = tile_xmax - tile_xmin + 1\n\n out_y = 0\n for yi, y in enumerate(range(ymin, ymax + 1)):\n tile_ymin, tile_ymax = 0, tile_sz - 1\n if y == ymin:\n tile_ymin += top_pix_offset\n if y == ymax:\n tile_ymax -= bottom_pix_offset\n window_height = tile_ymax - tile_ymin + 1\n\n # Convert from xyz to tms if needed.\n # https://gist.github.com/tmcw/4954720\n if is_tms:\n y = (2**zoom) - y - 1\n tile_uri = tile_schema.format(x=x, y=y, z=zoom)\n tile_path = download_if_needed(tile_uri, tmp_dir)\n img = np.array(Image.open(tile_path))\n img = img[tile_ymin:tile_ymax + 1, tile_xmin:tile_xmax + 1, :]\n\n window = Window(out_x, out_y, window_width, window_height)\n dataset.write(\n np.transpose(img[:, :, 0:3], (2, 0, 1)), window=window)\n out_y += window_height\n out_x += window_width\n\n if make_cog:\n create_cog(output_path, output_uri, tmp_dir)\n else:\n upload_or_copy(output_path, output_uri)\n\n\[email protected]()\[email protected]('tile_schema')\[email protected]('zoom')\[email protected]('bounds')\[email protected]('output_uri')\[email protected]('--make-cog', is_flag=True, default=False)\ndef zxy2geotiff(tile_schema, zoom, bounds, output_uri, make_cog):\n \"\"\"Generates a GeoTIFF of a bounded region from a ZXY tile server.\n\n TILE_SCHEMA: the URI schema for zxy tiles (ie. a slippy map tile server) of\n the form /tileserver-uri/{z}/{x}/{y}.png. If {-y} is used, the tiles are\n assumed to be indexed using TMS coordinates, where the y axis starts at\n the southernmost point. The URI can be for http, S3, or the local file\n system.\n\n ZOOM: the zoom level to use when retrieving tiles\n\n BOUNDS: a space-separated string containing min_lat, min_lng, max_lat,\n max_lng\n\n OUTPUT_URI: where to save the GeoTIFF. The URI can be for http, S3, or the\n local file system.\n \"\"\"\n bounds = [float(x) for x in bounds.split(' ')]\n _zxy2geotiff(tile_schema, int(zoom), bounds, output_uri, make_cog=make_cog)\n\n\nif __name__ == '__main__':\n zxy2geotiff()\n"
] |
[
[
"numpy.transpose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
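The only call indexed for zxy2geotiff.py is numpy.transpose, which reorders each downloaded tile from PIL's height x width x channels layout to the channels-first layout that rasterio's dataset.write expects. A toy illustration of that reordering, with no tile server or rasterio dataset involved:

import numpy as np

tile = np.zeros((256, 256, 4), dtype=np.uint8)           # H x W x C tile as loaded by PIL (RGBA here)
bands_first = np.transpose(tile[:, :, 0:3], (2, 0, 1))   # keep RGB, reorder to C x H x W
print(bands_first.shape)                                  # (3, 256, 256)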
fbickfordsmith/finding-houses
|
[
"32b562ee93c8c8dd4d008194654e0c4480ae14f8"
] |
[
"helper_functions.py"
] |
[
"import numpy as np\n\ndef random_crop_flip(x_in, y_in, i0=None, j0=None, crop_shape=(256, 256)):\n # Sample frame from random location in image. Randomly flip frame.\n if i0 == None:\n i0 = np.random.randint(low=0, high=(x_in.shape[0]-crop_shape[0]))\n if j0 == None:\n j0 = np.random.randint(low=0, high=(x_in.shape[1]-crop_shape[1]))\n x_out = x_in[i0:(i0+crop_shape[0]), j0:(j0+crop_shape[1])]\n y_out = y_in[i0:(i0+crop_shape[0]), j0:(j0+crop_shape[1])]\n if np.random.uniform() < 0.5:\n x_out = np.flip(x_out, axis=0)\n y_out = np.flip(y_out, axis=0)\n if np.random.uniform() < 0.5:\n x_out = np.flip(x_out, axis=1)\n y_out = np.flip(y_out, axis=1)\n return x_out, y_out\n\ndef sample_training_data(x_train, y_train, num_examples=1000):\n # Sample set of frames from x_train and y_train.\n x, y = [], []\n for i in range(num_examples):\n xi, yi = random_crop_flip(x_train, y_train)\n x.append(xi)\n y.append(yi)\n return np.array(x), np.array(y)\n\ndef sigmoid(x):\n # Compute numerically stable sigmoid.\n return np.exp(-np.logaddexp(0, -x))\n"
] |
[
[
"numpy.random.uniform",
"numpy.array",
"numpy.flip",
"numpy.logaddexp",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
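helper_functions.py computes the sigmoid as np.exp(-np.logaddexp(0, -x)), which stays well-behaved for large-magnitude inputs because logaddexp works in log space. A quick comparison against the naive formula (the test values are arbitrary):

import numpy as np

def sigmoid_stable(x):
    # sigma(x) = 1 / (1 + e^-x) = exp(-log(1 + e^-x)) = exp(-logaddexp(0, -x))
    return np.exp(-np.logaddexp(0, -x))

def sigmoid_naive(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.array([-1000.0, -10.0, 0.0, 10.0, 1000.0])
print(sigmoid_stable(x))   # [0.0, ~4.5e-05, 0.5, ~0.99995, 1.0]
print(sigmoid_naive(x))    # same values, but exp(-x) triggers an overflow warning at x = -1000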
NUDTNASLab/NASLib
|
[
"451cdb4738a7c1501ac62f78727c6244039dc657",
"451cdb4738a7c1501ac62f78727c6244039dc657"
] |
[
"naslib/search_spaces/hierarchical/primitives.py",
"naslib/defaults/trainer_multi.py"
] |
[
"import torch.nn as nn\n\nfrom ..core.primitives import AbstractPrimitive\n\n\nclass ConvBNReLU(AbstractPrimitive):\n def __init__(self, C_in, C_out, kernel_size, stride=1, affine=False):\n super().__init__(locals())\n pad = 0 if stride == 1 and kernel_size == 1 else 1\n self.op = nn.Sequential(\n nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=pad, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n nn.ReLU(inplace=False),\n )\n\n def forward(self, x, edge_data):\n return self.op(x)\n\n def get_embedded_ops(self):\n return None\n\n\nclass DepthwiseConv(AbstractPrimitive):\n \"\"\"\n Depthwise convolution\n \"\"\"\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super().__init__(locals())\n self.op = nn.Sequential(\n nn.Conv2d(\n C_in,\n C_in,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n groups=C_in,\n bias=False,\n ),\n nn.BatchNorm2d(C_in, affine=affine),\n nn.ReLU(inplace=False),\n )\n\n def forward(self, x, edge_data):\n return self.op(x)\n\n def get_embedded_ops(self):\n return None\n",
"import codecs\nimport time\nimport json\nimport logging\nimport os\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.utils.data.distributed\n\nfrom copy import deepcopy\nfrom fvcore.common.checkpoint import PeriodicCheckpointer\n\nfrom naslib.search_spaces.core.query_metrics import Metric\n\nfrom naslib.utils import utils\nfrom naslib.utils.logging import log_every_n_seconds, log_first_n\n\nfrom .additional_primitives import DropPathWrapper\n\nlogger = logging.getLogger(__name__)\n\n\nclass Trainer(object):\n \"\"\"\n Default implementation that handles dataloading and preparing batches, the\n train loop, gathering statistics, checkpointing and doing the final\n final evaluation.\n\n If this does not fulfil your needs free do subclass it and implement your\n required logic.\n \"\"\"\n\n def __init__(self, optimizer, config):\n \"\"\"\n Initializes the trainer.\n\n Args:\n optimizer: A NASLib optimizer\n config (AttrDict): The configuration loaded from a yaml file, e.g\n via `utils.get_config_from_args()`\n \"\"\"\n self.optimizer = optimizer\n self.config = config\n self.epochs = self.config.search.epochs\n\n # preparations\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # measuring stuff\n self.QUERYABLE = False\n self.train_top1 = utils.AverageMeter()\n self.train_top5 = utils.AverageMeter()\n self.train_loss = utils.AverageMeter()\n self.val_top1 = utils.AverageMeter()\n self.val_top5 = utils.AverageMeter()\n self.val_loss = utils.AverageMeter()\n\n n_parameters = optimizer.get_model_size()\n logger.info(\"param size = %fMB\", n_parameters)\n self.errors_dict = utils.AttrDict(\n {\n \"train_acc\": [],\n \"train_loss\": [],\n \"valid_acc\": [],\n \"valid_loss\": [],\n \"test_acc\": [],\n \"test_loss\": [],\n \"runtime\": [],\n \"arch_eval\": [],\n \"params\": n_parameters,\n }\n )\n\n def search(self, resume_from=\"\"):\n \"\"\"\n Start the architecture search.\n\n Generates a json file with training statistics.\n\n Args:\n resume_from (str): Checkpoint file to resume from. 
If not given then\n train from scratch.\n \"\"\"\n logger.info(\"Start training\")\n self.optimizer.before_training()\n checkpoint_freq = self.config.search.checkpoint_freq\n if self.optimizer.using_step_function:\n self.scheduler = self.build_search_scheduler(\n self.optimizer.op_optimizer, self.config\n )\n\n start_epoch = self._setup_checkpointers(\n resume_from, period=checkpoint_freq, scheduler=self.scheduler\n )\n else:\n start_epoch = self._setup_checkpointers(resume_from, period=checkpoint_freq)\n\n self.train_queue, self.valid_queue, _ = self.build_search_dataloaders(\n self.config\n )\n\n for e in range(start_epoch, self.epochs):\n self.optimizer.new_epoch(e)\n\n start_time = time.time()\n if self.optimizer.using_step_function:\n for step, (data_train, data_val) in enumerate(\n zip(self.train_queue, self.valid_queue)\n ):\n data_train = (\n data_train[0].to(self.device),\n data_train[1].to(self.device, non_blocking=True),\n )\n data_val = (\n data_val[0].to(self.device),\n data_val[1].to(self.device, non_blocking=True),\n )\n\n stats = self.optimizer.step(data_train, data_val)\n logits_train, logits_val, train_loss, val_loss = stats\n\n self._store_accuracies(logits_train, data_train[1], \"train\")\n self._store_accuracies(logits_val, data_val[1], \"val\")\n\n log_every_n_seconds(\n logging.INFO,\n \"Epoch {}-{}, Train loss: {:.5f}, validation loss: {:.5f}, learning rate: {}\".format(\n e, step, train_loss, val_loss, self.scheduler.get_last_lr()\n ),\n n=5,\n )\n\n if torch.cuda.is_available():\n log_first_n(\n logging.INFO,\n \"cuda consumption\\n {}\".format(torch.cuda.memory_summary()),\n n=3,\n )\n\n self.train_loss.update(float(train_loss.detach().cpu()))\n self.val_loss.update(float(val_loss.detach().cpu()))\n\n self.scheduler.step()\n\n end_time = time.time()\n\n self.errors_dict.train_acc.append(self.train_top1.avg)\n self.errors_dict.train_loss.append(self.train_loss.avg)\n self.errors_dict.valid_acc.append(self.val_top1.avg)\n self.errors_dict.valid_loss.append(self.val_loss.avg)\n self.errors_dict.runtime.append(end_time - start_time)\n else:\n end_time = time.time()\n (\n train_acc,\n train_loss,\n valid_acc,\n valid_loss,\n ) = self.optimizer.train_statistics()\n self.errors_dict.train_acc.append(train_acc)\n self.errors_dict.train_loss.append(train_loss)\n self.errors_dict.valid_acc.append(valid_acc)\n self.errors_dict.valid_loss.append(valid_loss)\n self.errors_dict.runtime.append(end_time - start_time)\n self.train_top1.avg = train_acc\n self.val_top1.avg = valid_acc\n\n self.periodic_checkpointer.step(e)\n\n anytime_results = self.optimizer.test_statistics()\n if anytime_results:\n # record anytime performance\n self.errors_dict.arch_eval.append(anytime_results)\n log_every_n_seconds(\n logging.INFO,\n \"Epoch {}, Anytime results: {}\".format(e, anytime_results),\n n=5,\n )\n\n self._log_to_json()\n self._log_and_reset_accuracies(e)\n\n self.optimizer.after_training()\n logger.info(\"Training finished\")\n\n def main_worker(self, gpu, ngpus_per_node, args, search_model, best_arch):\n logger.info(\"Start evaluation\")\n if not best_arch:\n if not search_model:\n search_model = os.path.join(\n self.config.save, \"search\", \"model_final.pth\"\n )\n self._setup_checkpointers(search_model) # required to load the architecture\n\n best_arch = self.optimizer.get_final_architecture()\n logger.info(\"Final architecture:\\n\" + best_arch.modules_str())\n\n if best_arch.QUERYABLE:\n metric = Metric.TEST_ACCURACY\n result = best_arch.query(metric=metric, 
dataset=self.config.dataset)\n logger.info(\"Queried results ({}): {}\".format(metric, result))\n self.QUERYABLE = True\n return\n\n best_arch.reset_weights(inplace=True)\n logger.info(\"Starting retraining from scratch\")\n\n args.gpu = gpu\n if gpu is not None:\n logger.info(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n\n if not torch.cuda.is_available():\n logger.warning(\"Using CPU, this will be slow!\")\n elif args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n best_arch.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n best_arch = torch.nn.parallel.DistributedDataParallel(\n best_arch, device_ids=[args.gpu]\n )\n else:\n best_arch.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n best_arch = torch.nn.parallel.DistributedDataParallel(best_arch)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n best_arch = best_arch.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n best_arch = torch.nn.DataParallel(best_arch).cuda()\n\n cudnn.benchmark = True\n\n (\n self.train_queue,\n self.valid_queue,\n self.test_queue,\n ) = self.build_eval_dataloaders(self.config)\n\n optim = self.build_eval_optimizer(best_arch.parameters(), self.config)\n scheduler = self.build_eval_scheduler(optim, self.config)\n\n start_epoch = self._setup_checkpointers(\n args.resume_from,\n search=False,\n period=self.config.evaluation.checkpoint_freq,\n model=best_arch, # checkpointables start here\n optim=optim,\n scheduler=scheduler,\n )\n\n grad_clip = self.config.evaluation.grad_clip\n loss = torch.nn.CrossEntropyLoss()\n\n best_arch.train()\n self.train_top1.reset()\n self.train_top5.reset()\n self.val_top1.reset()\n self.val_top5.reset()\n\n # Enable drop path\n if isinstance(best_arch, torch.nn.DataParallel):\n best_arch.module.update_edges(\n update_func=lambda edge: edge.data.set(\n \"op\", DropPathWrapper(edge.data.op)\n ),\n scope=best_arch.module.OPTIMIZER_SCOPE,\n private_edge_data=True,\n )\n else:\n best_arch.update_edges(\n update_func=lambda edge: edge.data.set(\n \"op\", DropPathWrapper(edge.data.op)\n ),\n scope=best_arch.OPTIMIZER_SCOPE,\n private_edge_data=True,\n )\n\n # train from scratch\n epochs = self.config.evaluation.epochs\n for e in range(start_epoch, epochs):\n # update drop path probability\n drop_path_prob = self.config.evaluation.drop_path_prob * e / epochs\n if isinstance(best_arch, torch.nn.DataParallel):\n best_arch.module.update_edges(\n update_func=lambda edge: edge.data.set(\n \"drop_path_prob\", drop_path_prob\n ),\n 
scope=best_arch.module.OPTIMIZER_SCOPE,\n private_edge_data=True,\n )\n else:\n best_arch.update_edges(\n update_func=lambda edge: edge.data.set(\n \"drop_path_prob\", drop_path_prob\n ),\n scope=best_arch.OPTIMIZER_SCOPE,\n private_edge_data=True,\n )\n\n # Train queue\n for i, (input_train, target_train) in enumerate(self.train_queue):\n input_train = input_train.to(self.device)\n target_train = target_train.to(self.device, non_blocking=True)\n\n optim.zero_grad()\n logits_train = best_arch(input_train)\n train_loss = loss(logits_train, target_train)\n if hasattr(best_arch, \"auxilary_logits\"): # darts specific stuff\n log_first_n(logging.INFO, \"Auxiliary is used\", n=10)\n auxiliary_loss = loss(best_arch.auxilary_logits(), target_train)\n train_loss += (\n self.config.evaluation.auxiliary_weight * auxiliary_loss\n )\n train_loss.backward()\n if grad_clip:\n torch.nn.utils.clip_grad_norm_(best_arch.parameters(), grad_clip)\n optim.step()\n\n self._store_accuracies(logits_train, target_train, \"train\")\n log_every_n_seconds(\n logging.INFO,\n \"Epoch {}-{}, Train loss: {:.5}, learning rate: {}\".format(\n e, i, train_loss, scheduler.get_last_lr()\n ),\n n=5,\n )\n\n if torch.cuda.is_available():\n log_first_n(\n logging.INFO,\n \"cuda consumption\\n {}\".format(torch.cuda.memory_summary()),\n n=3,\n )\n\n # Validation queue\n if self.valid_queue:\n for i, (input_valid, target_valid) in enumerate(self.valid_queue):\n\n input_valid = input_valid.to(self.device).float()\n target_valid = target_valid.to(\n self.device, non_blocking=True\n ).float()\n\n # just log the validation accuracy\n logits_valid = best_arch(input_valid)\n self._store_accuracies(logits_valid, target_valid, \"val\")\n\n scheduler.step()\n self.periodic_checkpointer.step(e)\n self._log_and_reset_accuracies(e)\n\n def evaluate(\n self,\n retrain=True,\n search_model=\"\",\n resume_from=\"\",\n best_arch=None,\n ):\n \"\"\"\n Evaluate the final architecture as given from the optimizer.\n\n If the search space has an interface to a benchmark then query that.\n Otherwise train as defined in the config.\n\n Args:\n retrain (bool): Reset the weights from the architecure search\n search_model (str): Path to checkpoint file that was created during\n search. If not provided, then try to load 'model_final.pth' from search\n resume_from (str): Resume retraining from the given checkpoint file.\n multi_gpu (bool): Distribute training on multiple gpus.\n best_arch: Parsed model you want to directly evaluate and ignore the final model\n from the optimizer.\n \"\"\"\n\n # best_arch.to(self.device)\n self.config.evaluation.resume_from = resume_from\n if retrain:\n if self.config.gpu is not None:\n logger.warning(\n \"You have chosen a specific GPU. 
This will completely \\\n disable data parallelism.\"\n )\n\n if (\n self.config.evaluation.dist_url == \"env://\"\n and self.config.evaluation.world_size == -1\n ):\n self.config.evaluation.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n self.config.evaluation.distributed = (\n self.config.evaluation.world_size > 1\n or self.config.evaluation.multiprocessing_distributed\n )\n ngpus_per_node = torch.cuda.device_count()\n\n if self.config.evaluation.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the\n # total world_size needs to be adjusted\n self.config.evaluation.world_size = (\n ngpus_per_node * self.config.evaluation.world_size\n )\n # Use torch.multiprocessing.spawn to launch distributed\n # processes: the main_worker process function\n mp.spawn(\n self.main_worker,\n nprocs=ngpus_per_node,\n args=(\n ngpus_per_node,\n self.config.evaluation,\n search_model,\n best_arch,\n ),\n )\n else:\n # Simply call main_worker function\n self.main_worker(\n self.config.gpu,\n ngpus_per_node,\n self.config.evaluation,\n search_model,\n best_arch,\n )\n\n if not self.QUERYABLE:\n # Disable drop path\n best_arch.update_edges(\n update_func=lambda edge: edge.data.set(\n \"op\", edge.data.op.get_embedded_ops()\n ),\n scope=best_arch.OPTIMIZER_SCOPE,\n private_edge_data=True,\n )\n\n # measure final test accuracy\n top1 = utils.AverageMeter()\n top5 = utils.AverageMeter()\n\n best_arch.eval()\n\n for i, data_test in enumerate(self.test_queue):\n input_test, target_test = data_test\n input_test = input_test.to(self.device)\n target_test = target_test.to(self.device, non_blocking=True)\n\n n = input_test.size(0)\n\n with torch.no_grad():\n logits = best_arch(input_test)\n\n prec1, prec5 = utils.accuracy(logits, target_test, topk=(1, 5))\n top1.update(prec1.data.item(), n)\n top5.update(prec5.data.item(), n)\n\n log_every_n_seconds(\n logging.INFO,\n \"Inference batch {} of {}.\".format(i, len(self.test_queue)),\n n=5,\n )\n\n logger.info(\n \"Evaluation finished. Test accuracies: top-1 = {:.5}, \\\n top-5 = {:.5}\".format(\n top1.avg, top5.avg\n )\n )\n\n @staticmethod\n def build_search_dataloaders(config):\n train_queue, valid_queue, test_queue, _, _ = utils.get_train_val_loaders(\n config, mode=\"train\"\n )\n return train_queue, valid_queue, _ # test_queue is not used in search currently\n\n @staticmethod\n def build_eval_dataloaders(config):\n train_queue, valid_queue, test_queue, _, _ = utils.get_train_val_loaders(\n config, mode=\"val\"\n )\n return train_queue, valid_queue, test_queue\n\n @staticmethod\n def build_eval_optimizer(parameters, config):\n return torch.optim.SGD(\n parameters,\n lr=config.evaluation.learning_rate,\n momentum=config.evaluation.momentum,\n weight_decay=config.evaluation.weight_decay,\n )\n\n @staticmethod\n def build_search_scheduler(optimizer, config):\n return torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer,\n T_max=config.search.epochs,\n eta_min=config.search.learning_rate_min,\n )\n\n @staticmethod\n def build_eval_scheduler(optimizer, config):\n return torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer,\n T_max=config.evaluation.epochs,\n eta_min=config.evaluation.learning_rate_min,\n )\n\n def _log_and_reset_accuracies(self, epoch):\n logger.info(\n \"Epoch {} done. 
Train accuracy (top1, top5): {:.5f}, {:.5f}, \\\n Validation accuracy: {:.5f}, {:.5f}\".format(\n epoch,\n self.train_top1.avg,\n self.train_top5.avg,\n self.val_top1.avg,\n self.val_top5.avg,\n )\n )\n self.train_top1.reset()\n self.train_top5.reset()\n self.train_loss.reset()\n self.val_top1.reset()\n self.val_top5.reset()\n self.val_loss.reset()\n\n def _store_accuracies(self, logits, target, split):\n \"\"\"Update the accuracy counters\"\"\"\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = logits.size(0)\n\n if split == \"train\":\n self.train_top1.update(prec1.data.item(), n)\n self.train_top5.update(prec5.data.item(), n)\n elif split == \"val\":\n self.val_top1.update(prec1.data.item(), n)\n self.val_top5.update(prec5.data.item(), n)\n else:\n raise ValueError(\"Unknown split: {}. Expected either 'train' or 'val'\")\n\n def _prepare_dataloaders(self, config, mode=\"train\"):\n \"\"\"\n Prepare train, validation, and test dataloaders with the splits defined\n in the config.\n\n Args:\n config (AttrDict): config from config file.\n \"\"\"\n train_queue, valid_queue, test_queue, _, _ = utils.get_train_val_loaders(\n config, mode\n )\n self.train_queue = train_queue\n self.valid_queue = valid_queue\n self.test_queue = test_queue\n\n def _setup_checkpointers(\n self, resume_from=\"\", search=True, period=1, **add_checkpointables\n ):\n \"\"\"\n Sets up a periodic chechkpointer which can be used to save checkpoints\n at every epoch. It will call optimizer's `get_checkpointables()` as objects\n to store.\n\n Args:\n resume_from (str): A checkpoint file to resume the search or evaluation from.\n search (bool): Whether search or evaluation phase is checkpointed. This is required\n because the files are in different folders to not be overridden\n add_checkpointables (object): Additional things to checkpoint together with the\n optimizer's checkpointables.\n \"\"\"\n checkpointables = self.optimizer.get_checkpointables()\n checkpointables.update(add_checkpointables)\n\n checkpointer = utils.Checkpointer(\n model=checkpointables.pop(\"model\"),\n save_dir=self.config.save + \"/search\"\n if search\n else self.config.save + \"/eval\",\n **checkpointables\n )\n\n self.periodic_checkpointer = PeriodicCheckpointer(\n checkpointer,\n period=period,\n max_iter=self.config.search.epochs\n if search\n else self.config.evaluation.epochs,\n )\n\n if resume_from:\n logger.info(\"loading model from file {}\".format(resume_from))\n checkpoint = checkpointer.resume_or_load(resume_from, resume=True)\n if checkpointer.has_checkpoint():\n return checkpoint.get(\"iteration\", -1) + 1\n return 0\n\n def _log_to_json(self):\n \"\"\"log training statistics to json file\"\"\"\n if not os.path.exists(self.config.save):\n os.makedirs(self.config.save)\n with codecs.open(\n os.path.join(self.config.save, \"errors.json\"), \"w\", encoding=\"utf-8\"\n ) as file:\n json.dump(self.errors_dict, file, separators=(\",\", \":\"))\n"
] |
[
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
],
[
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.multiprocessing.spawn",
"torch.cuda.set_device",
"torch.cuda.memory_summary",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.is_available",
"torch.optim.SGD",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
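Trainer.build_eval_optimizer and build_eval_scheduler above pair torch.optim.SGD with a CosineAnnealingLR schedule taken from the config. A hedged sketch of that pairing on a toy model; the hyperparameters here are made up for illustration and are not NASLib defaults.

import torch
from torch import nn

model = nn.Linear(10, 2)  # toy stand-in for the evaluated architecture

# mirrors the SGD + cosine-annealing pairing used for retraining above
optimizer = torch.optim.SGD(model.parameters(), lr=0.025,
                            momentum=0.9, weight_decay=3e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50,
                                                       eta_min=0.001)

for epoch in range(3):
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(torch.randn(4, 10)), torch.zeros(4, 2))
    loss.backward()
    optimizer.step()
    scheduler.step()
    print(epoch, scheduler.get_last_lr())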
ShashankBice/imview
|
[
"81236bf9149e677c15563470ee2cbe850f775b67"
] |
[
"imview/hs_multi.py"
] |
[
"#! /usr/bin/env python\n\n#Create weight rasters needed for multi-directional hillshade\n\nimport os, sys\n\nimport numpy as np\nimport gdal\n\nfrom pygeotools.lib import iolib\n\naz_list = (225, 270, 315, 360)\naspect_fn = sys.argv[1]\naspect_ds = gdal.Open(aspect_fn)\naspect = iolib.ds_getma(aspect_ds)\n\nfor az in az_list:\n w_fn = os.path.splitext(aspect_fn)[0]+'_w%i.tif' % az\n w = np.sin(np.radians(aspect - az))**2\n iolib.writeGTiff(w, w_fn, aspect_ds)\n"
] |
[
[
"numpy.radians"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
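hs_multi.py derives one weight raster per illumination azimuth as w = sin^2(aspect - az), the ingredient needed to blend a multi-directional hillshade. A small numpy-only sketch of the weight computation, skipping the GDAL/pygeotools I/O:

import numpy as np

az_list = (225, 270, 315, 360)
aspect = np.array([[0.0, 90.0], [180.0, 270.0]])  # toy aspect grid, in degrees

# one weight raster per azimuth: w = sin^2(aspect - az)
weights = {az: np.sin(np.radians(aspect - az)) ** 2 for az in az_list}
for az, w in weights.items():
    print(az, w.round(3))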
kalyc/keras-apache-mxnet
|
[
"5497ebd50a45ccc446b8944ebbe11fb7721a5533",
"5497ebd50a45ccc446b8944ebbe11fb7721a5533"
] |
[
"examples/lstm_seq2seq.py",
"tests/integration_tests/test_eia_integration.py"
] |
[
"'''Sequence to sequence example in Keras (character-level).\n\nThis script demonstrates how to implement a basic character-level\nsequence-to-sequence model. We apply it to translating\nshort English sentences into short French sentences,\ncharacter-by-character. Note that it is fairly unusual to\ndo character-level machine translation, as word-level\nmodels are more common in this domain.\n\n# Summary of the algorithm\n\n- We start with input sequences from a domain (e.g. English sentences)\n and corresponding target sequences from another domain\n (e.g. French sentences).\n- An encoder LSTM turns input sequences to 2 state vectors\n (we keep the last LSTM state and discard the outputs).\n- A decoder LSTM is trained to turn the target sequences into\n the same sequence but offset by one timestep in the future,\n a training process called \"teacher forcing\" in this context.\n Is uses as initial state the state vectors from the encoder.\n Effectively, the decoder learns to generate `targets[t+1...]`\n given `targets[...t]`, conditioned on the input sequence.\n- In inference mode, when we want to decode unknown input sequences, we:\n - Encode the input sequence into state vectors\n - Start with a target sequence of size 1\n (just the start-of-sequence character)\n - Feed the state vectors and 1-char target sequence\n to the decoder to produce predictions for the next character\n - Sample the next character using these predictions\n (we simply use argmax).\n - Append the sampled character to the target sequence\n - Repeat until we generate the end-of-sequence character or we\n hit the character limit.\n\n# Data download\n\nEnglish to French sentence pairs.\nhttp://www.manythings.org/anki/fra-eng.zip\n\nLots of neat sentence pairs datasets can be found at:\nhttp://www.manythings.org/anki/\n\n# References\n\n- Sequence to Sequence Learning with Neural Networks\n https://arxiv.org/abs/1409.3215\n- Learning Phrase Representations using\n RNN Encoder-Decoder for Statistical Machine Translation\n https://arxiv.org/abs/1406.1078\n'''\nfrom __future__ import print_function\n\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, Dense\nimport numpy as np\n\nbatch_size = 64 # Batch size for training.\nepochs = 100 # Number of epochs to train for.\nlatent_dim = 256 # Latent dimensionality of the encoding space.\nnum_samples = 10000 # Number of samples to train on.\n# Path to the data txt file on disk.\ndata_path = 'fra-eng/fra.txt'\n\n# Vectorize the data.\ninput_texts = []\ntarget_texts = []\ninput_characters = set()\ntarget_characters = set()\nwith open(data_path, 'r', encoding='utf-8') as f:\n lines = f.read().split('\\n')\nfor line in lines[: min(num_samples, len(lines) - 1)]:\n input_text, target_text = line.split('\\t')\n # We use \"tab\" as the \"start sequence\" character\n # for the targets, and \"\\n\" as \"end sequence\" character.\n target_text = '\\t' + target_text + '\\n'\n input_texts.append(input_text)\n target_texts.append(target_text)\n for char in input_text:\n if char not in input_characters:\n input_characters.add(char)\n for char in target_text:\n if char not in target_characters:\n target_characters.add(char)\n\ninput_characters = sorted(list(input_characters))\ntarget_characters = sorted(list(target_characters))\nnum_encoder_tokens = len(input_characters)\nnum_decoder_tokens = len(target_characters)\nmax_encoder_seq_length = max([len(txt) for txt in input_texts])\nmax_decoder_seq_length = max([len(txt) for txt in target_texts])\n\nprint('Number of samples:', 
len(input_texts))\nprint('Number of unique input tokens:', num_encoder_tokens)\nprint('Number of unique output tokens:', num_decoder_tokens)\nprint('Max sequence length for inputs:', max_encoder_seq_length)\nprint('Max sequence length for outputs:', max_decoder_seq_length)\n\ninput_token_index = dict(\n [(char, i) for i, char in enumerate(input_characters)])\ntarget_token_index = dict(\n [(char, i) for i, char in enumerate(target_characters)])\n\nencoder_input_data = np.zeros(\n (len(input_texts), max_encoder_seq_length, num_encoder_tokens),\n dtype='float32')\ndecoder_input_data = np.zeros(\n (len(input_texts), max_decoder_seq_length, num_decoder_tokens),\n dtype='float32')\ndecoder_target_data = np.zeros(\n (len(input_texts), max_decoder_seq_length, num_decoder_tokens),\n dtype='float32')\n\nfor i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):\n for t, char in enumerate(input_text):\n encoder_input_data[i, t, input_token_index[char]] = 1.\n for t, char in enumerate(target_text):\n # decoder_target_data is ahead of decoder_input_data by one timestep\n decoder_input_data[i, t, target_token_index[char]] = 1.\n if t > 0:\n # decoder_target_data will be ahead by one timestep\n # and will not include the start character.\n decoder_target_data[i, t - 1, target_token_index[char]] = 1.\n\n# Define an input sequence and process it.\n# MXNet backend RNN required input shape, for TensorFlow backend, you can provide the shape as:\n# encoder_inputs = Input(shape=(None, num_encoder_tokens))\nencoder_inputs = Input(shape=(max_encoder_seq_length, num_encoder_tokens))\nencoder = LSTM(latent_dim, return_state=True)\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\n# We discard `encoder_outputs` and only keep the states.\nencoder_states = [state_h, state_c]\n\n# Set up the decoder, using `encoder_states` as initial state.\n# MXNet backend RNN required input shape, for TensorFlow backend, you can provide the shape as:\n# decoder_inputs = Input(shape=(None, num_decoder_tokens))\ndecoder_inputs = Input(shape=(max_decoder_seq_length, num_decoder_tokens))\n# We set up our decoder to return full output sequences,\n# and to return internal states as well. 
We don't use the\n# return states in the training model, but we will use them in inference.\ndecoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs,\n initial_state=encoder_states)\ndecoder_dense = Dense(num_decoder_tokens, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\n# Run training\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy')\nmodel.fit([encoder_input_data, decoder_input_data], decoder_target_data,\n batch_size=batch_size,\n epochs=epochs,\n validation_split=0.2)\n# Save model\nmodel.save('s2s.h5')\n\n# Next: inference mode (sampling).\n# Here's the drill:\n# 1) encode input and retrieve initial decoder state\n# 2) run one step of decoder with this initial state\n# and a \"start of sequence\" token as target.\n# Output will be the next target token\n# 3) Repeat with the current target token and current states\n\n# Define sampling models\nencoder_model = Model(encoder_inputs, encoder_states)\n\ndecoder_state_input_h = Input(shape=(latent_dim,))\ndecoder_state_input_c = Input(shape=(latent_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\ndecoder_outputs, state_h, state_c = decoder_lstm(\n decoder_inputs, initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = Model(\n [decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n\n# Reverse-lookup token index to decode sequences back to\n# something readable.\nreverse_input_char_index = dict(\n (i, char) for char, i in input_token_index.items())\nreverse_target_char_index = dict(\n (i, char) for char, i in target_token_index.items())\n\n\ndef decode_sequence(input_seq):\n # Encode the input as state vectors.\n states_value = encoder_model.predict(input_seq)\n\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n # Populate the first character of target sequence with the start character.\n target_seq[0, 0, target_token_index['\\t']] = 1.\n\n # Sampling loop for a batch of sequences\n # (to simplify, here we assume a batch of size 1).\n stop_condition = False\n decoded_sentence = ''\n while not stop_condition:\n output_tokens, h, c = decoder_model.predict(\n [target_seq] + states_value)\n\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_char = reverse_target_char_index[sampled_token_index]\n decoded_sentence += sampled_char\n\n # Exit condition: either hit max length\n # or find stop character.\n if (sampled_char == '\\n' or\n len(decoded_sentence) > max_decoder_seq_length):\n stop_condition = True\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, 1, num_decoder_tokens))\n target_seq[0, 0, sampled_token_index] = 1.\n\n # Update states\n states_value = [h, c]\n\n return decoded_sentence\n\n\nfor seq_index in range(100):\n # Take one sequence (part of the training set)\n # for trying out decoding.\n input_seq = encoder_input_data[seq_index: seq_index + 1]\n decoded_sentence = decode_sequence(input_seq)\n print('-')\n print('Input sentence:', input_texts[seq_index])\n print('Decoded sentence:', decoded_sentence)\n",
"import pytest\nimport keras\nimport numpy as np\nfrom keras import backend as K\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg16 import preprocess_input, decode_predictions\n\n\ndef has_eia():\n if K.backend() != 'mxnet':\n return False\n\n import mxnet as mx\n try:\n # try to create eia context\n mx.eia()\n except:\n return False\n\n return True\n\n\[email protected](K.backend() != 'mxnet' or not has_eia(),\n reason='Inference with AWS EIA is currently supported '\n 'with MXNet backend only. We need to have EIA '\n 'to run Keras predictions on EIA tests.')\ndef test_prediction_with_eia():\n import mxnet as mx\n\n # 1. Download and save ImageNet Pre-Trained VGG-16\n model = VGG16(weights='imagenet', input_shape=(224, 224, 3))\n model.save(\"imagenet_vgg16.h5\")\n\n # 2. Load the Model in EIA Context\n with K.Context(\"eia\"):\n model = keras.models.load_model(\"imagenet_vgg16.h5\")\n\n # Verify Model is loaded in EIA context\n assert model._context\n assert model._context[0] == mx.eia()\n\n # 3. Prepare inputs for prediction\n dummy_image1 = np.random.randint(low=0, high=255, size=(224, 224, 3))\n dummy_image1 = np.expand_dims(dummy_image1, axis=0)\n dummy_image1 = preprocess_input(dummy_image1)\n preds = model.predict(dummy_image1)\n assert len(decode_predictions(preds, top=3)[0]) == 3\n\n # 4. Test batch prediction\n dummy_image2 = np.random.randint(low=0, high=255, size=(224, 224, 3))\n dummy_image2 = np.expand_dims(dummy_image2, axis=0)\n dummy_image2 = preprocess_input(dummy_image2)\n\n batch_input = np.concatenate((dummy_image1, dummy_image2), axis=0)\n batch_preds = model.predict_on_batch(batch_input)\n assert len(batch_preds) == 2\n for pred in decode_predictions(batch_preds, top=3):\n assert len(pred[0]) == 3\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n"
] |
[
[
"numpy.argmax",
"numpy.zeros"
],
[
"numpy.concatenate",
"numpy.expand_dims",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
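Note on the `apis` column: each sub-list above enumerates the fully-qualified NumPy calls detected in the corresponding `code` entry (`numpy.argmax`, `numpy.zeros` for the seq2seq script; `numpy.concatenate`, `numpy.expand_dims`, `numpy.random.randint` for the EIA prediction test). The following is a minimal sketch of how such a list could be derived by walking the Python AST; the helper name `collect_api_calls` and the `roots` filter are illustrative assumptions, not the extraction pipeline actually used to build this dataset.

import ast

def collect_api_calls(source, roots=("numpy", "tensorflow", "keras")):
    """Return sorted, fully-qualified call names (e.g. 'numpy.zeros') found in `source`."""
    tree = ast.parse(source)

    # Map import aliases back to module names: `import numpy as np` -> {'np': 'numpy'}
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name] = alias.name

    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            # Unwind attribute chains like np.random.randint -> ['randint', 'random', 'np']
            parts = []
            func = node.func
            while isinstance(func, ast.Attribute):
                parts.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name):
                parts.append(aliases.get(func.id, func.id))
                name = ".".join(reversed(parts))
                if name.split(".")[0] in roots:
                    calls.add(name)
    return sorted(calls)

print(collect_api_calls("import numpy as np\nx = np.zeros((1, 3))\ni = np.argmax(x)"))
# prints ['numpy.argmax', 'numpy.zeros'], matching the apis entry of the first row above

This sketch deliberately ignores `from ... import` forms and reassigned aliases, so it is a lower bound on the calls a fuller analysis would report.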