repo_name (string · lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---
lboeman/solarforecastarbiter-api | [
"9df598b5c638c3e36d0649e08e955b3ddc1b542d"
]
| [
"sfa_api/utils/request_handling.py"
]
| [
"from collections import defaultdict\nfrom io import StringIO\nimport json\nimport re\n\n\nfrom flask import request, current_app\nimport numpy as np\nimport pandas as pd\nfrom solarforecastarbiter.datamodel import Forecast, Site\nfrom solarforecastarbiter.reference_forecasts import utils as fx_utils\nfrom werkzeug.exceptions import RequestEntityTooLarge\n\n\nfrom sfa_api.utils.errors import (\n BadAPIRequest, NotFoundException, StorageAuthError)\n\n\ndef validate_observation_values(observation_df, quality_flag_range=(0, 1)):\n \"\"\"\n Validate the columns of an observation value DataFrame.\n\n Parameters\n ----------\n observation_df : pandas.DataFrame\n DataFrame to validate columns and values\n quality_flag_range : tuple, default (0, 1)\n Range of allowable quality_flag\n\n Returns\n -------\n pandas.DataFrame\n With types adjusted as appropriate\n\n Raises\n ------\n BadAPIRequest\n For any errors in the columns or values\n \"\"\"\n errors = defaultdict(list)\n try:\n observation_df['value'] = pd.to_numeric(observation_df['value'],\n downcast='float')\n except ValueError:\n errors['value'].append(\n 'Invalid item in \"value\" field. Ensure that all '\n 'values are integers, floats, empty, NaN, or NULL.')\n except KeyError:\n errors['value'].append('Missing \"value\" field.')\n\n try:\n observation_df['timestamp'] = pd.to_datetime(\n observation_df['timestamp'],\n utc=True)\n except ValueError:\n errors['timestamp'].append(\n 'Invalid item in \"timestamp\" field. Ensure '\n 'that timestamps are ISO8601 compliant')\n except KeyError:\n errors['timestamp'].append('Missing \"timestamp\" field.')\n\n try:\n observation_df['quality_flag'].astype(int)\n except KeyError:\n errors['quality_flag'].append('Missing \"quality_flag\" field.')\n except (ValueError, TypeError):\n errors['quality_flag'].append(\n 'Item in \"quality_flag\" field is not an integer.')\n else:\n if not np.isclose(\n observation_df['quality_flag'].mod(1), 0, 1e-12).all():\n errors['quality_flag'].append(\n 'Item in \"quality_flag\" field is not an integer.')\n\n if not observation_df['quality_flag'].between(\n *quality_flag_range).all():\n errors['quality_flag'].append(\n 'Item in \"quality_flag\" field out of range '\n f'{quality_flag_range}.')\n if errors:\n raise BadAPIRequest(errors)\n return observation_df\n\n\ndef parse_csv(csv_string):\n \"\"\"Parse a csv into a dataframe and raise appropriate errors\n\n Parameters\n ----------\n csv_string: str\n String representation of csv to read into a dataframe\n\n Returns\n -------\n pandas.DataFrame\n\n Raises\n ------\n BadAPIRequestError\n If the string cannot be parsed.\n \"\"\"\n raw_data = StringIO(csv_string)\n try:\n value_df = pd.read_csv(raw_data,\n na_values=[-999.0, -9999.0],\n keep_default_na=True,\n comment='#')\n except (pd.errors.EmptyDataError, pd.errors.ParserError):\n raise BadAPIRequest({'error': 'Malformed CSV'})\n return value_df\n\n\ndef parse_json(json_str):\n \"\"\"Parse a string of json values into a DataFrame\n\n Parameters\n ----------\n json_str: str\n\n Returns\n -------\n pandas.DataFrame\n\n Raises\n ------\n BadAPIRequestError\n If the 'values' key is missing, or if the contents of the\n values key cannot be parsed into a DataFrame.\n \"\"\"\n try:\n json_dict = json.loads(json_str)\n except json.decoder.JSONDecodeError:\n raise BadAPIRequest(error='Malformed JSON.')\n try:\n raw_values = json_dict['values']\n except (TypeError, KeyError):\n error = 'Supplied JSON does not contain \"values\" field.'\n raise BadAPIRequest(error=error)\n try:\n 
value_df = pd.DataFrame(raw_values)\n except ValueError:\n raise BadAPIRequest({'error': 'Malformed JSON'})\n return value_df\n\n\ndef parse_values(decoded_data, mimetype):\n \"\"\"Attempts to parse a string of data into a DataFrame based on MIME type.\n\n Parameters\n ----------\n decoded_data: str\n A string of data to parse.\n mimetype: str\n The MIME type of the data.\n\n Returns\n -------\n pandas.DataFrame\n\n Raises\n ------\n BadAPIRequest\n - If the MIME type is not one of 'text/csv', 'application/json',\n or 'application/vnd.ms-excel'\n - If parsing fails, see parse_json or parse_csv for conditions.\n - If the file contains more than the maximum allowed number of\n datapoints.\n \"\"\"\n if mimetype == 'text/csv' or mimetype == 'application/vnd.ms-excel':\n values = parse_csv(decoded_data)\n elif mimetype == 'application/json':\n values = parse_json(decoded_data)\n else:\n error = \"Unsupported Content-Type or MIME type.\"\n raise BadAPIRequest(error=error)\n if values.index.size > current_app.config.get('MAX_POST_DATAPOINTS'):\n raise BadAPIRequest({\n 'error': ('File exceeds maximum number of datapoints. '\n f'{current_app.config.get(\"MAX_POST_DATAPOINTS\")} '\n f'datapoints allowed, {values.index.size} datapoints '\n 'found in file.')\n })\n return values\n\n\ndef decode_file_in_request_body():\n \"\"\"Decode the data from a utf-8 encoded file into a string and\n return the contents and the file's mimetype.\n\n Returns\n -------\n decoded_data: str\n The posted utf-8 data as a string.\n posted_file.mimetype: str\n MIME type of the file in the request body.\n\n Raises\n ------\n BadAPIRequest\n - There is more than one file in the request.\n - If the request does not contain a file.\n - The file does not contain valid utf-8.\n \"\"\"\n posted_files = list(request.files.keys())\n if len(posted_files) > 1:\n error = \"Multiple files found. 
Please upload one file at a time.\"\n raise BadAPIRequest(error=error)\n\n try:\n posted_filename = posted_files[0]\n posted_file = request.files[posted_filename]\n except IndexError:\n error = \"Missing file in request body.\"\n raise BadAPIRequest(error=error)\n\n posted_data = posted_file.read()\n\n try:\n decoded_data = posted_data.decode('utf-8')\n except UnicodeDecodeError:\n error = 'File could not be decoded as UTF-8.'\n raise BadAPIRequest(error=error)\n\n return decoded_data, posted_file.mimetype\n\n\ndef validate_parsable_values():\n \"\"\"Can be called from a POST view/endpoint to examine posted\n data for mimetype and attempt to parse to a DataFrame.\n\n Raises\n ------\n BadAPIRequest\n If the data cannot be parsed.\n werkzeug.exceptions.RequestEntityTooLarge\n If the `Content-Length` header is greater than the application's\n `MAX_CONTENT_LENGTH` config variable.\n \"\"\"\n # Default for content length in case of empty body\n content_length = int(request.headers.get('Content-Length', 0))\n if (content_length > current_app.config['MAX_CONTENT_LENGTH']):\n raise RequestEntityTooLarge\n if request.mimetype == 'multipart/form-data':\n decoded_data, mimetype = decode_file_in_request_body()\n else:\n decoded_data = request.get_data(as_text=True)\n mimetype = request.mimetype\n value_df = parse_values(decoded_data, mimetype)\n return value_df\n\n\ndef parse_to_timestamp(dt_string):\n \"\"\"Attempts to parse to Timestamp.\n\n Parameters\n ----------\n dt_string: str\n\n Returns\n -------\n pandas.Timestamp\n\n Raises\n ------\n ValueError\n If the string cannot be parsed to timestamp, or parses to null\n \"\"\"\n timestamp = pd.Timestamp(dt_string)\n if pd.isnull(timestamp):\n raise ValueError\n if timestamp.tzinfo is None:\n # consistent with schema ISODateTime\n timestamp = timestamp.tz_localize('UTC')\n return timestamp\n\n\ndef validate_start_end():\n \"\"\"Parses start and end query parameters into pandas\n Timestamps.\n\n Returns\n -------\n start: Pandas Timestamp\n end: Pandas Timestamp\n\n Raises\n ------\n BadAPIRequest\n If start and end values cannot be parsed.\n \"\"\"\n errors = {}\n start = request.args.get('start', None)\n end = request.args.get('end', None)\n if start is not None:\n try:\n start = parse_to_timestamp(start)\n except ValueError:\n errors.update({'start': ['Invalid start date format']})\n else:\n errors.update({'start': ['Must provide a start time']})\n if end is not None:\n try:\n end = parse_to_timestamp(end)\n except ValueError:\n errors.update({'end': ['Invalid end date format']})\n else:\n errors.update({'end': ['Must provide an end time']})\n if errors:\n raise BadAPIRequest(errors)\n\n # parse_to_timestamp ensures there is a tz\n if end.tzinfo != start.tzinfo:\n end = end.tz_convert(start.tzinfo)\n\n if end - start > current_app.config['MAX_DATA_RANGE_DAYS']:\n raise BadAPIRequest({'end': [\n f'Only {current_app.config[\"MAX_DATA_RANGE_DAYS\"].days} days of '\n 'data may be requested per request']})\n return start, end\n\n\ndef validate_index_period(index, interval_length, previous_time):\n \"\"\"\n Validate that the index conforms to interval_length.\n\n Parameters\n ----------\n index : pd.DatetimeIndex\n interval_length : int\n Regular period of data in minutes\n previous_time : pd.Timestamp or None\n The last time in the database before the start of index.\n May be None.\n\n Raises\n ------\n BadAPIRequest\n If there are any errors\n \"\"\"\n if len(index) == 0:\n raise BadAPIRequest({'timestamp': ['No times to validate']})\n errors = []\n 
start = index[0]\n end = index[-1]\n freq = pd.Timedelta(f'{interval_length}min')\n expected_index = pd.date_range(start=start, end=end,\n freq=freq)\n missing_times = expected_index.difference(index)\n if len(missing_times) > 0:\n errors.append(f'Missing {len(missing_times)} timestamps. '\n f'First missing timestamp is {missing_times[0]}. '\n 'Uploads must have equally spaced timestamps '\n f'from {start} to {end} with {interval_length} '\n 'minutes between each timestamp.')\n\n extra_times = index.difference(expected_index)\n if len(extra_times) > 0:\n errors.append(f'{len(extra_times)} extra times present in index. '\n f'First extra time is {extra_times[0]}. '\n 'Uploads must have equally spaced timestamps '\n f'from {start} to {end} with {interval_length} '\n 'minutes between each timestamp.')\n if previous_time is not None:\n if (start - previous_time).total_seconds() % freq.total_seconds() != 0:\n errors.append(\n f'Start of timeseries is not a multiple of {interval_length} '\n 'minutes past the previous time of '\n f'{previous_time.isoformat()}.')\n if errors:\n raise BadAPIRequest({'timestamp': errors})\n\n\ndef validate_forecast_values(forecast_df):\n \"\"\"Validates that posted values are parseable and of the expected types.\n\n Parameters\n ----------\n forecast_df: Pandas DataFrame\n\n Raises\n ------\n BadAPIRequestError\n If an expected field is missing or contains an entry of incorrect\n type.\n \"\"\"\n errors = {}\n try:\n forecast_df['value'] = pd.to_numeric(forecast_df['value'],\n downcast='float')\n except ValueError:\n error = ('Invalid item in \"value\" field. Ensure that all values '\n 'are integers, floats, empty, NaN, or NULL.')\n errors.update({'value': [error]})\n except KeyError:\n errors.update({'value': ['Missing \"value\" field.']})\n try:\n forecast_df['timestamp'] = pd.to_datetime(\n forecast_df['timestamp'],\n utc=True)\n except ValueError:\n error = ('Invalid item in \"timestamp\" field. Ensure that '\n 'timestamps are ISO8601 compliant')\n errors.update({'timestamp': [error]})\n except KeyError:\n errors.update({'timestamp': ['Missing \"timestamp\" field.']})\n if errors:\n raise BadAPIRequest(errors)\n\n\ndef _restrict_in_extra(extra_params):\n match = re.search('\"restrict_upload([\"\\\\s\\\\:]*)true',\n extra_params, re.I)\n return match is not None\n\n\ndef _current_utc_timestamp():\n # for easier testing\n return pd.Timestamp.now(tz='UTC')\n\n\ndef restrict_forecast_upload_window(extra_parameters, get_forecast,\n first_time):\n \"\"\"\n Check that the first_time falls within the window before the\n next initialization time of the forecast from the current time.\n Accounts for forecast lead_time_to_start and interval_label.\n Requires 'read' permission on the forecast in question.\n\n Parameters\n ----------\n extra_parameters : str\n The extra_parameters string for the forecast. 
If\n '\"restrict_upload\": true' is not found in the string, no restriction\n occurs and this function returns immediately.\n get_forecast : func\n Function to get the forecast from the database.\n first_time : datetime-like\n First timestamp in the posted forecast timeseries.\n\n Raises\n ------\n NotFoundException\n When the user does not have 'read' permission for the forecast or\n it doesn't exist.\n BadAPIRequest\n If the first_time of the timeseries is not consistent with the\n next initialization time of the forecast.\n \"\"\"\n if not _restrict_in_extra(extra_parameters):\n return\n\n try:\n fx_dict = get_forecast().copy()\n except (StorageAuthError, NotFoundException):\n raise NotFoundException(errors={\n '404': 'Cannot read forecast or forecast does not exist'})\n # we don't care about the axis or constant values for probabilistic\n fx_dict['site'] = Site('name', 0, 0, 0, 'UTC')\n fx = Forecast.from_dict(fx_dict)\n next_issue_time = fx_utils.get_next_issue_time(\n fx, _current_utc_timestamp())\n expected_start = next_issue_time + fx.lead_time_to_start\n if fx.interval_label == 'ending':\n expected_start += fx.interval_length\n if first_time != expected_start:\n raise BadAPIRequest(errors={'issue_time': (\n f'Currently only accepting forecasts issued for {next_issue_time}.'\n f' Expecting forecast series to start at {expected_start}.'\n )})\n\n\ndef validate_latitude_longitude():\n \"\"\"Validates latitude and longitude parameters\n\n Returns\n -------\n latitude: float\n longitude: float\n\n Raises\n ------\n BadAPIRequest\n If latitude and longitude values are not provided\n or not in range.\n \"\"\"\n errors = {}\n lat = request.args.get('latitude', None)\n lon = request.args.get('longitude', None)\n if lat is not None:\n try:\n lat = float(lat)\n except ValueError:\n errors.update({'latitude': ['Must be a float']})\n else:\n if lat > 90 or lat < -90:\n errors.update({\n 'latitude': ['Must be within [-90, 90].']})\n else:\n errors.update({'latitude': ['Must provide a latitude']})\n if lon is not None:\n try:\n lon = float(lon)\n except ValueError:\n errors.update({'longitude': ['Must be a float']})\n else:\n if lon > 180 or lon < -180:\n errors.update({'longitude':\n ['Must be within [-180, 180].']})\n else:\n errors.update({'longitude': ['Must provide a longitude']})\n if errors:\n raise BadAPIRequest(errors)\n return lat, lon\n\n\ndef validate_event_data(data):\n \"\"\"\n Validate that the data is either 0 or 1\n\n Parameters\n ----------\n data : pd.DataFrame with 'value' column\n\n Raises\n ------\n BadAPIRequest\n If there are any errors\n \"\"\"\n isbool = (data['value'] == 0) | (data['value'] == 1)\n if not isbool.all():\n indx = isbool.reset_index()[~isbool.values].index.astype('str')\n raise BadAPIRequest({'value': [\n 'Invalid event values at locations %s' % ', '.join(indx)]})\n"
]
| [
[
"pandas.isnull",
"pandas.to_datetime",
"pandas.Timedelta",
"pandas.DataFrame",
"pandas.date_range",
"pandas.Timestamp.now",
"pandas.Timestamp",
"pandas.read_csv",
"pandas.to_numeric"
]
]
|
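The request_handling module above accumulates one error list per field and raises a single BadAPIRequest at the end, so a client sees every problem in one response. Below is a minimal standalone sketch of that per-column validation pattern, using plain pandas and numpy only; ValueError stands in for BadAPIRequest, and `validate_observations` is a hypothetical name, not the API's function.

```python
from collections import defaultdict

import numpy as np
import pandas as pd


def validate_observations(df, quality_flag_range=(0, 1)):
    # Accumulate one error list per field, as validate_observation_values
    # does, so every bad column is reported at once.
    errors = defaultdict(list)
    try:
        df['value'] = pd.to_numeric(df['value'], downcast='float')
    except (ValueError, KeyError):
        errors['value'].append('Missing or non-numeric "value" field.')
    try:
        df['timestamp'] = pd.to_datetime(df['timestamp'], utc=True)
    except (ValueError, KeyError):
        errors['timestamp'].append('Missing or non-ISO8601 "timestamp" field.')
    flags = df.get('quality_flag')
    if flags is None:
        errors['quality_flag'].append('Missing "quality_flag" field.')
    elif not (np.isclose(flags.mod(1), 0).all()
              and flags.between(*quality_flag_range).all()):
        errors['quality_flag'].append('Non-integer or out-of-range "quality_flag".')
    if errors:
        raise ValueError(dict(errors))  # the API raises BadAPIRequest(errors) here
    return df


df = pd.DataFrame({'timestamp': ['2020-01-01T00:00Z', '2020-01-01T00:05Z'],
                   'value': ['1.5', '2'], 'quality_flag': [0, 1]})
print(validate_observations(df).dtypes)  # value downcast to float32, timestamp tz-aware
```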
jungtaekkim/QMCSoftware | [
"4518d26b06ef737797d5e522cb61d9f7b516d74e"
]
| [
"qmcpy/stopping_criterion/cub_qmc_ml.py"
]
| [
"from ._stopping_criterion import StoppingCriterion\nfrom ..accumulate_data import MLQMCData\nfrom ..discrete_distribution import Lattice\nfrom ..true_measure import Gaussian\nfrom ..integrand import MLCallOptions\nfrom ..util import MaxSamplesWarning, ParameterError\nfrom numpy import *\nfrom scipy.stats import norm\nfrom time import time\nimport warnings\n\n\nclass CubQMCML(StoppingCriterion):\n \"\"\"\n Stopping criterion based on multi-level quasi-Monte Carlo.\n\n >>> mlco = MLCallOptions(Lattice(seed=7))\n >>> sc = CubQMCML(mlco,abs_tol=.05)\n >>> solution,data = sc.integrate()\n >>> solution\n 10.442...\n >>> data\n Solution: 10.4424 \n MLCallOptions (Integrand Object)\n option european\n sigma 0.200\n k 100\n r 0.050\n t 1\n b 85\n Lattice (DiscreteDistribution Object)\n d 2^(6)\n randomize 1\n order natural\n seed 985802\n mimics StdUniform\n Gaussian (TrueMeasure Object)\n mean 0\n covariance 1\n decomp_type pca\n CubQMCML (StoppingCriterion Object)\n rmse_tol 0.019\n n_init 2^(8)\n n_max 10000000000\n replications 2^(5)\n MLQMCData (AccumulateData Object)\n levels 7\n dimensions [ 1. 2. 4. 8. 16. 32. 64.]\n n_level [4096. 512. 256. 256. 256. 256. 256.]\n mean_level [1.005e+01 1.807e-01 1.033e-01 5.482e-02 2.823e-02 1.397e-02 7.290e-03]\n var_level [8.376e-05 2.660e-05 1.911e-05 1.594e-05 3.660e-06 1.478e-06 3.424e-07]\n bias_estimate 0.007\n n_total 188416\n time_integrate ...\n \n \n References:\n \n [1] M.B. Giles and B.J. Waterhouse. 'Multilevel quasi-Monte Carlo path simulation'.\n pp.165-181 in Advanced Financial Modelling, in Radon Series on Computational and Applied Mathematics,\n de Gruyter, 2009. http://people.maths.ox.ac.uk/~gilesm/files/radon.pdf\n \"\"\"\n\n parameters = ['rmse_tol','n_init','n_max','replications']\n\n def __init__(self, integrand, abs_tol=.05, alpha=.01, rmse_tol=None, n_init=256., n_max=1e10, replications=32.):\n \"\"\"\n Args:\n integrand (Integrand): integrand with multi-level g method\n abs_tol (float): absolute tolerance\n alpha (float): uncertaintly level.\n If rmse_tol not supplied, then rmse_tol = abs_tol/norm.ppf(1-alpha/2)\n rmse_tol (float): root mean squared error\n If supplied (not None), then absolute tolerance and alpha are ignored\n in favor of the rmse tolerance\n n_max (int): maximum number of samples\n replications (int): number of replications on each level\n \"\"\"\n # initialization\n if rmse_tol:\n self.rmse_tol = float(rmse_tol)\n else: # use absolute tolerance\n self.rmse_tol = float(abs_tol) / norm.ppf(1-alpha/2)\n self.n_init = float(n_init)\n self.n_max = float(n_max)\n self.replications = float(replications)\n # QMCPy Objs\n self.integrand = integrand\n self.true_measure = self.integrand.true_measure\n self.discrete_distrib = self.integrand.discrete_distrib\n # Verify Compliant Construction\n allowed_levels = ['adaptive-multi']\n allowed_distribs = [\"Lattice\", \"Sobol\",\"Halton\"]\n super(CubQMCML,self).__init__(allowed_levels, allowed_distribs)\n\n def integrate(self):\n \"\"\" See abstract method. 
\"\"\"\n # Construct AccumulateData Object to House Integration Data\n self.data = MLQMCData(self, self.integrand, self.true_measure, self.discrete_distrib, self.n_init, self.replications)\n t_start = time()\n while True:\n self.data.update_data()\n self.data.eval_level[:] = False\n if self.data.var_level.sum() > (self.rmse_tol**2/2.):\n # double N_l on level with largest V_l/(2^l*N_l)\n efficient_level = argmax(self.data.cost_level)\n self.data.eval_level[efficient_level] = True\n elif self.data.bias_estimate > (self.rmse_tol/sqrt(2.)):\n # add another level\n self.data._add_level()\n else:\n # both conditions met\n break\n total_next_samples = (self.data.replications*self.data.eval_level*self.data.n_level*2).sum()\n if (self.data.n_total + total_next_samples) > self.n_max:\n warning_s = \"\"\"\n Alread generated %d samples.\n Trying to generate %d new samples, which would exceed n_max = %d.\n Stopping integration process.\n Note that error tolerances may no longer be satisfied\"\"\" \\\n % (int(self.data.n_total), int(total_next_samples), int(self.n_max))\n warnings.warn(warning_s, MaxSamplesWarning)\n break\n self.data.time_integrate = time() - t_start\n return self.data.solution,self.data\n \n def set_tolerance(self, abs_tol=None, alpha=.01, rmse_tol=None):\n \"\"\"\n See abstract method. \n \n Args:\n integrand (Integrand): integrand with multi-level g method\n abs_tol (float): absolute tolerance. Reset if supplied, ignored if not. \n alpha (float): uncertaintly level.\n If rmse_tol not supplied, then rmse_tol = abs_tol/norm.ppf(1-alpha/2)\n rel_tol (float): relative tolerance. Reset if supplied, ignored if not.\n Takes priority over aboluste tolerance and alpha if supplied. \n \"\"\"\n if rmse_tol != None:\n self.rmse_tol = float(rmse_tol)\n elif abs_tol != None:\n self.rmse_tol = (float(abs_tol) / norm.ppf(1-alpha/2.))\n"
]
| [
[
"scipy.stats.norm.ppf"
]
]
|
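One detail worth pulling out of CubQMCML.__init__ above: when only abs_tol is supplied, it is converted to an RMSE tolerance through the two-sided normal quantile. A quick check of that conversion, which reproduces the rmse_tol 0.019 shown in the doctest output:

```python
from scipy.stats import norm

# With alpha = .01 the two-sided quantile is norm.ppf(0.995) ~ 2.576,
# so an absolute tolerance of .05 becomes rmse_tol ~ .05 / 2.576.
abs_tol, alpha = .05, .01
rmse_tol = abs_tol / norm.ppf(1 - alpha / 2)
print(round(rmse_tol, 3))  # 0.019, matching the doctest above
```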
kfranson/VIP | [
"5975fd6ce7c02adace013c8a2412062ac2b92bbc"
]
| [
"vip_hci/preproc/rescaling.py"
]
| [
"#! /usr/bin/env python\n\n\"\"\"\nModule with frame px resampling/rescaling functions.\n\"\"\"\n__author__ = 'Carlos Alberto Gomez Gonzalez, V. Christiaens, R. Farkas'\n__all__ = ['frame_px_resampling',\n 'cube_px_resampling',\n 'cube_rescaling_wavelengths',\n 'frame_rescaling',\n 'check_scal_vector',\n 'find_scal_vector',\n 'scale_fft']\n\nimport numpy as np\nimport warnings\ntry:\n import cv2\n no_opencv = False\nexcept ImportError:\n warnings.warn(\"Opencv python bindings are missing.\", ImportWarning)\n no_opencv = True\n\nfrom scipy.ndimage.interpolation import geometric_transform, zoom\nfrom scipy.optimize import minimize\nfrom ..var import frame_center, get_square\nfrom .subsampling import cube_collapse\n\n\ndef cube_px_resampling(array, scale, imlib='vip-fft', interpolation='lanczos4',\n verbose=True):\n \"\"\"\n Resample the frames of a cube with a single scale factor. Can deal with NaN \n values.\n\n Wrapper of ``frame_px_resampling``. Useful when we need to upsample\n (upscaling) or downsample (pixel binning) a set of frames, e.g. an ADI cube.\n\n Parameters\n ----------\n array : 3d numpy ndarray\n Input cube, 3d array.\n scale : int, float or tuple\n Scale factor for upsampling or downsampling the frames in the cube. If\n a tuple it corresponds to the scale along x and y.\n imlib : str, optional\n See the documentation of the ``vip_hci.preproc.frame_px_resampling``\n function.\n interpolation : str, optional\n See the documentation of the ``vip_hci.preproc.frame_px_resampling``\n function.\n verbose : bool, optional\n Whether to print out additional info such as the new cube shape.\n\n Returns\n -------\n array_resc : numpy ndarray\n Output cube with resampled frames.\n\n \"\"\"\n if array.ndim != 3:\n raise TypeError('Input array is not a cube or 3d array.')\n\n array_resc = []\n for i in range(array.shape[0]):\n imresc = frame_px_resampling(array[i], scale=scale, imlib=imlib,\n interpolation=interpolation)\n array_resc.append(imresc)\n\n array_resc = np.array(array_resc)\n\n if verbose:\n print(\"Cube successfully rescaled\")\n print(\"New shape: {}\".format(array_resc.shape))\n return array_resc\n\n\ndef frame_px_resampling(array, scale, imlib='vip-fft', interpolation='lanczos4',\n verbose=False):\n \"\"\"\n Resample the pixels of a frame wrt to the center, changing the frame size.\n Can deal with NaN values.\n \n If ``scale`` < 1 then the frame is downsampled and if ``scale`` > 1 then its\n pixels are upsampled.\n\n Parameters\n ----------\n array : numpy ndarray\n Input frame, 2d array.\n scale : int, float or tuple\n Scale factor for upsampling or downsampling the frame. If a tuple it\n corresponds to the scale along x and y.\n imlib : {'ndimage', 'opencv', 'vip-fft'}, optional\n Library used for image transformations. 'vip-fft' corresponds to a \n FFT-based rescaling algorithm implemented in VIP \n (``vip_hci.preproc.scale_fft``).\n interpolation : str, optional\n For 'ndimage' library: 'nearneig', bilinear', 'biquadratic', 'bicubic',\n 'biquartic', 'biquintic'. The 'nearneig' interpolation is the fastest\n and the 'biquintic' the slowest. 
The 'nearneig' is the worst\n option for interpolation of noisy astronomical images.\n For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.\n The 'nearneig' interpolation is the fastest and the 'lanczos4' the\n slowest and accurate.\n verbose : bool, optional\n Whether to print out additional info such as the new image shape.\n\n Returns\n -------\n array_resc : numpy ndarray\n Output resampled frame.\n\n \"\"\"\n if array.ndim != 2:\n raise TypeError('Input array is not a frame or 2d array')\n\n if isinstance(scale, tuple):\n scale_x, scale_y = scale\n elif isinstance(scale, (float, int)):\n scale_x = scale\n scale_y = scale\n else:\n raise TypeError('`scale` must be float, int or tuple')\n\n # Replace any NaN with real values before scaling\n mask = None\n nan_mask = np.isnan(array)\n if np.any(nan_mask):\n medval = np.nanmedian(array)\n array[nan_mask] = medval\n\n mask = np.zeros_like(array)\n mask[nan_mask] = 1\n\n if imlib == 'ndimage':\n if interpolation == 'nearneig':\n order = 0\n elif interpolation == 'bilinear':\n order = 1\n elif interpolation == 'biquadratic':\n order = 2\n elif interpolation == 'bicubic':\n order = 3\n elif interpolation == 'biquartic' or interpolation == 'lanczos4':\n order = 4\n elif interpolation == 'biquintic':\n order = 5\n else:\n raise TypeError('Scipy.ndimage interpolation method not recognized')\n\n if mask is not None:\n mask = zoom(mask, zoom=(scale_y, scale_x), order=order)\n array_resc = zoom(array, zoom=(scale_y, scale_x), order=order)\n array_resc /= scale_y * scale_x\n \n elif imlib == 'opencv':\n if no_opencv:\n msg = 'Opencv python bindings cannot be imported. Install opencv or'\n msg += ' set imlib to ndimage'\n raise RuntimeError(msg)\n\n if interpolation == 'bilinear':\n intp = cv2.INTER_LINEAR\n elif interpolation == 'bicubic':\n intp = cv2.INTER_CUBIC\n elif interpolation == 'nearneig':\n intp = cv2.INTER_NEAREST\n elif interpolation == 'lanczos4':\n intp = cv2.INTER_LANCZOS4\n else:\n raise TypeError('Opencv interpolation method not recognized')\n \n if mask is not None:\n mask = cv2.resize(mask.astype(np.float32), (0, 0), fx=scale_x,\n fy=scale_y, interpolation=intp)\n \n array_resc = cv2.resize(array.astype(np.float32), (0, 0), fx=scale_x,\n fy=scale_y, interpolation=intp)\n array_resc /= scale_y * scale_x\n\n elif imlib == 'vip-fft':\n if scale_x != scale_y:\n msg='FFT scaling only supports identical factors along x and y'\n raise ValueError(msg)\n if array.shape[0] != array.shape[1]:\n msg='FFT scaling only supports square input arrays'\n raise ValueError(msg) \n \n # make array with even dimensions before FFT-scaling\n if array.shape[0]%2:\n odd=True\n array_even = np.zeros([array.shape[0]+1,array.shape[1]+1])\n array_even[1:,1:] = array\n array = array_even\n else:\n odd = False\n\n if mask is not None:\n if odd:\n mask_even = np.zeros([mask.shape[0]+1,mask.shape[1]+1])\n mask_even[1:,1:] = mask\n mask = mask_even \n mask = scale_fft(mask, scale_x)\n if odd:\n mask_odd = np.zeros([mask.shape[0]-1,mask.shape[1]-1])\n mask_odd = mask[1:,1:]\n mask = mask_odd\n \n array_resc = scale_fft(array, scale_x)\n if odd:\n array = np.zeros([array_resc.shape[0]-1,array_resc.shape[1]-1])\n array = array_resc[1:,1:]\n array_resc = array\n \n else:\n raise ValueError('Image transformation library not recognized')\n\n # Place back NaN values in scaled array\n if mask is not None:\n array_resc[mask >= 0.5] = np.nan\n \n if verbose:\n print(\"Image successfully rescaled\")\n print(\"New shape: {}\".format(array_resc.shape))\n\n 
return array_resc\n\n\ndef cube_rescaling_wavelengths(cube, scal_list, full_output=True, inverse=False,\n y_in=None, x_in=None, imlib='vip-fft',\n interpolation='lanczos4', collapse='median',\n pad_mode='reflect'):\n \"\"\"\n Scale/Descale a cube by scal_list, with padding. Can deal with NaN values.\n\n Wrapper to scale or descale a cube by factors given in scal_list,\n without any loss of information (zero-padding if scaling > 1).\n Important: in case of IFS data, the scaling factors in scal_list should be\n >= 1 (ie. provide the scaling factors as for scaling to the longest\n wavelength channel).\n\n Parameters\n ----------\n cube: 3D-array\n Data cube with frames to be rescaled.\n scal_list: 1D-array\n Vector of same dimension as the first dimension of datacube, containing\n the scaling factor for each frame.\n full_output: bool, optional\n Whether to output just the rescaled cube (False) or also its median,\n the new y and x shapes of the cube, and the new centers cy and cx of the\n frames (True).\n inverse: bool, optional\n Whether to inverse the scaling factors in scal_list before applying them\n or not; i.e. True is to descale the cube (typically after a first scaling\n has already been done)\n y_in, x_in: int\n Initial y and x sizes, required for ``inverse=True``. In case the cube is\n descaled, these values will be used to crop back the cubes/frames to\n their original size.\n imlib : {'opencv', 'ndimage', 'vip-fft'}, str optional\n Library used for image transformations. Opencv is faster than ndimage or\n skimage. 'vip-fft' corresponds to a FFT-based rescaling algorithm \n implemented in VIP (``vip_hci.preproc.scale_fft``).\n interpolation : str, optional\n For 'ndimage' library: 'nearneig', bilinear', 'bicuadratic', 'bicubic',\n 'biquartic', 'biquintic'. The 'nearneig' interpolation is the fastest\n and the 'biquintic' the slowest. The 'nearneig' is the poorer\n option for interpolation of noisy astronomical images.\n For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.\n The 'nearneig' interpolation is the fastest and the 'lanczos4' the\n slowest and accurate. 'lanczos4' is the default.\n collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional\n Sets the way of collapsing the frames for producing a final image.\n pad_mode : str, optional\n One of the following string values:\n\n ``'constant'``\n pads with a constant value\n ``'edge'``\n pads with the edge values of array\n ``'linear_ramp'``\n pads with the linear ramp between end_value and the array edge\n value.\n ``'maximum'``\n pads with the maximum value of all or part of the vector along\n each axis\n ``'mean'``\n pads with the mean value of all or part of the vector along each\n axis\n ``'median'``\n pads with the median value of all or part of the vector along\n each axis\n ``'minimum'``\n pads with the minimum value of all or part of the vector along\n each axis\n ``'reflect'``\n pads with the reflection of the vector mirrored on the first and\n last values of the vector along each axis\n ``'symmetric'``\n pads with the reflection of the vector mirrored along the edge\n of the array\n ``'wrap'``\n pads with the wrap of the vector along the axis. 
The first\n values are used to pad the end and the end values are used to\n pad the beginning\n\n Returns\n -------\n frame: 2d array\n The median of the rescaled cube.\n cube : 3d array\n [full_output] rescaled cube\n frame : 2d array\n [full_output] median of the rescaled cube\n y,x,cy,cx : float\n [full_output] New y and x shapes of the cube, and the new centers cy and\n cx of the frames\n\n \"\"\"\n n, y, x = cube.shape\n\n max_sc = np.amax(scal_list)\n\n if not inverse and max_sc > 1:\n new_y = int(np.ceil(max_sc * y))\n new_x = int(np.ceil(max_sc * x))\n if (new_y - y) % 2 != 0:\n new_y += 1\n if (new_x - x) % 2 != 0:\n new_x += 1\n pad_len_y = (new_y - y) // 2\n pad_len_x = (new_x - x) // 2\n pad_width = ((0, 0), (pad_len_y, pad_len_y), (pad_len_x, pad_len_x))\n big_cube = np.pad(cube, pad_width, pad_mode)\n else:\n big_cube = cube.copy()\n\n n, y, x = big_cube.shape\n cy, cx = frame_center(big_cube[0])\n\n if inverse:\n scal_list = 1. / scal_list\n cy, cx = frame_center(cube[0])\n\n # (de)scale the cube, so that a planet would now move radially\n cube = _cube_resc_wave(big_cube, scal_list, ref_xy=(cx, cy),\n imlib=imlib, interpolation=interpolation)\n frame = cube_collapse(cube, collapse)\n\n if inverse and max_sc > 1:\n if y_in is None or x_in is None:\n raise ValueError(\"You need to provide y_in and x_in when \"\n \"inverse=True!\")\n siz = max(y_in, x_in)\n if frame.shape[0] > siz:\n frame = get_square(frame, siz, cy, cx)\n if full_output:\n n_z = cube.shape[0]\n array_old = cube.copy()\n cube = np.zeros([n_z, siz, siz])\n for zz in range(n_z):\n cube[zz] = get_square(array_old[zz], siz, cy, cx)\n\n if full_output:\n return cube, frame, y, x, cy, cx\n else:\n return frame\n\n\ndef _scale_func(output_coords, ref_xy=0, scaling=1.0, scale_y=None,\n scale_x=None):\n \"\"\"\n For each coordinate point in a new scaled image (output_coords),\n coordinates in the image before the scaling are returned. This scaling\n function is used within geometric_transform which, for each point in the\n output image, will compute the (spline) interpolated value at the\n corresponding frame coordinates before the scaling.\n \"\"\"\n ref_x, ref_y = ref_xy\n if scale_y is None:\n scale_y = scaling\n if scale_x is None:\n scale_x = scaling\n return (ref_y + (output_coords[0] - ref_y) / scale_y,\n ref_x + (output_coords[1] - ref_x) / scale_x)\n\n\ndef frame_rescaling(array, ref_xy=None, scale=1.0, imlib='vip-fft',\n interpolation='lanczos4', scale_y=None, scale_x=None):\n \"\"\"\n Rescale a frame by a factor wrt a reference point.\n\n The reference point is by default the center of the frame (typically the\n exact location of the star). However, it keeps the same dimensions.\n\n Parameters\n ----------\n array : numpy ndarray\n Input frame, 2d array.\n ref_xy : float, optional\n Coordinates X,Y of the point wrt which the rescaling will be\n applied. By default the rescaling is done with respect to the center\n of the frame.\n scale : float\n Scaling factor. If > 1, it will upsample the input array equally\n along y and x by this factor.\n imlib : {'ndimage', 'opencv', 'vip-fft'}, optional\n Library used for image transformations. 'vip-fft' corresponds to a \n FFT-based rescaling algorithm implemented in VIP \n (``vip_hci.preproc.scale_fft``).\n interpolation : str, optional\n For 'ndimage' library: 'nearneig', bilinear', 'biquadratic', 'bicubic',\n 'biquartic', 'biquintic'. The 'nearneig' interpolation is the fastest\n and the 'biquintic' the slowest. 
The 'nearneig' is the worst\n option for interpolation of noisy astronomical images.\n For 'opencv' library: 'nearneig', 'bilinear', 'bicubic', 'lanczos4'.\n The 'nearneig' interpolation is the fastest and the 'lanczos4' the\n slowest and accurate.\n scale_y : float\n Scaling factor only for y axis. If provided, it takes priority on\n scale parameter.\n scale_x : float\n Scaling factor only for x axis. If provided, it takes priority on\n scale parameter.\n\n Returns\n -------\n array_out : numpy ndarray\n Resulting frame.\n\n \"\"\"\n if array.ndim != 2:\n raise TypeError('Input array is not a frame or 2d array.')\n\n if scale_y is None:\n scale_y = scale\n if scale_x is None:\n scale_x = scale\n\n outshape = array.shape\n if ref_xy is None:\n ref_xy = frame_center(array)\n \n # Replace any NaN with real values before scaling\n mask = None\n nan_mask = np.isnan(array)\n if np.any(nan_mask):\n medval = np.nanmedian(array)\n array[nan_mask] = medval\n\n mask = np.zeros_like(array)\n mask[nan_mask] = 1\n \n if imlib == 'ndimage':\n if interpolation == 'nearneig':\n order = 0\n elif interpolation == 'bilinear':\n order = 1\n elif interpolation == 'biquadratic':\n order = 2\n elif interpolation == 'bicubic':\n order = 3\n elif interpolation == 'biquartic' or interpolation == 'lanczos4':\n order = 4\n elif interpolation == 'biquintic':\n order = 5\n else:\n raise TypeError(\n 'Scipy.ndimage interpolation method not recognized')\n\n array_out = geometric_transform(array, _scale_func, order=order,\n output_shape=outshape,\n extra_keywords={'ref_xy': ref_xy,\n 'scaling': scale,\n 'scale_y': scale_y,\n 'scale_x': scale_x})\n\n elif imlib == 'opencv':\n if no_opencv:\n msg = 'Opencv python bindings cannot be imported. Install '\n msg += ' opencv or set imlib to skimage'\n raise RuntimeError(msg)\n\n if interpolation == 'bilinear':\n intp = cv2.INTER_LINEAR\n elif interpolation == 'bicubic':\n intp = cv2.INTER_CUBIC\n elif interpolation == 'nearneig':\n intp = cv2.INTER_NEAREST\n elif interpolation == 'lanczos4':\n intp = cv2.INTER_LANCZOS4\n else:\n raise TypeError('Opencv interpolation method not recognized')\n\n M = np.array([[scale_x, 0, (1. - scale_x) * ref_xy[0]],\n [0, scale_y, (1. 
- scale_y) * ref_xy[1]]])\n array_out = cv2.warpAffine(array.astype(np.float32), M, outshape,\n flags=intp)\n\n elif imlib == 'vip-fft':\n if scale_x != scale_y:\n msg='FFT scaling only supports identical factors along x and y'\n raise ValueError(msg)\n if array.shape[0] != array.shape[1]:\n msg='FFT scaling only supports square input arrays'\n raise ValueError(msg) \n \n # make array with even dimensions before FFT-scaling\n if array.shape[0]%2:\n odd=True\n array_even = np.zeros([array.shape[0]+1,array.shape[1]+1])\n array_even[1:,1:] = array\n array = array_even\n else:\n odd = False\n\n if mask is not None:\n if odd:\n mask_even = np.zeros([mask.shape[0]+1,mask.shape[1]+1])\n mask_even[1:,1:] = mask\n mask = mask_even \n mask = scale_fft(mask, scale_x, ori_dim=True)\n if odd:\n mask_odd = np.zeros([mask.shape[0]-1,mask.shape[1]-1])\n mask_odd = mask[1:,1:]\n mask = mask_odd\n \n array_out = scale_fft(array, scale_x, ori_dim=True)\n if odd:\n array = np.zeros([array_out.shape[0]-1,array_out.shape[1]-1])\n array = array_out[1:,1:]\n array_out = array\n\n else:\n raise ValueError('Image transformation library not recognized')\n\n # Place back NaN values in scaled array\n if mask is not None:\n array_out[mask >= 0.5] = np.nan\n\n array_out /= scale_y * scale_x\n return array_out\n\n\ndef _cube_resc_wave(array, scaling_list, ref_xy=None, imlib='vip-fft',\n interpolation='lanczos4', scaling_y=None, scaling_x=None):\n \"\"\"\n Rescale a cube by factors from ``scaling_list`` wrt a position.\n\n Parameters\n ----------\n array : numpy ndarray\n Input 3d array, cube.\n scaling_list : 1D-array\n Scale corresponding to each frame in the cube.\n ref_xy : float, optional\n Coordinates X,Y of the point with respect to which the rescaling will be\n performed. By default the rescaling is done with respect to the center\n of the frames; central pixel if the frames have odd size.\n imlib : str optional\n See the documentation of ``vip_hci.preproc.cube_rescaling_wavelengths``.\n interpolation : str, optional\n See the documentation of ``vip_hci.preproc.cube_rescaling_wavelengths``.\n scaling_y : 1D-array or list\n Scaling factor only for y axis. If provided, it takes priority on\n scaling_list.\n scaling_x : 1D-array or list\n Scaling factor only for x axis. If provided, it takes priority on\n scaling_list.\n\n Returns\n -------\n array_sc : numpy ndarray\n Resulting cube with rescaled frames.\n\n \"\"\"\n\n if array.ndim != 3:\n raise TypeError('Input array is not a cube or 3d array')\n\n array_sc = []\n if scaling_list is None:\n scaling_list = [None]*array.shape[0]\n for i in range(array.shape[0]):\n array_sc.append(frame_rescaling(array[i], ref_xy=ref_xy,\n scale=scaling_list[i], imlib=imlib,\n interpolation=interpolation,\n scale_y=scaling_y, scale_x=scaling_x))\n return np.array(array_sc)\n\n\ndef check_scal_vector(scal_vec):\n \"\"\"\n Turn wavelengths (IFS data) into a scaling factor list.\n\n It checks that it has the right format: all scaling factors should be >= 1\n (i.e. 
the scaling should be done wrt the longest wavelength of the cube).\n\n Parameters\n ----------\n scal_vec: 1d array or list\n Vector with the wavelengths.\n\n Returns\n -------\n scal_vec: numpy ndarray, 1d\n Vector containing the scaling factors (after correction to comply with\n the condition >= 1).\n\n \"\"\"\n if not isinstance(scal_vec, (list, np.ndarray)):\n raise TypeError('`Scal_vec` is neither a list or an np.ndarray')\n\n scal_vec = np.array(scal_vec)\n\n # checking if min factor is 1:\n if scal_vec.min() != 1:\n scal_vec = 1 / scal_vec\n scal_vec /= scal_vec.min()\n\n return scal_vec\n\n\ndef find_scal_vector(cube, lbdas, fluxes, mask=None, nfp=2, fm=\"stddev\", \n simplex_options=None, debug=False, **kwargs):\n \"\"\"\n Find the optimal scaling factor for the channels of an IFS cube (or of \n dual-band pairs of images).\n\n The algorithm finds the optimal scaling factor that minimizes residuals in\n the rescaled frames. It takes the inverse of the wavelength vector as a \n first guess, and uses a similar method as the negative fake companion \n technique, but minimizing residuals in either a mask or the whole field.\n\n Parameters\n ----------\n cube: 3D-array\n Data cube with frames to be rescaled.\n lbdas: 1d array or list\n Vector with the wavelengths, used for first guess on scaling factor.\n fluxes: 1d array or list\n Vector with the (unsaturated) fluxes at the different wavelengths, \n used for first guess on flux factor.\n mask: 2D-array, opt\n Binary mask, with ones where the residual intensities should be \n evaluated. If None is provided, the whole field is used.\n nfp: int, opt, {1,2}\n Number of free parameters: spatial scaling alone or spatial scaling + \n flux scaling.\n fm: str, opt, {\"sum\",\"stddev\"}\n Figure of merit to use: sum of squared residuals or stddev of residual \n pixels.\n options: dict, optional\n The scipy.optimize.minimize options.\n **kwargs: optional\n Optional arguments to the scipy.optimize.minimize function\n \n Returns\n -------\n scal_vec: numpy ndarray, 1d\n Vector containing the scaling factors (after correction to comply with\n the condition >= 1).\n if nfp==2, also returns:\n flux_vec: numpy ndarray, 1d\n Vector containing the associated flux factors.\n \"\"\"\n\n scal_vec_ini = lbdas[-1]/lbdas\n n_z = len(lbdas)\n if n_z != len(fluxes) or n_z != cube.shape[0]:\n msg = \"first axis of cube, fluxes and lbda must have same length\"\n raise TypeError(msg)\n\n if simplex_options is None:\n simplex_options = {'xatol': 1e-6, 'fatol': 1e-6, 'maxiter': 800,\n 'maxfev': 2000}\n scal_vec = np.ones(n_z)\n flux_vec = np.ones(n_z)\n for z in range(n_z-1):\n flux_scal = fluxes[-1]/fluxes[z]\n cube_tmp = np.array([cube[z],cube[-1]])\n if nfp==1:\n p_ini = (scal_vec_ini[z],)\n solu = minimize(_chisquare_scal, p_ini, args=(cube_tmp, flux_scal, \n mask, fm),\n method='Nelder-Mead', options=simplex_options, \n **kwargs)\n scal_fac, = solu.x\n flux_fac = flux_scal\n else:\n p_ini = (scal_vec_ini[z],flux_scal)\n solu = minimize(_chisquare_scal_2fp, p_ini, args=(cube_tmp, mask, fm),\n method='Nelder-Mead', options=simplex_options, \n **kwargs) \n scal_fac, flux_fac = solu.x\n if debug:\n print(\"channel {:.0f}:\".format(z), solu.x)\n scal_vec[z] = scal_fac\n flux_vec[z] = flux_fac\n\n scal_vec = check_scal_vector(scal_vec)\n \n return scal_vec, flux_vec\n\n\ndef _find_indices_sdi(wl, dist, index_ref, fwhm, delta_sep=1, nframes=None,\n debug=False):\n \"\"\"\n Find optimal wavelengths which minimize self-subtraction in model PSF\n subtraction.\n\n 
Parameters\n ----------\n wl : numpy ndarray or list\n Vector with the scaling factors.\n dist : float\n Separation or distance (in pixels) from the center of the array.\n index_ref : int\n The `wl` index for which we are finding the pairs.\n fwhm : float\n Mean FWHM of all the wavelengths (in pixels).\n delta_sep : float, optional\n The threshold separation in terms of the mean FWHM.\n nframes : None or int, optional\n Must be an even value. If not None, then at most ``nframes``\n adjacent frames are kept.\n debug : bool, optional\n If True it prints out debug information.\n\n Returns\n -------\n indices : numpy ndarray\n List of good indices.\n\n \"\"\"\n wl = np.asarray(wl)\n wl_ref = wl[index_ref]\n sep_lft = (wl_ref - wl) / wl_ref * ((dist + fwhm * delta_sep) / fwhm)\n sep_rgt = (wl - wl_ref) / wl_ref * ((dist - fwhm * delta_sep) / fwhm)\n map_lft = sep_lft >= delta_sep\n map_rgt = sep_rgt >= delta_sep\n indices = np.nonzero(map_lft | map_rgt)[0]\n\n if debug:\n print(\"dist: {}, index_ref: {}\".format(dist, index_ref))\n print(\"sep_lft:\", \" \".join([\"{:+.2f}\".format(x) for x in sep_lft]))\n print(\"sep_rgt:\", \" \".join([\"{:+.2f}\".format(x) for x in sep_rgt]))\n print(\"indices:\", indices)\n print(\"indices size: {}\".format(indices.size))\n\n if indices.size == 0:\n raise RuntimeError(\"No frames left after radial motion threshold. Try \"\n \"decreasing the value of `delta_sep`\")\n\n if nframes is not None:\n i1 = map_lft.sum()\n window = nframes // 2\n if i1 - window < 0 or i1 + window > indices[-1]:\n window = nframes\n ind1 = max(0, i1 - window)\n ind2 = min(wl.size, i1 + window)\n indices = indices[ind1: ind2]\n\n if indices.size < 2:\n raise RuntimeError(\"No frames left after radial motion threshold. \"\n \"Try decreasing the value of `delta_sep` or \"\n \"`nframes`\")\n\n if debug:\n print(\"indices (nframes):\", indices)\n\n return indices\n\n\ndef _chisquare_scal(modelParameters, cube, flux_fac=1, mask=None, fm='sum'):\n \"\"\"\n Calculate the reduced chi2:\n \\chi^2_r = \\frac{1}{N-3}\\sum_{j=1}^{N} |I_j|,\n where N is the number of pixels within a circular aperture centered on the \n first estimate of the planet position, and I_j the j-th pixel intensity.\n \n Parameters\n ---------- \n modelParameters: tuple\n The model parameters, typically (scal_fac, flux_fac).\n cube: numpy.array\n The cube of fits images expressed as a numpy.array.\n mask: 2D-array, opt\n Binary mask, with ones where the residual intensities should be \n evaluated. 
If None is provided, the whole field is used.\n fm: str, opt, {\"sum\",\"stddev\"}\n Figure of merit to use: sum of squared residuals or stddev of residual \n pixels.\n \n Returns\n -------\n chi: float\n The reduced chi squared.\n \n \"\"\"\n # rescale in flux and spatially\n array = cube.copy()\n #scale_fac, flux_fac = modelParameters\n scale_fac, = modelParameters\n array[0]*=flux_fac\n scaling_list = np.array([scale_fac,1])\n array = _cube_resc_wave(array, scaling_list)\n\n frame = array[1]-array[0]\n if mask is None:\n mask = np.ones_like(frame)\n \n \n if fm == 'sum':\n chi = np.sum(np.power(frame[np.where(mask)],2))\n elif fm == 'stddev':\n values = frame[np.where(mask)]\n values = values[values != 0]\n chi = np.std(values)\n else:\n raise RuntimeError('fm choice not recognized.')\n \n return chi\n\ndef _chisquare_scal_2fp(modelParameters, cube, mask=None, fm='sum'):\n \"\"\"\n Calculate the reduced chi2:\n \\chi^2_r = \\frac{1}{N-3}\\sum_{j=1}^{N} |I_j|,\n where N is the number of pixels within a circular aperture centered on the \n first estimate of the planet position, and I_j the j-th pixel intensity.\n \n Parameters\n ---------- \n modelParameters: tuple\n The model parameters, typically (scal_fac, flux_fac).\n cube: numpy.array\n The cube of fits images expressed as a numpy.array.\n mask: 2D-array, opt\n Binary mask, with ones where the residual intensities should be \n evaluated. If None is provided, the whole field is used.\n fm: str, opt, {\"sum\",\"stddev\"}\n Figure of merit to use: sum of squared residuals or stddev of residual \n pixels.\n \n Returns\n -------\n chi: float\n The reduced chi squared.\n \n \"\"\"\n # rescale in flux and spatially\n array = cube.copy()\n scale_fac, flux_fac = modelParameters\n array[0]*=flux_fac\n scaling_list = np.array([scale_fac,1])\n array = _cube_resc_wave(array, scaling_list)\n\n frame = array[1]-array[0]\n if mask is None:\n mask = np.ones_like(frame)\n \n \n if fm == 'sum':\n chi = np.sum(np.power(frame[np.where(mask)],2))\n elif fm == 'stddev':\n values = frame[np.where(mask)]\n values = values[values != 0]\n chi = np.std(values)\n else:\n raise RuntimeError('fm choice not recognized.')\n \n return chi\n\n\n \ndef scale_fft(array, scale, ori_dim=False):\n \"\"\"\n Resample a square frame with a single scale factor using an FFT-based\n method.\n\n Parameters\n ----------\n array : 2d numpy ndarray\n Input frame, 2d square array.\n scale : int or float\n Scale factor for upsampling or downsampling the frame.\n ori_dim: bool, opt\n Whether to crop/pad scaled array in order to have the output with the\n same dimensions as the input array. 
By default, the x,y dimensions of \n the output are the closest integer to scale*dim_input, with the same \n parity as the input.\n \n Returns\n -------\n array_resc : numpy ndarray\n Output rescaled frame.\n \n \"\"\"\n if scale == 1:\n return array\n dim = array.shape[0] # even square\n dtype = array.dtype.kind\n\n kd_array = np.arange(dim/2 + 1, dtype=int)\n \n # scaling factor chosen as *close* as possible to N''/N', where: \n # N' = N + 2*KD (N': dim after FT)\n # N\" = N + 2*KF (N'': dim after FT-1 of FT image), \n # => N\" = 2*round(N'*sc/2)\n # => KF = (N\"-N)/2 = round(N'*sc/2 - N/2) \n # = round(N/2*(sc-1) + KD*sc)\n # We call yy=N/2*(sc-1) +KD*sc \n yy = dim/2 * (scale - 1) + kd_array.astype(float)*scale\n \n # We minimize the difference between the `ideal' N\" and its closest \n # integer value by minimizing |yy-int(yy)|.\n kf_array = np.round(yy).astype(int)\n tmp = np.abs(yy-kf_array)\n imin = np.nanargmin(tmp)\n\n kd_io = kd_array[imin]\n kf_io = kf_array[imin]\n \n # Extract a part of array and place into dim_p array\n dim_p = int(dim + 2*kd_io)\n tmp = np.zeros((dim_p, dim_p), dtype=dtype)\n tmp[kd_io:kd_io+dim, kd_io:kd_io+dim] = array\n\n # Fourier-transform the larger array\n array_f = np.fft.fftshift(np.fft.fft2(tmp))\n \n # Extract a part of, or expand, the FT to dim_pp pixels\n dim_pp = int(dim + 2*kf_io)\n \n if dim_pp > dim_p:\n tmp = np.zeros((dim_pp, dim_pp), dtype=complex)\n tmp[(dim_pp-dim_p)//2:(dim_pp+dim_p)//2, \n (dim_pp-dim_p)//2:(dim_pp+dim_p)//2] = array_f\n else:\n tmp = array_f[kd_io-kf_io:kd_io-kf_io+dim_pp, \n kd_io-kf_io:kd_io-kf_io+dim_pp]\n\n # inverse Fourier-transform the FT\n tmp = np.fft.ifft2(np.fft.fftshift(tmp))\n array_resc = tmp.real\n del tmp\n\n # Extract a part of or expand the scaled image to desired number of pixels\n dim_resc = int(round(scale*dim))\n if dim_resc>dim and dim_resc%2 != dim%2:\n dim_resc+=1\n elif dim_resc<dim and dim_resc%2 != dim%2:\n dim_resc-=1 # for reversibility\n \n if not ori_dim and dim_pp > dim_resc:\n array_resc = array_resc[(dim_pp-dim_resc)//2:(dim_pp+dim_resc)//2,\n (dim_pp-dim_resc)//2:(dim_pp+dim_resc)//2]\n elif not ori_dim and dim_pp <= dim_resc:\n array = np.zeros((dim_resc,dim_resc))\n array[(dim_resc-dim_pp)//2:(dim_resc+dim_pp)//2,\n (dim_resc-dim_pp)//2:(dim_resc+dim_pp)//2] = array_resc\n array_resc = array\n elif dim_pp > dim:\n array_resc = array_resc[kf_io:kf_io+dim, kf_io:kf_io+dim]\n elif dim_pp <= dim:\n scaled = array*0\n scaled[-kf_io:-kf_io+dim_pp, -kf_io:-kf_io+dim_pp] = array_resc\n array_resc = scaled\n\n return array_resc\n"
]
| [
[
"numpy.ones_like",
"numpy.fft.fft2",
"numpy.where",
"numpy.zeros_like",
"numpy.nonzero",
"scipy.ndimage.interpolation.zoom",
"numpy.arange",
"numpy.fft.fftshift",
"scipy.optimize.minimize",
"numpy.nanmedian",
"numpy.array",
"numpy.pad",
"numpy.zeros",
"numpy.round",
"numpy.std",
"numpy.amax",
"numpy.nanargmin",
"scipy.ndimage.interpolation.geometric_transform",
"numpy.ceil",
"numpy.isnan",
"numpy.asarray",
"numpy.ones",
"numpy.any",
"numpy.abs"
]
]
|
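The net effect of check_scal_vector above is easiest to see numerically: an IFS wavelength vector becomes a vector of scaling factors that are all >= 1 relative to the longest wavelength. A small sketch mirroring the function body, with hypothetical wavelengths:

```python
import numpy as np

# Mirror the body of check_scal_vector on a hypothetical wavelength vector.
lbdas = np.array([0.95, 1.2, 1.6])   # microns; the reddest channel is last
scal_vec = 1 / lbdas                 # invert (the minimum is not already 1)
scal_vec /= scal_vec.min()           # normalize so the longest wavelength maps to 1
print(scal_vec)                      # [1.684 1.333 1.0], i.e. lbdas.max() / lbdas
```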
locuslab/robust-nn-control | [
"666fb1540f20555aa04bccde12603e67a1c0b913"
]
| [
"envs/cartpole.py"
]
| [
"import numpy as np\nimport torch\nimport os\n\nfrom envs import ode_env\nimport disturb_models as dm\nfrom constants import *\n\n\nclass CartPoleEnv(ode_env.NLDIEnv):\n\n def __init__(self, l=1, m_cart=1, m_pole=1, g=9.81, Q=None, R=None, random_seed=None, device=None):\n if random_seed is not None:\n np.random.seed(random_seed)\n torch.manual_seed(random_seed+1)\n\n self.l = l\n self.m_cart = m_cart\n self.m_pole = m_pole\n self.g = g\n\n self.n, self.m, = 4, 1\n\n # TODO: have reasonable objective?\n self.Q = Q\n self.R = R\n if Q is None:\n Q = np.random.randn(self.n, self.n)\n Q = Q.T @ Q\n # Q = np.eye(self.n)\n self.Q = torch.tensor(Q, dtype=TORCH_DTYPE, device=device)\n if R is None:\n R = np.random.randn(self.m, self.m)\n R = R.T @ R\n # R = np.eye(self.m)\n self.R = torch.tensor(R, dtype=TORCH_DTYPE, device=device)\n\n # TODO: hacky, assumes call from main.py in top level directory\n array_path = os.path.join('problem_gen', 'cartpole')\n self.A = torch.tensor(np.load(os.path.join(array_path, 'A.npy')), dtype=TORCH_DTYPE, device=device)\n self.B = torch.tensor(np.load(os.path.join(array_path, 'B.npy')), dtype=TORCH_DTYPE, device=device)\n self.G_lin = torch.tensor(np.load(os.path.join(array_path, 'G.npy')), dtype=TORCH_DTYPE, device=device)\n self.C_lin = torch.tensor(np.load(os.path.join(array_path, 'C.npy')), dtype=TORCH_DTYPE, device=device)\n self.D_lin = torch.tensor(np.load(os.path.join(array_path, 'D.npy')), dtype=TORCH_DTYPE, device=device)\n\n disturb_n = 2\n self.G_disturb = torch.tensor(np.random.randn(self.n, disturb_n), dtype=TORCH_DTYPE, device=device)\n self.C_disturb = torch.tensor(0.1 * np.random.randn(disturb_n, self.n), dtype=TORCH_DTYPE, device=device)\n self.D_disturb = torch.tensor(0.001 * np.random.randn(disturb_n, self.m), dtype=TORCH_DTYPE, device=device)\n\n self.G = torch.cat([self.G_lin, self.G_disturb], dim=1)\n self.C = torch.cat([self.C_lin, self.C_disturb], dim=0)\n self.D = torch.cat([self.D_lin, self.D_disturb], dim=0)\n\n self.wp, self.wq = self.G.shape[1], self.C.shape[0]\n\n self.disturb_f = dm.NLDIDisturbModel(self.C_disturb, self.D_disturb, self.n, self.m, self.G_disturb.shape[1])\n if device is not None:\n self.disturb_f.to(device=device, dtype=TORCH_DTYPE)\n\n self.adversarial_disturb_f = None\n\n # Max and min values for state and action: [x, xdot, theta, thetadot, u]\n self.yumax = torch.tensor([1.2, 1.0, 0.1, 1.0, 10], dtype=TORCH_DTYPE, device=device)\n self.yumin = torch.tensor([-1.2, -1.0, -0.1, -1.0, -10], dtype=TORCH_DTYPE, device=device)\n\n self.y_0_max = torch.tensor([1.0, 0.0, 0.1, 0.0], dtype=TORCH_DTYPE, device=device)\n self.y_0_min = torch.tensor([-1.0, -0.0, -0.1, -0.0], dtype=TORCH_DTYPE, device=device)\n\n self.viewer = None\n\n # Keeping external interface, but renaming internally\n def xdot_f(self, state, u_in, t):\n # x = state[:, 0]\n x_dot = state[:, 1]\n theta = state[:, 2]\n theta_dot = state[:, 3]\n\n # limit action magnitude\n if self.m == 1:\n u = torch.clamp(u_in, self.yumin[-1], self.yumax[-1]).squeeze(1)\n else:\n raise NotImplementedError()\n\n sin_theta = torch.sin(theta)\n cos_theta = torch.cos(theta)\n temp = 1/(self.m_cart + self.m_pole * (sin_theta * sin_theta))\n x_ddot = temp * (u + self.m_pole * sin_theta * (self.l * (theta_dot**2)\n - self.g * cos_theta))\n theta_ddot = -(1/self.l) * temp * (u * cos_theta\n + self.m_pole * self.l * (theta_dot**2) * cos_theta * sin_theta\n - (self.m_cart + self.m_pole) * self.g * sin_theta)\n\n return torch.stack([x_dot, x_ddot, theta_dot, theta_ddot]).T\n\n def 
xdot_adversarial_f(self, x, u, t):\n if self.adversarial_disturb_f is None:\n raise ValueError('You must initialize adversarial_disturb_f before running in adversarial mode')\n\n # # limit action magnitude\n # if self.m == 1:\n # u = torch.clamp(u_in, self.yumin[-1], self.yumax[-1]).squeeze(1)\n # else:\n # raise NotImplementedError()\n\n p = self.adversarial_disturb_f(x, u, t)\n return x @ self.A.T + u @ self.B.T + p @ self.G.T\n\n def cost_f(self, x, u, t):\n return ((x @ self.Q) * x).sum(-1) + ((u @ self.R) * u).sum(-1)\n\n def get_nldi_linearization(self):\n return self.A, self.B, self.G, self.C, self.D, self.Q, self.R\n\n def gen_states(self, num_states, device=None):\n prop = torch.tensor(np.random.rand(num_states, self.n), device=device, dtype=TORCH_DTYPE)\n return self.y_0_max[:self.n].detach()*prop + self.y_0_min[:self.n].detach()*(1-prop)\n\n def __copy__(self):\n new_env = CartPoleEnv.__new__(CartPoleEnv)\n new_env.__dict__.update(self.__dict__)\n return new_env\n\n # Copied from Open AI gym: https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py\n def render(self, state, mode='human'):\n screen_width = 600\n screen_height = 400\n\n world_width = 10\n scale = screen_width / world_width\n carty = 100 # TOP OF CART\n polewidth = 10.0\n polelen = scale * (2 * self.l)\n cartwidth = 50.0\n cartheight = 30.0\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2\n axleoffset = cartheight / 4.0\n cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n self.carttrans = rendering.Transform()\n cart.add_attr(self.carttrans)\n self.viewer.add_geom(cart)\n l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\n pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n pole.set_color(.8, .6, .4)\n self.poletrans = rendering.Transform(translation=(0, axleoffset))\n pole.add_attr(self.poletrans)\n pole.add_attr(self.carttrans)\n self.viewer.add_geom(pole)\n self.axle = rendering.make_circle(polewidth / 2)\n self.axle.add_attr(self.poletrans)\n self.axle.add_attr(self.carttrans)\n self.axle.set_color(.5, .5, .8)\n self.viewer.add_geom(self.axle)\n self.track = rendering.Line((0, carty), (screen_width, carty))\n self.track.set_color(0, 0, 0)\n self.viewer.add_geom(self.track)\n\n self._pole_geom = pole\n\n # Edit the pole polygon vertex\n pole = self._pole_geom\n l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\n pole.v = [(l, b), (l, t), (r, t), (r, b)]\n\n cartx = state[0] * scale + screen_width / 2.0 # MIDDLE OF CART\n self.carttrans.set_translation(cartx, carty)\n self.poletrans.set_rotation(-state[2])\n\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n"
]
| [
[
"torch.cos",
"torch.cat",
"torch.stack",
"numpy.random.rand",
"torch.sin",
"numpy.random.seed",
"numpy.random.randn",
"torch.clamp",
"torch.manual_seed",
"torch.tensor"
]
]
|
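Editor's note: the CartPoleEnv above exposes batched continuous-time dynamics via xdot_f, initial-state sampling via gen_states, and a quadratic cost via cost_f. A minimal rollout sketch follows (not part of the repo): it assumes the file's own `constants`, `envs`, and `disturb_models` modules are importable, that it is run from the repository root so the problem_gen/cartpole/*.npy files resolve (per the TODO in the file), and that the Euler step size dt is an arbitrary illustrative choice.

import torch

env = CartPoleEnv(random_seed=0)            # seed choice is illustrative
x = env.gen_states(num_states=8)            # (8, 4) batch of initial states
u = torch.zeros(8, 1, dtype=x.dtype)        # zero force applied to the cart
dt = 0.01                                   # assumed integration step
for step in range(100):
    # forward-Euler step through the batched cartpole ODE
    x = x + dt * env.xdot_f(x, u, step * dt)
stage_cost = env.cost_f(x, u, 100 * dt)     # per-sample quadratic cost, shape (8,)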
astog/soccer-predictor | [
"c6030b350fd5049be43bd474fca8d5ea933a4604"
]
| [
"soccer_predictor_fodds.py"
]
| [
"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.autograd import Variable\nfrom soccer_loader import SoccerDataset\nfrom mlp import Net\n\nimport time\nimport datetime\nimport numpy as np\n\n# Training settings\nparser = argparse.ArgumentParser(description='MLP for Soccer Odds Dataset')\nparser.add_argument('--seed', type=int, default=1234)\nparser.add_argument('--lr-start', type=float, default=0.001)\nparser.add_argument('--lr-end', type=float, default=0.0001)\nparser.add_argument('--epochs', type=int, default=100)\nparser.add_argument('--no-shuffle', action='store_true', default=False)\nparser.add_argument('--batch_size', type=int, default=128)\nparser.add_argument('--hunits', type=int, default=1024)\nparser.add_argument('--npasses', type=int, default=8)\nparser.add_argument('--wd', type=float, default=0.0)\nparser.add_argument('--dp-hidden', type=float, default=0.5)\nparser.add_argument('--no-cuda', action='store_true', default=False)\nparser.add_argument('--no-save', action='store_true', default=False)\nparser.add_argument('--log-interval', type=int, default=50)\n\nargs = parser.parse_args()\n\nfor arg in vars(args):\n print(\"{0:{1}<20} {2}\".format(str(arg) + ' ', '-', getattr(args, arg)))\nprint(\"\\n\")\n\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nargs.shuffle = not args.no_shuffle\nargs.save = not args.no_save\n\nLR_decay = (args.lr_end / args.lr_start)**(1. / args.epochs)\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\ntest_subset = 0.2\ndataset_path = 'match_odds_only.pkl'\n\nsoccer_dataset = SoccerDataset(dataset_path)\nnum_examples = len(soccer_dataset)\nindices = list(range(num_examples))\nsplit = int(np.floor(test_subset * num_examples))\n\nnp.random.shuffle(indices)\ntrain_idx, test_idx = indices[split:], indices[:split]\ntrain_sampler = SubsetRandomSampler(train_idx)\ntest_sampler = SubsetRandomSampler(test_idx)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n dataset=soccer_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n dataset=soccer_dataset, batch_size=args.batch_size, sampler=test_sampler, **kwargs)\n\n\nmodel = Net(30, 3)\nif args.cuda:\n torch.cuda.set_device(0)\n model.cuda()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=args.lr_start, weight_decay=args.wd)\n\n\ndef train(epoch):\n # Initialize batchnorm and dropout layers for training\n model.train()\n\n # Logging variables\n train_batch_count = 0\n train_batch_avg_loss = 0\n train_batch_avg_count = 0\n\n for batch_idx, (data, target) in enumerate(train_loader, 1):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target, requires_grad=False)\n target.data.squeeze_(-1)\n\n output = model(data)\n\n loss = criterion(output, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_batch_count += 1\n train_batch_avg_loss += float(loss)\n train_batch_avg_count += 1\n\n if batch_idx % args.log_interval == 0:\n print(\"Epoch: {: <6}\\tBatches: {: 7.2f}%\\tAverage Batch Loss: {:.6e}\".format(\n epoch, 100. 
* train_batch_count / len(train_loader),\n train_batch_avg_loss / train_batch_avg_count\n ))\n train_batch_avg_loss = 0\n train_batch_avg_count = 0\n\n if train_batch_avg_count > 0:\n print(\"Epoch: {: <6}\\tBatches: {: 7.2f}%\\tAverage Batch Loss: {:.6e}\".format(\n epoch, 100. * train_batch_count / len(train_loader),\n train_batch_avg_loss / train_batch_avg_count\n ))\n\n\ndef test(epoch):\n # Initialize batchnorm and dropout layers for testing\n model.eval()\n\n # Logging variables\n correct = 0\n test_batch_count = 0\n test_batch_avg_loss = 0\n test_batch_avg_count = 0\n\n for batch_idx, (data, target) in enumerate(test_loader, 1):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, requires_grad=False), Variable(target, requires_grad=False)\n target.data.squeeze_(-1)\n\n output = model(data)\n loss = criterion(output, target).data.item() # sum up batch loss\n\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_batch_count += 1\n test_batch_avg_loss += float(loss)\n test_batch_avg_count += 1\n\n if batch_idx % args.log_interval == 0:\n print(\"Epoch: {: <6}\\tBatches: {: 7.2f}%\\tAverage Batch Loss: {:.6e}\".format(\n epoch, 100. * test_batch_count / len(test_loader),\n test_batch_avg_loss / test_batch_avg_count\n ))\n test_batch_avg_loss = 0\n test_batch_avg_count = 0\n\n if test_batch_avg_count > 0:\n print(\"Epoch: {: <6}\\tBatches: {: 7.2f}%\\tAverage Batch Loss: {:.6e}\".format(\n epoch, 100. * test_batch_count / len(test_loader),\n test_batch_avg_loss / test_batch_avg_count\n ))\n\n print('\\nTest set accuracy: {}/{} ({:.4f}%)'.format(\n correct, len(test_loader) * args.batch_size,\n 100. * (float(correct) / (len(test_loader) * args.batch_size))\n ))\n\n return correct\n\n\nif __name__ == '__main__':\n print(\"Training batches:\", len(train_loader))\n print(\"Test batches:\", len(test_loader), end='\\n\\n')\n test_correct = 0\n scheduler = optim.lr_scheduler.ExponentialLR(optimizer, LR_decay)\n for epoch in range(1, args.epochs + 1):\n time_start = time.perf_counter()\n train(epoch)\n scheduler.step()\n\n print(\"\\n{:-<72}\".format(\"\"))\n print(\"Test:\\n\")\n test_correct = test(epoch)\n\n time_complete = time.perf_counter() - time_start\n print(\"\\nTime to complete epoch {} == {} sec(s)\".format(\n epoch, time_complete\n ))\n print(\"Estimated time left == {}\".format(\n str(datetime.timedelta(seconds=time_complete * (args.epochs - epoch)))\n ))\n\n print(\"{:=<72}\\n\".format(\"\"))\n\n print('\\nFinal Test set accuracy: {}/{} ({:.4f}%)'.format(\n test_correct, len(test_loader) * args.batch_size,\n 100. * (float(test_correct) / (len(test_loader) * args.batch_size))\n ))\n"
]
| [
[
"torch.cuda.manual_seed",
"torch.autograd.Variable",
"torch.optim.lr_scheduler.ExponentialLR",
"numpy.random.shuffle",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.nn.CrossEntropyLoss",
"numpy.floor"
]
]
|
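Editor's note: the LR_decay factor in the script above is chosen so that `epochs` multiplicative scheduler steps carry the optimizer exactly from lr-start to lr-end. A stand-alone arithmetic check using the script's own defaults:

lr_start, lr_end, epochs = 0.001, 0.0001, 100
decay = (lr_end / lr_start) ** (1.0 / epochs)   # ~0.97724 per epoch
print(lr_start * decay ** epochs)               # 0.0001, up to float rounding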
PengWan-Yang/few-shot-transformer | [
"c055239061744124c72960420cd4037495952b6d"
]
| [
"models/position_encoding.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nVarious positional encodings for the transformer.\n\"\"\"\nimport math\nimport torch\nfrom torch import nn\n\nfrom util.misc import NestedTensor\n\n\nclass PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n super().__init__()\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n self.scale = scale\n\n def forward(self, tensor_list: NestedTensor):\n x = tensor_list.tensors\n mask = tensor_list.mask\n assert mask is not None\n not_mask = ~mask\n x_embed = not_mask.cumsum(1, dtype=torch.float32)\n y_embed = not_mask.cumsum(2, dtype=torch.float32)\n z_embed = not_mask.cumsum(3, dtype=torch.float32)\n if self.normalize:\n eps = 1e-6\n x_embed = x_embed / (x_embed[:, -1:, :, :] + eps) * self.scale\n y_embed = y_embed / (y_embed[:, :, -1:, :] + eps) * self.scale\n z_embed = z_embed / (z_embed[:, :, :, -1:] + eps) * self.scale\n\n\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n pos_x = x_embed[:, :, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, :, None] / dim_t\n pos_z = z_embed[:, :, :, :, None] / dim_t\n pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4)\n pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4)\n pos_z = torch.stack((pos_z[:, :, :, :, 0::2].sin(), pos_z[:, :, :, :, 1::2].cos()), dim=5).flatten(4)\n\n pos = torch.cat((pos_x, pos_y, pos_z), dim=4).permute(0, 4, 1, 2, 3)\n return pos\n\n\nclass PositionEmbeddingLearned(nn.Module):\n \"\"\"\n Absolute pos embedding, learned.\n \"\"\"\n def __init__(self, num_pos_feats=256):\n super().__init__()\n self.row_embed = nn.Embedding(50, num_pos_feats)\n self.col_embed = nn.Embedding(50, num_pos_feats)\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.uniform_(self.row_embed.weight)\n nn.init.uniform_(self.col_embed.weight)\n\n def forward(self, tensor_list: NestedTensor):\n x = tensor_list.tensors\n h, w = x.shape[-2:]\n i = torch.arange(w, device=x.device)\n j = torch.arange(h, device=x.device)\n x_emb = self.col_embed(i)\n y_emb = self.row_embed(j)\n pos = torch.cat([\n x_emb.unsqueeze(0).repeat(h, 1, 1),\n y_emb.unsqueeze(1).repeat(1, w, 1),\n ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)\n return pos\n\n\ndef build_position_encoding(args):\n N_steps = args.hidden_dim // 3\n if args.position_embedding in ('v2', 'sine'):\n # TODO find a better way of exposing other arguments\n position_embedding = PositionEmbeddingSine(N_steps, normalize=True)\n elif args.position_embedding in ('v3', 'learned'):\n position_embedding = PositionEmbeddingLearned(N_steps)\n else:\n raise ValueError(f\"not supported {args.position_embedding}\")\n\n return position_embedding\n"
]
| [
[
"torch.nn.init.uniform_",
"torch.cat",
"torch.nn.Embedding",
"torch.arange"
]
]
|
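Editor's note: the core of PositionEmbeddingSine above is the frequency schedule dim_t, which consumes channels in (sin, cos) pairs that share one wavelength, with wavelengths growing geometrically up to `temperature`. A small stand-alone illustration of that schedule:

import torch

num_pos_feats, temperature = 8, 10000
dim_t = torch.arange(num_pos_feats, dtype=torch.float32)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
print(dim_t)  # tensor([1., 1., 10., 10., 100., 100., 1000., 1000.])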
beyondacm/lingvo | [
"99c0da11d4abacf41850e2d9df1e11d211a30420"
]
| [
"lingvo/core/favor_attention_test.py"
]
| [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for FAVOR attention.\"\"\"\n\nimport math\nfrom absl.testing import parameterized\nfrom lingvo import compat as tf\nfrom lingvo.core import favor_attention as favor\nfrom lingvo.core import test_utils\n\nimport numpy as np\n\n\nclass FAVORTest(test_utils.TestCase, parameterized.TestCase):\n\n def test_softmax_noncausal_attention_block_output(self):\n batch_size = 1\n length = 2\n num_heads = 1\n dim = 8\n num_random_features = 1000\n query = tf.random.normal([batch_size, length, num_heads, dim])\n key = tf.random.normal([batch_size, length, num_heads, dim])\n value = tf.random.normal([batch_size, length, num_heads, dim])\n kernel_transformation = favor.softmax_kernel_transformation\n projection_matrix = favor.create_projection_matrix(num_random_features, dim)\n attention_block_output = favor.favor_attention(query, key, value,\n kernel_transformation, False,\n projection_matrix)\n\n query = tf.multiply(query, 1.0 / math.sqrt(float(dim)))\n attention_scores = tf.einsum(\"BXHD,BYHD->BXYH\", query, key)\n attention_scores = tf.nn.softmax(attention_scores, axis=2)\n exact_attention_block_output = tf.einsum(\"BXYH,BYHD->BXHD\",\n attention_scores, value)\n max_error = 0.5\n with self.session(use_gpu=False) as sess:\n favor_output, groundtruth_output = sess.run(\n [exact_attention_block_output, attention_block_output])\n error = np.max(\n np.abs((groundtruth_output - favor_output) / groundtruth_output))\n self.assertLess(error, max_error)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
]
| [
[
"numpy.abs"
]
]
|
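Editor's note: the pass/fail criterion in the test above is a maximum elementwise relative error between the FAVOR output and exact softmax attention. The same metric in isolation, on toy numbers:

import numpy as np

groundtruth = np.array([1.0, -2.0, 4.0])   # stand-in for the exact attention output
approx = np.array([1.1, -1.9, 3.8])        # stand-in for the FAVOR approximation
error = np.max(np.abs((groundtruth - approx) / groundtruth))
print(error)  # ~0.1, which would pass the test's max_error = 0.5 threshold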
akoul1/mvlearn | [
"177d391bb12c6e94335720d9af3608bd719d8be1"
]
| [
"mvlearn/embed/gcca.py"
]
| [
"# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .base import BaseEmbed\nfrom ..utils.utils import check_Xs\n\nimport numpy as np\nfrom scipy import linalg, stats\nfrom scipy.sparse.linalg import svds\nfrom sklearn.preprocessing import normalize\nfrom mvlearn.embed.utils import select_dimension\n\n\nclass GCCA(BaseEmbed):\n \"\"\"\n An implementation of Generalized Canonical Correalation Analysis. Computes\n individual projections into a common subspace such that the correlations\n between pairwise projections are minimized (ie. maximize pairwise\n correlation). Reduces to CCA in the two sample case.\n\n Parameters\n ----------\n sv_tolerance : float, optional, default=None\n Selects the number of SVD components to keep for each view by\n thresholding singular values. If none, another selection\n method is used.\n n_components : int (positive), optional, default=None\n If ``self.sv_tolerance=None``, selects the number of SVD\n components to keep for each view. If none, another selection\n method is used.\n fraction_var : float, default=None\n If ``self.sv_tolerance=None``, and ``self.n_components=None``,\n selects the number of SVD components to keep for each view by\n capturing enough of the variance. If none, another selection\n method is used.\n n_elbows : int, optional, default: 2\n If ``self.fraction_var=None``, ``self.sv_tolerance=None``, and\n ``self.n_components=None``, then compute the optimal embedding\n dimension using :func:`~mvlearn.embed.gcca.select_dimension`.\n Otherwise, ignored.\n tall : boolean, default=False\n Set to true if n_samples > n_features, speeds up SVD\n\n Attributes\n ----------\n projection_mats_ : list of arrays\n A projection matrix for each view, from the given space to the\n latent space\n ranks_ : list of ints\n number of left singular vectors kept for each view during the first\n SVD\n\n References\n ----------\n .. [#1] B. Afshin-Pour, G.A. Hossein-Zadeh, S.C. Strother, H.\n Soltanian-Zadeh. Enhancing reproducibility of fMRI statistical\n maps using generalized canonical correlation analysis in NPAIRS\n framework. Neuroimage, 60 (2012), pp. 
1970-1981\n \"\"\"\n\n def __init__(\n self,\n fraction_var=None,\n sv_tolerance=None,\n n_components=None,\n n_elbows=2,\n tall=False\n ):\n\n self.fraction_var = fraction_var\n self.sv_tolerance = sv_tolerance\n self.n_components = n_components\n self.n_elbows = n_elbows\n self.tall = tall\n self.projection_mats_ = None\n self.ranks_ = None\n\n def center(self, X):\n \"\"\"\n Subtracts the row means and divides by the row standard deviations.\n Then subtracts column means.\n\n Parameters\n ----------\n X : array-like, shape (n_observations, n_features)\n The data to preprocess\n\n Returns\n -------\n centered_X : preprocessed data matrix\n \"\"\"\n\n # Mean along rows using sample mean and sample std\n centered_X = stats.zscore(X, axis=1, ddof=1)\n # Mean along columns\n mu = np.mean(centered_X, axis=0)\n centered_X -= mu\n return centered_X\n\n def fit(self, Xs):\n \"\"\"\n Calculates a projection from each view to a latentent space such that\n the sum of pairwise latent space correlations is maximized. Each view\n 'X' is normalized and the left singular vectors of 'X^T X' are\n calculated using SVD. The number of singular vectors kept is determined\n by either the percent variance explained, a given rank threshold, or a\n given number of components. The singular vectors kept are concatenated\n and SVD of that is taken and used to calculated projections for each\n view.\n\n Parameters\n ----------\n Xs : list of array-likes or numpy.ndarray\n - Xs length: n_views\n - Xs[i] shape: (n_samples, n_features_i)\n The data to fit to. Each view will receive its own embedding.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n\n Xs = check_Xs(Xs, multiview=True)\n n = Xs[0].shape[0]\n min_m = min(X.shape[1] for X in Xs)\n\n data = [self.center(x) for x in Xs]\n\n Uall = []\n Sall = []\n Vall = []\n ranks = []\n\n for x in data:\n # Preprocess\n x[np.isnan(x)] = 0\n\n # compute the SVD of the data\n if self.tall:\n v, s, ut = linalg.svd(x.T, full_matrices=False)\n else:\n u, s, vt = linalg.svd(x, full_matrices=False)\n ut = u.T\n v = vt.T\n\n Sall.append(s)\n Vall.append(v)\n # Dimensions to reduce to\n if self.sv_tolerance:\n if not isinstance(self.sv_tolerance, float) and not isinstance(\n self.sv_tolerance, int\n ):\n raise TypeError(\"sv_tolerance must be numeric\")\n elif self.sv_tolerance <= 0:\n raise ValueError(\n \"sv_tolerance must be greater than 0\"\n )\n\n rank = sum(s > self.sv_tolerance)\n elif self.n_components:\n if not isinstance(self.n_components, int):\n raise TypeError(\"n_components must be an integer\")\n elif self.n_components <= 0:\n raise ValueError(\n \"n_components must be greater than 0\"\n )\n elif self.n_components > min((n, min_m)):\n raise ValueError(\n \"n_components must be less than or equal to the \\\n minimum input rank\"\n )\n\n rank = self.n_components\n elif self.fraction_var:\n if not isinstance(self.fraction_var, float) and not isinstance(\n self.fraction_var, int\n ):\n raise TypeError(\n \"fraction_var must be an integer or float\"\n )\n elif self.fraction_var <= 0 or self.fraction_var > 1:\n raise ValueError(\"fraction_var must be in (0,1]\")\n\n s2 = np.square(s)\n rank = sum(np.cumsum(s2 / sum(s2)) < self.fraction_var) + 1\n else:\n s = s[: int(np.ceil(np.log2(np.min(x.shape))))]\n elbows, _ = select_dimension(\n s, n_elbows=self.n_elbows, threshold=None\n )\n rank = elbows[-1]\n\n ranks.append(rank)\n\n u = ut.T[:, :rank]\n Uall.append(u)\n\n d = min(ranks)\n\n # Create a concatenated view of Us\n Uall_c = np.concatenate(Uall, 
axis=1)\n\n _, _, VV = svds(Uall_c, d)\n VV = np.flip(VV.T, axis=1)\n VV = VV[:, : min([d, VV.shape[1]])]\n\n # SVDS the concatenated Us\n idx_end = 0\n projXs = []\n projection_mats = []\n for i in range(len(data)):\n idx_start = idx_end\n idx_end = idx_start + ranks[i]\n VVi = normalize(VV[idx_start:idx_end, :], \"l2\", axis=0)\n\n # Compute the canonical projections\n A = np.sqrt(n - 1) * Vall[i][:, : ranks[i]]\n A = A @ (linalg.solve(np.diag(Sall[i][: ranks[i]]), VVi))\n projXs.append(data[i] @ A)\n projection_mats.append(A)\n\n self.projection_mats_ = projection_mats\n self.ranks_ = ranks\n\n return self\n\n def transform(self, Xs, view_idx=None):\n \"\"\"\n Embeds data matrix(s) using the fitted projection matrices. May be\n used for out-of-sample embeddings.\n\n Parameters\n ----------\n Xs : list of array-likes or numpy.ndarray\n - Xs length: n_views\n - Xs[i] shape: (n_samples, n_features_i)\n A list of data matrices from each view to transform based on the\n prior fit function. If view_idx defined, then Xs is a 2D data\n matrix corresponding to a single view.\n view_idx : int, default=None\n For transformation of a single view. If not None, then Xs is 2D\n and views_idx specifies the index of the view from which Xs comes\n from.\n\n Returns\n -------\n Xs_transformed : list of array-likes or array-like\n Same shape as Xs\n \"\"\"\n if self.projection_mats_ is None:\n raise RuntimeError(\"Must call fit function before transform\")\n Xs = check_Xs(Xs)\n if view_idx is not None:\n return self.center(Xs[0]) @ self.projection_mats_[view_idx]\n else:\n return np.array(\n [\n self.center(x) @ proj\n for x, proj in zip(Xs, self.projection_mats_)\n ]\n )\n\n def fit_transform(self, Xs):\n \"\"\"\n Fits transformer to Xs and returns a transformed version of the Xs.\n\n Parameters\n ----------\n Xs : list of array-likes or numpy.ndarray\n - Xs length: n_views\n - Xs[i] shape: (n_samples, n_features_i)\n The data to fit to. Each view will receive its own\n transformation matrix and projection.\n\n Returns\n -------\n Xs_transformed : array-like 2D if view_idx not None, otherwise\n (n_views, n_samples, self.n_components)\n \"\"\"\n\n return self.fit(Xs).transform(Xs)\n"
]
| [
[
"scipy.stats.zscore",
"scipy.sparse.linalg.svds",
"numpy.concatenate",
"numpy.isnan",
"numpy.square",
"scipy.linalg.svd",
"numpy.min",
"numpy.mean",
"sklearn.preprocessing.normalize",
"numpy.sqrt",
"numpy.diag",
"numpy.flip"
]
]
|
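Editor's note: a minimal usage sketch for the GCCA class above, on random data; the shapes and the choice of n_components are illustrative only, and the mvlearn utilities imported at the top of the file (check_Xs, select_dimension) are assumed available.

import numpy as np

rng = np.random.RandomState(0)
Xs = [rng.randn(100, 20), rng.randn(100, 30)]   # two views, 100 samples each
gcca = GCCA(n_components=4)
projections = gcca.fit_transform(Xs)            # shape (2, 100, 4)
print(projections.shape, gcca.ranks_)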
mrleu/lightning-flash | [
"644f2b559f87b40a5b623f5260eee0cf924ea0a5"
]
| [
"flash_examples/tabular_classification.py"
]
| [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\n\nimport flash\nfrom flash.core.data.utils import download_data\nfrom flash.tabular import TabularClassificationData, TabularClassifier\n\n# 1. Create the DataModule\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/titanic.zip\", \"./data\")\n\ndatamodule = TabularClassificationData.from_csv(\n [\"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Ticket\", \"Cabin\", \"Embarked\"],\n \"Fare\",\n target_fields=\"Survived\",\n train_file=\"data/titanic/titanic.csv\",\n val_split=0.1,\n)\n\n# 2. Build the task\nmodel = TabularClassifier.from_data(datamodule)\n\n# 3. Create the trainer and train the model\ntrainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())\ntrainer.fit(model, datamodule=datamodule)\n\n# 4. Generate predictions from a CSV\ndatamodule = TabularClassificationData.from_csv(\n predict_file=\"data/titanic/titanic.csv\",\n parameters=datamodule.parameters,\n)\npredictions = trainer.predict(model, datamodule=datamodule)\nprint(predictions)\n\n# 5. Save the model!\ntrainer.save_checkpoint(\"tabular_classification_model.pt\")\n"
]
| [
[
"torch.cuda.device_count"
]
]
|
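Editor's note: since TabularClassifier is a PyTorch Lightning module, the checkpoint written by save_checkpoint above can presumably be restored later with the standard load_from_checkpoint hook; this is an assumption about the Lightning/Flash API rather than something the script itself shows. The path matches the script's own save call.

from flash.tabular import TabularClassifier

# reload the task trained above for later prediction (assumed API)
model = TabularClassifier.load_from_checkpoint("tabular_classification_model.pt")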
crisien/ooi-data-explorations | [
"5504444eded7f96ec59917edb002c77438ae6b10"
]
| [
"python/ooi_data_explorations/qartod/endurance/qartod_ce_phsen.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author Christopher Wingard\n@brief Load the PHSEN data from the uncabled, Coastal Endurance Surface\n Moorings and processes the data to generate QARTOD Gross Range and\n Climatology test limits\n\"\"\"\nimport dateutil.parser as parser\nimport os\nimport pandas as pd\nimport pytz\n\nfrom ooi_data_explorations.common import get_annotations, load_gc_thredds, add_annotation_qc_flags\nfrom ooi_data_explorations.combine_data import combine_datasets\nfrom ooi_data_explorations.uncabled.process_phsen import phsen_datalogger, phsen_instrument, quality_checks\nfrom ooi_data_explorations.qartod.qc_processing import identify_blocks, create_annotations, process_gross_range, \\\n process_climatology, inputs\n\n\ndef combine_delivery_methods(site, node, sensor):\n \"\"\"\n Takes the downloaded data from each of the three data delivery methods and\n combines them into a single, merged xarray data set.\n\n :param site: Site designator, extracted from the first part of the\n reference designator\n :param node: Node designator, extracted from the second part of the\n reference designator\n :param sensor: Sensor designator, extracted from the third and fourth part\n of the reference designator\n :return merged:\n \"\"\"\n # download the telemetered data and re-process it to create a more useful and coherent data set\n tag = '.*PHSEN.*\\\\.nc$'\n telem = load_gc_thredds(site, node, sensor, 'telemetered', 'phsen_abcdef_dcl_instrument', tag)\n telem = phsen_datalogger(telem)\n\n # download the recovered host data and re-process it to create a more useful and coherent data set\n rhost = load_gc_thredds(site, node, sensor, 'recovered_host', 'phsen_abcdef_dcl_instrument_recovered', tag)\n rhost = phsen_datalogger(rhost)\n\n # download the recovered instrument data and re-process it to create a more useful and coherent data set\n rinst = load_gc_thredds(site, node, sensor, 'recovered_inst', 'phsen_abcdef_instrument', tag)\n rinst = phsen_instrument(rinst)\n\n # combine the three datasets into a single, merged time series resampled to a 3 hour interval time series\n merged = combine_datasets(telem, rhost, rinst, 180)\n\n # re-run the quality checks, since averaging will change the flag values\n merged['seawater_ph_quality_flag'] = quality_checks(merged)\n return merged\n\n\ndef generate_qartod(site, node, sensor, cut_off):\n \"\"\"\n Load all of the pH data for a defined reference designator (using the site,\n node and sensor names to construct the reference designator) collected via\n the three data delivery methods of telemetered, recovered host and\n recovered instrument and combine them into a single data set from which\n QARTOD test limits for the gross range and climatology tests can be\n calculated.\n\n :param site: Site designator, extracted from the first part of the\n reference designator\n :param node: Node designator, extracted from the second part of the\n reference designator\n :param sensor: Sensor designator, extracted from the third and fourth part\n of the reference designator\n :param cut_off: string formatted date to use as cut-off for data to add\n to QARTOD test sets\n :return annotations: Initial list of auto-generated HITL annotations as\n a pandas dataframe\n :return gr_lookup: CSV formatted strings to save to a csv file for the\n QARTOD gross range lookup tables.\n :return clm_lookup: CSV formatted strings to save to a csv file for the\n QARTOD climatology lookup tables.\n :return clm_table: CSV formatted strings to save to a csv 
file for the\n QARTOD climatology range tables.\n \"\"\"\n # load and combine all of the data sources for the pH sensor\n data = combine_delivery_methods(site, node, sensor)\n\n # create a boolean array of the data marked as \"fail\" by the pH quality checks and generate initial\n # HITL annotations that can be combined with system annotations and pH quality checks to create\n # a cleaned up data set prior to calculating the QARTOD test values\n fail = data.seawater_ph_quality_flag.where(data.seawater_ph_quality_flag == 4).notnull()\n blocks = identify_blocks(fail, [24, 24])\n hitl = create_annotations(site, node, sensor, blocks)\n\n # get the current system annotations for the sensor\n annotations = get_annotations(site, node, sensor)\n annotations = pd.DataFrame(annotations)\n if not annotations.empty:\n annotations = annotations.drop(columns=['@class'])\n annotations['beginDate'] = pd.to_datetime(annotations.beginDT, unit='ms').dt.strftime('%Y-%m-%dT%H:%M:%S')\n annotations['endDate'] = pd.to_datetime(annotations.endDT, unit='ms').dt.strftime('%Y-%m-%dT%H:%M:%S')\n\n # append the fail annotations to the existing annotations\n annotations = annotations.append(pd.DataFrame(hitl), ignore_index=True, sort=False)\n\n # create a roll-up annotation flag\n data = add_annotation_qc_flags(data, annotations)\n\n # clean-up the data, removing values that fail the pH quality checks or were marked as fail in the annotations\n data = data.where((data.seawater_ph_quality_flag != 4) & (data.rollup_annotations_qc_results != 4))\n\n # if a cut_off date was used, limit data to all data collected up to the cut_off date.\n # otherwise, set the limit to the range of the downloaded data.\n if cut_off:\n cut = parser.parse(cut_off)\n cut = cut.astimezone(pytz.utc)\n end_date = cut.strftime('%Y-%m-%dT%H:%M:%S')\n src_date = cut.strftime('%Y-%m-%d')\n else:\n cut = parser.parse(data.time_coverage_end)\n cut = cut.astimezone(pytz.utc)\n end_date = cut.strftime('%Y-%m-%dT%H:%M:%S')\n src_date = cut.strftime('%Y-%m-%d')\n\n data = data.sel(time=slice(\"2014-01-01T00:00:00\", end_date))\n\n # create the initial gross range entry\n gr = process_gross_range(data, ['seawater_ph'], [6.9, 9.0], site=site, node=node, sensor=sensor)\n\n # re-work gross entry for the different streams and parameter names\n gr_lookup = pd.DataFrame()\n gr_lookup = gr_lookup.append([gr, gr, gr], ignore_index=True)\n gr_lookup['parameter'][0] = {'inp': 'phsen_abcdef_ph_seawater'}\n gr_lookup['stream'][0] = 'phsen_abcdef_dcl_instrument'\n gr_lookup['parameter'][1] = {'inp': 'phsen_abcdef_ph_seawater'}\n gr_lookup['stream'][1] = 'phsen_abcdef_dcl_instrument_recovered'\n gr_lookup['parameter'][2] = {'inp': 'phsen_abcdef_ph_seawater'}\n gr_lookup['stream'][2] = 'phsen_abcdef_instrument'\n gr_lookup['source'] = ('Sensor min/max based on the vendor standard calibration range. 
'\n 'The user min/max is the historical mean of all data collected '\n 'up to {} +/- 3 standard deviations.'.format(src_date))\n\n # create and format the climatology entry and table\n cll, clm_table = process_climatology(data, ['seawater_ph'], [6.9, 9.0], site=site, node=node, sensor=sensor)\n\n # re-work climatology entry for the different streams and parameter names\n clm_lookup = pd.DataFrame()\n clm_lookup = clm_lookup.append([cll, cll, cll])\n clm_lookup['parameters'][0] = {'inp': 'phsen_abcdef_ph_seawater', 'tinp': 'time', 'zinp': 'None'}\n clm_lookup['stream'][0] = 'phsen_abcdef_dcl_instrument'\n clm_lookup['parameters'][1] = {'inp': 'phsen_abcdef_ph_seawater', 'tinp': 'time', 'zinp': 'None'}\n clm_lookup['stream'][1] = 'phsen_abcdef_dcl_instrument_recovered'\n clm_lookup['parameters'][2] = {'inp': 'phsen_abcdef_ph_seawater', 'tinp': 'time', 'zinp': 'None'}\n clm_lookup['stream'][2] = 'phsen_abcdef_instrument'\n\n return annotations, gr_lookup, clm_lookup, clm_table\n\n\ndef main(argv=None):\n \"\"\"\n Download the PHSEN data from the Gold Copy THREDDS server and create the\n QARTOD gross range and climatology test lookup tables.\n \"\"\"\n # setup the input arguments\n args = inputs(argv)\n site = args.site\n node = args.node\n sensor = args.sensor\n cut_off = args.cut_off\n\n # create the initial HITL annotation blocks, the QARTOD gross range and climatology lookup values, and\n # the climatology table for the seawater_ph parameter\n annotations, gr_lookup, clm_lookup, clm_table = generate_qartod(site, node, sensor, cut_off)\n\n # save the resulting annotations and qartod lookups and tables\n out_path = os.path.join(os.path.expanduser('~'), 'ooidata/qartod/phsen')\n out_path = os.path.abspath(out_path)\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n\n # save the annotations to a csv file for further processing\n csv_names = ['id', 'subsite', 'node', 'sensor', 'method', 'stream', 'parameters',\n 'beginDate', 'endDate', 'exclusionFlag', 'qcFlag', 'source', 'annotation']\n anno_csv = '-'.join([site, node, sensor]) + '.quality_annotations.csv'\n annotations.to_csv(os.path.join(out_path, anno_csv), index=False, columns=csv_names)\n\n # save the gross range values to a csv for further processing\n csv_names = ['subsite', 'node', 'sensor', 'stream', 'parameter', 'qcConfig', 'source']\n gr_csv = '-'.join([site, node, sensor]) + '.gross_range.csv'\n gr_lookup.to_csv(os.path.join(out_path, gr_csv), index=False, columns=csv_names)\n\n # save the climatology values and table to a csv for further processing\n csv_names = ['subsite', 'node', 'sensor', 'stream', 'parameters', 'climatologyTable', 'source']\n clm_csv = '-'.join([site, node, sensor]) + '.climatology.csv'\n clm_tbl = '-'.join([site, node, sensor]) + '-seawater_ph.csv'\n clm_lookup.to_csv(os.path.join(out_path, clm_csv), index=False, columns=csv_names)\n with open(os.path.join(out_path, clm_tbl), 'w') as clm:\n clm.write(clm_table[0])\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
]
|
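Editor's note: the gross-range "source" string in the script above describes the user limits as the historical mean +/- 3 standard deviations, bounded by the vendor calibration range [6.9, 9.0]. A sketch of that calculation on a generic array (the actual process_gross_range implementation may differ in detail):

import numpy as np

seawater_ph = np.array([7.9, 8.0, 8.1, 8.0, 7.8, 8.2])   # illustrative values
sensor_min, sensor_max = 6.9, 9.0                        # vendor range from the script
mu, sigma = np.nanmean(seawater_ph), np.nanstd(seawater_ph)
user_min = max(mu - 3 * sigma, sensor_min)               # clip to the sensor range
user_max = min(mu + 3 * sigma, sensor_max)
print(user_min, user_max)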
arra1997/zipline | [
"38d47f1b470f47ff7e8c35d9874d68785d6d2927"
]
| [
"zipline/data/bcolz_daily_bars.py"
]
| [
"# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nimport warnings\n\nfrom bcolz import carray, ctable\nimport logbook\nimport numpy as np\nfrom numpy import (\n array,\n full,\n iinfo,\n nan,\n)\nfrom pandas import (\n DatetimeIndex,\n NaT,\n read_csv,\n to_datetime,\n Timestamp,\n)\nfrom six import iteritems, viewkeys\nfrom toolz import compose\nfrom trading_calendars import get_calendar\n\nfrom zipline.data.session_bars import SessionBarReader\nfrom zipline.data.bar_reader import (\n NoDataAfterDate,\n NoDataBeforeDate,\n NoDataOnDate,\n)\nfrom zipline.utils.functional import apply\nfrom zipline.utils.input_validation import expect_element\nfrom zipline.utils.numpy_utils import iNaT, float64_dtype, uint32_dtype\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.cli import maybe_show_progress\nfrom ._equities import _compute_row_slices, _read_bcolz_data\n\n\nlogger = logbook.Logger('UsEquityPricing')\n\nOHLC = frozenset(['open', 'high', 'low', 'close'])\nUS_EQUITY_PRICING_BCOLZ_COLUMNS = (\n 'open', 'high', 'low', 'close', 'volume', 'day', 'id'\n)\n\nUINT32_MAX = iinfo(np.uint32).max\n\n\ndef check_uint32_safe(value, colname):\n if value >= UINT32_MAX:\n raise ValueError(\n \"Value %s from column '%s' is too large\" % (value, colname)\n )\n\n\n@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})\ndef winsorise_uint32(df, invalid_data_behavior, column, *columns):\n \"\"\"Drops any record where a value would not fit into a uint32.\n\n Parameters\n ----------\n df : pd.DataFrame\n The dataframe to winsorise.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}\n What to do when data is outside the bounds of a uint32.\n *columns : iterable[str]\n The names of the columns to check.\n\n Returns\n -------\n truncated : pd.DataFrame\n ``df`` with values that do not fit into a uint32 zeroed out.\n \"\"\"\n columns = list((column,) + columns)\n mask = df[columns] > UINT32_MAX\n\n if invalid_data_behavior != 'ignore':\n mask |= df[columns].isnull()\n else:\n # we are not going to generate a warning or error for this so just use\n # nan_to_num\n df[columns] = np.nan_to_num(df[columns])\n\n mv = mask.values\n if mv.any():\n if invalid_data_behavior == 'raise':\n raise ValueError(\n '%d values out of bounds for uint32: %r' % (\n mv.sum(), df[mask.any(axis=1)],\n ),\n )\n if invalid_data_behavior == 'warn':\n warnings.warn(\n 'Ignoring %d values because they are out of bounds for'\n ' uint32: %r' % (\n mv.sum(), df[mask.any(axis=1)],\n ),\n stacklevel=3, # one extra frame for `expect_element`\n )\n\n df[mask] = 0\n return df\n\n\nclass BcolzDailyBarWriter(object):\n \"\"\"\n Class capable of writing daily OHLCV data to disk in a format that can\n be read efficiently by BcolzDailyOHLCVReader.\n\n Parameters\n ----------\n filename : str\n The location at which we should write our output.\n calendar : zipline.utils.calendar.trading_calendar\n Calendar to use to compute asset calendar offsets.\n start_session: 
pd.Timestamp\n Midnight UTC session label.\n end_session: pd.Timestamp\n Midnight UTC session label.\n\n See Also\n --------\n zipline.data.bcolz_daily_bars.BcolzDailyBarReader\n \"\"\"\n _csv_dtypes = {\n 'open': float64_dtype,\n 'high': float64_dtype,\n 'low': float64_dtype,\n 'close': float64_dtype,\n 'volume': float64_dtype,\n }\n\n def __init__(self, filename, calendar, start_session, end_session):\n self._filename = filename\n\n if start_session != end_session:\n if not calendar.is_session(start_session):\n raise ValueError(\n \"Start session %s is invalid!\" % start_session\n )\n if not calendar.is_session(end_session):\n raise ValueError(\n \"End session %s is invalid!\" % end_session\n )\n\n self._start_session = start_session\n self._end_session = end_session\n\n self._calendar = calendar\n\n @property\n def progress_bar_message(self):\n return \"Merging daily equity files:\"\n\n def progress_bar_item_show_func(self, value):\n return value if value is None else str(value[0])\n\n def write(self,\n data,\n assets=None,\n show_progress=False,\n invalid_data_behavior='warn'):\n \"\"\"\n Parameters\n ----------\n data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]\n The data chunks to write. Each chunk should be a tuple of sid\n and the data for that asset.\n assets : set[int], optional\n The assets that should be in ``data``. If this is provided\n we will check ``data`` against the assets and provide better\n progress information.\n show_progress : bool, optional\n Whether or not to show a progress bar while writing.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional\n What to do when data is encountered that is outside the range of\n a uint32.\n\n Returns\n -------\n table : bcolz.ctable\n The newly-written table.\n \"\"\"\n ctx = maybe_show_progress(\n (\n (sid, self.to_ctable(df, invalid_data_behavior))\n for sid, df in data\n ),\n show_progress=show_progress,\n item_show_func=self.progress_bar_item_show_func,\n label=self.progress_bar_message,\n length=len(assets) if assets is not None else None,\n )\n with ctx as it:\n return self._write_internal(it, assets)\n\n def write_csvs(self,\n asset_map,\n show_progress=False,\n invalid_data_behavior='warn'):\n \"\"\"Read CSVs as DataFrames from our asset map.\n\n Parameters\n ----------\n asset_map : dict[int -> str]\n A mapping from asset id to file path with the CSV data for that\n asset\n show_progress : bool\n Whether or not to show a progress bar while writing.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}\n What to do when data is encountered that is outside the range of\n a uint32.\n \"\"\"\n read = partial(\n read_csv,\n parse_dates=['day'],\n index_col='day',\n dtype=self._csv_dtypes,\n )\n return self.write(\n ((asset, read(path)) for asset, path in iteritems(asset_map)),\n assets=viewkeys(asset_map),\n show_progress=show_progress,\n invalid_data_behavior=invalid_data_behavior,\n )\n\n def _write_internal(self, iterator, assets):\n \"\"\"\n Internal implementation of write.\n\n `iterator` should be an iterator yielding pairs of (asset, ctable).\n \"\"\"\n total_rows = 0\n first_row = {}\n last_row = {}\n calendar_offset = {}\n\n # Maps column name -> output carray.\n columns = {\n k: carray(array([], dtype=uint32_dtype))\n for k in US_EQUITY_PRICING_BCOLZ_COLUMNS\n }\n\n earliest_date = None\n sessions = self._calendar.sessions_in_range(\n self._start_session, self._end_session\n )\n\n if assets is not None:\n @apply\n def iterator(iterator=iterator, assets=set(assets)):\n for asset_id, table in 
iterator:\n if asset_id not in assets:\n raise ValueError('unknown asset id %r' % asset_id)\n yield asset_id, table\n\n for asset_id, table in iterator:\n nrows = len(table)\n for column_name in columns:\n if column_name == 'id':\n # We know what the content of this column is, so don't\n # bother reading it.\n columns['id'].append(\n full((nrows,), asset_id, dtype='uint32'),\n )\n continue\n\n columns[column_name].append(table[column_name])\n\n if earliest_date is None:\n earliest_date = table[\"day\"][0]\n else:\n earliest_date = min(earliest_date, table[\"day\"][0])\n\n # Bcolz doesn't support ints as keys in `attrs`, so convert\n # assets to strings for use as attr keys.\n asset_key = str(asset_id)\n\n # Calculate the index into the array of the first and last row\n # for this asset. This allows us to efficiently load single\n # assets when querying the data back out of the table.\n first_row[asset_key] = total_rows\n last_row[asset_key] = total_rows + nrows - 1\n total_rows += nrows\n\n table_day_to_session = compose(\n self._calendar.minute_to_session_label,\n partial(Timestamp, unit='s', tz='UTC'),\n )\n asset_first_day = table_day_to_session(table['day'][0])\n asset_last_day = table_day_to_session(table['day'][-1])\n\n asset_sessions = sessions[\n sessions.slice_indexer(asset_first_day, asset_last_day)\n ]\n if len(table) != len(asset_sessions):\n print('Got {} rows for daily bars table with first day={}, last '\n 'day={}, expected {} rows.\\n'\n 'Missing sessions: {}\\n'\n 'Extra sessions: {}'.format(\n len(table),\n asset_first_day.date(),\n asset_last_day.date(),\n len(asset_sessions),\n asset_sessions.difference(\n to_datetime(\n np.array(table['day']),\n unit='s',\n utc=True,\n )\n ).tolist(),\n to_datetime(\n np.array(table['day']),\n unit='s',\n utc=True,\n ).difference(asset_sessions).tolist(),\n ))\n \n\n # Calculate the number of trading days between the first date\n # in the stored data and the first date of **this** asset. 
This\n # offset is used for output alignment by the reader.\n calendar_offset[asset_key] = sessions.get_loc(asset_first_day)\n\n # This writes the table to disk.\n full_table = ctable(\n columns=[\n columns[colname]\n for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS\n ],\n names=US_EQUITY_PRICING_BCOLZ_COLUMNS,\n rootdir=self._filename,\n mode='w',\n )\n\n full_table.attrs['first_trading_day'] = (\n earliest_date if earliest_date is not None else iNaT\n )\n\n full_table.attrs['first_row'] = first_row\n full_table.attrs['last_row'] = last_row\n full_table.attrs['calendar_offset'] = calendar_offset\n full_table.attrs['calendar_name'] = self._calendar.name\n full_table.attrs['start_session_ns'] = self._start_session.value\n full_table.attrs['end_session_ns'] = self._end_session.value\n full_table.flush()\n return full_table\n\n @expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})\n def to_ctable(self, raw_data, invalid_data_behavior):\n if isinstance(raw_data, ctable):\n # we already have a ctable so do nothing\n return raw_data\n\n winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)\n processed = (raw_data[list(OHLC)] * 1000).round().astype('uint32')\n dates = raw_data.index.values.astype('datetime64[s]')\n check_uint32_safe(dates.max().view(np.int64), 'day')\n processed['day'] = dates.astype('uint32')\n processed['volume'] = raw_data.volume.astype('uint32')\n return ctable.fromdataframe(processed)\n\n\nclass BcolzDailyBarReader(SessionBarReader):\n \"\"\"\n Reader for raw pricing data written by BcolzDailyBarWriter.\n\n Parameters\n ----------\n table : bcolz.ctable\n The ctable containing the pricing data, with attrs corresponding to the\n Attributes list below.\n read_all_threshold : int\n The number of equities at or below which the data is read by reading a\n slice from the carray per asset; above it, the data is read by pulling\n all of the data for all assets into memory and then indexing into that\n array for each day and asset pair. 
Used to tune performance of reads\n when using a small or large number of equities.\n\n Attributes\n ----------\n The table with which this loader interacts contains the following\n attributes:\n\n first_row : dict\n Map from asset_id -> index of first row in the dataset with that id.\n last_row : dict\n Map from asset_id -> index of last row in the dataset with that id.\n calendar_offset : dict\n Map from asset_id -> calendar index of first row.\n start_session_ns: int\n Epoch ns of the first session used in this dataset.\n end_session_ns: int\n Epoch ns of the last session used in this dataset.\n calendar_name: str\n String identifier of trading calendar used (e.g., \"NYSE\").\n\n We use first_row and last_row together to quickly find ranges of rows to\n load when reading an asset's data into memory.\n\n We use calendar_offset and calendar to orient loaded blocks within a\n range of queried dates.\n\n Notes\n ------\n A Bcolz CTable is comprised of Columns and Attributes.\n The table with which this loader interacts contains the following columns:\n\n ['open', 'high', 'low', 'close', 'volume', 'day', 'id'].\n\n The data in these columns is interpreted as follows:\n\n - Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *\n as-traded dollar value.\n - Volume is interpreted as as-traded volume.\n - Day is interpreted as seconds since midnight UTC, Jan 1, 1970.\n - Id is the asset id of the row.\n\n The data in each column is grouped by asset and then sorted by day within\n each asset block.\n\n The table is built to represent a long time range of data, e.g. ten years\n of equity data, so the lengths of the asset blocks are not all equal.\n The blocks are clipped to the known start and end date of each asset\n to cut down on the number of empty values that would need to be included to\n make a regular/cubic dataset.\n\n When reading across columns, the open, high, low, close, and volume\n values at the same index represent the same asset and day.\n\n See Also\n --------\n zipline.data.bcolz_daily_bars.BcolzDailyBarWriter\n \"\"\"\n def __init__(self, table, read_all_threshold=3000):\n self._maybe_table_rootdir = table\n # Cache of fully read np.array for the carrays in the daily bar table.\n # raw_array does not use the same cache, but it could.\n # Need to test keeping the entire array in memory for the course of a\n # process first.\n self._spot_cols = {}\n self.PRICE_ADJUSTMENT_FACTOR = 0.001\n self._read_all_threshold = read_all_threshold\n\n @lazyval\n def _table(self):\n maybe_table_rootdir = self._maybe_table_rootdir\n if isinstance(maybe_table_rootdir, ctable):\n return maybe_table_rootdir\n return ctable(rootdir=maybe_table_rootdir, mode='r')\n\n @lazyval\n def sessions(self):\n if 'calendar' in self._table.attrs.attrs:\n # backwards compatibility with old formats, will remove\n return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')\n else:\n cal = get_calendar(self._table.attrs['calendar_name'])\n start_session_ns = self._table.attrs['start_session_ns']\n start_session = Timestamp(start_session_ns, tz='UTC')\n\n end_session_ns = self._table.attrs['end_session_ns']\n end_session = Timestamp(end_session_ns, tz='UTC')\n\n sessions = cal.sessions_in_range(start_session, end_session)\n\n return sessions\n\n @lazyval\n def _first_rows(self):\n return {\n int(asset_id): start_index\n for asset_id, start_index in iteritems(\n self._table.attrs['first_row'],\n )\n }\n\n @lazyval\n def _last_rows(self):\n return {\n int(asset_id): end_index\n for asset_id, end_index 
in iteritems(\n self._table.attrs['last_row'],\n )\n }\n\n @lazyval\n def _calendar_offsets(self):\n return {\n int(id_): offset\n for id_, offset in iteritems(\n self._table.attrs['calendar_offset'],\n )\n }\n\n @lazyval\n def first_trading_day(self):\n try:\n return Timestamp(\n self._table.attrs['first_trading_day'],\n unit='s',\n tz='UTC'\n )\n except KeyError:\n return None\n\n @lazyval\n def trading_calendar(self):\n if 'calendar_name' in self._table.attrs.attrs:\n return get_calendar(self._table.attrs['calendar_name'])\n else:\n return None\n\n @property\n def last_available_dt(self):\n return self.sessions[-1]\n\n def _compute_slices(self, start_idx, end_idx, assets):\n \"\"\"\n Compute the raw row indices to load for each asset on a query for the\n given dates after applying a shift.\n\n Parameters\n ----------\n start_idx : int\n Index of first date for which we want data.\n end_idx : int\n Index of last date for which we want data.\n assets : pandas.Int64Index\n Assets for which we want to compute row indices\n\n Returns\n -------\n A 3-tuple of (first_rows, last_rows, offsets):\n first_rows : np.array[intp]\n Array with length == len(assets) containing the index of the first\n row to load for each asset in `assets`.\n last_rows : np.array[intp]\n Array with length == len(assets) containing the index of the last\n row to load for each asset in `assets`.\n offset : np.array[intp]\n Array with length == len(assets) containing the index in a buffer\n of length `dates` corresponding to the first row of each asset.\n\n The value of offset[i] will be 0 if asset[i] existed at the start\n of a query. Otherwise, offset[i] will be equal to the number of\n entries in `dates` for which the asset did not yet exist.\n \"\"\"\n # The core implementation of the logic here is implemented in Cython\n # for efficiency.\n return _compute_row_slices(\n self._first_rows,\n self._last_rows,\n self._calendar_offsets,\n start_idx,\n end_idx,\n assets,\n )\n\n def load_raw_arrays(self, columns, start_date, end_date, assets):\n start_idx = self._load_raw_arrays_date_to_index(start_date)\n end_idx = self._load_raw_arrays_date_to_index(end_date)\n\n first_rows, last_rows, offsets = self._compute_slices(\n start_idx,\n end_idx,\n assets,\n )\n read_all = len(assets) > self._read_all_threshold\n return _read_bcolz_data(\n self._table,\n (end_idx - start_idx + 1, len(assets)),\n list(columns),\n first_rows,\n last_rows,\n offsets,\n read_all,\n )\n\n def _load_raw_arrays_date_to_index(self, date):\n try:\n return self.sessions.get_loc(date)\n except KeyError:\n raise NoDataOnDate(date)\n\n def _spot_col(self, colname):\n \"\"\"\n Get the colname from daily_bar_table and read all of it into memory,\n caching the result.\n\n Parameters\n ----------\n colname : string\n The name of an OHLCV carray in the daily_bar_table\n\n Returns\n -------\n array (uint32)\n Full read array of the carray in the daily_bar_table with the\n given colname.\n \"\"\"\n try:\n col = self._spot_cols[colname]\n except KeyError:\n col = self._spot_cols[colname] = self._table[colname]\n return col\n\n def get_last_traded_dt(self, asset, day):\n volumes = self._spot_col('volume')\n\n search_day = day\n\n while True:\n try:\n ix = self.sid_day_index(asset, search_day)\n except NoDataBeforeDate:\n return NaT\n except NoDataAfterDate:\n prev_day_ix = self.sessions.get_loc(search_day) - 1\n if prev_day_ix > -1:\n search_day = self.sessions[prev_day_ix]\n continue\n except NoDataOnDate:\n return NaT\n if volumes[ix] != 0:\n return search_day\n 
prev_day_ix = self.sessions.get_loc(search_day) - 1\n if prev_day_ix > -1:\n search_day = self.sessions[prev_day_ix]\n else:\n return NaT\n\n def sid_day_index(self, sid, day):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n day : datetime64-like\n Midnight of the day for which data is requested.\n\n Returns\n -------\n int\n Index into the data tape for the given sid and day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n \"\"\"\n try:\n day_loc = self.sessions.get_loc(day)\n except Exception:\n raise NoDataOnDate(\"day={0} is outside of calendar={1}\".format(\n day, self.sessions))\n offset = day_loc - self._calendar_offsets[sid]\n if offset < 0:\n raise NoDataBeforeDate(\n \"No data on or before day={0} for sid={1}\".format(\n day, sid))\n ix = self._first_rows[sid] + offset\n if ix > self._last_rows[sid]:\n raise NoDataAfterDate(\n \"No data on or after day={0} for sid={1}\".format(\n day, sid))\n return ix\n\n def get_value(self, sid, dt, field):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n dt : datetime64-like\n Midnight of the day for which data is requested.\n field : string\n The price field, e.g. 'open', 'high', 'low', 'close', or 'volume'.\n\n Returns\n -------\n float\n The spot price for field of the given sid on the given day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n Returns NaN if the day is within the date range, but the price is\n 0.\n \"\"\"\n ix = self.sid_day_index(sid, dt)\n price = self._spot_col(field)[ix]\n if field != 'volume':\n if price == 0:\n return nan\n else:\n return price * 0.001\n else:\n return price\n"
]
| [
[
"numpy.full",
"numpy.array",
"numpy.nan_to_num",
"pandas.DatetimeIndex",
"pandas.Timestamp",
"numpy.iinfo"
]
]
|
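Editor's note: the writer/reader pair above stores prices as uint32 thousandths of a dollar (to_ctable multiplies by 1000 and rounds) and scales back on read (get_value multiplies by PRICE_ADJUSTMENT_FACTOR = 0.001). A round-trip sketch of that encoding:

import numpy as np

prices = np.array([123.456, 0.001, 42.0])
stored = (prices * 1000).round().astype('uint32')   # as in to_ctable
recovered = stored * 0.001                          # as in get_value
print(stored, recovered)  # [123456 1 42000] -> [123.456 0.001 42.], up to float rounding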
codyly/locomotion-by-mann | [
"89139466829ef7802bf645f865e335d4cda444e4"
]
| [
"runners/a1/a1-wiki.py"
]
| [
"\"\"\"\nmotion_wiki = {\n \"walk\": ForwardProfile(\"walk\", 0.5, startup=True),\n \"trot\": ForwardProfile(\"trot\", 1.28, startup=True),\n \"jump\": Profile(\"dynamic_jumping\", stages=[0.1, 0.05], ops=[JMP, FWD], startup=True),\n \"turn\": TurningProfile(\"turn\", 0.01, startup=True),\n \"sit\": Profile(\"sit\", [1.0], [SIT], startup=True),\n \"stand\": Profile(\"stand\", [1.0], [STD], startup=True),\n \"lie\": Profile(\"lie\", [1.0], [LIE], startup=True),\n \"turn-in-place\": Profile(\"turn-in-place\", [1.0], [TLF], startup=True),\n}\n\"\"\"\n\nimport argparse\nimport os\n\nimport numpy as np\nimport pybullet\nimport pybullet_data as pd\n\nfrom animation import common as C\nfrom animation.profiles import motion_wiki\nfrom animation.profiles import motion_wiki_no_startup \nfrom animation.animation import Animation\nfrom thirdparty.retarget_motion import retarget_motion as retarget_utils\n\nparser = argparse.ArgumentParser(description=\"Generate forwarding gaits at customized speeds.\")\nparser.add_argument(\"-o\", \"--output\", type=str, help=\"output path\", default=\"outputs\")\nparser.add_argument(\"-t\", \"--type\", type=str, help=\"motion type in wiki\", default=\"walk\")\nparser.add_argument(\"-s\", \"--startup\", action='store_true', help=\"whether use startup second\")\nargs = parser.parse_args()\n\nif not os.path.exists(args.output):\n os.makedirs(args.output)\n\nWiki = motion_wiki if args.startup else motion_wiki_no_startup\n\n\nconfig = retarget_utils.config\n\np = pybullet\np.connect(p.DIRECT)\np.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING, 1)\np.setAdditionalSearchPath(pd.getDataPath())\np.resetSimulation()\np.setGravity(0, 0, 0)\n\nbullet_robot = p.loadURDF(config.URDF_FILENAME, config.INIT_POS, config.INIT_ROT)\n\n# Set robot to default pose to bias knees in the right direction.\nretarget_utils.set_pose(bullet_robot, np.concatenate([config.INIT_POS, config.INIT_ROT, config.DEFAULT_JOINT_POSE]))\n\nprofile = Wiki[args.type]\nanimation = Animation(profile=profile)\n\ngenerator = animation.gen_frame()\n\nmotion_clip = []\n\noutput_path = args.output\noutput_file = f\"{animation.profile.name}.txt\"\n\ntimer = 0\n\ntry:\n # record horizontal displacement\n prev_loc = np.zeros(2)\n d = 0\n d1 = 0\n while timer < C.DURATION + args.startup:\n joint_pos_data = np.array([next(generator) for _ in range(1)])\n\n pose = retarget_utils.retarget_motion_once(bullet_robot, joint_pos_data[0], style=animation.get_root_styles())\n\n # correct quaternion\n w = pose[6]\n pose[4:7] = pose[3:6]\n pose[3] = w\n\n cur_loc = pose[:2]\n d += np.linalg.norm(cur_loc - prev_loc)\n if timer > 1:\n d1 += np.linalg.norm(cur_loc - prev_loc)\n prev_loc = cur_loc\n\n motion_clip.append(np.concatenate([[timer], pose]))\n\n # time.sleep(1 / C.SYS_FREQ)\n timer += 1 / C.SYS_FREQ\n\n speed = d / (C.DURATION + 1)\n print(f\"Locomotion Speed: {speed:.2f} m/s\")\n\n speed1 = d1 / (C.DURATION)\n print(f\"Non-startup Locomotion Speed: {speed1:.2f} m/s\")\n\n int_part = int(speed)\n flt_part = round((speed - int_part) * 1000)\n int_part_1 = int(speed1)\n flt_part_1 = round((speed1 - int_part_1) * 1000)\n output_file = f\"{animation.profile.name}_t_{args.type}_sp_{int_part}_{flt_part:03d}.txt\"\n if args.startup:\n output_file = \"startup_\" + output_file[:-4] + f\"_sp1_{int_part_1}_{flt_part_1:03d}.txt\"\n\n\nexcept KeyboardInterrupt:\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n if not os.path.exists(os.path.join(output_path, output_file)):\n np.savetxt(os.path.join(output_path, 
output_file), motion_clip, fmt=\"%.5f\")\n\nfinally:\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n if not os.path.exists(os.path.join(output_path, output_file)):\n np.savetxt(os.path.join(output_path, output_file), motion_clip, fmt=\"%.5f\")\n p.disconnect()\n"
]
| [
[
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.zeros"
]
]
|
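A minimal sketch of the displacement bookkeeping the retargeting script above uses to estimate locomotion speed; the `positions` trace here is made up for illustration:

    import numpy as np

    # accumulate planar displacement between consecutive root positions,
    # mirroring the prev_loc / cur_loc loop in the script above
    positions = np.array([[0.0, 0.0], [0.1, 0.0], [0.2, 0.05], [0.3, 0.05]])
    prev_loc = np.zeros(2)
    d = 0.0
    for cur_loc in positions:
        d += np.linalg.norm(cur_loc - prev_loc)
        prev_loc = cur_loc
    print(f"accumulated displacement: {d:.3f} m")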
yangzhou95/opioid_on_reddit | [
"626676773b2cd826e28f6e19c5900a9439534f0c"
]
| [
"code/text_classifier/src/core/text_to_number.py"
]
| [
"from core.preprocessor import processor\nimport dill as dpickle\nimport numpy as np\n\n\nclass TextToNumber:\n\n @staticmethod\n def create_number_vector(raw_text_array, append_indicators=True, max_uniqie_words_size=8000, padding_maxlen=70, padding_position='post', output_file='nummerical_text'):\n body_pp = processor(append_indicators=append_indicators, keep_n=max_uniqie_words_size, padding_maxlen=padding_maxlen, padding=padding_position)\n vecs = body_pp.fit_transform(raw_text_array)\n\n if output_file is not None:\n # Save the preprocessor\n with open(output_file + '_pp.dpkl', 'wb') as f:\n dpickle.dump(body_pp, f)\n\n # Save the processed data\n np.save(output_file + '_vecs.npy', vecs)\n\n return vecs\n\n @staticmethod\n def load_text_processor(fname='title_pp.dpkl'):\n \"\"\"\n Load preprocessors from disk.\n Parameters\n ----------\n fname: str\n file name of ktext.proccessor object\n Returns\n -------\n num_tokens : int\n size of vocabulary loaded into ktext.processor\n pp : ktext.processor\n the processor you are trying to load\n Typical Usage:\n -------------\n num_decoder_tokens, title_pp = load_text_processor(fname='title_pp.dpkl')\n num_encoder_tokens, body_pp = load_text_processor(fname='body_pp.dpkl')\n \"\"\"\n # Load files from disk\n with open(fname, 'rb') as f:\n pp = dpickle.load(f)\n\n num_tokens = max(pp.id2token.keys()) + 1\n # print(f'Size of vocabulary for {fname}: {num_tokens:,}')\n return num_tokens, pp\n\n @staticmethod\n def load_decoder_inputs(decoder_np_vecs='train_title_vecs.npy'):\n \"\"\"\n Load decoder inputs.\n Parameters\n ----------\n decoder_np_vecs : str\n filename of serialized numpy.array of decoder input (issue title)\n Returns\n -------\n decoder_input_data : numpy.array\n The data fed to the decoder as input during training for teacher forcing.\n This is the same as `decoder_np_vecs` except the last position.\n decoder_target_data : numpy.array\n The data that the decoder data is trained to generate (issue title).\n Calculated by sliding `decoder_np_vecs` one position forward.\n \"\"\"\n vectorized_text = np.load(decoder_np_vecs)\n # For Decoder Input, you don't need the last word as that is only for prediction\n # when we are training using Teacher Forcing.\n decoder_input_data = vectorized_text[:, :-1]\n\n # Decoder Target Data Is Ahead By 1 Time Step From Decoder Input Data (Teacher Forcing)\n decoder_target_data = vectorized_text[:, 1:]\n\n # print(f'Shape of decoder input: {decoder_input_data.shape}')\n # print(f'Shape of decoder target: {decoder_target_data.shape}')\n return decoder_input_data, decoder_target_data\n\n @staticmethod\n def load_encoder_inputs(encoder_np_vecs='train_body_vecs.npy'):\n \"\"\"\n Load variables & data that are inputs to encoder.\n Parameters\n ----------\n encoder_np_vecs : str\n filename of serialized numpy.array of encoder input (issue title)\n Returns\n -------\n encoder_input_data : numpy.array\n The issue body\n doc_length : int\n The standard document length of the input for the encoder after padding\n the shape of this array will be (num_examples, doc_length)\n \"\"\"\n vectorized_body = np.load(encoder_np_vecs)\n # Encoder input is simply the body of the issue text\n encoder_input_data = vectorized_body\n doc_length = encoder_input_data.shape[1]\n # print(f'Shape of encoder input: {encoder_input_data.shape}')\n return encoder_input_data, doc_length\n"
]
| [
[
"numpy.load",
"numpy.save"
]
]
|
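A toy illustration of the teacher-forcing split performed by `load_decoder_inputs` above (the padded id sequences are made up): decoder inputs drop the last token, targets drop the first, so targets lead inputs by one step.

    import numpy as np

    vecs = np.array([[1, 5, 6, 2, 0],
                     [1, 7, 8, 9, 2]])   # toy padded id sequences
    decoder_input_data = vecs[:, :-1]    # everything but the last position
    decoder_target_data = vecs[:, 1:]    # shifted one step ahead
    print(decoder_input_data.shape, decoder_target_data.shape)  # (2, 4) (2, 4)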
luyaojie/Text2Event | [
"bfa818c58f1856a2bce194e58b53995a22be0f72"
]
| [
"seq2seq/constrained_seq2seq.py"
]
| [
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport logging\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom dataclasses import dataclass, field\nfrom typing import Union, List, Callable, Dict, Tuple, Any, Optional\nimport numpy as np\nfrom torch.cuda.amp import autocast\n\nfrom transformers import (\n PreTrainedTokenizer,\n EvalPrediction,\n Seq2SeqTrainer,\n Seq2SeqTrainingArguments,\n)\n\nfrom extraction.event_schema import EventSchema\nfrom extraction.extract_constraint import get_constraint_decoder\nfrom extraction.extraction_metrics import get_extract_metrics\nfrom seq2seq.label_smoother_sum import SumLabelSmoother\nfrom seq2seq.utils import lmap\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_logging_file(training_args):\n fh = logging.FileHandler(os.path.join(training_args.output_dir.rstrip(os.sep) + '.log'))\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n\n\ndef decode_tree_str(sequences: Union[List[int], List[List[int]], \"np.ndarray\", \"torch.Tensor\"],\n tokenizer: PreTrainedTokenizer) -> List[str]:\n def clean_tree_text(x):\n return x.replace('<pad>', '').replace('<s>', '').replace('</s>', '').strip()\n\n sequences = np.where(sequences != -100, sequences, tokenizer.pad_token_id)\n\n str_list = tokenizer.batch_decode(sequences, skip_special_tokens=False)\n return lmap(clean_tree_text, str_list)\n\n\ndef build_compute_extract_metrics_event_fn(decoding_type_schema: EventSchema,\n decoding_format: str,\n tokenizer: PreTrainedTokenizer) -> Callable[[EvalPrediction], Dict]:\n def non_pad_len(tokens: np.ndarray) -> int:\n return np.count_nonzero(tokens != tokenizer.pad_token_id)\n\n def decode_pred(pred: EvalPrediction) -> Tuple[List[str], List[str]]:\n return decode_tree_str(pred.predictions, tokenizer), decode_tree_str(pred.label_ids, tokenizer)\n\n def extraction_metrics(pred: EvalPrediction) -> Dict:\n pred_str, label_str = decode_pred(pred)\n extraction = get_extract_metrics(pred_lns=pred_str, tgt_lns=label_str, label_constraint=decoding_type_schema,\n decoding_format=decoding_format)\n # rouge: Dict = calculate_rouge(pred_str, label_str)\n summ_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1)\n extraction.update({\"gen_len\": summ_len})\n # extraction.update( )\n return extraction\n\n compute_metrics_fn = extraction_metrics\n return compute_metrics_fn\n\n\n@dataclass\nclass ConstraintSeq2SeqTrainingArguments(Seq2SeqTrainingArguments):\n \"\"\"\n Parameters:\n constraint_decoding (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to use Constraint Decoding\n structure_weight (:obj:`float`, `optional`, defaults to :obj:`None`):\n \"\"\"\n constraint_decoding: bool = field(default=False, metadata={\"help\": \"Whether to Constraint Decoding or not.\"})\n label_smoothing_sum: bool = field(default=False,\n metadata={\"help\": \"Whether to use sum token loss for label smoothing\"})\n\n\nclass ConstraintSeq2SeqTrainer(Seq2SeqTrainer):\n def __init__(self, decoding_type_schema=None, decoding_format='tree', source_prefix=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.decoding_format = decoding_format\n self.decoding_type_schema = decoding_type_schema\n\n # Label smoothing by sum token loss, different from different Label smootheing\n if self.args.label_smoothing_sum and self.args.label_smoothing_factor != 0:\n self.label_smoother = SumLabelSmoother(epsilon=self.args.label_smoothing_factor)\n print('Using %s' % self.label_smoother)\n elif self.args.label_smoothing_factor != 0:\n print('Using %s' % 
self.label_smoother)\n else:\n self.label_smoother = None\n\n if self.args.constraint_decoding:\n self.constraint_decoder = get_constraint_decoder(tokenizer=self.tokenizer,\n type_schema=self.decoding_type_schema,\n decoding_schema=self.decoding_format,\n source_prefix=source_prefix)\n else:\n self.constraint_decoder = None\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on :obj:`model` using obj:`inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to evaluate.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n prediction_loss_only (:obj:`bool`):\n Whether or not to return the loss only.\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n\n def prefix_allowed_tokens_fn(batch_id, sent):\n # print(self.tokenizer.convert_ids_to_tokens(inputs['labels'][batch_id]))\n src_sentence = inputs['input_ids'][batch_id]\n return self.constraint_decoder.constraint_decoding(src_sentence=src_sentence,\n tgt_generated=sent)\n\n if not self.args.predict_with_generate or prediction_loss_only:\n return super().prediction_step(\n model=model,\n inputs=inputs,\n prediction_loss_only=prediction_loss_only,\n ignore_keys=ignore_keys,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn if self.constraint_decoder else None,\n )\n\n has_labels = \"labels\" in inputs\n inputs = self._prepare_inputs(inputs)\n\n gen_kwargs = {\n \"max_length\": self._max_length if self._max_length is not None else self.model.config.max_length,\n \"num_beams\": self._num_beams if self._num_beams is not None else self.model.config.num_beams,\n \"prefix_allowed_tokens_fn\": prefix_allowed_tokens_fn if self.constraint_decoder else None,\n }\n\n generated_tokens = self.model.generate(\n inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n **gen_kwargs,\n )\n\n # in case the batch is shorter than max length, the output should be padded\n if generated_tokens.shape[-1] < gen_kwargs[\"max_length\"]:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs[\"max_length\"])\n\n with torch.no_grad():\n if self.use_amp:\n with autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n if has_labels:\n if self.label_smoother is not None:\n loss = self.label_smoother(outputs, inputs[\"labels\"]).mean().detach()\n else:\n loss = (outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]).mean().detach()\n else:\n loss = None\n\n if self.args.prediction_loss_only:\n return loss, None, None\n\n labels = inputs[\"labels\"]\n if labels.shape[-1] < gen_kwargs[\"max_length\"]:\n labels = self._pad_tensors_to_max_len(labels, gen_kwargs[\"max_length\"])\n\n return loss, generated_tokens, labels\n\n\ndef main(): pass\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.where",
"numpy.count_nonzero",
"torch.cuda.amp.autocast",
"torch.no_grad"
]
]
|
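A small sketch of the `-100` masking trick `decode_tree_str` above relies on before detokenizing; `pad_token_id = 0` is an assumption here, the real value comes from the tokenizer:

    import numpy as np

    labels = np.array([[12, 47, -100, -100],
                       [9, 31, 18, -100]])
    pad_token_id = 0  # assumed for illustration
    decodable = np.where(labels != -100, labels, pad_token_id)
    print(decodable)  # ignored positions replaced by the pad id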
qianyingw/rob | [
"4881c26a095e51036989ffd1bc76a97adf44e7c6"
]
| [
"bin/pomegranate_20190615/models_prev/model2_0a.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 12 15:15:01 2019\nref: github/GokuMohandas/practicalAI: notebooks/12_Embeddings.ipynb\n@author: qwang\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass PapersModel(nn.Module):\n def __init__(self, embedding_dim, num_embeddings, num_input_channels, \n num_channels, hidden_dim, num_classes, dropout_p, filter_sizes,\n pretrained_embeddings=None, freeze_embeddings=False,\n padding_idx=0):\n super(PapersModel, self).__init__()\n \n if pretrained_embeddings is None:\n self.embeddings = nn.Embedding(embedding_dim=embedding_dim,\n num_embeddings=num_embeddings,\n padding_idx=padding_idx)\n else:\n pretrained_embeddings = torch.from_numpy(pretrained_embeddings).float()\n self.embeddings = nn.Embedding(embedding_dim=embedding_dim,\n num_embeddings=num_embeddings,\n padding_idx=padding_idx,\n _weight=pretrained_embeddings)\n \n # Conv weights\n self.conv = nn.ModuleList([nn.Conv1d(num_input_channels, num_channels, \n kernel_size=f) for f in filter_sizes])\n \n # FC weights\n self.dropout = nn.Dropout(dropout_p)\n self.fc = nn.Linear(num_channels*len(filter_sizes), num_classes)\n \n if freeze_embeddings:\n self.embeddings.weight.requires_grad = False\n\n def forward(self, x_in, channel_first=False, apply_softmax=False):\n \n # Embed\n x_in = self.embeddings(x_in)\n\n # Rearrange input so num_channels is in dim 1 (N, C, L)\n if not channel_first:\n x_in = x_in.transpose(1, 2)\n \n # Conv outputs \n z = [conv(x_in) for conv in self.conv]\n z = [F.relu(zi) for zi in z]\n z = [F.max_pool1d(zi, zi.size(2)).squeeze(2) for zi in z]\n \n # Concat conv outputs\n z = torch.cat(z, 1)\n\n # FC layers\n z = self.dropout(z)\n# z = F.relu(z)\n z = F.softmax(z, dim=1) \n y_pred = self.fc(z) \n\n return y_pred"
]
| [
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Conv1d",
"torch.from_numpy",
"torch.nn.functional.softmax",
"torch.nn.functional.relu",
"torch.nn.Embedding"
]
]
|
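A shape walk-through of the conv, relu, max-pool-over-time, concat pipeline in `forward` above, with toy sizes rather than the model's real hyperparameters:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    x = torch.randn(2, 128, 70)  # (batch, embedding channels, sequence length)
    convs = nn.ModuleList([nn.Conv1d(128, 100, kernel_size=f) for f in (3, 4, 5)])
    z = []
    for conv in convs:
        zi = F.relu(conv(x))               # (2, 100, 70 - f + 1)
        zi = F.max_pool1d(zi, zi.size(2))  # (2, 100, 1): max over time
        z.append(zi.squeeze(2))
    print(torch.cat(z, 1).shape)           # torch.Size([2, 300])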
iratao/deep-learning | [
"4099c096623318669286cb555c2ffa7d8f3a8354"
]
| [
"practice/intro-tensorflow/linearfunc.py"
]
| [
"# Solution is available in the other \"sandbox_solution.py\" tab\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom quiz import get_weights, get_biases, linear\n\n\ndef mnist_features_labels(n_labels):\n \"\"\"\n Gets the first <n> labels from the MNIST dataset\n :param n_labels: Number of labels to use\n :return: Tuple of feature list and label list\n \"\"\"\n mnist_features = []\n mnist_labels = []\n\n mnist = input_data.read_data_sets('/datasets/ud730/mnist', one_hot=True)\n\n # In order to make quizzes run faster, we're only looking at 10000 images\n for mnist_feature, mnist_label in zip(*mnist.train.next_batch(10000)):\n\n # Add features and labels if it's for the first <n>th labels\n if mnist_label[:n_labels].any():\n mnist_features.append(mnist_feature)\n mnist_labels.append(mnist_label[:n_labels])\n\n return mnist_features, mnist_labels\n\n\n# Number of features (28*28 image is 784 features)\nn_features = 784\n# Number of labels\nn_labels = 3\n\n# Features and Labels\nfeatures = tf.placeholder(tf.float32)\nlabels = tf.placeholder(tf.float32)\n\n# Weights and Biases\nw = get_weights(n_features, n_labels)\nb = get_biases(n_labels)\n\n# Linear Function xW + b\nlogits = linear(features, w, b)\n\n# Training data\ntrain_features, train_labels = mnist_features_labels(n_labels)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as session:\n # TODO: Initialize session variables\n session.run(init)\n \n # Softmax\n prediction = tf.nn.softmax(logits)\n\n # Cross entropy\n # This quantifies how far off the predictions were.\n # You'll learn more about this in future lessons.\n cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)\n\n # Training loss\n # You'll learn more about this in future lessons.\n loss = tf.reduce_mean(cross_entropy)\n\n # Rate at which the weights are changed\n # You'll learn more about this in future lessons.\n learning_rate = 0.08\n\n # Gradient Descent\n # This is the method used to train the model\n # You'll learn more about this in future lessons.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n\n # Run optimizer and get loss\n _, l = session.run(\n [optimizer, loss],\n feed_dict={features: train_features, labels: train_labels})\n\n# Print loss\nprint('Loss: {}'.format(l))\n"
]
| [
[
"tensorflow.Session",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.log",
"tensorflow.placeholder",
"tensorflow.nn.softmax",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean"
]
]
|
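The loss above restated in plain NumPy on made-up logits, to show what the softmax plus `-tf.reduce_sum(labels * tf.log(prediction), 1)` pipeline computes:

    import numpy as np

    logits = np.array([[2.0, 0.5, -1.0],
                       [0.1, 0.2, 0.3]])   # toy scores for 3 classes
    labels = np.array([[1, 0, 0],
                       [0, 0, 1]])         # one-hot targets
    probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # softmax
    loss = np.mean(-np.sum(labels * np.log(probs), axis=1))
    print(f"cross-entropy loss: {loss:.4f}")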
VolkerH/imarispy | [
"d00415ffc8e341bf2e034e29c1feb32d915f2f18"
]
| [
"imarispy/util.py"
]
| [
"import numpy as np\n\n\ndef make_thumbnail(array, size=256):\n \"\"\" array should be 4D array \"\"\"\n # TODO: don't just crop to the upper left corner\n mip = np.squeeze(array).max(1)[:3, :size, :size].astype(np.float)\n for i in range(mip.shape[0]):\n mip[i] -= np.min(mip[i])\n mip[i] *= 255 / np.max(mip[i])\n mip = np.pad(mip, ((0, 3 - mip.shape[0]),\n (0, size - mip.shape[1]),\n (0, size - mip.shape[2])\n ), 'constant', constant_values=0)\n mip = np.pad(mip, ((0, 1), (0, 0), (0, 0)), 'constant',\n constant_values=255).astype('|u1')\n return np.squeeze(mip.T.reshape(1, size, size * 4)).astype('|u1')\n\n\ndef h5str(s, coding='ASCII', dtype='S1'):\n return np.frombuffer(str(s).encode(coding), dtype=dtype)\n\n\ndef subsample_data(data, subsamp):\n return data[0::int(subsamp[0]), 0::int(subsamp[1]), 0::int(subsamp[2])]\n"
]
| [
[
"numpy.max",
"numpy.pad",
"numpy.squeeze",
"numpy.min"
]
]
|
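A quick sketch of the per-axis `np.pad` call in `make_thumbnail` above, growing a toy (C, H, W) projection to a fixed 3-channel 256x256 canvas:

    import numpy as np

    mip = np.ones((2, 100, 120))  # toy maximum-intensity projection
    size = 256
    padded = np.pad(mip, ((0, 3 - mip.shape[0]),      # channels up to 3
                          (0, size - mip.shape[1]),   # rows up to 256
                          (0, size - mip.shape[2])),  # cols up to 256
                    'constant', constant_values=0)
    print(padded.shape)  # (3, 256, 256)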
amirdel/dispersion-continua | [
"2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60"
]
| [
"tests/test_dispersionSystemContinua.py"
]
| [
"# Copyright 2017 Amir Hossein Delgoshaie, [email protected]\n#\n# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee\n# is hereby granted, provided that the above copyright notice and this permission notice appear in all\n# copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE\n# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE\n# FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,\n# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nfrom unittest import TestCase\nimport numpy as np\nfrom py_dp.simulation.grid_structured import structuredGrid\nfrom py_dp.dispersion.dispersion_continua import dispersionSystemContinua\n\nclass TestDispersionSystemContinua(TestCase):\n def test_find_exit_conditions_1d(self):\n # case1: v1<v2 and both are positive\n grid = structuredGrid(4, 3, 1.0, 1.0, boundaryType='full-periodic')\n dx, x1, x2 = 1.0, 0.0, 1.0\n f1, f2 = 0,1\n ds1 = dispersionSystemContinua(grid, 1, 1, tracking_type='exit')\n x = 0.0\n v1 = 10.0\n v2 = 12.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertListEqual([exit, exit_idx, xe, v, ve] , [True, f2, x2, v1, v2])\n self.assertAlmostEqual(dt, 0.0911, places=3)\n # case2: v1>v2 and both are negative\n x = 1.0\n v1 = -1.0\n v2 = -2.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertListEqual([exit, exit_idx, xe, v, ve], [True, f1, x1, v2, v1])\n self.assertAlmostEqual(dt, 0.693, places=3)\n # case3: v1==v2 and both are negative\n x = 0.5\n v1 = -2.0\n v2 = v1\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertListEqual([exit, exit_idx, xe, v, ve], [True, f1, x1, v2, v2])\n self.assertEqual(dt, 0.25)\n # case4: v1==v2 and both are positive\n x = 0.5\n v1 = 2.0\n v2 = v1\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertListEqual([exit, exit_idx, xe, v, ve], [True, f2, x2, v2, v2])\n self.assertEqual(dt, 0.25)\n # case5: v1>0 v2<0 no way to get out\n x = 0.5\n v1 = 2.0\n v2 = -1.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertEqual(exit, False)\n # case5: v1<0 and v2>0, on the right side of the stagnation plane\n x = 0.7\n v1 = -1.0\n v2 = 1.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertListEqual([exit, exit_idx, xe, ve], [True, f2, x2, v2])\n self.assertAlmostEqual(dt, 0.458145, places=4)\n # case6: v1<0 and v2>0, on the left side of the stagnation plane\n x = 0.3\n v1 = -1.0\n v2 = 1.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertListEqual([exit, exit_idx, xe, ve], [True, f1, x1, v1])\n self.assertAlmostEqual(dt, 0.458145, places=4)\n # case7: v1<0 and v2>0, on the stagnation plane\n x = 0.5\n v1 = -1.0\n v2 = 1.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertEqual(exit, False)\n # case8: v1=0 and v2>0\n x = 0.5\n v1 = 0.0\n v2 = 1.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n 
self.assertListEqual([exit, exit_idx, xe, ve], [True, f2, x2, v2])\n # case9: v2=0 and v1<0\n x = 0.5\n v1 = -1.0\n v2 = 0.0\n exit, exit_idx, xe, v, ve, dt = ds1.find_exit_conditions_1d(dx, x, x1, f1, v1, x2, f2, v2)\n self.assertListEqual([exit, exit_idx, xe, ve], [True, f1, x1, v1])\n # print exit, exit_idx, xe, v, ve, dt\n # self.fail()\n\n\n\n def test_find_exit_conditions(self):\n grid = structuredGrid(4, 3, 1.0, 1.0, boundaryType='full-periodic')\n # set face velocities for grid\n grid.face_velocities = np.zeros(grid.nr_t)\n ngh_faces = [0, 2, 3, 1]\n ngh_cells = [8, 4, 1, 3]\n vl, vr = 4.0, 4.0\n vb, vt = 1.0, 1.0\n grid.face_velocities[ngh_faces] = [vl, vr, vb, vt]\n # starting position center of left face\n xs = grid.pores.x[0] - grid.dx/2\n ys = grid.pores.y[0]\n ds1 = dispersionSystemContinua(grid, 1, 1, tracking_type='exit')\n exit_cell, exit_face, xe, ye, te = ds1.find_exit_conditions(0, xs, ys, 0.0)\n self.assertListEqual([exit_cell, xe, ye, te], [4, 1.0, 3.75, 0.25])\n # TODO: more test cases for here\n\n def test_init_particles_left_boundary(self):\n grid = structuredGrid(4, 3, 1.0, 1.0, boundaryType='full-periodic')\n n_particles = 5\n n_steps = 3\n ds1 = dispersionSystemContinua(grid, n_particles, n_steps, tracking_type='exit')\n ds1.init_particles_left_boundary()\n expected_x = [0.5, 0.5, 0.5, 0.5, 1.5]\n expected_y = [3.5, 2.5, 1.5, 0.5, 3.5]\n # list(range(...)) so the comparison also passes under Python 3\n self.assertListEqual(list(ds1.cell_nr_array[:, 0]), list(range(n_particles)))\n self.assertListEqual(list(ds1.x_array[:, 0]), expected_x)\n self.assertListEqual(list(ds1.y_array[:, 0]), expected_y)\n\n def test_init_particles_left_buffered(self):\n grid = structuredGrid(4, 3, 1.0, 1.0, boundaryType='full-periodic')\n n_particles = 2\n n_steps = 3\n ds1 = dispersionSystemContinua(grid, n_particles, n_steps, tracking_type='exit')\n ds1.init_particles_left_buffered(1)\n self.assertListEqual(list(ds1.cell_nr_array[:, 0]), [1,2])\n expected_x = [0.5, 0.5]\n expected_y = [2.5, 1.5]\n self.assertListEqual(list(ds1.x_array[:, 0]), expected_x)\n self.assertListEqual(list(ds1.y_array[:, 0]), expected_y)\n\n\n\n # def test_integrate_path(self):\n # self.fail()\n #\n # def test_follow_all_particles(self):\n # self.fail()\n #\n # def test_follow_all_particles_exit(self):\n # self.fail()\n #\n # def test_follow_all_particles_dt(self):\n # self.fail()\n"
]
| [
[
"numpy.zeros"
]
]
|
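The asserted exit times above are consistent with semi-analytic (Pollock-style) particle tracking, which assumes velocity varies linearly between the two faces; that interpretation is an assumption here, but the first test case checks out:

    import numpy as np

    # exit time across a cell of width dx with v(x) linear from v1 to v2:
    # t = dx / (v2 - v1) * ln(v2 / v1)
    dx, v1, v2 = 1.0, 10.0, 12.0
    dt = dx / (v2 - v1) * np.log(v2 / v1)
    print(f"dt = {dt:.4f}")  # ~0.0912, matching the first assertAlmostEqual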
solohan22/deep-q-learning | [
"f267f6b716ce16994346415d91e0eec06ab0dbe2"
]
| [
"ddqn.py"
]
| [
"# -*- coding: utf-8 -*-\nimport random\nimport gym\nimport gym_pomdp\nimport numpy as np\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom keras import backend as K\n\nimport tensorflow as tf\n\nEPISODES = 5000\n\nclass DQNAgent:\n def __init__(self, state_size, action_size):\n self.state_size = state_size\n self.action_size = action_size\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95 # discount rate\n self.epsilon = 1.0 # exploration rate\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.99\n self.learning_rate = 0.001\n self.model = self._build_model()\n self.target_model = self._build_model()\n self.update_target_model()\n\n \"\"\"Huber loss for Q Learning\n\n References: https://en.wikipedia.org/wiki/Huber_loss\n https://www.tensorflow.org/api_docs/python/tf/losses/huber_loss\n \"\"\"\n\n def _huber_loss(self, y_true, y_pred, clip_delta=1.0):\n error = y_true - y_pred\n cond = K.abs(error) <= clip_delta\n\n squared_loss = 0.5 * K.square(error)\n quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)\n\n return K.mean(tf.where(cond, squared_loss, quadratic_loss))\n\n def _build_model(self):\n # Neural Net for Deep-Q learning Model\n model = Sequential()\n model.add(Dense(12, input_dim=1, activation='relu'))\n model.add(Dense(12, activation='relu'))\n model.add(Dense(12, activation='relu'))\n model.add(Dense(12, activation='relu'))\n model.add(Dense(self.action_size, activation='linear'))\n model.compile(loss=self._huber_loss,\n optimizer=Adam(lr=self.learning_rate))\n return model\n\n def update_target_model(self):\n # copy weights from model to target_model\n self.target_model.set_weights(self.model.get_weights())\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def act(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)\n act_values = self.model.predict(state)\n return np.argmax(act_values[0]) # returns action\n\n def replay(self, batch_size):\n minibatch = random.sample(self.memory, batch_size)\n for state, action, reward, next_state, done in minibatch:\n target = self.model.predict(state)\n if done:\n target[0][action] = reward\n else:\n # a = self.model.predict(next_state)[0]\n t = self.target_model.predict(next_state)[0]\n target[0][action] = reward + self.gamma * np.amax(t)\n # target[0][action] = reward + self.gamma * t[np.argmax(a)]\n self.model.fit(state, target, epochs=1, verbose=0)\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def load(self, name):\n self.model.load_weights(name)\n\n def save(self, name):\n self.model.save_weights(name)\n\n\nif __name__ == \"__main__\":\n env = gym.make('Tiger-v0')\n state_size = env.observation_space.n\n action_size = env.action_space.n\n agent = DQNAgent(state_size, action_size)\n # agent.load(\"./save/cartpole-ddqn.h5\")\n done = False\n batch_size = 32\n\n for e in range(EPISODES):\n state = env.reset()\n state = np.reshape(state, [1])\n sum_reward=0\n for time in range(500):\n # env.render()\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n reward = reward if not done else -100\n next_state = np.reshape(next_state, [1])\n agent.remember(state, action, reward, next_state, done)\n state = next_state\n sum_reward=sum_reward+reward\n if done:\n agent.update_target_model()\n print(\"episode: {}/{}, score: {}, e: {:.2}\"\n 
.format(e, EPISODES, sum_reward/(time+1), agent.epsilon))\n break\n if len(agent.memory) > batch_size:\n agent.replay(batch_size)\n # if e % 10 == 0:\n # agent.save(\"./save/cartpole-ddqn.h5\")\n"
]
| [
[
"numpy.random.rand",
"numpy.reshape",
"tensorflow.where",
"numpy.amax",
"numpy.argmax"
]
]
|
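The two target rules visible in `replay` above, the active max-target line and the commented-out Double-DQN line, on made-up Q-values:

    import numpy as np

    gamma, reward = 0.95, 1.0
    t = np.array([0.2, 0.7, 0.1])  # target-network Q(next_state)
    a = np.array([0.3, 0.5, 0.6])  # online-network Q(next_state)
    dqn_target = reward + gamma * np.amax(t)        # plain DQN target
    ddqn_target = reward + gamma * t[np.argmax(a)]  # Double-DQN target
    print(dqn_target, ddqn_target)  # ~1.665 vs ~1.095 on these toy values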
samuilstoychev/research_project | [
"897bde82471ef92ded396aa31d91ec19826d4ce2"
]
| [
"cnn_root_classifier.py"
]
| [
"import torch\nfrom torch.nn import functional as F\nimport torch.nn as nn\nfrom linear_nets import MLP,fc_layer\nfrom exemplars import ExemplarHandler\nfrom continual_learner import ContinualLearner\nfrom replayer import Replayer\nimport utils\n\n\nclass CNNRootClassifier(ContinualLearner, Replayer, ExemplarHandler):\n '''Model for classifying images, \"enriched\" as \"ContinualLearner\"-, Replayer- and ExemplarHandler-object.'''\n\n # TODO: Do I need the `classes` argument? \n def __init__(self, image_size, classes, latent_space, binaryCE=False, binaryCE_distill=False, AGEM=False, \n out_channels=5, kernel_size=5, dataset=\"mnist\"):\n # configurations\n super().__init__()\n self.classes = classes\n self.label = \"Classifier\"\n self.latent_space = latent_space\n self.image_size = image_size\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n if dataset == \"ckplus\" or dataset == \"affectnet\": \n self.flattened_size = int((image_size[0] - 2*(kernel_size-1)) * (image_size[1] - 2*(kernel_size-1)) * 0.25 * out_channels)\n else: \n self.flattened_size = int(((image_size - 2*(kernel_size-1)) ** 2) * (0.25 * out_channels))\n # settings for training\n self.binaryCE = binaryCE #-> use binary (instead of multiclass) prediction error\n self.binaryCE_distill = binaryCE_distill #-> for classes from previous tasks, use the by the previous model\n # predicted probs as binary targets (only in Class-IL with binaryCE)\n self.AGEM = AGEM #-> use gradient of replayed data as inequality constraint for (instead of adding it to)\n # the gradient of the current data (as in A-GEM, see Chaudry et al., 2019; ICLR)\n\n ######------SPECIFY MODEL------######\n self.conv1 = nn.Conv2d(1, self.out_channels, self.kernel_size)\n self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, self.kernel_size)\n self.dropout1 = nn.Dropout(0.25)\n self.fc0 = nn.Linear(self.flattened_size, latent_space)\n\n def list_init_layers(self):\n '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''\n list = []\n list += self.conv1\n list += self.conv2\n list += self.dropout1\n list += self.fc0\n return list\n\n @property\n def name(self):\n return \"{}_c{}\".format(\"CNN_ROOT_CLASSIFIER\", self.classes)\n\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc0(x)\n x = torch.sigmoid(x)\n return x\n\n def feature_extractor(self, x):\n return self.forward(x)\n\n\n def train_a_batch(self, x, y, scores=None, x_=None, y_=None, scores_=None, rnt=0.5, active_classes=None, task=1):\n '''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_/scores_]).\n\n [x] <tensor> batch of inputs (could be None, in which case only 'replayed' data is used)\n [y] <tensor> batch of corresponding labels\n [scores] None or <tensor> 2Dtensor:[batch]x[classes] predicted \"scores\"/\"logits\" for [x]\n NOTE: only to be used for \"BCE with distill\" (only when scenario==\"class\")\n [x_] None or (<list> of) <tensor> batch of replayed inputs\n [y_] None or (<list> of) <tensor> batch of corresponding \"replayed\" labels\n [scores_] None or (<list> of) <tensor> 2Dtensor:[batch]x[classes] predicted \"scores\"/\"logits\" for [x_]\n [rnt] <number> in [0,1], relative importance of new task\n [active_classes] None or (<list> of) <list> with \"active\" classes\n [task] <int>, for setting task-specific mask'''\n\n # Set model to 
training-mode\n self.train()\n\n # Reset optimizer\n self.optimizer.zero_grad()\n\n # Should gradient be computed separately for each task? (needed when a task-mask is combined with replay)\n gradient_per_task = True if ((self.mask_dict is not None) and (x_ is not None)) else False\n\n\n ##--(1)-- REPLAYED DATA --##\n\n if x_ is not None:\n # In the Task-IL scenario, [y_] or [scores_] is a list and [x_] needs to be evaluated on each of them\n # (in case of 'exact' or 'exemplar' replay, [x_] is also a list!\n TaskIL = (type(y_)==list) if (y_ is not None) else (type(scores_)==list)\n if not TaskIL:\n y_ = [y_]\n scores_ = [scores_]\n active_classes = [active_classes] if (active_classes is not None) else None\n n_replays = len(y_) if (y_ is not None) else len(scores_)\n\n # Prepare lists to store losses for each replay\n loss_replay = [None]*n_replays\n predL_r = [None]*n_replays\n distilL_r = [None]*n_replays\n\n # Run model (if [x_] is not a list with separate replay per task and there is no task-specific mask)\n if (not type(x_)==list) and (self.mask_dict is None):\n y_hat_all = self(x_)\n\n # Loop to evalute predictions on replay according to each previous task\n for replay_id in range(n_replays):\n\n # -if [x_] is a list with separate replay per task, evaluate model on this task's replay\n if (type(x_)==list) or (self.mask_dict is not None):\n x_temp_ = x_[replay_id] if type(x_)==list else x_\n if self.mask_dict is not None:\n self.apply_XdGmask(task=replay_id+1)\n y_hat_all = self(x_temp_)\n\n # -if needed (e.g., Task-IL or Class-IL scenario), remove predictions for classes not in replayed task\n y_hat = y_hat_all if (active_classes is None) else y_hat_all[:, active_classes[replay_id]]\n\n # Calculate losses\n if (y_ is not None) and (y_[replay_id] is not None):\n if self.binaryCE:\n binary_targets_ = utils.to_one_hot(y_[replay_id].cpu(), y_hat.size(1)).to(y_[replay_id].device)\n predL_r[replay_id] = F.binary_cross_entropy_with_logits(\n input=y_hat, target=binary_targets_, reduction='none'\n ).sum(dim=1).mean() #--> sum over classes, then average over batch\n else:\n predL_r[replay_id] = F.cross_entropy(y_hat, y_[replay_id], reduction='mean')\n if (scores_ is not None) and (scores_[replay_id] is not None):\n # n_classes_to_consider = scores.size(1) #--> with this version, no zeroes are added to [scores]!\n n_classes_to_consider = y_hat.size(1) #--> zeros will be added to [scores] to make it this size!\n kd_fn = utils.loss_fn_kd_binary if self.binaryCE else utils.loss_fn_kd\n distilL_r[replay_id] = kd_fn(scores=y_hat[:, :n_classes_to_consider],\n target_scores=scores_[replay_id], T=self.KD_temp)\n # Weigh losses\n if self.replay_targets==\"hard\":\n loss_replay[replay_id] = predL_r[replay_id]\n elif self.replay_targets==\"soft\":\n loss_replay[replay_id] = distilL_r[replay_id]\n\n # If needed, perform backward pass before next task-mask (gradients of all tasks will be accumulated)\n if gradient_per_task:\n weight = 1 if self.AGEM else (1 - rnt)\n weighted_replay_loss_this_task = weight * loss_replay[replay_id] / n_replays\n weighted_replay_loss_this_task.backward()\n\n # Calculate total replay loss\n loss_replay = None if (x_ is None) else sum(loss_replay) / n_replays\n\n # If using A-GEM, calculate and store averaged gradient of replayed data\n if self.AGEM and x_ is not None:\n # Perform backward pass to calculate gradient of replayed batch (if not yet done)\n if not gradient_per_task:\n loss_replay.backward()\n # Reorganize the gradient of the replayed batch as a single vector\n 
grad_rep = []\n for p in self.parameters():\n if p.requires_grad:\n grad_rep.append(p.grad.view(-1))\n grad_rep = torch.cat(grad_rep)\n # Reset gradients (with A-GEM, gradients of replayed batch should only be used as inequality constraint)\n self.optimizer.zero_grad()\n\n\n ##--(2)-- CURRENT DATA --##\n\n if x is not None:\n # If requested, apply correct task-specific mask\n if self.mask_dict is not None:\n self.apply_XdGmask(task=task)\n\n # Run model\n y_hat = self(x)\n # -if needed, remove predictions for classes not in current task\n if active_classes is not None:\n class_entries = active_classes[-1] if type(active_classes[0])==list else active_classes\n y_hat = y_hat[:, class_entries]\n\n # Calculate prediction loss\n if self.binaryCE:\n # -binary prediction loss\n binary_targets = utils.to_one_hot(y.cpu(), y_hat.size(1)).to(y.device)\n if self.binaryCE_distill and (scores is not None):\n classes_per_task = int(y_hat.size(1) / task)\n binary_targets = binary_targets[:, -(classes_per_task):]\n binary_targets = torch.cat([torch.sigmoid(scores / self.KD_temp), binary_targets], dim=1)\n predL = None if y is None else F.binary_cross_entropy_with_logits(\n input=y_hat, target=binary_targets, reduction='none'\n ).sum(dim=1).mean() #--> sum over classes, then average over batch\n else:\n # -multiclass prediction loss\n predL = None if y is None else F.cross_entropy(input=y_hat, target=y, reduction='mean')\n\n # Weigh losses\n loss_cur = predL\n\n # Calculate training-precision\n precision = None if y is None else (y == y_hat.max(1)[1]).sum().item() / x.size(0)\n\n # If backward passes are performed per task (e.g., XdG combined with replay), perform backward pass\n if gradient_per_task:\n weighted_current_loss = rnt*loss_cur\n weighted_current_loss.backward()\n else:\n precision = predL = None\n # -> it's possible there is only \"replay\" [e.g., for offline with task-incremental learning]\n\n\n # Combine loss from current and replayed batch\n if x_ is None or self.AGEM:\n loss_total = loss_cur\n else:\n loss_total = loss_replay if (x is None) else rnt*loss_cur+(1-rnt)*loss_replay\n\n\n ##--(3)-- ALLOCATION LOSSES --##\n\n # Add SI-loss (Zenke et al., 2017)\n surrogate_loss = self.surrogate_loss()\n if self.si_c>0:\n loss_total += self.si_c * surrogate_loss\n\n # Add EWC-loss\n ewc_loss = self.ewc_loss()\n if self.ewc_lambda>0:\n loss_total += self.ewc_lambda * ewc_loss\n\n\n # Backpropagate errors (if not yet done)\n if not gradient_per_task:\n loss_total.backward()\n\n # If using A-GEM, potentially change gradient:\n if self.AGEM and x_ is not None:\n # -reorganize gradient (of current batch) as single vector\n grad_cur = []\n for p in self.parameters():\n if p.requires_grad:\n grad_cur.append(p.grad.view(-1))\n grad_cur = torch.cat(grad_cur)\n # -check inequality constrain\n angle = (grad_cur*grad_rep).sum()\n if angle < 0:\n # -if violated, project the gradient of the current batch onto the gradient of the replayed batch ...\n length_rep = (grad_rep*grad_rep).sum()\n grad_proj = grad_cur-(angle/length_rep)*grad_rep\n # -...and replace all the gradients within the model with this projected gradient\n index = 0\n for p in self.parameters():\n if p.requires_grad:\n n_param = p.numel() # number of parameters in [p]\n p.grad.copy_(grad_proj[index:index+n_param].view_as(p))\n index += n_param\n\n # Take optimization-step\n self.optimizer.step()\n\n # Return the dictionary with different training-loss split in categories\n return {\n 'loss_total': loss_total.item(),\n 'loss_current': 
loss_cur.item() if x is not None else 0,\n 'loss_replay': loss_replay.item() if (loss_replay is not None) and (x is not None) else 0,\n 'pred': predL.item() if predL is not None else 0,\n 'pred_r': sum(predL_r).item()/n_replays if (x_ is not None and predL_r[0] is not None) else 0,\n 'distil_r': sum(distilL_r).item()/n_replays if (x_ is not None and distilL_r[0] is not None) else 0,\n 'ewc': ewc_loss.item(), 'si_loss': surrogate_loss.item(),\n 'precision': precision if precision is not None else 0.,\n }\n\n"
]
| [
[
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.flatten",
"torch.nn.Conv2d",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d"
]
]
|
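The A-GEM constraint check at the end of `train_a_batch` above, reduced to its vector form on toy gradients:

    import torch

    grad_cur = torch.tensor([1.0, -2.0, 0.5])  # gradient of the current batch
    grad_rep = torch.tensor([-1.0, 1.0, 0.0])  # gradient of the replayed batch
    angle = (grad_cur * grad_rep).sum()
    if angle < 0:  # current update would increase replay loss: project it
        length_rep = (grad_rep * grad_rep).sum()
        grad_cur = grad_cur - (angle / length_rep) * grad_rep
    print(grad_cur)  # now orthogonal to grad_rep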
ajaykrishna1878/Robotics-Automation-QSTP-2021 | [
"f5b8626db20a60f9dd923bab5a0bec118d0abc67"
]
| [
"Week1/Task1.py"
]
| [
"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\nclass Unicycle:\n def __init__(self, x: float, y: float, theta: float, dt: float):\n self.x = x\n self.y = y\n self.theta = theta\n self.dt = dt\n\n self.x_points = [self.x]\n self.y_points = [self.y]\n\n def step(self, v: float, w: float, n: int):\n for i in range(0, n):\n self.x += v * math.cos(self.theta) * self.dt\n # taking the x component of velocity and multiplying by change in time to get the position in x direction\n \n self.y += v * math.sin(self.theta) * self.dt\n # taking the y component of velocity and multiplying by change in time to get the position in y direction\n \n self.theta += w * self.dt\n # multiplying angular velocity and change in time to get change in angular position\n \n self.x_points.append(self.x)\n # add the new x position to the x_points list\n \n self.y_points.append(self.y)\n # add the new x position to the x_points list\n \n return self.x, self.y, self.theta\n\n def plot(self, v: float, w: float):\n plt.title(f\"Unicycle Model: {v}, {w}\")\n plt.xlabel(\"X-Coordinates\")\n plt.ylabel(\"Y-Coordinates\")\n plt.plot(self.x_points, self.y_points, color = 'r', alpha = 0.75)\n for i in range(len(self.x_points)):\n plt.scatter(self.x_points[i], self.y_points[i], color = 'r')\n plt.show()\n # plt.savefig(f\"Unicycle_{v}_{w}.png\")\n\nif __name__ == '__main__':\n print(\"Unicycle Model Assignment\")\n case1 = Unicycle(0, 0, 0, 0.1)\n x1, y1, theta1 = case1.step(1, 0.5, 25)\n case1.plot(1, 0.5)\n\n case2 = Unicycle(0, 0, 1.57, 0.2)\n x2, y2, theta2 = case2.step(0.5, 1, 10)\n case2.plot(0.5, 1)\n\n case3 = Unicycle(0, 0, 0.77, 0.05)\n x3, y3, theta3 = case3.step(5, 4, 50)\n case3.plot(5, 4)\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|
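One Euler step of the kinematics integrated in `step` above, written out for the first case (v=1, w=0.5, dt=0.1):

    import math

    x, y, theta, dt = 0.0, 0.0, 0.0, 0.1
    v, w = 1.0, 0.5
    x += v * math.cos(theta) * dt  # advance along the current heading
    y += v * math.sin(theta) * dt
    theta += w * dt                # rotate by the angular velocity
    print(x, y, theta)             # 0.1 0.0 0.05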
LibertFan/TAGS | [
"3c059a1ed6768ef76c1f0f0a587b02eed18bb58f"
]
| [
"inf_nsgd.py"
]
| [
"\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\nrun inference for Image Text Retrieval\n\"\"\"\nimport argparse\nimport json\nimport os\nfrom os.path import exists\nimport pickle\nfrom time import time\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom apex import amp\nfrom horovod import torch as hvd\n\nfrom data import (PrefetchLoader,\n DetectFeatLmdb, TxtTokLmdb, ItmEvalDataset, itm_eval_collate)\nfrom model.nsgd import UniterForNSGD\n\nfrom utils.logger import LOGGER\nfrom utils.distributed import all_gather_list\nfrom utils.misc import Struct\nfrom utils.const import IMG_DIM\nfrom utils.itm_eval import inference, itm_eval\n\n\ndef main(opts):\n hvd.init()\n n_gpu = hvd.size()\n device = torch.device(\"cuda\", hvd.local_rank())\n torch.cuda.set_device(hvd.local_rank())\n rank = hvd.rank()\n LOGGER.info(\"device: {} n_gpu: {}, rank: {}, \"\n \"16-bits training: {}\".format(\n device, n_gpu, hvd.rank(), opts.fp16))\n\n if opts.train_config is not None:\n train_opts = Struct(json.load(open(opts.train_config)))\n opts.conf_th = train_opts.conf_th\n opts.max_bb = train_opts.max_bb\n opts.min_bb = train_opts.min_bb\n opts.num_bb = train_opts.num_bb\n\n # load DBs and image dirs\n eval_img_db = DetectFeatLmdb(opts.img_db,\n opts.conf_th, opts.max_bb,\n opts.min_bb, opts.num_bb,\n opts.compressed_db)\n eval_txt_db = TxtTokLmdb(opts.txt_db, -1)\n eval_dataset = ItmEvalDataset(eval_txt_db, eval_img_db, opts.batch_size)\n\n # Prepare model\n checkpoint = torch.load(opts.checkpoint)\n model = UniterForNSGD.from_pretrained(\n opts.model_config, checkpoint, img_dim=IMG_DIM)\n if 'rank_output' not in checkpoint:\n model.init_output() # zero shot setting\n\n model.to(device)\n model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')\n\n print('| eval_dataset id2len: ', len(eval_dataset.id2len))\n print('| eval_dataset_id2len example key: ', list(eval_dataset.id2len.keys())[:5],\n max([int(k) for k in list(eval_dataset.id2len.keys())]))\n print('| eval_dataset_id2len example value: ', list(eval_dataset.id2len.values())[:5])\n # for k in range(10):\n # print('| example:', k, eval_dataset.i2len[k])\n print('| i2len: ', len(eval_dataset.id2len), min(list(eval_dataset.id2len.keys())), max(eval_dataset.id2len.keys()),\n min(list(eval_dataset.id2len.values())), max(eval_dataset.id2len.values()))\n\n print('| mean of all_txt_lens:', sum(list(eval_dataset.id2len.values())) / float(len(list(eval_dataset.id2len.values()))))\n\n\n eval_dataloader = DataLoader(eval_dataset, batch_size=1,\n num_workers=opts.n_workers,\n pin_memory=opts.pin_mem,\n collate_fn=itm_eval_collate)\n eval_dataloader = PrefetchLoader(eval_dataloader)\n\n eval_log, results = evaluate(model, eval_dataloader)\n if hvd.rank() == 0:\n if not exists(opts.output_dir) and rank == 0:\n os.makedirs(opts.output_dir)\n with open(f'{opts.output_dir}/config.json', 'w') as f:\n json.dump(vars(opts), f)\n with open(f'{opts.output_dir}/results.bin', 'wb') as f:\n pickle.dump(results, f)\n with open(f'{opts.output_dir}/scores.json', 'w') as f:\n json.dump(eval_log, f)\n LOGGER.info(f'evaluation finished')\n LOGGER.info(\n f\"======================== Results =========================\\n\"\n f\"image retrieval R1: {eval_log['img_r1']*100:.2f},\\n\"\n f\"image retrieval R5: {eval_log['img_r5']*100:.2f},\\n\"\n f\"image retrieval R10: {eval_log['img_r10']*100:.2f}\\n\"\n f\"text retrieval R1: {eval_log['txt_r1']*100:.2f},\\n\"\n f\"text retrieval R5: {eval_log['txt_r5']*100:.2f},\\n\"\n f\"text retrieval 
R10: {eval_log['txt_r10']*100:.2f}\")\n LOGGER.info(\"========================================================\")\n\n\[email protected]_grad()\ndef evaluate(model, eval_loader):\n model.eval()\n st = time()\n LOGGER.info(\"start running Image/Text Retrieval evaluation ...\")\n score_matrix = inference(model, eval_loader)\n dset = eval_loader.dataset\n all_score = hvd.allgather(score_matrix)\n all_txt_ids = [i for ids in all_gather_list(dset.ids)\n for i in ids]\n # all_txt_lens = [l for lens in all_gather_list(dset.txt_lens) for l in lens]\n print('| mean of all_txt_lens:', sum(list(dset.id2len.values())) / float(len(list(dset.id2len.values()))))\n all_img_ids = dset.all_img_ids\n assert all_score.size() == (len(all_txt_ids), len(all_img_ids))\n if hvd.rank() != 0:\n return {}, tuple()\n # NOTE: only use rank0 to compute final scores\n eval_log = itm_eval(all_score, all_txt_ids, all_img_ids,\n dset.txt2img, dset.img2txts, dset.id2len)\n\n results = (all_score, all_txt_ids, all_img_ids)\n tot_time = time()-st\n LOGGER.info(f\"evaluation finished in {int(tot_time)} seconds, \")\n return eval_log, results\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\"--txt_db\", default=None, type=str,\n help=\"The input train corpus. (LMDB)\")\n parser.add_argument(\"--img_db\", default=None, type=str,\n help=\"The input train images.\")\n parser.add_argument(\"--checkpoint\", default=None, type=str,\n help=\"model checkpoint binary\")\n parser.add_argument(\"--model_config\", default=None, type=str,\n help=\"model config json\")\n parser.add_argument(\n \"--output_dir\", default=None, type=str,\n help=\"The output directory where the inference results will be \"\n \"written.\")\n\n # optional parameters\n parser.add_argument(\"--train_config\", default=None, type=str,\n help=\"hps.json from training (for prepro hps)\")\n parser.add_argument('--compressed_db', action='store_true',\n help='use compressed LMDB')\n parser.add_argument('--conf_th', type=float, default=0.2,\n help='threshold for dynamic bounding boxes '\n '(-1 for fixed)')\n parser.add_argument('--max_bb', type=int, default=100,\n help='max number of bounding boxes')\n parser.add_argument('--min_bb', type=int, default=10,\n help='min number of bounding boxes')\n parser.add_argument('--num_bb', type=int, default=36,\n help='static number of bounding boxes')\n parser.add_argument(\"--batch_size\", default=400, type=int,\n help=\"number of tokens in a batch\")\n\n # device parameters\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit float precision instead \"\n \"of 32-bit\")\n parser.add_argument('--n_workers', type=int, default=4,\n help=\"number of data workers\")\n parser.add_argument('--pin_mem', action='store_true',\n help=\"pin memory\")\n\n args = parser.parse_args()\n\n main(args)\n"
]
| [
[
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.load"
]
]
|
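A toy recall@1 computation on a (num_texts, num_images) score matrix, the shape asserted in `evaluate` above; this is a sketch of the idea, not the repo's `itm_eval`, which also reports R@5/R@10 for both retrieval directions:

    import torch

    scores = torch.tensor([[0.9, 0.1],
                           [0.2, 0.8]])  # text-to-image similarity scores
    txt2img = torch.tensor([0, 1])       # gold image index for each text
    r1 = (scores.argmax(dim=1) == txt2img).float().mean().item()
    print(f"text->image R@1: {r1:.2f}")  # 1.00 on this toy matrix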
pminervini/clarify-biore | [
"d2cfa40ccb89d0f685bdbe02446ccbe90d8c1bbc"
]
| [
"utils/train_utils.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport pickle\nimport random\n\nimport numpy as np\nimport torch\n\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_recall_fscore_support, accuracy_score\nfrom torch.utils.data import DataLoader, SequentialSampler, TensorDataset\nfrom tqdm import tqdm\n\nimport config\nfrom utils.utils import TriplesReader as read_triples\nfrom utils.utils import read_relations, read_entities\n\nfrom typing import Dict, Tuple, List, Set, Iterable, Any\n\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=0):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / (.0001 + self.count)\n\n def __str__(self):\n \"\"\"\n String representation for logging\n \"\"\"\n # for values that should be recorded exactly e.g. iteration number\n if self.count == 0:\n return str(self.val)\n # for stats\n return '%.4f (%.4f)' % (self.val, self.avg)\n\n\ndef set_seed():\n seed = config.SEED\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\n# For each element in the batch, given the:\n# - label logits,\n# - gold labels, and\n# - source and target entity indices of the triple,\n# compute\ndef compute_metrics(logits, labels, groups, set_type, logger, ent_types=False) -> Dict[str, Any]:\n # - eval['logits'] is [B * N, C]\n # - eval['labels'] is [B * N]\n # - eval['names'] is [B * N, G, 2] -- note that B=1 in the code\n # - eval['groups'] is [B * N, 2] -- note that B=1 in the code\n # groups was eval['names'] originally, called from train-cli.py\n\n # Read relation mappings and triples\n if ent_types:\n # Get the entity-to-id and relation-to-id mappings from entities_types.txt and relations_types.txt\n rel2idx = read_relations(config.relations_file_types)\n entity2idx = read_entities(config.entities_file_types)\n\n if set_type == \"dev\":\n # triples_types_dev.tsv\n triples_file = config.triples_types_file_dev\n else:\n # triples_types_test.tsv\n triples_file = config.triples_file_test\n # entities.txt\n entity2idx = read_entities(config.entities_file)\n else:\n # Get the entity-to-id and relation-to-id mappings from entities.txt and relations.txt\n rel2idx = read_relations(config.relations_file)\n entity2idx = read_entities(config.entities_file)\n\n if set_type == \"dev\":\n # triples_dev.tsv\n triples_file = config.triples_file_dev\n else:\n # triples_test.tsv\n triples_file = config.triples_file_test\n\n # Read triples, where we have indices instead of entity names, and does not include 'na' triples\n triples: Set[Tuple[int, str, int]] = set()\n\n print('Loaded ', triples_file)\n\n # For each triple ..\n for src, rel, tgt in read_triples(triples_file):\n if rel != \"na\":\n # .. make sure that the relation type is not NA, and add it to 'triples'\n triples.add((entity2idx[src], rel, entity2idx[tgt]))\n\n # RE predictions\n # [B * N, C]\n probas = torch.nn.Softmax(-1)(logits).squeeze()\n re_preds = list()\n\n # For each of the B * N instances ..\n for i in range(probas.size(0)):\n # group has shape [2]\n group = groups[i]\n\n # Let's get the two items from the group, ie. 
the source and the target entities ..\n src, tgt = group[0].item(), group[1].item()\n\n # For each possible relation types ..\n for rel, rel_idx in rel2idx.items():\n if rel != \"na\":\n\n # For instance i, take the logit of that relation type ..\n score = probas[i][rel_idx].item()\n\n # And add it to the possible predictions, in re_preds.\n # WE NEED ALL POSSIBLE PREDICTIONS BECAUSE THI IS A RANKING TASK NOW.\n re_preds.append({\n \"src\": src,\n \"tgt\": tgt,\n \"relation\": rel,\n \"score\": score\n })\n\n # Adopted from:\n # https://github.com/thunlp/OpenNRE/blob/master/opennre/framework/data_loader.py#L230\n\n # Sort the predictions based on their scores\n sorted_re_preds = sorted(re_preds, key=lambda x: x[\"score\"], reverse=True)\n\n # Remove duplicate triples from sorted_re_preds\n sorted_re_preds = non_dup_ordered_seq(sorted_re_preds)\n\n P = list()\n R = list()\n\n correct = 0\n total = len(triples)\n\n # For each prediction dictionary, where duplicate (s, p, o) triples were removed ..\n for i, item in enumerate(sorted_re_preds):\n\n # Get the subject, predicate, and object of the triple, where for each (s, o) pair we have all possible\n # values for p ..\n relation = item[\"relation\"]\n src, tgt = item[\"src\"], item[\"tgt\"]\n\n # If the (s, p, o) triple appears in triples ..\n if (src, relation, tgt) in triples:\n # Increment the 'correct' counter\n correct += 1\n\n # P = list of [nb_correct_predictions / nb_predictions so far]\n # R = list of [nb_correct_predictions / nb_triples]\n P.append(float(correct) / float(i + 1))\n R.append(float(correct) / float(total))\n\n # Compute AUC, and F1\n auc = metrics.auc(x=R, y=P)\n P = np.array(P)\n R = np.array(R)\n\n f1 = (2 * P * R / (P + R + 1e-20)).max()\n\n # Added metrics\n added_metrics = {}\n for n in range(100, 1000, 100): # 100, 200, etc recall\n added_metrics['P@{}'.format(n)] = sum(P[:n]) / n\n\n for n in range(2000, total, 2000):\n added_metrics['P@{}'.format(n)] = sum(P[:n]) / n\n\n added_metrics['P@{}'.format(total)] = sum(P[:total]) / total\n\n # Accuracy\n na_idx = rel2idx[\"na\"]\n\n # Get the prediction with the highest probability; (I think the torch.nn.Softmax here can be omitted)\n preds = torch.argmax(torch.nn.Softmax(-1)(logits), -1)\n\n # Compute the accuracy -- discrepancy between predicted and gold labels\n acc = float((preds == labels).long().sum()) / labels.size(0)\n\n # Compute the total number of non-NA gold relations ..\n pos_total = (labels != na_idx).long().sum()\n\n # For all non-NA gold labels, number of times that the predict label and the gold label match\n pos_correct = ((preds == labels).long() * (labels != na_idx).long()).sum()\n\n if pos_total > 0:\n # Accuracy for non-NA relations\n pos_acc = float(pos_correct) / float(pos_total)\n else:\n pos_acc = 0\n\n logger.info(\" accuracy = %s\", str(acc))\n logger.info(\" pos_accuracy = %s\", str(pos_acc))\n\n # Return a dict with all the results\n results = {\n \"P\": list(P[:5]),\n \"R\": list(R[:5]),\n \"F1\": f1,\n \"AUC\": auc,\n \"accuracy: \": str(acc),\n \"pos_accuracy: \": str(pos_acc)\n }\n\n results.update(added_metrics)\n\n return results\n\n\ndef save_eval_results(results,\n eval_dir,\n set_type,\n logger,\n prefix=\"\"):\n os.makedirs(eval_dir, exist_ok=True)\n output_eval_file = os.path.join(eval_dir, \"eval_results.txt\")\n\n with open(output_eval_file, \"w\") as wf:\n logger.info(\"***** {} results {} *****\".format(set_type, prefix))\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n 
wf.write(\"%s = %s\\n\" % (key, str(results[key])))\n\n\ndef load_dataset(set_type: str,\n logger,\n ent_types: bool = False) -> TensorDataset:\n if set_type == \"train\":\n if ent_types:\n features_file = config.feats_file_types_train\n else:\n features_file = config.feats_file_train\n\n elif set_type == \"dev\":\n if ent_types:\n features_file = config.feats_file_types_dev\n else:\n features_file = config.feats_file_dev\n\n else:\n if ent_types:\n features_file = config.feats_file_types_test\n else:\n features_file = config.feats_file_test\n\n logger.info(\"Loading features from cached file %s\", features_file)\n features = torch.load(features_file)\n\n all_input_ids = torch.cat([f[\"input_ids\"].unsqueeze(0) for f in features]).long()\n all_entity_ids = torch.cat([f[\"entity_ids\"].unsqueeze(0) for f in features]).long()\n all_attention_mask = torch.cat([f[\"attention_mask\"].unsqueeze(0) for f in features]).long()\n all_groups = torch.cat([torch.tensor(f[\"group\"]).unsqueeze(0) for f in features]).long()\n all_labels = torch.tensor([f[\"label\"] for f in features]).long()\n if ent_types: # include ent names within ent types\n all_names = [f[\"ent_names\"] for f in features]\n all_names = convert_names_to_cuis(all_names)\n dataset = TensorDataset(all_input_ids, all_entity_ids, all_attention_mask, all_groups, all_labels, all_names)\n else:\n dataset = TensorDataset(all_input_ids, all_entity_ids, all_attention_mask, all_groups, all_labels)\n return dataset\n\n\ndef convert_names_to_cuis(l_names):\n entity2idx = read_entities(config.entities_file)\n lc = []\n for l_bag in l_names:\n lb = []\n for l_group in l_bag:\n lb.append((entity2idx[l_group[0]], entity2idx[l_group[1]]))\n lc.append(lb)\n lc = torch.IntTensor(lc)\n return lc\n\n\n#\n# cf. https://stackoverflow.com/a/480227\n#\n# \"seq\" is a sequence of Dict where the key are details of a given prediction, including the \"score\", and the value\n# is the source or target entity, or the considered relation type \"relation\"\n#\ndef non_dup_ordered_seq(seq: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]:\n seen = set()\n seen_add = seen.add\n\n # Create a list of predictions such that the triple in the prediction (subject and object entities, and possible\n # relation type) are unique\n non_dup_seq: List[Dict[str, Any]] = list()\n\n # For each of the predictions ..\n for item in seq:\n # Extract the subject, predicate, and object of the triple ..\n # Here 'relation' appears with all its possible values in 'seq'\n relation = item[\"relation\"]\n src, tgt = item[\"src\"], item[\"tgt\"]\n\n # Build the triple ..\n triple = (src, relation, tgt)\n\n # If the triple appears for the first time, add the prediction dictionary to non_dup_seq\n if not (triple in seen or seen_add(triple)):\n non_dup_seq.append(item)\n\n return non_dup_seq\n\n\ndef evaluate(model, logger, set_type: str = \"dev\", prefix: str = \"\", ent_types: bool = False):\n eval_output_dir = config.output_dir\n\n # Load the dataset ...\n eval_dataset: TensorDataset = load_dataset(set_type, logger, ent_types=ent_types)\n\n if not os.path.exists(eval_output_dir):\n os.makedirs(eval_output_dir)\n\n # Load the data loader ...\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=config.eval_batch_size)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", config.eval_batch_size)\n eval_loss = 
0.0\n nb_eval_steps = 0\n eval_logits, eval_labels, eval_preds, eval_groups, eval_dirs, eval_names = [], [], [], [], [], []\n\n # For each batch in the dataset ...\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n\n # Move the batches to GPU ..\n batch = tuple(t.to(config.device) for t in batch)\n\n with torch.inference_mode():\n # Create the inputs dictionary with input_ids, entity_ids, etc.\n inputs = {\n \"input_ids\": batch[0], # [1, 16, 128], so I think it's [B, G, MAX_SEQ_LEN]\n \"entity_ids\": batch[1], # [1, 16, 128], so I think it's [B, G, MAX_SEQ_LEN]\n \"attention_mask\": batch[2], # [1, 16, 128], so I think it's [B, G, MAX_SEQ_LEN]\n \"labels\": batch[4], # [1], so I think it's [B]\n \"is_train\": False\n }\n\n # Do inference with the model ..\n outputs = model(**inputs)\n\n # outputs is [ [], [1, 394] ], so I think it's [ LOSS, LOGITS ] where:\n # - LOSS is a scalar\n # - LOGITS is [B, NUM_LABELS]\n\n # Get the loss (scalar) and the [B, NUM_LABELS] logits ..\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n # And add the results to lists:\n eval_labels.append(inputs[\"labels\"].detach().cpu()) # Gold labels, [B]\n eval_logits.append(logits.detach().cpu()) # Predicted logits, [B, NUM_LABELS]\n eval_groups.append(batch[3].detach().cpu()) # groups, [B, 2]\n\n eval_preds.append(torch.argmax(logits.detach().cpu(), dim=1).item()) # Predicted labels, B=1 so here it's an int\n # now eval_preds looks like [14, 14, 14, 14, ..]\n\n if ent_types:\n # each entry in eval_names has shape [1, 16, 2], so I guess it's [B, G, 2],\n # and describes source and target entities\n eval_names.append(batch[5].detach().cpu())\n\n del model, batch, logits, tmp_eval_loss, eval_dataloader, eval_dataset # memory mgmt\n\n eval = {\n 'loss': eval_loss / nb_eval_steps, # Scalar, average loss\n 'labels': torch.cat(eval_labels), # B gold labels -> [B * N]\n 'logits': torch.cat(eval_logits), # B x C, predicted logits -> [B * N, C]\n 'preds': np.asarray(eval_preds), # B predicted labels -> [N] Numpy array, since B=1\n 'groups': torch.cat(eval_groups) # B x 2, [B * N, 2] -- I think these are source and target entities of the triples\n }\n # -> now eval['groups'] will look like [B * N, 2]\n\n # Add ent names to evaluation for ent types experiment\n if ent_types:\n eval['names'] = torch.cat(eval_names) # This will be [B * N, G, 2], so for B=1 will be [N, G, 2]\n\n # Get all positive relationship labels\n if ent_types:\n rel2idx = read_relations(config.relations_file_types)\n else:\n rel2idx = read_relations(config.relations_file)\n\n # All relation indices\n pos_rel_idxs = list(rel2idx.values())\n\n # Relation index of 'NA'\n rel_idx_na = rel2idx['na']\n\n # Remove 'NA' from pos_rel_idxs, the list of relation indices\n del pos_rel_idxs[rel_idx_na]\n\n # Given the gold and the predicted labels, compute the accuracy of the model\n a = accuracy_score(eval['labels'].numpy(), eval['preds'])\n\n # Given the gold labels, the predicted labels, and some more info, compute Precision, Recall, and F1\n p, r, f1, support = precision_recall_fscore_support(eval['labels'].numpy(), eval['preds'], average='micro', labels=pos_rel_idxs)\n\n logger.info('Accuracy (including \"NA\"): {}\\nP: {}, R: {}, F1: {}'.format(a, p, r, f1))\n\n results: Dict[str, Dict[str, Any]] = {}\n\n results['new_results'] = {\n 'acc_with_na': a,\n 'scikit_precision': p,\n 'scikit_recall': r,\n 'scikit_f1': f1,\n \"loss\": eval_loss,\n \"counter\": eval['labels'].shape\n }\n\n 
# Compute the evaluation metrics:\n    # - eval['logits'] is [B * N, C]\n    # - eval['labels'] is [B * N]\n    # - eval['names'] is [B * N, G, 2]\n    # - eval['groups'] is [B * N, 2]\n\n    # I am pretty sure the third argument here is not eval['names'] but eval['groups']\n    # results['original'] = compute_metrics(eval['logits'], eval['labels'], eval['names'], set_type, logger, ent_types=ent_types)\n    results['original'] = compute_metrics(eval['logits'], eval['labels'], eval['groups'], set_type, logger,\n                                          ent_types=ent_types)\n\n    # XXX isn't this the same as results['original'] ?\n    # results['top_preds_only'] = compute_metrics(eval['logits'], eval['labels'], eval['names'], set_type, logger, ent_types=ent_types)\n    results['top_preds_only'] = compute_metrics(eval['logits'], eval['labels'], eval['groups'], set_type, logger,\n                                                ent_types=ent_types)\n\n    logger.info(\"Results: %s\", results)\n\n    # Save evaluation results\n    with open(os.path.join(config.output_dir, set_type + \"_metrics.txt\"), \"w\") as wf:\n        json.dump(results, wf, indent=4)\n\n    # Save evaluation raw data\n    with open(os.path.join(config.output_dir, set_type + \"_raw_eval_data.pkl\"), \"wb\") as wf:\n        pickle.dump(eval, wf)\n\n    return results\n"
]
| [
[
"numpy.array",
"torch.cuda.manual_seed",
"torch.cat",
"numpy.asarray",
"torch.nn.Softmax",
"numpy.random.seed",
"torch.IntTensor",
"torch.utils.data.SequentialSampler",
"torch.manual_seed",
"torch.inference_mode",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load",
"sklearn.metrics.auc",
"torch.utils.data.TensorDataset"
]
]
|
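The evaluation routine above reports accuracy over all classes, then micro-averaged precision/recall/F1 over every relation class except 'na'. A minimal, self-contained sketch of that metric step; the three-relation label map and the toy gold/predicted arrays are hypothetical, not from the original dataset:

```python
# Micro-averaged P/R/F1 excluding the 'na' relation, as in evaluate() above.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

rel2idx = {"na": 0, "treats": 1, "causes": 2}   # hypothetical relation map
gold = np.array([0, 1, 2, 1, 0, 2])
preds = np.array([0, 1, 2, 2, 0, 2])

# Keep every relation index except the one mapped to 'na'
pos_rel_idxs = [i for r, i in rel2idx.items() if r != "na"]

acc = accuracy_score(gold, preds)               # accuracy still includes 'na'
p, r, f1, _ = precision_recall_fscore_support(
    gold, preds, average="micro", labels=pos_rel_idxs)
print(f"acc={acc:.2f} P={p:.2f} R={r:.2f} F1={f1:.2f}")
```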
ETHmodlab/BIMODAL | [
"31c3265498ea8021acca558e7de22c8fdaf6578f"
]
| [
"model/trainer.py"
]
| [
"\"\"\"\nImplementation of different training methods\n\"\"\"\n\nimport numpy as np\nfrom sklearn.model_selection import KFold, train_test_split, ShuffleSplit\nimport pandas as pd\nimport configparser\nfrom fb_rnn import FBRNN\nfrom forward_rnn import ForwardRNN\nfrom nade import NADE\nfrom bimodal import BIMODAL\nfrom one_hot_encoder import SMILESEncoder\nfrom sklearn.utils import shuffle\nimport os\nfrom helper import clean_molecule, check_model, check_molecules\n\nnp.random.seed(1)\n\n\nclass Trainer():\n\n def __init__(self, experiment_name='ForwardRNN'):\n\n self._encoder = SMILESEncoder()\n\n # Read all parameter from the .ini file\n self._config = configparser.ConfigParser()\n self._config.read('../experiments/' + experiment_name + '.ini')\n\n self._model_type = self._config['MODEL']['model']\n self._experiment_name = experiment_name\n self._hidden_units = int(self._config['MODEL']['hidden_units'])\n\n self._file_name = '../data/' + self._config['DATA']['data']\n self._encoding_size = int(self._config['DATA']['encoding_size'])\n self._molecular_size = int(self._config['DATA']['molecular_size'])\n\n self._epochs = int(self._config['TRAINING']['epochs'])\n self._n_folds = int(self._config['TRAINING']['n_folds'])\n self._learning_rate = float(self._config['TRAINING']['learning_rate'])\n self._batch_size = int(self._config['TRAINING']['batch_size'])\n\n self._samples = int(self._config['EVALUATION']['samples'])\n self._T = float(self._config['EVALUATION']['temp'])\n self._starting_token = self._encoder.encode([self._config['EVALUATION']['starting_token']])\n\n if self._model_type == 'FBRNN':\n self._model = FBRNN(self._molecular_size, self._encoding_size,\n self._learning_rate, self._hidden_units)\n elif self._model_type == 'ForwardRNN':\n self._model = ForwardRNN(self._molecular_size, self._encoding_size,\n self._learning_rate, self._hidden_units)\n\n elif self._model_type == 'BIMODAL':\n self._model = BIMODAL(self._molecular_size, self._encoding_size,\n self._learning_rate, self._hidden_units)\n\n elif self._model_type == 'NADE':\n self._generation = self._config['MODEL']['generation']\n self._missing_token = self._encoder.encode([self._config['TRAINING']['missing_token']])\n self._model = NADE(self._molecular_size, self._encoding_size, self._learning_rate,\n self._hidden_units, self._generation, self._missing_token)\n\n self._data = self._encoder.encode_from_file(self._file_name)\n\n def complete_run(self, stor_dir='../evaluation/', restart=False):\n '''Training without validation on complete data'''\n\n # Create directories\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/models'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/models')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/molecules'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/molecules')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/statistic'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/statistic')\n\n # Compute labels\n label = np.argmax(self._data, axis=-1).astype(int)\n\n # Special preprocessing in the case of NADE\n if self._model_type == 'NADE' and self._generation == 'random':\n # First column stores correct SMILES and second column stores SMILES with missing values\n label = np.argmax(self._data[:, 0], axis=-1).astype(int)\n aug = self._data.shape[1] - 1\n label = np.repeat(label[:, np.newaxis, :], aug, axis=1)\n self._data = self._data[:, 1:]\n\n # Build model\n self._model.build()\n\n # Store total Statistics\n 
tot_stat = []\n\n # only single fold\n fold = 1\n\n # Shuffle data before training (Data reshaped from (N_samples, N_augmentation, molecular_size, encoding_size)\n # to (all_SMILES, molecular_size, encoding_size))\n self._data, label = shuffle(self._data.reshape(-1, self._molecular_size, self._encoding_size),\n label.reshape(-1, self._molecular_size))\n\n for i in range(self._epochs):\n print('Fold:', fold)\n print('Epoch:', i)\n\n # With restart read existing files\n if restart:\n tmp_stat_file = pd.read_csv(\n stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',\n header=None).to_numpy()\n\n # Check if current epoch is successfully completed else continue with normal training\n if check_model(self._model_type, self._experiment_name, stor_dir, fold, i) and check_molecules(\n self._experiment_name, stor_dir, fold, i) and tmp_stat_file.shape[0] > i:\n\n # Load model\n self._model.build(\n stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))\n\n # Fill statistic and loss list\n tot_stat.append(tmp_stat_file[i, 1:].reshape(1, -1).tolist())\n continue\n\n # Continue with normal training\n else:\n restart = False\n\n # Train model\n statistic = self._model.train(self._data, label, epochs=1, batch_size=self._batch_size)\n tot_stat.append(statistic.tolist())\n\n # Store model\n self._model.save(\n stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))\n\n # Sample new molecules\n new_molecules = []\n for s in range(self._samples):\n mol = self._encoder.decode(self._model.sample(self._starting_token, self._T))\n new_molecules.append(clean_molecule(mol[0], self._model_type))\n\n # Store new molecules\n new_molecules = np.array(new_molecules)\n pd.DataFrame(new_molecules).to_csv(\n stor_dir + '/' + self._experiment_name + '/molecules/molecule_fold_' + str(fold) + '_epochs_' + str(\n i) + '.csv', header=None)\n\n # Store statistic\n store_stat = np.array(tot_stat).reshape(i + 1, -1)\n pd.DataFrame(np.array(store_stat)).to_csv(\n stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',\n header=None)\n\n def single_run(self, stor_dir='../evaluation/', restart=False):\n '''Training with validation and store data'''\n\n # Create directories\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/models'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/models')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/molecules'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/molecules')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/statistic'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/statistic')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/validation'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/validation')\n\n # Compute labels\n label = np.argmax(self._data, axis=-1).astype(int)\n\n # Special preprocessing in the case of NADE\n if (self._model_type == 'NADE' or self._model_type == 'NADE_v2') and self._generation == 'random':\n # First column stores correct SMILES and second column stores SMILES with missing values\n label = np.argmax(self._data[:, 0], axis=-1).astype(int)\n aug = self._data.shape[1] - 1\n label = np.repeat(label[:, np.newaxis, :], aug, axis=1)\n self._data = self._data[:, 1:]\n\n # Split data into train and test data\n train_data, test_data, train_label, test_label = train_test_split(self._data, label, test_size=1. 
/ 5,\n random_state=1, shuffle=True)\n # Build model\n self._model.build()\n\n # Store total Statistics\n tot_stat = []\n\n # Store validation loss\n tot_loss = []\n\n # only single fold\n fold = 1\n\n for i in range(self._epochs):\n print('Fold:', fold)\n print('Epoch:', i)\n\n if restart:\n # Read existing files\n tmp_val_file = pd.read_csv(\n stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv',\n header=None).to_numpy()\n tmp_stat_file = pd.read_csv(\n stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',\n header=None).to_numpy()\n\n # Check if current epoch is successfully completed else continue with normal training\n if check_model(self._model_type, self._experiment_name, stor_dir, fold, i) and check_molecules(\n self._experiment_name, stor_dir, fold, i) and tmp_val_file.shape[0] > i and tmp_stat_file.shape[\n 0] > i:\n\n # Load model\n self._model.build(\n stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))\n\n # Fill statistic and loss list\n tot_stat.append(tmp_stat_file[i, 1:].reshape(1, -1).tolist())\n tot_loss.append(tmp_val_file[i, 1])\n\n # Skip this epoch\n continue\n\n # Continue with normal training\n else:\n restart = False\n\n # Train model (Data reshaped from (N_samples, N_augmentation, molecular_size, encoding_size)\n # to (all_SMILES, molecular_size, encoding_size))\n statistic = self._model.train(train_data.reshape(-1, self._molecular_size, self._encoding_size),\n train_label.reshape(-1, self._molecular_size), epochs=1,\n batch_size=self._batch_size)\n tot_stat.append(statistic.tolist())\n\n # Store model\n self._model.save(\n stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))\n\n # Test model on validation set\n tot_loss.append(\n self._model.validate(test_data.reshape(-1, self._molecular_size, self._encoding_size),\n test_label.reshape(-1, self._molecular_size)))\n\n # Sample new molecules\n new_molecules = []\n for s in range(self._samples):\n mol = self._encoder.decode(self._model.sample(self._starting_token, self._T))\n new_molecules.append(clean_molecule(mol[0], self._model_type))\n\n # Store new molecules\n new_molecules = np.array(new_molecules)\n pd.DataFrame(new_molecules).to_csv(\n stor_dir + '/' + self._experiment_name + '/molecules/molecule_fold_' + str(fold) + '_epochs_' + str(\n i) + '.csv', header=None)\n\n # Store statistic\n store_stat = np.array(tot_stat).reshape(i + 1, -1)\n pd.DataFrame(np.array(store_stat)).to_csv(\n stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',\n header=None)\n\n # Store validation data\n pd.DataFrame(np.array(tot_loss).reshape(-1, 1)).to_csv(\n stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv',\n header=None)\n\n def cross_validation(self, stor_dir='../evaluation/', restart=False):\n '''Perform cross-validation and store data'''\n\n # Create directories\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/models'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/models')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/molecules'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/molecules')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/statistic'):\n os.makedirs(stor_dir + '/' + self._experiment_name + '/statistic')\n\n if not os.path.exists(stor_dir + '/' + self._experiment_name + '/validation'):\n os.makedirs(stor_dir 
+ '/' + self._experiment_name + '/validation')\n\n        self._kf = KFold(n_splits=self._n_folds, shuffle=True, random_state=2)\n\n        # Count iterations\n        fold = 0\n\n        # Compute labels\n        label = np.argmax(self._data, axis=-1).astype(int)\n\n        # Special preprocessing in the case of NADE\n        if (self._model_type == 'NADE') and self._generation == 'random':\n            # First column stores correct SMILES and second column stores SMILES with missing values\n            label = np.argmax(self._data[:, 0], axis=-1).astype(int)\n            aug = self._data.shape[1] - 1\n            label = np.repeat(label[:, np.newaxis, :], aug, axis=1)\n            self._data = self._data[:, 1:]\n\n        # Split data into train and test data\n        for train, test in self._kf.split(self._data):\n\n            # Shuffle index within test and train set\n            np.random.shuffle(train)\n            np.random.shuffle(test)\n\n            fold += 1\n\n            self._model.build()\n\n            # Store total statistics\n            tot_stat = []\n\n            # Store validation loss\n            tot_loss = []\n\n            for i in range(self._epochs):\n                print('Fold:', fold)\n                print('Epoch:', i)\n\n                if restart:\n                    tmp_val_file = pd.read_csv(\n                        stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv',\n                        header=None).to_numpy()\n\n                    tmp_stat_file = pd.read_csv(\n                        stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',\n                        header=None).to_numpy()\n\n                    # Check if current epoch is successfully completed else continue with normal training\n                    if check_model(self._model_type, self._experiment_name, stor_dir, fold, i) and check_molecules(\n                            self._experiment_name, stor_dir, fold, i) and tmp_val_file.shape[0] > i and tmp_stat_file.shape[\n                        0] > i:\n\n                        # Load model\n                        self._model.build(\n                            stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))\n\n                        # Fill statistic and loss list\n                        tot_stat.append(tmp_stat_file[i, 1:].reshape(1, -1).tolist())\n                        tot_loss.append(tmp_val_file[i, 1])\n\n                        # Skip this epoch\n                        continue\n\n                    else:\n                        restart = False\n\n                # Train model (Data reshaped from (N_samples, N_augmentation, molecular_size, encoding_size)\n                # to (all_SMILES, molecular_size, encoding_size))\n                statistic = self._model.train(\n                    self._data[train].reshape(-1, self._molecular_size, self._encoding_size),\n                    label[train].reshape(-1, self._molecular_size), epochs=1, batch_size=self._batch_size)\n\n                tot_stat.append(statistic.tolist())\n\n                # Store model\n                self._model.save(\n                    stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))\n\n                # Test model on validation set\n                tot_loss.append(\n                    self._model.validate(self._data[test].reshape(-1, self._molecular_size, self._encoding_size),\n                                         label[test].reshape(-1, self._molecular_size)))\n\n                # Sample new molecules\n                new_molecules = []\n                for s in range(self._samples):\n                    mol = self._encoder.decode(self._model.sample(self._starting_token, self._T))\n                    new_molecules.append(clean_molecule(mol[0], self._model_type))\n\n                # Store new molecules\n                new_molecules = np.array(new_molecules)\n                pd.DataFrame(new_molecules).to_csv(\n                    stor_dir + '/' + self._experiment_name + '/molecules/molecule_fold_' + str(fold) + '_epochs_' + str(\n                        i) + '.csv', header=None)\n\n                # Store statistic\n                store_stat = np.array(tot_stat).reshape(i + 1, -1)\n                pd.DataFrame(np.array(store_stat)).to_csv(\n                    stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv', header=None)\n\n                # Store validation data\n                pd.DataFrame(np.array(tot_loss).reshape(-1, 1)).to_csv(\n                    stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv', 
header=None)\n\n\n"
]
| [
[
"numpy.repeat",
"numpy.array",
"sklearn.model_selection.train_test_split",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.shuffle",
"numpy.argmax",
"sklearn.model_selection.KFold"
]
]
|
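The `cross_validation` method above is built around scikit-learn's KFold with shuffled folds and a fixed seed, plus an extra shuffle of the indices inside each split. A minimal sketch of that split pattern; the stand-in array shape is illustrative, not from the BIMODAL datasets:

```python
# KFold pattern used by Trainer.cross_validation above, on toy data.
import numpy as np
from sklearn.model_selection import KFold

data = np.random.rand(100, 30, 55)   # stand-in for (N_samples, molecular_size, encoding_size)
kf = KFold(n_splits=5, shuffle=True, random_state=2)

for fold, (train, test) in enumerate(kf.split(data), start=1):
    np.random.shuffle(train)          # shuffle indices within each split, as the trainer does
    np.random.shuffle(test)
    print(f"fold {fold}: {len(train)} train / {len(test)} test samples")
```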
halsayed/kps_mask_detector | [
"b1fa3c6c54ceb2e919fd21a8e53f996000f46680"
]
| [
"functions/local_test_detect_faces.py"
]
| [
"import cv2\nimport numpy as np\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\n\nprototext_file = 'models/face_detector.prototxt'\nweights_file = 'models/face_detector.caffemodel'\nmask_model = 'models/mask_detector.model'\nfacenet = cv2.dnn.readNet(prototext_file, weights_file)\nmasknet = load_model(mask_model)\nconfidence = 0.5\n\n\ndef main(ctx, msg):\n image = cv2.imread('test1.jpg')\n (h, w) = image.shape[:2]\n blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n facenet.setInput(blob)\n detections = facenet.forward()\n\n faces = []\n face_locations = []\n faces_without_mask = 0\n faces_with_mask = 0\n\n for i in range(0, detections.shape[2]):\n image_confidence = detections[0, 0, i, 2]\n\n if image_confidence > confidence:\n # compute the (x, y)-coordinates of the bounding box for the object\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype('int')\n\n # ensure the bounding boxes fall within the dimensions of the frame\n (startX, startY) = (max(0, startX), max(0, startY))\n (endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\n # extract the face ROI, convert it from BGR to RGB channel ordering, resize it to 224x224, and preprocess it\n face = image[startY:endY, startX:endX]\n face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n face = cv2.resize(face, (224, 224))\n face = img_to_array(face)\n face = preprocess_input(face)\n\n # add the face and bounding boxes to their respective to result lists\n faces.append(face)\n face_locations.append((startX, startY, endX, endY))\n\n # only make a predictions if at least one face was detected\n if len(faces) > 0:\n # for faster inference we'll make batch predictions on *all*\n # faces at the same time rather than one-by-one predictions\n # in the above `for` loop\n faces = np.array(faces, dtype=\"float32\")\n mask_predictions = masknet.predict(faces, batch_size=32)\n\n print(mask_predictions)\n print(type(mask_predictions))\n # draw boxes around the faces\n for (box, pred) in zip(face_locations, mask_predictions):\n (startX, startY, endX, endY) = box\n (mask, withoutMask) = pred\n print(pred)\n print(type(pred))\n\n # determine the class label and color we'll use to draw\n # the bounding box and text\n if mask > withoutMask:\n label = f'Mask: {mask:.2f}'\n faces_with_mask += 1\n color = (0, 255, 0)\n else:\n label = f'No Mask: {withoutMask:.2f}'\n faces_without_mask += 1\n color = (0, 0, 255)\n\n cv2.putText(image, label, (startX, startY - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)\n cv2.rectangle(image, (startX, startY), (endX, endY), color, 4)\n\n print('done')\n cv2.imshow('output', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main('test', 'test')\n"
]
| [
[
"tensorflow.keras.models.load_model",
"numpy.array",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input"
]
]
|
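The script above wraps OpenCV's DNN face detector: build a blob from the image, run a forward pass, and keep detections above a confidence threshold. A condensed sketch of just that detection step, assuming the same model files and test image the script itself assumes:

```python
# Face-detection core from the script above: blob -> forward -> filter boxes.
import cv2
import numpy as np

net = cv2.dnn.readNet('models/face_detector.prototxt', 'models/face_detector.caffemodel')
image = cv2.imread('test1.jpg')
h, w = image.shape[:2]

blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()            # SSD output, shape (1, 1, N, 7)

for i in range(detections.shape[2]):
    conf = detections[0, 0, i, 2]
    if conf > 0.5:
        # normalized coordinates -> pixel-space bounding box
        box = (detections[0, 0, i, 3:7] * np.array([w, h, w, h])).astype('int')
        print('face at', box, 'confidence', conf)
```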
archer1211/LSSTM | [
"27fca510faeaf0ae4bdb19c8fee8b792eca0262a"
]
| [
"LS_STM.py"
]
| [
"import numpy as np\nfrom hottbox.core import Tensor\nimport copy\nimport time\nfrom scipy.spatial.distance import pdist, cdist, squareform\nfrom aiding_functions import contractor\n\nclass LSSTM:\n def __init__(self, C=10, kernel='linear', sig2=1, max_iter=100):\n\n self.order = None\n self.shape = None\n self.C = C\n self.max_iter = max_iter\n self.model = {'Weights': None,\n 'Bias': 0,\n 'nIter': 0\n }\n\n self.eta_history = []\n self.b_history = []\n self.orig_labels = None\n self.kernel = kernel\n self.sig2 = sig2\n\n\n\n def fit(self, X_train, labels):\n \"\"\"\n\n Parameters\n ----------\n X_train: list[Tensor], list of length M of Tensor objects, all of the same order and size\n labels: list of length M of labels +1, -1\n\n Returns\n -------\n\n \"\"\"\n self.order = X_train[0].order\n self.shape = X_train[0].shape\n self._assert_data(X_train, labels=labels)\n\n self.orig_labels = list(set(labels))\n labels = [1 if x == self.orig_labels[0] else -1 for x in labels]\n\n w_n = self._initialize_weights(X_train[0].shape)\n\n for i in range(self.max_iter):\n # w_n_old = copy.deepcopy(weights)\n for n in range(self.order):\n #Always seems to be better if the weights are updated on the fly, rather than altogether at the\n #end of each iteration\n # eta = self._calc_eta(w_n_old, n)\n # X_m = self._calc_Xm(X_train, w_n_old, n)\n eta = self._calc_eta(w_n, n)\n X_m = self._calc_Xm(X_train, w_n, n)\n self.eta_history.append(eta)\n\n w, b = self._compute_weights(X_m, labels, eta, self.C, kernel=self.kernel, sig2=self.sig2)\n w = w / np.linalg.norm(w)\n w_n[n] = w\n\n\n self._update_model(w_n, b, i)\n if self._converged(): break\n\n\n\n def predict(self, X_test):\n \"\"\"\n\n Parameters\n ----------\n X_test: list[Tensor]\n\n Returns\n -------\n y_pred: list of predicted laels\n\n \"\"\"\n #if singleton\n if not isinstance(X_test, list):\n X_test = [X_test]\n\n self._assert_data(X_test)\n\n w_n = self.model['Weights']\n b = self.model['Bias']\n y_pred = []\n dec_values = []\n for xtest in X_test:\n temp = xtest.copy()\n for n, w in enumerate(w_n):\n temp.mode_n_product(np.expand_dims(w, axis=0), mode=n, inplace=True)\n dec_values.append(temp.data.squeeze() + b)\n y_pred.append(np.sign(temp.data.squeeze() + b))\n\n y_pred = [self.orig_labels[0] if x == 1 else self.orig_labels[1] for x in y_pred]\n return y_pred, dec_values\n\n\n\n def _assert_data(self, X_data, labels=None):\n \"\"\"\n\n Parameters\n ----------\n X_data: list[Tensor]\n\n Returns\n -------\n\n None, just checks if all tensors have same order and dimensions, and if labels are binary\n \"\"\"\n order = self.order\n shape = self.shape\n for tensor in X_data[1:]:\n assert tensor.order == order, \"Tensors must all be of the same order\"\n assert tensor.shape == shape, \"Tensors must all have modes of equal dimensions\"\n order = tensor.order\n shape = tensor.shape\n\n if labels is not None:\n assert len(set(labels)) == 2, \"LSSTM is a binary classifier, more than two labels were passed\"\n\n\n def _initialize_weights(self, shape):\n \"\"\"\n\n Parameters\n ----------\n shape: tuple, of tensor dimensions\n\n Returns\n -------\n w_n: list, the initialized weights\n\n \"\"\"\n w_n = []\n for dim in shape:\n w_n.append(np.random.randn(dim))\n return w_n\n\n\n def _calc_eta(self, w_n, n):\n \"\"\"\n\n Parameters\n ----------\n w_n: list, of LS-STM weights\n n: int, the one to leave out\n\n Returns\n -------\n eta: int, parameter to be used in LS-STM optimization problem\n\n \"\"\"\n\n w_n_new = [w for i, w in enumerate(w_n) if i != n]\n eta 
= 1\n        for w in w_n_new:\n            eta *= (np.linalg.norm(w)**2)\n        return eta\n\n\n    def _calc_Xm(self, X_data, w_n, n):\n        \"\"\"\n\n        Parameters\n        ----------\n        X_data: list[Tensor], all the data as list of tensor objects\n        w_n: list, the weights treated as constants\n        n: int, the mode we're looking at\n\n        Returns\n        -------\n        X_m: np.ndarray of size M x mode(n), to be passed to the LS-SVM solver\n\n        \"\"\"\n        order = X_data[0].order\n        w_n_new = [w for i, w in enumerate(w_n) if i != n]\n        modes = [i for i in range(order) if i != n]\n\n        result = list(map(lambda x: contractor(x, w_n_new, modes), X_data))\n        X_m = np.array(result).squeeze()\n\n        return X_m\n\n\n    def _compute_weights(self, X_m, labels, eta, C, kernel, sig2):\n        \"\"\"\n\n        Parameters\n        ----------\n        X_m: np.ndarray, Matrix of contracted tensors along all weights except the current n\n        labels: int, the labels of the training data\n        eta: int, Parameter to be used in the algo\n        C: cost\n        kernel: which kernel to use (linear, rbf, etc)\n\n        Returns\n        -------\n        w: list, Weights for mode n\n        b: int, Bias\n\n        \"\"\"\n        M = X_m.shape[0]\n        if kernel=='linear':\n            alphas, b = self._ls_optimizer(X_m, labels, eta, C, kernel=kernel, sig2=sig2)\n            w = np.sum(alphas * X_m, axis=0)\n        elif kernel=='RBF':\n            y = np.zeros(M)\n            alphas, b = self._ls_optimizer(X_m, labels, eta, C, kernel=kernel, sig2=sig2)\n            for i in range(M):\n                x_star = X_m[[i]]\n                X_tmp = np.delete(X_m, i, axis=0)\n                l_tmp = np.delete(labels, i, axis=0)\n                alpha_tmp = np.delete(alphas, i, axis=0)[:,0]\n                rbf_vector = np.exp(-np.square(cdist(X_tmp, x_star)[:,0])/(2*sig2))\n                y[i] = np.sum(alpha_tmp*l_tmp*rbf_vector)\n            w = np.dot(np.linalg.pinv(X_m), y-b)\n\n\n        return w, b\n\n\n\n    def _ls_optimizer(self, X_m, labels, eta, C, kernel, sig2):\n        \"\"\"\n\n        Parameters\n        ----------\n        X_m: np.ndarray, Matrix of contracted tensors along all weights except the current n\n        labels: int, the labels of the training data\n        eta: int, Parameter to be used in the algo\n        C: Cost\n        kernel: which kernel to use (linear, rbf, etc)\n\n        Returns\n        -------\n        alphas: the alphas computed from the Lagrangian. 
The first alpha is the b, the bias parameter\n        b = the bias parameter\n        \"\"\"\n\n        M = X_m.shape[0]\n        gamma = C / eta\n        y_train = np.expand_dims(np.array(labels), axis=1)\n\n        #For now, use no kernel\n        if kernel=='linear':\n            Omega = np.dot(X_m, X_m.transpose())\n        elif kernel=='RBF':\n            Omega = np.exp(-np.square(squareform(pdist(X_m))) / (2*sig2))\n\n        left_column = np.expand_dims(np.append(np.array([0]), np.ones(M)), axis=1)\n        right_block = np.append(np.expand_dims(np.ones(M), axis=0), Omega + (1/gamma) * np.eye(M), axis=0)\n        params = np.append( left_column, right_block, axis=1)\n\n        RHS = np.append(np.array([[0]]), y_train, axis=0)\n\n        alphas = np.dot(np.linalg.inv(params), RHS)\n\n        b = alphas[0][0]\n        alphas = alphas[1:, :]\n\n        return alphas, b\n\n\n\n    def _update_model(self, w, b, nIter):\n        \"\"\"\n\n        Parameters\n        ----------\n        w: list, estimated weights\n        b: int, estimated bias\n        nIter: int, current iteration number\n\n        Returns\n        -------\n        None\n\n        \"\"\"\n        self.model['Weights'] = w\n        self.model['Bias'] = b\n        self.model['nIter'] = nIter\n\n\n\n    def _converged(self):\n        \"\"\"\n        Checks convergence from the recent history of eta and bias values.\n\n        Returns\n        -------\n        Boolean\n\n        \"\"\"\n\n        self.b_history.append(self.model['Bias'])\n\n        if len(self.b_history) > 10:\n            err1 = np.diff(np.array(self.eta_history[-11:]))\n            err2 = np.diff(np.array(self.b_history[-11:]))\n\n            if np.all(np.abs(err1) < 1e-8) and np.all(np.abs(err2) < 1e-8):\n                return True\n\n        return False\n"
]
| [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.delete",
"scipy.spatial.distance.pdist",
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"numpy.random.randn",
"numpy.linalg.pinv",
"numpy.eye",
"numpy.abs",
"numpy.append",
"scipy.spatial.distance.cdist",
"numpy.linalg.inv",
"numpy.expand_dims"
]
]
|
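`_ls_optimizer` above solves the LS-SVM dual in closed form: one (M+1)x(M+1) linear system whose solution stacks the bias on top of the alphas. A standalone numpy sketch of that same system on toy data (the data and gamma value are illustrative, and I use `np.linalg.solve` rather than an explicit inverse):

```python
# LS-SVM system solved by LSSTM._ls_optimizer above:
#   [ 0   1^T             ] [ b ]   [ 0 ]
#   [ 1   Omega + I/gamma ] [ a ] = [ y ]
# with Omega the (linear) kernel matrix.
import numpy as np

X = np.random.randn(6, 4)                      # M=6 samples, 4 features
y = np.array([1, 1, 1, -1, -1, -1], float)
gamma = 10.0                                   # plays the role of C / eta

M = X.shape[0]
Omega = X @ X.T                                # linear kernel matrix
top = np.concatenate(([0.0], np.ones(M)))                       # first row
bottom = np.column_stack((np.ones(M), Omega + np.eye(M) / gamma))
A = np.vstack((top, bottom))
rhs = np.concatenate(([0.0], y))

sol = np.linalg.solve(A, rhs)
b, alphas = sol[0], sol[1:]
w = (alphas[:, None] * X).sum(axis=0)          # linear-kernel weights, as in _compute_weights
print(b, w)
```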
hannahaih/panda-gym | [
"cbbe9deb85f53d6f4f805917781857cdce0af957",
"cbbe9deb85f53d6f4f805917781857cdce0af957"
]
| [
"panda_gym/envs/panda_tasks/panda_push.py",
"panda_gym/utils.py"
]
| [
"import numpy as np\n\nfrom panda_gym.envs.core import RobotTaskEnv\nfrom panda_gym.envs.robots.panda import Panda\nfrom panda_gym.envs.tasks.push import Push\nfrom panda_gym.pybullet import PyBullet\n\n\nclass PandaPushEnv(RobotTaskEnv):\n \"\"\"Push task wih Panda robot.\n\n Args:\n render (bool, optional): Activate rendering. Defaults to False.\n reward_type (str, optional): \"sparse\" or \"dense\". Defaults to \"sparse\".\n control_type (str, optional): \"ee\" to control end-effector position or \"joints\" to control joint values.\n Defaults to \"ee\".\n \"\"\"\n\n def __init__(self, render: bool = False, reward_type: str = \"sparse\", control_type: str = \"ee\") -> None:\n sim = PyBullet(render=render)\n robot = Panda(sim, block_gripper=True, base_position=np.array([-0.6, 0.0, 0.0]), control_type=control_type)\n task = Push(sim, reward_type=reward_type)\n super().__init__(robot, task)\n",
"from typing import Union\n\nimport numpy as np\n\n\ndef distance(a: np.ndarray, b: np.ndarray) -> Union[float, np.ndarray]:\n \"\"\"Compute the distance between two array. This function is vectorized.\n\n Args:\n a (np.ndarray): First array.\n b (np.ndarray): Second array.\n\n Returns:\n Union[float, np.ndarray]: The distance between the arrays.\n \"\"\"\n assert a.shape == b.shape\n return np.linalg.norm(a - b, axis=-1)\n\n\ndef angle_distance(a: np.ndarray, b: np.ndarray) -> Union[float, np.ndarray]:\n \"\"\"Compute the geodesic distance between two array of angles. This function is vectorized.\n\n Args:\n a (np.ndarray): First array.\n b (np.ndarray): Second array.\n\n Returns:\n Union[float, np.ndarray]: The geodesic distance between the angles.\n \"\"\"\n assert a.shape == b.shape\n dist = 1 - np.inner(a, b) ** 2\n return dist\n"
]
| [
[
"numpy.array"
],
[
"numpy.inner",
"numpy.linalg.norm"
]
]
|
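Because `distance` above reduces only the last axis via `np.linalg.norm(..., axis=-1)`, the same call handles a single pair of points or a whole batch of goal/achieved-goal arrays. A quick illustration with made-up coordinates:

```python
# The axis=-1 reduction lets one helper serve both scalar and batched cases.
import numpy as np

a = np.array([0.0, 0.0, 0.0])
b = np.array([1.0, 2.0, 2.0])
print(np.linalg.norm(a - b, axis=-1))   # scalar: 3.0

A = np.zeros((4, 3))                    # batch of 4 positions
B = np.tile([1.0, 2.0, 2.0], (4, 1))
print(np.linalg.norm(A - B, axis=-1))   # shape (4,): one distance per row
```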
buaayhq/cs131mysolution | [
"2cceaeb4133a54a2ba6e30d67e48b7d7083adaa7"
]
| [
"hw1_release/filters.py"
]
| [
"\"\"\"\nCS131 - Computer Vision: Foundations and Applications\nAssignment 1\nAuthor: Donsuk Lee ([email protected])\nDate created: 07/2017\nLast modified: 10/16/2017\nPython Version: 3.5+\n\"\"\"\n\nimport numpy as np\n\n\ndef conv_nested(image, kernel):\n \"\"\"A naive implementation of convolution filter.\n\n This is a naive implementation of convolution using 4 nested for-loops.\n This function computes convolution of an image with a kernel and outputs\n the result that has the same shape as the input image.\n\n Args:\n image: numpy array of shape (Hi, Wi).\n kernel: numpy array of shape (Hk, Wk).\n\n Returns:\n out: numpy array of shape (Hi, Wi).\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n ### YOUR CODE HERE\n ckx = (Hk - 1)//2\n cky = (Wk - 1)//2\n for m in range(Hi):\n for n in range(Wi):\n sum = 0\n for i in range(Hk):\n for j in range(Wk):\n if (m+ckx-i>=0 and m+ckx-i<Hi and n+cky-j>=0 and n+cky-j<Wi):\n sum += kernel[i][j] * image[m+ckx-i][n+cky-j]\n out[m, n] = sum\n ### END YOUR CODE\n\n return out\n\ndef zero_pad(image, pad_height, pad_width):\n \"\"\" Zero-pad an image.\n\n Ex: a 1x1 image [[1]] with pad_height = 1, pad_width = 2 becomes:\n\n [[0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0]] of shape (3, 5)\n\n Args:\n image: numpy array of shape (H, W).\n pad_width: width of the zero padding (left and right padding).\n pad_height: height of the zero padding (bottom and top padding).\n\n Returns:\n out: numpy array of shape (H+2*pad_height, W+2*pad_width).\n \"\"\"\n\n H, W = image.shape\n out = None\n\n ### YOUR CODE HERE\n out = np.zeros((H+2*pad_height, W+2*pad_width))\n out[pad_height:H+pad_height, pad_width:W+pad_width] = image\n ### END YOUR CODE\n return out\n\n\ndef conv_fast(image, kernel):\n \"\"\" An efficient implementation of convolution filter.\n\n This function uses element-wise multiplication and np.sum()\n to efficiently compute weighted sum of neighborhood at each\n pixel.\n\n Hints:\n - Use the zero_pad function you implemented above\n - There should be two nested for-loops\n - You may find np.flip() and np.sum() useful\n\n Args:\n image: numpy array of shape (Hi, Wi).\n kernel: numpy array of shape (Hk, Wk).\n\n Returns:\n out: numpy array of shape (Hi, Wi).\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n ### YOUR CODE HERE\n image = zero_pad(image, Hk//2, Wk//2)\n kernel = np.flip(kernel, 0)\n kernel = np.flip(kernel, 1)\n for m in range(Hi):\n for n in range(Wi):\n out[m,n] = np.sum(image[m:m+Hk, n:n+Wk]*kernel)\n ### END YOUR CODE\n\n return out\n\ndef conv_faster(image, kernel):\n \"\"\"\n Args:\n image: numpy array of shape (Hi, Wi).\n kernel: numpy array of shape (Hk, Wk).\n\n Returns:\n out: numpy array of shape (Hi, Wi).\n \"\"\"\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n ### YOUR CODE HERE\n image = zero_pad(image, Hk//2, Wk//2)\n kernel = np.flip(kernel, 0)\n kernel = np.flip(kernel, 1)\n mat = np.zeros((Hi*Wi, Hk*Wk))\n for i in range(Hi*Wi):\n row = i//Wi\n col = i%Wi\n #row, col = np.unravel_index(i, (Hi, Wi)) #this function is a little slow\n mat[i,:] = image[row:row+Hk, col:col+Wk].reshape(1, Hk*Wk)\n out = np.dot(mat, kernel.reshape(-1,1)).reshape(Hi, Wi) \n ### END YOUR CODE\n\n return out\n\ndef cross_correlation(f, g):\n \"\"\" Cross-correlation of f and g.\n\n Hint: use the conv_fast function defined above.\n\n Args:\n f: numpy array of shape (Hf, Wf).\n g: numpy array of shape (Hg, Wg).\n\n Returns:\n out: numpy 
array of shape (Hf, Wf).\n \"\"\"\n\n out = None\n ### YOUR CODE HERE\n g = np.flip(g, 0)\n g = np.flip(g, 1)\n out = conv_faster(f, g)\n ### END YOUR CODE\n\n return out\n\ndef zero_mean_cross_correlation(f, g):\n \"\"\" Zero-mean cross-correlation of f and g.\n\n Subtract the mean of g from g so that its mean becomes zero.\n\n Hint: you should look up useful numpy functions online for calculating the mean.\n\n Args:\n f: numpy array of shape (Hf, Wf).\n g: numpy array of shape (Hg, Wg).\n\n Returns:\n out: numpy array of shape (Hf, Wf).\n \"\"\"\n\n out = None\n ### YOUR CODE HERE\n g = g - np.mean(g)\n out = cross_correlation(f, g)\n ### END YOUR CODE\n\n return out\n\ndef normalized_cross_correlation(f, g):\n \"\"\" Normalized cross-correlation of f and g.\n\n Normalize the subimage of f and the template g at each step\n before computing the weighted sum of the two.\n\n Hint: you should look up useful numpy functions online for calculating \n the mean and standard deviation.\n\n Args:\n f: numpy array of shape (Hf, Wf).\n g: numpy array of shape (Hg, Wg).\n\n Returns:\n out: numpy array of shape (Hf, Wf).\n \"\"\"\n\n out = None\n ### YOUR CODE HERE\n f = (f - np.mean(f))/np.std(f)\n g = (g - np.mean(g))/np.std(g)\n out = cross_correlation(f, g)\n ### END YOUR CODE\n\n return out\n"
]
| [
[
"numpy.zeros",
"numpy.sum",
"numpy.mean",
"numpy.std",
"numpy.flip"
]
]
|
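`conv_faster` above relies on the im2col trick: unroll each padded neighborhood into a row so the whole convolution collapses to a single matrix product. A compact sketch of the same idea on a toy image and kernel, written with a list comprehension instead of the assignment's index arithmetic:

```python
# im2col convolution, mirroring conv_faster above: rows of neighborhoods
# times the flattened (flipped) kernel equals the convolved image.
import numpy as np

image = np.arange(16.0).reshape(4, 4)
kernel = np.array([[0., 1., 0.], [1., -4., 1.], [0., 1., 0.]])
Hi, Wi = image.shape
Hk, Wk = kernel.shape

padded = np.pad(image, ((Hk // 2, Hk // 2), (Wk // 2, Wk // 2)))  # zero padding
flipped = np.flip(kernel)                # flip both axes: convolution, not correlation
rows = np.stack([padded[r:r + Hk, c:c + Wk].ravel()
                 for r in range(Hi) for c in range(Wi)])          # (Hi*Wi, Hk*Wk)
out = (rows @ flipped.ravel()).reshape(Hi, Wi)
print(out)
```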
cuemacro/cufflinks | [
"1f361221698abd59a36b576ae3e6892ec2708c40"
]
| [
"cufflinks/colors.py"
]
| [
"##\n# Special thanks to @krey for the python3 support\n##\n\nimport numpy as np\nimport colorsys\nimport colorlover as cl\nimport operator\nimport copy\n\nfrom collections import deque\nfrom six import string_types\nfrom IPython.display import HTML, display\n\nfrom . import themes\nfrom .utils import inverseDict\nfrom .auth import get_config_file\n\n\nclass CufflinksError(Exception):\n pass\n\n\ndef to_rgba(color, alpha):\n \"\"\"\n Converts from hex|rgb to rgba\n\n Parameters:\n -----------\n color : string\n Color representation on hex or rgb\n alpha : float\n Value from 0 to 1.0 that represents the \n alpha value.\n\n Example:\n to_rgba('#E1E5ED',0.6)\n to_rgba('#f03',0.7)\n to_rgba('rgb(23,23,23)',.5)\n \"\"\"\n if type(color) == tuple:\n color, alpha = color\n color = color.lower()\n if 'rgba' in color:\n cl = list(eval(color.replace('rgba', '')))\n if alpha:\n cl[3] = alpha\n return 'rgba' + str(tuple(cl))\n elif 'rgb' in color:\n r, g, b = eval(color.replace('rgb', ''))\n return 'rgba' + str((r, g, b, alpha))\n else:\n return to_rgba(hex_to_rgb(color), alpha)\n\n\ndef hex_to_rgb(color):\n \"\"\"\n Converts from hex to rgb\n\n Parameters:\n -----------\n color : string\n Color representation on hex or rgb\n\n Example:\n hex_to_rgb('#E1E5ED')\n hex_to_rgb('#f03')\n \"\"\"\n color = normalize(color)\n color = color[1:]\n # return 'rgb'+str(tuple(ord(c) for c in color.decode('hex')))\n return 'rgb' + str((int(color[0:2], base=16), int(color[2:4], base=16), int(color[4:6], base=16)))\n\n\ndef normalize(color):\n \"\"\"\n Returns an hex color\n\n Parameters:\n -----------\n color : string\n Color representation in rgba|rgb|hex\n\n Example:\n normalize('#f03')\n \"\"\"\n if type(color) == tuple:\n color = to_rgba(*color)\n if 'rgba' in color:\n return rgb_to_hex(rgba_to_rgb(color))\n elif 'rgb' in color:\n return rgb_to_hex(color)\n elif '#' in color:\n if len(color) == 7:\n return color\n else:\n color = color[1:]\n return '#' + ''.join([x * 2 for x in list(color)])\n else:\n try:\n return normalize(cnames[color.lower()])\n except:\n raise CufflinksError('Not a valid color: ' + color)\n\n\ndef rgb_to_hex(color):\n \"\"\"\n Converts from rgb to hex\n\n Parameters:\n -----------\n color : string\n Color representation on hex or rgb\n\n Example:\n rgb_to_hex('rgb(23,25,24)')\n \"\"\"\n rgb = eval(color.replace('rgb', ''))\n # return '#'+''.join(map(chr, rgb)).encode('hex')\n return '#' + ''.join(['{0:02x}'.format(x).upper() for x in rgb])\n\n\ndef rgba_to_rgb(color, bg='rgb(255,255,255)'):\n \"\"\"\n Converts from rgba to rgb\n\n Parameters:\n -----------\n color : string\n Color representation in rgba\n bg : string\n Color representation in rgb\n\n Example:\n rgba_to_rgb('rgb(23,25,24,.4)''\n \"\"\"\n def c_tup(c):\n return eval(c[c.find('('):])\n color = c_tup(color)\n bg = hex_to_rgb(normalize(bg))\n bg = c_tup(bg)\n a = color[3]\n r = [int((1 - a) * bg[i] + a * color[i]) for i in range(3)]\n return 'rgb' + str(tuple(r))\n\n\ndef hex_to_hsv(color):\n \"\"\"\n Converts from hex to hsv\n\n Parameters:\n -----------\n color : string\n Color representation on color\n\n Example:\n hex_to_hsv('#ff9933')\n \"\"\"\n color = normalize(color)\n color = color[1:]\n # color=tuple(ord(c)/255.0 for c in color.decode('hex'))\n color = (int(color[0:2], base=16) / 255.0, int(color[2:4],\n base=16) / 255.0, int(color[4:6], base=16) / 255.0)\n return colorsys.rgb_to_hsv(*color)\n\n\ndef color_range(color, N=20):\n \"\"\"\n Generates a scale of colours from a base colour\n\n Parameters:\n -----------\n 
color : string\n        Color representation in hex\n    N : int\n        number of colours to generate\n\n    Example:\n        color_range('#ff9933',20)\n    \"\"\"\n    color = normalize(color)\n    org = color\n    color = hex_to_hsv(color)\n    HSV_tuples = [(color[0], x, color[2]) for x in np.arange(0, 1, 2.0 / N)]\n    HSV_tuples.extend([(color[0], color[1], x)\n                       for x in np.arange(0, 1, 2.0 / N)])\n    hex_out = []\n    for c in HSV_tuples:\n        c = colorsys.hsv_to_rgb(*c)\n        c = [int(_ * 255) for _ in c]\n        # hex_out.append(\"#\"+\"\".join([chr(x).encode('hex') for x in c]))\n        hex_out.append(\"#\" + \"\".join(['{0:02x}'.format(x) for x in c]))\n    if org not in hex_out:\n        hex_out.append(org)\n    hex_out.sort()\n    return hex_out\n\n\ndef color_table(color, N=1, sort=False, sort_values=False, inline=False, as_html=False):\n    \"\"\"\n    Generates a colour table \n\n    Parameters:\n    -----------\n    color : string | list | dict\n        Color representation in rgba|rgb|hex\n        If a list of colors is passed then these\n        are displayed in a table\n    N : int\n        number of colours to generate\n        When color is not a list then it generates \n        a range of N colors\n    sort : bool \n        if True then items are sorted\n    sort_values : bool \n        if True then items are sorted by color values.\n        Only applies if color is a dictionary\n    inline : bool\n        if True it returns single line color blocks\n    as_html : bool\n        if True it returns the HTML code\n\n    Example:\n        color_table('#ff9933')\n        color_table(cufflinks.cnames)\n        color_table(['pink','salmon','yellow'])\n    Note:\n        This function only works in iPython Notebook\n    \"\"\"\n    if isinstance(color, list):\n        c_ = ''\n        rgb_tup = [normalize(c) for c in color]\n        if sort:\n            rgb_tup.sort()\n    elif isinstance(color, dict):\n        c_ = ''\n        items = [(k, normalize(v), hex_to_hsv(normalize(v)))\n                 for k, v in list(color.items())]\n        if sort_values:\n            items = sorted(items, key=operator.itemgetter(2))\n        elif sort:\n            items = sorted(items, key=operator.itemgetter(0))\n        rgb_tup = [(k, v) for k, v, _ in items]\n    else:\n        c_ = normalize(color)\n        if N > 1:\n            rgb_tup = np.array(color_range(c_, N))[::-1]\n        else:\n            rgb_tup = [c_]\n\n    def _color(c):\n        if hex_to_hsv(c)[2] < .5:\n            color = \"#ffffff\"\n            shadow = '0 1px 0 #000'\n        else:\n            color = \"#000000\"\n            shadow = '0 1px 0 rgba(255,255,255,0.6)'\n        if c == c_:\n            border = \" border: 1px solid #ffffff;\"\n        else:\n            border = ''\n        return color, shadow, border\n\n    s = '<ul style=\"list-style-type: none;\">' if not inline else ''\n    for c in rgb_tup:\n        if isinstance(c, tuple):\n            k, c = c\n            k += ' : '\n        else:\n            k = ''\n        if inline:\n            s += '<div style=\"background-color:{0};height:20px;width:20px;display:inline-block;\"></div>'.format(\n                c)\n        else:\n            color, shadow, border = _color(c)\n            s += \"\"\"<li style=\"text-align:center;\"\"\" + border + \"\"\"line-height:30px;background-color:\"\"\" + c + \"\"\";\"> \n            <span style=\" text-shadow:\"\"\" + shadow + \"\"\"; color:\"\"\" + color + \"\"\";\">\"\"\" + k + c.upper() + \"\"\"</span>\n            </li>\"\"\"\n    s += '</ul>' if not inline else ''\n    if as_html:\n        return s\n    return display(HTML(s))\n\n\ndef colorgen(colors=None, n=None, scale=None, theme=None):\n    \"\"\"\n    Returns a generator with a list of colors\n    and gradients of those colors\n\n    Parameters:\n    -----------\n    colors : list(colors)\n        List of colors to use\n\n    Example:\n        colorgen()\n        colorgen(['blue','red','pink'])\n        colorgen(['#f03','rgb(23,25,25)'])\n    \"\"\"\n    step = .1\n    if not colors:\n        if not scale:\n            if not theme:\n                scale = get_config_file()['colorscale']\n            else:\n                scale = themes.THEMES[theme]['colorscale']\n        colors = 
get_scales(scale)\n dq = deque(colors)\n if n:\n step = len(dq) * 0.8 / n if len(dq) * 8 < n else .1\n for i in np.arange(.2, 1, step):\n for y in dq:\n yield to_rgba(y, 1 - i + .2)\n dq.rotate(1)\n\n# NEW STUFF\n\n\n# Color Names\n# ---------------------------------\n\n\ncnames = {'aliceblue': '#F0F8FF',\n 'antiquewhite':\t '#FAEBD7',\n 'aqua':\t\t\t '#00FFFF',\n 'aquamarine':\t\t '#7FFFD4',\n 'azure':\t\t\t '#F0FFFF',\n 'beige':\t\t\t '#F5F5DC',\n 'bisque':\t\t\t '#FFE4C4',\n 'black':\t\t\t '#000000',\n 'blanchedalmond':\t '#FFEBCD',\n 'blue':\t\t\t '#3780bf',\n 'bluegray':\t\t '#565656',\n 'bluepurple':\t\t '#6432AB',\n 'blueviolet':\t\t '#8A2BE2',\n 'brick':\t\t\t '#E24A33',\n 'brightblue':\t\t '#0000FF',\n 'brightred':\t\t '#FF0000',\n 'brown':\t\t\t '#A52A2A',\n 'burlywood':\t\t '#DEB887',\n 'cadetblue':\t\t '#5F9EA0',\n 'charcoal':\t\t '#151516',\n 'chartreuse':\t\t '#7FFF00',\n 'chocolate':\t\t '#D2691E',\n 'coral':\t\t\t '#FF7F50',\n 'cornflowerblue':\t '#6495ED',\n 'cornsilk':\t\t '#FFF8DC',\n 'crimson':\t\t\t '#DC143C',\n 'cyan':\t\t\t '#00FFFF',\n 'darkblue':\t\t '#00008B',\n 'darkcyan':\t\t '#008B8B',\n 'darkgoldenrod':\t '#B8860B',\n 'darkgray':\t\t '#A9A9A9',\n 'darkgreen':\t\t '#006400',\n 'darkgrey':\t\t '#A9A9A9',\n 'darkkhaki':\t\t '#BDB76B',\n 'darkmagenta':\t\t '#8B008B',\n 'darkolivegreen':\t '#556B2F',\n 'darkorange':\t\t '#FF8C00',\n 'darkorchid':\t\t '#9932CC',\n 'darkred':\t\t\t '#8B0000',\n 'darksalmon':\t\t '#E9967A',\n 'darkseagreen':\t '#8FBC8F',\n 'darkslateblue':\t '#483D8B',\n 'darkslategray':\t '#2F4F4F',\n 'darkslategrey':\t '#2F4F4F',\n 'darkturquoise':\t '#00CED1',\n 'darkviolet':\t\t '#9400D3',\n 'deeppink':\t\t '#FF1493',\n 'deepskyblue':\t\t '#00BFFF',\n 'dimgray':\t\t\t '#696969',\n 'dimgrey':\t\t\t '#696969',\n 'dodgerblue':\t\t '#1E90FF',\n 'firebrick':\t\t '#B22222',\n 'floralwhite':\t\t '#FFFAF0',\n 'forestgreen':\t\t '#228B22',\n 'fuchsia':\t\t\t '#FF00FF',\n 'gainsboro':\t\t '#DCDCDC',\n 'ghostwhite':\t\t '#F8F8FF',\n 'gold':\t\t\t '#FFD700',\n 'goldenrod':\t\t '#DAA520',\n 'grassgreen':\t\t '#32ab60',\n 'gray':\t\t\t '#808080',\n 'green':\t\t\t '#008000',\n 'greenyellow':\t\t '#ADFF2F',\n 'grey':\t\t\t '#808080',\n 'grey01':\t\t\t '#0A0A0A',\n 'grey02':\t\t\t '#151516',\n 'grey03':\t\t\t '#1A1A1C',\n 'grey04':\t\t\t '#1E1E21',\n 'grey05':\t\t\t '#252529',\n 'grey06':\t\t\t '#36363C',\n 'grey07':\t\t\t '#3C3C42',\n 'grey08':\t\t\t '#434343',\n 'grey09':\t\t\t '#666570',\n 'grey10':\t\t\t '#666666',\n 'grey11':\t\t\t '#8C8C8C',\n 'grey12':\t\t\t '#C2C2C2',\n 'grey13':\t\t\t '#E2E2E2',\n 'grey14':\t\t\t '#E5E5E5',\n 'honeydew':\t\t '#F0FFF0',\n 'hotpink':\t\t\t '#FF69B4',\n 'indianred':\t\t '#CD5C5C',\n 'indigo':\t\t\t '#4B0082',\n 'ivory':\t\t\t '#FFFFF0',\n 'java':\t\t\t '#17BECF',\n 'khaki':\t\t\t '#F0E68C',\n 'lavender':\t\t '#E6E6FA',\n 'lavenderblush':\t '#FFF0F5',\n 'lawngreen':\t\t '#7CFC00',\n 'lemonchiffon':\t '#FFFACD',\n 'lightpink2':\t\t '#fccde5',\n 'lightpurple':\t\t '#bc80bd',\n 'lightblue':\t\t '#ADD8E6',\n 'lightcoral':\t\t '#F08080',\n 'lightcyan':\t\t '#E0FFFF',\n 'lightgoldenrodyellow':\t '#FAFAD2',\n 'lightgray':\t\t '#D3D3D3',\n 'lightgreen':\t\t '#90EE90',\n 'lightgrey':\t\t '#D3D3D3',\n 'lightivory':\t\t '#F6F6F6',\n 'lightpink':\t\t '#FFB6C1',\n 'lightsalmon':\t\t '#FFA07A',\n 'lightseagreen':\t '#20B2AA',\n 'lightskyblue':\t '#87CEFA',\n 'lightslategray':\t '#778899',\n 'lightslategrey':\t '#778899',\n 'lightsteelblue':\t '#B0C4DE',\n 'lightteal':\t\t '#8dd3c7',\n 'lightyellow':\t\t '#FFFFE0',\n 'lightblue2':\t\t 
'#80b1d3',\n 'lightviolet':\t\t '#8476CA',\n 'lime':\t\t\t '#00FF00',\n 'lime2':\t\t\t '#8EBA42',\n 'limegreen':\t\t '#32CD32',\n 'linen':\t\t\t '#FAF0E6',\n 'magenta':\t\t\t '#FF00FF',\n 'maroon':\t\t\t '#800000',\n 'mediumaquamarine': '#66CDAA',\n 'mediumblue':\t\t '#0000CD',\n 'mediumgray': \t '#656565',\n 'mediumorchid':\t '#BA55D3',\n 'mediumpurple':\t '#9370DB',\n 'mediumseagreen':\t '#3CB371',\n 'mediumslateblue':\t '#7B68EE',\n 'mediumspringgreen': '#00FA9A',\n 'mediumturquoise':\t '#48D1CC',\n 'mediumvioletred':\t '#C71585',\n 'midnightblue':\t '#191970',\n 'mintcream':\t\t '#F5FFFA',\n 'mistyrose':\t\t '#FFE4E1',\n 'moccasin':\t\t '#FFE4B5',\n 'mustard':\t\t\t '#FBC15E',\n 'navajowhite':\t\t '#FFDEAD',\n 'navy':\t\t\t '#000080',\n 'oldlace':\t\t\t '#FDF5E6',\n 'olive':\t\t\t '#808000',\n 'olivedrab':\t\t '#6B8E23',\n 'orange':\t\t\t '#ff9933',\n 'orangered':\t\t '#FF4500',\n 'orchid':\t\t\t '#DA70D6',\n 'palegoldenrod':\t '#EEE8AA',\n 'palegreen':\t\t '#98FB98',\n 'paleolive':\t\t '#b3de69',\n 'paleturquoise':\t '#AFEEEE',\n 'palevioletred':\t '#DB7093',\n 'papayawhip':\t\t '#FFEFD5',\n 'peachpuff':\t\t '#FFDAB9',\n 'pearl':\t\t\t '#D9D9D9',\n 'pearl02':\t\t\t '#F5F6F9',\n 'pearl03':\t\t\t '#E1E5ED',\n 'pearl04':\t\t\t '#9499A3',\n 'pearl05':\t\t\t '#6F7B8B',\n 'pearl06':\t\t\t '#4D5663',\n 'peru':\t\t\t '#CD853F',\n 'pink':\t\t\t '#ff0088',\n 'pinksalmon':\t\t '#FFB5B8',\n 'plum':\t\t\t '#DDA0DD',\n 'polar':\t\t\t '#ACAFB5',\n 'polarblue':\t\t '#0080F0',\n 'polarbluelight':\t '#46A0F0',\n 'polarcyan':\t\t '#ADFCFC',\n 'polardark':\t\t '#484848',\n 'polardiv':\t\t '#D5D8DB',\n 'polardust':\t\t '#F2F3F7',\n 'polargrey':\t\t '#505050',\n 'polargreen':\t\t '#309054',\n 'polarorange':\t\t '#EE7600',\n 'polarpurple':\t\t '#6262DE',\n 'polarred':\t\t '#D94255',\n 'powderblue':\t\t '#B0E0E6',\n 'purple':\t\t\t '#800080',\n 'red':\t\t\t\t '#db4052',\n 'rose':\t\t\t '#FFC0CB',\n 'rosybrown':\t\t '#BC8F8F',\n 'royalblue':\t\t '#4169E1',\n 'saddlebrown':\t\t '#8B4513',\n 'salmon':\t\t\t '#fb8072',\n 'sandybrown':\t\t '#FAA460',\n 'seaborn':\t\t\t '#EAE7E4',\n 'seagreen':\t\t '#2E8B57',\n 'seashell':\t\t '#FFF5EE',\n 'sienna':\t\t\t '#A0522D',\n 'silver':\t\t\t '#C0C0C0',\n 'skyblue':\t\t\t '#87CEEB',\n 'slateblue':\t\t '#6A5ACD',\n 'slategray':\t\t '#708090',\n 'slategrey':\t\t '#708090',\n 'smurf':\t\t\t '#3E6FB0',\n 'snow':\t\t\t '#FFFAFA',\n 'springgreen':\t\t '#00FF7F',\n 'steelblue':\t\t '#4682B4',\n 'tan':\t\t\t\t '#D2B48C',\n 'teal':\t\t\t '#008080',\n 'thistle':\t\t\t '#D8BFD8',\n 'tomato':\t\t\t '#FF6347',\n 'turquoise':\t\t '#40E0D0',\n 'violet':\t\t\t '#EE82EE',\n 'wheat':\t\t\t '#F5DEB3',\n 'white':\t\t\t '#FFFFFF',\n 'whitesmoke':\t\t '#F5F5F5',\n 'yellow':\t\t\t '#ffff33',\n 'yellowgreen':\t\t '#9ACD32',\n \"henanigans_bg\": \"#242424\",\n \"henanigans_blue1\": \"#5F95DE\",\n \"henanigans_blue2\": \"#93B6E6\",\n \"henanigans_cyan1\": \"#7EC4CF\",\n \"henanigans_cyan2\": \"#B6ECF3\",\n \"henanigans_dark1\": \"#040404\",\n \"henanigans_dark2\": \"#141414\",\n \"henanigans_dialog1\": \"#444459\",\n \"henanigans_dialog2\": \"#5D5D7A\",\n \"henanigans_green1\": \"#8BD155\",\n \"henanigans_green2\": \"#A0D17B\",\n \"henanigans_grey1\": \"#343434\",\n \"henanigans_grey2\": \"#444444\",\n \"henanigans_light1\": \"#A4A4A4\",\n \"henanigans_light2\": \"#F4F4F4\",\n \"henanigans_orange1\": \"#EB9E58\",\n \"henanigans_orange2\": \"#EBB483\",\n \"henanigans_purple1\": \"#C98FDE\",\n \"henanigans_purple2\": \"#AC92DE\",\n \"henanigans_red1\": \"#F77E70\",\n \"henanigans_red2\": 
\"#DE958E\",\n \"henanigans_yellow1\": \"#E8EA7E\",\n \"henanigans_yellow2\": \"#E9EABE\"\n }\n\n# Custom Color Scales\n# ---------------------------------\n\n_custom_scales = {\n 'qual': {\n # dflt only exists to keep backward compatibility after issue 91\n 'dflt': ['orange', 'blue', 'grassgreen', 'purple', 'red', 'teal', 'yellow', 'olive', 'salmon', 'lightblue2'],\n 'original': ['orange', 'blue', 'grassgreen', 'purple', 'red', 'teal', 'yellow', 'olive', 'salmon', 'lightblue2'],\n 'ggplot': ['brick', 'smurf', 'lightviolet', 'mediumgray', 'mustard', 'lime2', 'pinksalmon'],\n 'polar': ['polarblue', 'polarorange', 'polargreen', 'polarpurple', 'polarred', 'polarcyan', 'polarbluelight'],\n 'plotly' : ['rgb(31, 119, 180)', 'rgb(255, 127, 14)', 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',\n 'rgb(148, 103, 189)', 'rgb(140, 86, 75)', 'rgb(227, 119, 194)', 'rgb(127, 127, 127)', 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']\n },\n 'div': {\n\n },\n 'seq': {\n\n }\n\n}\n\n\n# ---------------------------------------------------------------\n# The below functions are based in colorlover by Jack Parmer\n# https://github.com/jackparmer/colorlover/\n# ---------------------------------------------------------------\n\n\n_scales = None\n_scales_names = None\n\n\ndef interp(colors, N):\n def _interp(colors, N):\n try:\n return cl.interp(colors, N)\n except:\n return _interp(colors, N + 1)\n c = _interp(colors, N)\n return list(map(rgb_to_hex, cl.to_rgb(c)))\n\n\ndef scales(scale=None):\n \"\"\"\n Displays a color scale (HTML)\n\n Parameters:\n -----------\n scale : str\n Color scale name\n If no scale name is provided then all scales are returned\n (max number for each scale)\n If scale='all' then all scale combinations available \n will be returned\n\n Example:\n scales('accent')\n scales('all')\n scales()\n \"\"\"\n if scale:\n if scale == 'all':\n display(HTML(cl.to_html(_scales)))\n else:\n display(HTML(cl.to_html(get_scales(scale))))\n else:\n s = ''\n keys = list(_scales_names.keys())\n keys.sort()\n for k in keys:\n scale = get_scales(k)\n s += '<div style=\"display:inline-block;padding:10px;\"><div>{0}</div>{1}</div>'.format(\n k, cl.to_html(scale))\n display(HTML(s))\n\n# Scales Dictionary\n# ---------------------------------\n\n\ndef reset_scales():\n global _scales\n global _scales_names\n scale_cpy = cl.scales.copy()\n\n # Add custom scales\n for k, v in list(_custom_scales.items()):\n if v:\n for k_, v_ in list(v.items()):\n if str(len(v_)) not in scale_cpy:\n scale_cpy[str(len(v_))] = {}\n scale_cpy[str(len(v_))][k][k_] = [\n hex_to_rgb(normalize(_)) for _ in v_]\n\n # Dictionary by Type > Name > N\n _scales = {}\n for k, v in list(scale_cpy.items()):\n for k_, v_ in list(v.items()):\n if k_ not in _scales:\n _scales[k_] = {}\n for k__, v__ in list(v_.items()):\n if k__ not in _scales[k_]:\n _scales[k_][k__] = {}\n _scales[k_][k__][k] = v__\n\n # Dictionary by Name > N\n _scales_names = {}\n for k, v in list(scale_cpy.items()):\n for k_, v_ in list(v.items()):\n for k__, v__ in list(v_.items()):\n k__ = k__.lower()\n if k__ not in _scales_names:\n _scales_names[k__] = {}\n _scales_names[k__][k] = v__\n\n\ndef get_scales(scale=None, n=None):\n \"\"\"\n Returns a color scale \n\n Parameters:\n -----------\n scale : str\n Color scale name\n If the color name is preceded by a minus (-) \n then the scale is inversed\n n : int\n Number of colors \n If n < number of colors available for a given scale then \n the minimum number will be returned \n If n > number of colors available for a given scale then\n the 
maximum number will be returned \n\n Example:\n get_scales('accent',8)\n get_scales('pastel1')\n \"\"\"\n if scale:\n is_reverse = False\n if scale[0] == '-':\n scale = scale[1:]\n is_reverse = True\n d = copy.deepcopy(_scales_names[scale.lower()])\n keys = list(map(int, list(d.keys())))\n cs = None\n if n:\n if n in keys:\n cs = d[str(n)]\n elif n < min(keys):\n cs = d[str(min(keys))]\n if cs is None:\n cs = d[str(max(keys))]\n if is_reverse:\n cs.reverse()\n return cs\n else:\n d = {}\n for k, v in list(_scales_names.items()):\n if isinstance(v, dict):\n keys = list(map(int, list(v.keys())))\n d[k] = v[str(max(keys))]\n else:\n d[k] = v\n return d\n\n\ndef get_colorscale(scale):\n \"\"\"\n Returns a color scale to be used for a plotly figure\n\n Parameters:\n -----------\n scale : str or list\n Color scale name\n If the color name is preceded by a minus (-) \n then the scale is inversed.\n Also accepts a list of colors (rgb,rgba,hex)\n\n Example:\n get_colorscale('accent')\n get_colorscale(['rgb(127,201,127)','rgb(190,174,212)','rgb(253,192,134)'])\n \"\"\"\n\n if type(scale) in string_types:\n scale = get_scales(scale)\n else:\n if type(scale) != list:\n raise Exception(\n \"scale needs to be either a scale name or list of colors\")\n\n cs = [[1.0 * c / (len(scale) - 1), scale[c]] for c in range(len(scale))]\n cs.sort()\n return cs\n\n\nreset_scales()\n"
]
| [
[
"numpy.arange"
]
]
|
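The converters above (`to_rgba`, `rgb_to_hex`, `rgba_to_rgb`) parse 'rgb(...)' strings by calling `eval` on the parenthesized part. As a hedged alternative, and not part of the cufflinks API, a small regex parser recovers the same tuples without executing the string:

```python
# Sketch of an eval()-free parser for the 'rgb(...)'/'rgba(...)' strings above.
import re

def parse_rgb(color):
    """'rgb(23,25,24)' -> (23, 25, 24); also tolerates rgba with a float alpha."""
    nums = re.findall(r'[\d.]+', color)
    return tuple(float(n) if '.' in n else int(n) for n in nums)

print(parse_rgb('rgb(23,25,24)'))       # (23, 25, 24)
print(parse_rgb('rgba(23,25,24,0.5)'))  # (23, 25, 24, 0.5)
```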
zeynepakkalyoncu/BERT-Fine_tune | [
"79b17151e607a03c70bbd7a5015759c1a5263b0e"
]
| [
"src1/main.py"
]
| [
"import random\nimport numpy as np\nimport argparse\n\nimport torch\n\nfrom util import *\nfrom eval import *\nfrom data import DataGenerator\n\nRANDOM_SEED = 12345\nrandom.seed(RANDOM_SEED)\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\nif torch.cuda.is_available():\n torch.cuda.manual_seed_all(RANDOM_SEED)\n\n\ndef train(args):\n if args.load_trained:\n epoch, arch, model, tokenizer, scores = load_checkpoint(args.pytorch_dump_path)\n else:\n model, tokenizer = load_pretrained_model_tokenizer(args.model_type, device=args.device, chinese=args.chinese,\n num_labels=args.num_labels)\n train_dataset = DataGenerator(args.data_path, args.data_name, args.batch_size, tokenizer, \"train\", args.device,\n args.data_format)\n validate_dataset = DataGenerator(args.data_path, args.data_name, args.batch_size, tokenizer, \"dev\", args.device,\n args.data_format, label_map=train_dataset.label_map)\n test_dataset = DataGenerator(args.data_path, args.data_name, args.batch_size, tokenizer, \"test\", args.device,\n args.data_format, label_map=train_dataset.label_map)\n optimizer = init_optimizer(model, args.learning_rate, args.warmup_proportion, args.num_train_epochs,\n train_dataset.data_size, args.batch_size)\n\n model.train()\n global_step = 0\n best_score = 0\n step = 0\n for epoch in range(1, args.num_train_epochs + 1):\n print(\"epoch {} ............\".format(epoch))\n tr_loss = 0\n # random.shuffle(train_dataset)\n while True:\n batch = train_dataset.load_batch()\n if batch is None:\n break\n tokens_tensor, segments_tensor, mask_tensor, label_tensor = batch[:4]\n loss = model(tokens_tensor, segments_tensor, mask_tensor, label_tensor)\n loss.backward()\n tr_loss += loss.item()\n optimizer.step()\n model.zero_grad()\n global_step += 1\n\n if args.eval_steps > 0 and step % args.eval_steps == 0:\n print(\"step: {}\".format(step))\n best_score = eval_select(model, tokenizer, validate_dataset, test_dataset, args.pytorch_dump_path,\n best_score, epoch, args.model_type)\n\n step += 1\n\n print(\"[train] loss: {}\".format(tr_loss))\n best_score = eval_select(model, tokenizer, validate_dataset, test_dataset, args.pytorch_dump_path, best_score,\n epoch, args.model_type)\n\n scores = test(args, split=\"test\")\n print_scores(scores)\n\n\ndef eval_select(model, tokenizer, validate_dataset, test_dataset, model_path, best_score, epoch, arch):\n scores_dev = test(args, split=\"dev\", model=model, tokenizer=tokenizer, test_dataset=validate_dataset)\n print_scores(scores_dev, mode=\"dev\")\n scores_test = test(args, split=\"test\", model=model, tokenizer=tokenizer, test_dataset=test_dataset)\n print_scores(scores_test)\n\n if scores_dev[1][0] > best_score:\n best_score = scores_dev[1][0]\n # Save pytorch-model\n model_path = \"{}_{}\".format(model_path, epoch)\n print(\"Save PyTorch model to {}\".format(model_path))\n save_checkpoint(epoch, arch, model, tokenizer, scores_dev, model_path, test_dataset.label_map)\n\n return best_score\n\ndef test(args, split=\"test\", model=None, tokenizer=None, test_dataset=None):\n if model is None:\n epoch, arch, model, tokenizer, scores, label_map = load_checkpoint(args.pytorch_dump_path)\n assert test_dataset is None\n print(\"Load {} set\".format(split))\n test_dataset = DataGenerator(args.data_path, args.data_name, args.batch_size, tokenizer, split, args.device,\n args.data_format, label_map=label_map)\n\n model.eval()\n prediction_score_list, prediction_index_list, labels = [], [], []\n f = open(args.output_path, \"w\")\n f2 = open(args.output_path2, \"w\")\n qrelf 
= open(split + '.' + args.qrels_path, \"w\")\n\n lineno = 1\n label_map_reverse = {}\n for k in test_dataset.label_map:\n label_map_reverse[test_dataset.label_map[k]] = k\n qid_tensor, docid_tensor = None, None\n while True:\n batch = test_dataset.load_batch()\n if batch is None:\n break\n if len(batch) == 6:\n tokens_tensor, segments_tensor, mask_tensor, label_tensor, qid_tensor, docid_tensor = batch\n elif len(batch) == 5:\n tokens_tensor, segments_tensor, mask_tensor, label_tensor, qid_tensor = batch\n else:\n tokens_tensor, segments_tensor, mask_tensor, label_tensor = batch\n # print(tokens_tensor.shape, segments_tensor.shape, mask_tensor.shape)\n predictions = model(tokens_tensor, segments_tensor, mask_tensor)\n scores = predictions.cpu().detach().numpy()\n predicted_index = list(torch.argmax(predictions, dim=-1).cpu().numpy())\n if args.data_format == \"glue\" or args.data_format == \"regression\":\n predicted_score = list(predictions[:, 0].cpu().detach().numpy())\n else:\n predicted_score = list(predictions[:, 1].cpu().detach().numpy())\n prediction_score_list.extend(predicted_score)\n label_batch = list(label_tensor.cpu().detach().numpy())\n label_new = []\n predicted_index_new = []\n if args.data_format == \"trec\":\n qids = qid_tensor.cpu().detach().numpy()\n if docid_tensor is not None:\n docids = docid_tensor.cpu().detach().numpy()\n else:\n docids = list(range(lineno, lineno + len(label_batch)))\n for p, qid, docid, s, label in zip(predicted_index, qids, docids, \\\n scores, label_batch):\n f.write(\"{}\\t{}\\n\".format(lineno, p))\n f2.write(\"{} Q0 {} {} {} bert\\n\".format(qid, docid, lineno, s[1]))\n qrelf.write(\"{} Q0 {} {}\\n\".format(qid, docid, label))\n lineno += 1\n elif args.data_format == \"ontonote\":\n tokens = tokens_tensor.cpu().detach().numpy()\n for token, p, label in zip(tokens, predicted_index, label_batch):\n assert len(token) == len(p)\n assert len(token) == len(label)\n predicted_index_tmp = []\n label_tmp = []\n for a, b, c in zip(token, p, label):\n a = tokenizer.convert_ids_to_tokens([a])[0]\n if a == \"[SEP]\":\n f.write(\"\\n\")\n break\n predicted_index_tmp.append(b)\n label_tmp.append(c)\n b = label_map_reverse[b]\n c = label_map_reverse[c]\n f.write(\"{} {} {}\\n\".format(a, b, c))\n predicted_index_new.append(predicted_index_tmp)\n label_new.append(label_tmp)\n elif args.data_format == \"robust04\":\n qids = qid_tensor.cpu().detach().numpy()\n docids = docid_tensor.cpu().detach().numpy()\n assert len(qids) == len(predicted_index)\n for p, l, s, qid, docid in zip(predicted_index, label_batch, scores, qids, docids):\n f.write(\"{} Q0 {} {} {} bert {}\\n\".format(qid, docid, lineno, s[1], l))\n lineno += 1\n else:\n if qid_tensor is None:\n qids = list(range(lineno, lineno + len(label_batch)))\n else:\n qids = qid_tensor.cpu().detach().numpy()\n assert len(qids) == len(predicted_index)\n for qid, p, l in zip(qids, predicted_index, label_batch):\n f.write(\"{},{},{}\\n\".format(qid, p, l))\n\n label_new = label_new if len(label_new) > 0 else label_batch\n predicted_index_new = predicted_index_new if len(predicted_index_new) > 0 else predicted_index\n labels.extend(label_new)\n prediction_index_list += predicted_index_new\n del predictions\n\n f.close()\n f2.close()\n qrelf.close()\n torch.cuda.empty_cache()\n model.train()\n\n if args.data_format == \"trec\":\n map, mrr, p30 = evaluate_trec(predictions_file=args.output_path2, \\\n qrels_file=split + '.' 
+ args.qrels_path)\n return [[\"map\", \"mrr\", \"p30\"], [map, mrr, p30]]\n elif args.data_format == \"glue\" or args.data_format == \"regression\":\n pearson_r, spearman_r = evaluate_glue(prediction_score_list, labels)\n return [[\"pearson_r\", \"spearman_r\"], [pearson_r, spearman_r]]\n elif args.data_format == \"ontonote\":\n acc, pre, rec, f1 = evaluate_ner(prediction_index_list, labels, test_dataset.label_map)\n else:\n acc, pre, rec, f1 = evaluate_classification(prediction_index_list, labels)\n return [[\"f1\", \"acc\", \"precision\", \"recall\"], [f1, acc, pre, rec]]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', default='train', help='[train, test]')\n parser.add_argument('--device', default='cuda', help='[cuda, cpu]')\n parser.add_argument('--batch_size', default=16, type=int, help='[1, 8, 16, 32]')\n parser.add_argument('--data_size', default=41579, type=int, help='[tweet2014: 41579]')\n parser.add_argument('--learning_rate', default=1e-5, type=float, help='')\n parser.add_argument('--num_train_epochs', default=3, type=int, help='')\n parser.add_argument('--data_path', default='/data/wyang/ShortTextSemanticSimilarity/data/corpora/', help='')\n parser.add_argument('--data_name', default='annotation', help='annotation or youzan_new or tweet')\n parser.add_argument('--pytorch_dump_path', default='saved.model', help='')\n parser.add_argument('--load_trained', action='store_true', default=False, help='')\n parser.add_argument('--chinese', action='store_true', default=False, help='')\n parser.add_argument('--eval_steps', default=-1, type=int,\n help='evaluation per [eval_steps] steps, -1 for evaluation per epoch')\n parser.add_argument('--model_type', default='BertForNextSentencePrediction', help='')\n parser.add_argument('--output_path', default='predict.tmp', help='')\n parser.add_argument('--output_path2', default='predict.trec', help='')\n parser.add_argument('--qrels_path', default='qrels.trec', help='')\n parser.add_argument('--num_labels', default=2, type=int, help='')\n parser.add_argument('--data_format', default='classification', help='[classification, trec, tweet]')\n parser.add_argument('--warmup_proportion', default=0.1, type=float,\n help='Proportion of training to perform linear learning rate warmup. E.g., 0.1 = 10%% of training.')\n args = parser.parse_args()\n\n if args.mode == \"train\":\n train(args)\n else:\n scores = test(args)\n print_scores(scores)\n"
]
| [
[
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.argmax"
]
]
|
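The record above follows a common PyTorch fine-tuning skeleton: seed every RNG up front, loop over batches calling `loss.backward()` / `optimizer.step()` / `model.zero_grad()`, and run a dev-set evaluation every `eval_steps` steps. A minimal, self-contained sketch of that pattern is below; the tiny `nn.Linear` model, the random tensors, and the `evaluate` helper are illustrative stand-ins, not the record's `util`/`data` modules, which load a pretrained BERT model and a `DataGenerator`.

```python
# Minimal sketch of the seed-then-train-then-eval pattern from the record.
# The model and data here are illustrative stand-ins, not the original code.
import random

import numpy as np
import torch
import torch.nn as nn

RANDOM_SEED = 12345
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(RANDOM_SEED)

model = nn.Linear(8, 2)  # stand-in for the BERT classifier
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
loss_fn = nn.CrossEntropyLoss()

def evaluate(m, x, y):
    m.eval()
    with torch.no_grad():
        acc = (m(x).argmax(dim=-1) == y).float().mean().item()
    m.train()  # the record also flips back to train mode after eval
    return acc

x_dev, y_dev = torch.randn(32, 8), torch.randint(0, 2, (32,))
model.train()
for step in range(1, 101):
    x, y = torch.randn(16, 8), torch.randint(0, 2, (16,))
    loss = loss_fn(model(x), y)
    loss.backward()
    optimizer.step()
    model.zero_grad()  # matches the record's zero_grad placement after step()
    if step % 50 == 0:  # periodic eval, like the record's eval_steps
        print(f"step {step}: dev acc {evaluate(model, x_dev, y_dev):.3f}")
```

Seeding CUDA separately via `torch.cuda.manual_seed_all` matters when multiple GPUs are visible; the CPU seed alone does not cover them.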
fulequn/DLAction | [
"da2ff080f7a65f89010a5829b86fc1b45beb9dc8"
]
| [
"classifiers/chapter5/bn_layers.py"
]
| [
"#-*- coding: utf-8 -*-\nimport sys, os\nsys.path.append(os.path.realpath(os.path.dirname(os.path.realpath(__file__))))\n\nimport numpy as np\nfrom layers import *\nfrom dropout_layers import *\n\ndef batchnorm_forward(x, gamma, beta, bn_param):\n \"\"\"\n 使用类似动量衰减的运行时平均,计算总体均值与方差 例如:\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n Input:\n - x: 数据(N, D)\n - gamma: 缩放参数 (D,)\n - beta: 平移参数 (D,)\n - bn_param: 字典型,使用下列键值:\n - mode: 'train' 或'test'; \n - eps: 保证数值稳定\n - momentum: 运行时平均衰减因子 \n - running_mean: 形状为(D,)的运行时均值\n - running_var : 形状为 (D,)的运行时方差\n\n Returns 元组:\n - out: 输出(N, D)\n - cache: 用于反向传播的缓存\n \"\"\"\n # 不同阶段\n mode = bn_param['mode']\n # 防止为0\n eps = bn_param.get('eps', 1e-5)\n # 动量衰减\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape # N:数据数目 D:数据维度\n # 获取运行时均差以及运行时方差,默认为0\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #############################################################################\n # 任务:实现训练阶段BN的前向传播 #\n # 首先,你需要计算输入数据的均值和方差 ; #\n # 然后,使用均值和方差将数据进行归一化处理; #\n # 之后,使用gamma和beta参数将数据进行缩放和平移; #\n # 最后,将该批数据均值和方差添加到累积均值和方差中; #\n # 注意:将反向传播时所需的所有中间值保存在cache中。 #\n #############################################################################\n mu = 1/float(N)*np.sum(x, axis=0)\n xmu = x-mu\n carre = xmu**2\n var = 1/float(N)*np.sum(carre, axis=0)\n sqrtvar = np.sqrt(var+eps)\n invvar = 1./sqrtvar\n va2 = xmu*invvar\n va3 = gamma*va2\n out = va3+beta\n running_mean = momentum*running_mean+(1.0-momentum)*mu\n running_var = momentum * running_var+(1.0-momentum)*var\n cache=(mu, xmu, carre, var, sqrtvar, invvar, va2, va3, gamma, beta, x, bn_param)\n #############################################################################\n # 结束编码 #\n #############################################################################\n elif mode == 'test':\n #############################################################################\n # 任务:实现测试阶段BN的前向传播 #\n # 首先,使用运行时均值与方差归一化数据, #\n # 然后,使用gamma和beta参数缩放,平移数据。 #\n #############################################################################\n mu = running_mean\n var = running_var\n xhat = (x-mu)/np.sqrt(var+eps)\n out = gamma*xhat+beta\n cache = (mu, var, gamma, beta, bn_param)\n #############################################################################\n # 结束编码 #\n #############################################################################\n else:\n raise ValueError('无法识别的BN模式: \"%s\"' % mode)\n \n # 更新运行时均值,方差\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache\n\n\ndef batchnorm_backward(dout, cache):\n \"\"\"\n BN反向传播 \n Inputs:\n - dout: 上层梯度 (N, D)\n - cache: 前向传播时的缓存.\n \n Returns 元组:\n - dx: 数据梯度 (N, D)\n - dgamma: gamma梯度 (D,)\n - dbeta: beta梯度 (D,)\n \"\"\"\n dx, dgamma, dbeta = None, None, None\n #############################################################################\n # 任务:实现BN反向传播 #\n # 将结果分别保存在dx,dgamma,dbeta中 #\n #############################################################################\n mu, xmu, carre, var, sqrtvar, invvar, va2, va3, gamma, beta, x, bn_param=cache\n eps = bn_param.get('eps', 1e-5)\n N, D = dout.shape\n # 第9步反向传播\n dva3 = dout\n dbeta = np.sum(dout, axis=0)\n # 第8步反向传播\n dva2 = gamma*dva3\n dgamma = np.sum(va2*dva3, axis=0) \n # 第7步反向传播\n dxmu = invvar*dva2\n dinvvar = np.sum(xmu*dva2, 
axis=0)\n # 第6步反向传播\n dsqrtvar = -1./(sqrtvar**2)*dinvvar\n # 第5步反向传播\n dvar = 0.5*(var+eps)**(-0.5)*dsqrtvar\n # 第4步反向传播\n dcarre = 1/float(N)*np.ones((carre.shape))*dvar\n # 第3步反向传播\n dxmu += 2*xmu*dcarre\n # 第2步反向传播\n dx = dxmu\n dmu = -np.sum(dxmu, axis=0)\n # 第1步反向传播\n dx += 1/float(N)*np.ones((dxmu.shape))*dmu\n #############################################################################\n # 结束编码 #\n #############################################################################\n return dx, dgamma, dbeta\n\n\ndef batchnorm_backward_alt(dout, cache):\n \"\"\"\n 可选的BN反向传播\n \"\"\"\n dx, dgamma, dbeta = None, None, None\n mu, xmu, carre, var, sqrtvar, invvar, va2, va3, gamma, beta, x, bn_param = cache\n eps = bn_param.get('eps', 1e-5)\n N, D = dout.shape\n dbeta = np.sum(dout, axis=0)\n dgamma = np.sum((x - mu) * (var + eps)**(-1. / 2.) * dout, axis=0)\n dx = (1./N) * gamma * (var + eps)**(-1./2.)*(N*dout-np.sum(\n dout, axis=0)-(x-mu)*(var+eps)**(-1.0)*np.sum(dout*(x-mu),axis=0))\n \n return dx, dgamma, dbeta\n\n\ndef affine_bn_relu_forward(x,w,b,gamma, beta,bn_param):\n x_affine,cache_affine= affine_forward(x,w,b)\n x_bn,cache_bn = batchnorm_forward(x_affine,gamma, beta,bn_param)\n out,cache_relu = relu_forward(x_bn)\n cache = (cache_affine,cache_bn,cache_relu)\n return out,cache\n\ndef affine_bn_relu_backward(dout,cache):\n cache_affine,cache_bn,cache_relu = cache\n drelu = relu_backward(dout,cache_relu)\n dbn,dgamma, dbeta= batchnorm_backward_alt(drelu,cache_bn)\n dx,dw,db = affine_backward(dbn,cache_affine)\n return dx,dw,db,dgamma,dbeta\n\n"
]
| [
[
"numpy.sum",
"numpy.ones",
"numpy.sqrt",
"numpy.zeros"
]
]
|
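The closed-form gradient in the record's `batchnorm_backward_alt` is easy to verify numerically. The sketch below re-implements the train-mode forward and that same simplified backward in plain NumPy and compares `dx` against a finite-difference estimate; the helper names are mine, not the record's.

```python
# NumPy sketch of train-mode batchnorm and the simplified backward from
# batchnorm_backward_alt above, checked against finite differences.
import numpy as np

def bn_forward(x, gamma, beta, eps=1e-5):
    mu = x.mean(axis=0)
    var = x.var(axis=0)  # ddof=0, i.e. the 1/N variance used in the record
    xhat = (x - mu) / np.sqrt(var + eps)
    return gamma * xhat + beta, (x, mu, var, gamma, eps)

def bn_backward(dout, cache):
    x, mu, var, gamma, eps = cache
    N = x.shape[0]
    # Same closed form as batchnorm_backward_alt:
    # dx = (1/N) * gamma * (var+eps)^(-1/2) *
    #      (N*dout - sum(dout) - (x-mu)*(var+eps)^(-1) * sum(dout*(x-mu)))
    dx = (1.0 / N) * gamma * (var + eps) ** -0.5 * (
        N * dout - dout.sum(axis=0)
        - (x - mu) * (var + eps) ** -1.0 * (dout * (x - mu)).sum(axis=0)
    )
    dgamma = ((x - mu) * (var + eps) ** -0.5 * dout).sum(axis=0)
    dbeta = dout.sum(axis=0)
    return dx, dgamma, dbeta

rng = np.random.default_rng(0)
x = rng.normal(size=(6, 4))
gamma, beta = rng.normal(size=4), rng.normal(size=4)
dout = rng.normal(size=(6, 4))

out, cache = bn_forward(x, gamma, beta)
dx, _, _ = bn_backward(dout, cache)

# Finite-difference check of d(sum(out * dout))/dx on one input entry.
h = 1e-6
xp, xm = x.copy(), x.copy()
xp[0, 0] += h
xm[0, 0] -= h
num = ((bn_forward(xp, gamma, beta)[0]
        - bn_forward(xm, gamma, beta)[0]) * dout).sum() / (2 * h)
print(np.isclose(num, dx[0, 0], atol=1e-4))  # expect True
```

The simplified form avoids materializing the step-by-step intermediates (`sqrtvar`, `invvar`, etc.) that the staged `batchnorm_backward` in the record carries through its cache.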
liujuanLT/TensorRT | [
"611dba63880da20a21771a0e9941a1fa0039887d"
]
| [
"tools/onnx-graphsurgeon/onnx_graphsurgeon/ir/graph.py"
]
| [
"#\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport copy\nfrom collections import OrderedDict, defaultdict\nfrom typing import Sequence\n\nimport numpy as np\nfrom onnx_graphsurgeon.ir.node import Node\nfrom onnx_graphsurgeon.ir.tensor import Constant, Tensor, Variable\nfrom onnx_graphsurgeon.logger import G_LOGGER\nfrom onnx_graphsurgeon.util import misc\n\n\nclass NodeIDAdder(object):\n def __init__(self, graph):\n self.graph = graph\n\n def __enter__(self):\n # To get unique ids for each node, add an `id` attribute. This will be removed before the function returns.\n # Using the index in the node list allows the same object to count as different nodes.\n for index, node in enumerate(self.graph.nodes):\n node.id = index\n\n def __exit__(self, exc_type, exc_value, traceback):\n for node in self.graph.nodes:\n del node.id\n\n\nclass Graph(object):\n \"\"\"\n Represents a graph containing nodes and tensors.\n \"\"\"\n DEFAULT_OPSET = 11\n OPSET_FUNC_MAP = defaultdict(dict) # Ops registered for specific opsets.\n GLOBAL_FUNC_MAP = dict() # Ops registered for ALL opsets.\n\n\n @staticmethod\n def register(opsets=None):\n \"\"\"\n Registers a function with the Graph class for the specified group of opsets.\n After registering the function, it can be accessed like a normal member function.\n\n For example:\n ::\n\n @Graph.register()\n def add(self, a, b):\n return self.layer(op=\"Add\", inputs=[a, b], outputs=[\"add_out_gs\"])\n\n graph.add(a, b)\n\n Args:\n opsets (Sequence[int]):\n A group of opsets for which to register the function. Multiple functions with the same\n name may be registered simultaneously if they are registered for different opsets.\n Registering a function with a duplicate name for the same opsets will overwrite any\n function previously registered for those opsets. By default, the function is\n registered for all opsets.\n \"\"\"\n def register_func(func):\n if hasattr(Graph, func.__name__):\n G_LOGGER.warning(\"Registered function: {:} is hidden by a Graph attribute or function with the same name. \"\n \"This function will never be called!\".format(func.__name__))\n\n # Default behavior is to register functions for all opsets.\n if opsets is None:\n Graph.GLOBAL_FUNC_MAP[func.__name__] = func\n else:\n for opset in opsets:\n Graph.OPSET_FUNC_MAP[opset][func.__name__] = func\n return func\n return register_func\n\n\n def __init__(self, nodes: Sequence[Node]=None, inputs: Sequence[Tensor]=None, outputs: Sequence[Tensor]=None, name=None, doc_string=None, opset=None, import_domains=None):\n \"\"\"\n Args:\n nodes (Sequence[Node]): A list of the nodes in this graph.\n inputs (Sequence[Tensor]): A list of graph input Tensors.\n outputs (Sequence[Tensor]): A list of graph output Tensors.\n name (str): The name of the graph. Defaults to \"onnx_graphsurgeon_graph\".\n doc_string (str): A doc_string for the graph. 
Defaults to \"\".\n \"\"\"\n self.nodes = misc.default_value(nodes, [])\n self.inputs = list(misc.default_value(inputs, []))\n self.outputs = list(misc.default_value(outputs, []))\n\n self.name = misc.default_value(name, \"onnx_graphsurgeon_graph\")\n self.__name__ = self.name\n\n self.doc_string = misc.default_value(doc_string, \"\")\n self.opset = misc.default_value(opset, Graph.DEFAULT_OPSET)\n self.import_domains = misc.default_value(import_domains, None)\n # Printing graphs can be very expensive\n G_LOGGER.ultra_verbose(lambda: \"Created Graph: {:}\".format(self))\n # For layer() function\n self.name_idx = 0\n\n\n def __getattr__(self, name):\n try:\n return super().__getattribute__(name)\n except AttributeError as err:\n # Opset specific ops always take priority over global ops.\n if self.opset in Graph.OPSET_FUNC_MAP and name in Graph.OPSET_FUNC_MAP[self.opset]:\n return lambda *args, **kwargs: Graph.OPSET_FUNC_MAP[self.opset][name](self, *args, **kwargs)\n\n if name in Graph.GLOBAL_FUNC_MAP:\n return lambda *args, **kwargs: Graph.GLOBAL_FUNC_MAP[name](self, *args, **kwargs)\n\n G_LOGGER.error(\"No function: {:} registered for opset: {:}\".format(name, self.opset))\n raise err\n\n\n def __setattr__(self, name, value):\n # We don't want graph inputs/outputs to be SynchronizedLists\n if name in [\"inputs\", \"outputs\"]:\n value = list(value)\n return super().__setattr__(name, value)\n\n\n def __eq__(self, other: \"Graph\"):\n nodes_match = len(self.nodes) == len(other.nodes) and all([node == other_node for node, other_node in zip(self.nodes, other.nodes)])\n inputs_match = len(self.inputs) == len(other.inputs) and all([inp == other_inp for inp, other_inp in zip(self.inputs, other.inputs)])\n outputs_match = len(self.outputs) == len(other.outputs) and all([out == other_out for out, other_out in zip(self.outputs, other.outputs)])\n return nodes_match and inputs_match and outputs_match\n\n\n def node_ids(self):\n \"\"\"\n Returns a context manager that supplies unique integer IDs for Nodes in the Graph.\n\n For example:\n ::\n\n with graph.node_ids():\n assert graph.nodes[0].id != graph.nodes[1].id\n\n Returns:\n NodeIDAdder: A context manager that supplies unique integer IDs for Nodes.\n \"\"\"\n return NodeIDAdder(self)\n\n\n def _get_node_id(self, node):\n try:\n return node.id\n except AttributeError:\n G_LOGGER.critical(\"Encountered a node not in the graph:\\n{:}.\\n\\n\"\n \"To fix this, please append the node to this graph's `nodes` attribute.\".format(node))\n\n\n # A tensor is local if it is produced in this graph, or is explicitly a graph input.\n def _local_tensors(self):\n local_tensors = {t.name: t for node in self.nodes for t in node.outputs if not t.is_empty()}\n local_tensors.update({t.name: t for t in self.inputs})\n return local_tensors\n\n\n # Returns tensors used by this graph which are not present in the graph.\n # These may come from an outer graph for example.\n def _foreign_tensors(self):\n local_tensors = self._local_tensors()\n foreign_tensors = {}\n\n def is_foreign_tensor(tensor):\n return tensor.name not in local_tensors\n\n for node in self.nodes:\n foreign_tensors.update({t.name: t for t in node.inputs if is_foreign_tensor(t)})\n\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n subgraph_foreign_tensors = attr._foreign_tensors()\n # Some of the foreign tensors from a subgraph may come from this graph.\n subgraph_foreign_tensors = {\n t.name: t\n for t in subgraph_foreign_tensors.values()\n if is_foreign_tensor(t)\n }\n 
foreign_tensors.update(subgraph_foreign_tensors)\n\n return foreign_tensors\n\n\n def _get_used_node_ids(self):\n local_tensors = self._local_tensors()\n\n # We only want to consider tensors that are local to this graph, because we can't\n # remove external tensors (e.g. from outer graphs) anyway.\n class IgnoreDupAndForeign(object):\n def __init__(self, initial_tensors=None):\n tensors = misc.default_value(initial_tensors, [])\n self.seen_tensors = set([tensor.name for tensor in tensors])\n\n\n def __call__(self, tensor):\n # Returns True if a tensor should included,\n # False if it should be filtered out.\n if tensor.is_empty():\n return True\n elif tensor.name not in local_tensors:\n return False\n elif tensor.name not in self.seen_tensors:\n self.seen_tensors.add(tensor.name)\n return True\n return False\n\n\n # Traverse backwards from outputs to find all used nodes.\n ignore_tensors = IgnoreDupAndForeign()\n used_tensors = list(filter(ignore_tensors, self.outputs))\n used_node_ids = set()\n\n index = 0\n while index < len(used_tensors):\n used_tensor = used_tensors[index]\n index += 1\n for node in used_tensor.inputs:\n # Must cast to list here, otherwise node_used_tensors will be SynchronizedList!\n node_used_tensors = list(node.inputs)\n\n # If a node includes a subgraph, get any tensors that it uses from the outer graph.\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n node_used_tensors += list(attr._foreign_tensors().values())\n\n used_node_ids.add(self._get_node_id(node))\n used_tensors.extend(filter(ignore_tensors, node_used_tensors))\n return used_node_ids, used_tensors\n\n\n def cleanup(self, remove_unused_node_outputs=False, recurse_subgraphs=True, remove_unused_graph_inputs=False):\n \"\"\"\n Removes unused nodes and tensors from the graph.\n A node or tensor is considered unused if it does not contribute to any of the graph outputs.\n\n Additionally, any producer nodes of graph input tensors, as well as consumer nodes of graph output\n tensors that are not in the graph, are removed from the graph.\n\n *Note: This function will never modify graph output tensors.*\n\n Args:\n remove_unused_node_outputs (bool): Whether to remove unused output tensors of nodes. This will never remove\n empty-tensor (i.e. optional, but omitted) outputs. 
Defaults to False.\n recurse_subgraphs (bool):\n Whether to recursively cleanup subgraphs.\n Defaults to True.\n remove_unused_graph_inputs (bool):\n Whether to remove unused graph inputs.\n Defaults to False.\n\n Returns:\n self\n \"\"\"\n def cleanup_subgraphs():\n for node in self.nodes:\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n attr.cleanup(remove_unused_node_outputs=remove_unused_node_outputs,\n remove_unused_graph_inputs=remove_unused_graph_inputs)\n\n\n if recurse_subgraphs:\n cleanup_subgraphs()\n\n G_LOGGER.debug(\"Cleaning up {:}\".format(self.name))\n\n with self.node_ids():\n # Graph input producers must be removed first so used_node_ids is correct.\n for inp in self.inputs:\n inp.inputs.clear()\n\n used_node_ids, used_tensors = self._get_used_node_ids()\n\n inputs = []\n for inp in self.inputs:\n if inp in used_tensors or not remove_unused_graph_inputs:\n inputs.append(inp)\n else:\n G_LOGGER.ultra_verbose(\"Removing unused input: {:}\".format(inp))\n self.inputs = inputs\n\n nodes = []\n for node in self.nodes:\n if self._get_node_id(node) in used_node_ids:\n nodes.append(node)\n else:\n node.inputs.clear()\n node.outputs.clear()\n G_LOGGER.ultra_verbose(\"Removing unused node: {:}\".format(node))\n\n # Remove any hanging tensors - tensors without outputs\n if remove_unused_node_outputs:\n graph_output_names = set([tensor.name for tensor in self.outputs])\n for node in nodes:\n def is_hanging_tensor(tensor):\n return not tensor.is_empty() and len(tensor.outputs) == 0 and tensor.name not in graph_output_names\n\n to_remove = [out for out in node.outputs if is_hanging_tensor(out)]\n for out in to_remove:\n if out in node.outputs:\n node.outputs.remove(out)\n\n self.nodes = nodes\n return self\n\n\n def toposort(self, recurse_subgraphs=True):\n \"\"\"\n Topologically sort the graph in place.\n\n Args:\n recurse_subgraphs (bool):\n Whether to recursively topologically sort subgraphs.\n Defaults to True.\n\n Returns:\n self\n \"\"\"\n if recurse_subgraphs:\n for node in self.nodes:\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n attr.toposort()\n\n G_LOGGER.debug(\"Topologically sorting {:}\".format(self.name))\n\n # Keeps track of a node and it's level in the graph hierarchy.\n # 0 corresponds to an input node, N corresponds to a node with N layers of inputs.\n class HierarchyDescriptor(object):\n def __init__(self, node=None, level=None):\n self.node = node\n self.level = level\n\n def __lt__(self, other):\n return self.level < other.level\n\n hierarchy_levels = {} # Dict[int, HierarchyDescriptor]\n\n local_tensors = self._local_tensors()\n\n def get_hierarchy_level(node):\n # Return all local nodes that contribute to this node.\n def get_input_nodes(node):\n inputs = {}\n for tensor in node.inputs:\n if tensor.name in local_tensors:\n for inp_node in tensor.inputs:\n inputs[self._get_node_id(inp_node)] = inp_node\n return inputs.values()\n\n if self._get_node_id(node) in hierarchy_levels:\n return hierarchy_levels[self._get_node_id(node)].level\n\n # The level of a node is the level of it's highest input + 1.\n try:\n max_input_level = max([get_hierarchy_level(input_node) for input_node in get_input_nodes(node)] + [-1])\n except RecursionError:\n G_LOGGER.critical(\"Cycle detected in graph! 
Are there tensors with duplicate names in the graph?\")\n\n return max_input_level + 1\n\n with self.node_ids():\n for node in self.nodes:\n hierarchy_levels[self._get_node_id(node)] = HierarchyDescriptor(node, level=get_hierarchy_level(node))\n\n self.nodes = [hd.node for hd in sorted(hierarchy_levels.values())]\n return self\n\n\n def tensors(self, check_duplicates=False):\n \"\"\"\n Creates a tensor map of all the tensors used by this graph by walking over all nodes. Empty tensors are omitted from this map.\n\n Tensors are guaranteed to be in order of the nodes in the graph. Hence, if the graph is topologically sorted, the tensor map will be too.\n\n Args:\n check_duplicates (bool): Whether to fail if multiple tensors with the same name are encountered.\n\n Raises:\n OnnxGraphSurgeonException: If check_duplicates is True and multiple distinct tensors in the graph share the same name.\n\n Returns:\n OrderedDict[str, Tensor]: A mapping of tensor names to tensors.\n \"\"\"\n tensor_map = OrderedDict()\n\n def add_to_tensor_map(tensor):\n if not tensor.is_empty():\n if check_duplicates and tensor.name in tensor_map and not (tensor_map[tensor.name] is tensor):\n G_LOGGER.critical(\"Found distinct tensors that share the same name:\\n[id: {:}] {:}\\n[id: {:}] {:}\"\n .format(id(tensor_map[tensor.name]), tensor_map[tensor.name], id(tensor), tensor))\n\n tensor_map[tensor.name] = tensor\n\n\n # I/O tensors may not be attached to nodes.\n for io_tensor in self.inputs:\n add_to_tensor_map(io_tensor)\n\n for node in self.nodes:\n for tensor in node.inputs + node.outputs:\n add_to_tensor_map(tensor)\n\n for io_tensor in self.outputs:\n add_to_tensor_map(io_tensor)\n\n return tensor_map\n\n\n def fold_constants(self, fold_shapes=True, recurse_subgraphs=True, partitioning=None, error_ok=True):\n \"\"\"\n Folds constants in-place in the graph. The graph must be topologically sorted prior to\n calling this function (see `toposort()`).\n\n This function will not remove constants after folding them. In order to get rid of\n these hanging nodes, you can run the `cleanup()` function.\n\n *Note: Due to how this function is implemented, the graph must be exportable to ONNX,\n and evaluable in ONNX-Runtime. Additionally, ONNX-Runtime must be installed.*\n\n Args:\n fold_shapes (bool):\n Whether to fold `Shape` nodes in the graph.\n This requires shapes to be inferred in the graph, and can only fold\n static shapes.\n Defaults to True.\n recurse_subgraphs (bool):\n Whether to recursively fold constants in subgraphs.\n Defaults to True.\n partitioning (Union[str, None]):\n Whether/How to partition the graph so that errors in folding one\n part of a model do not affect other parts. Available modes are:\n\n - None: Do not partition the graph. If inference fails, no constants are folded.\n - \"basic\": Partition the graph. If inference fails in one partition, other partitions will\n remain unaffected.\n - \"recursive\": Parition the graph recursively. 
If inference fails in a partition, the partition\n will be further paritioned.\n\n Defaults to None.\n error_ok (bool):\n Whether inference errors should be suppressed.\n When this is enabled, any errors encountered during inference will be re-raised.\n Defaults to True.\n\n Returns:\n self\n \"\"\"\n import onnxruntime as rt\n from onnx_graphsurgeon.exporters.onnx_exporter import export_onnx\n\n PARTITIONING_MODES = [None, \"basic\", \"recursive\"]\n if partitioning not in PARTITIONING_MODES:\n G_LOGGER.critical(\"Argument for parameter 'partitioning' must be one of: {:}\".format(PARTITIONING_MODES))\n\n G_LOGGER.debug(\"Folding constants in {:}\".format(self.name))\n\n graph_clone = self.copy()\n clone_tensors = graph_clone.tensors()\n\n # We find graph constants in two passes:\n # Pass 1 finds all Constant tensors in the graph, then walks over their outputs.\n # Pass 2 searches for Shape nodes that have variable inputs (i.e. not marked const in pass 1)\n # and turns them into Constants iff the input has a statically known shape.\n\n def update_foldable_outputs(graph_constants):\n def is_foldable(node):\n def all_tensors_const(tensors):\n return all([t.name in graph_constants for t in tensors])\n\n if not all_tensors_const(node.inputs):\n return False\n\n all_subgraph_foreign_tensors_const = True\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n foreign_tensors = attr._foreign_tensors().values()\n all_subgraph_foreign_tensors_const &= all_tensors_const(foreign_tensors)\n return all_subgraph_foreign_tensors_const\n\n\n # Walks along the outputs of graph_constants to see if they can also be computed statically.\n # Since the graph is topologically sorted, this should find all constant nodes in the graph.\n for node in graph_clone.nodes:\n if is_foldable(node):\n graph_constants.update({out.name: out for out in node.outputs})\n return graph_constants\n\n # Pass 1: Non-shape Constant Folding\n\n graph_constants = {name: tensor for name, tensor in clone_tensors.items() if isinstance(tensor, Constant)}\n\n # Replaces outputs of Constant nodes with constant tensors\n for tensor in clone_tensors.values():\n if len(tensor.inputs) == 1:\n node = tensor.inputs[0]\n if node.op == \"Constant\":\n graph_constants[tensor.name] = tensor.to_constant(node.attrs[\"value\"]._values) # Using ._values avoids copying\n graph_constants[tensor.name].inputs.clear()\n\n graph_constants = update_foldable_outputs(graph_constants)\n\n\n # Pass 2: Shape Folding\n\n def get_producer(tensor, op):\n \"\"\"\n Get the producer of the specified tensor iff it matches op\n \"\"\"\n if len(tensor.inputs) != 1:\n return None\n\n node = tensor.inputs[0]\n if node.op != op:\n return None\n return node\n\n\n def get_input(node, index=0):\n \"\"\"\n Get the input tensor of a node iff the input tensor is not already marked a graph constant.\n \"\"\"\n if node is None:\n return None\n\n inp = node.inputs[index]\n\n # If the input was already found to be a constant, it will be folded anyway.\n if inp.name in graph_constants:\n return None\n\n return inp\n\n\n def handle_shape(tensor):\n inp = get_input(get_producer(tensor, \"Shape\"))\n if inp is None:\n return None\n\n if inp.shape is None or misc.is_dynamic_shape(inp.shape):\n return None\n return np.array(inp.shape, dtype=np.int64)\n\n\n def handle_shape_gather(tensor):\n gather = get_producer(tensor, \"Gather\")\n if gather is None:\n return None\n\n data = gather.inputs[0]\n indices_tensor = gather.inputs[1]\n\n inp = get_input(get_producer(data, \"Shape\"))\n if 
inp is None or inp.shape is None:\n return None\n\n if not isinstance(indices_tensor, Constant):\n return None\n\n indices = indices_tensor.values\n if not indices.shape: # Scalar-case\n shape = inp.shape[int(indices)]\n if misc.is_dynamic_dimension(shape):\n return None\n else:\n shape = [inp.shape[index] for index in indices]\n if misc.is_dynamic_shape(shape):\n return None\n\n return np.array(shape, dtype=np.int64)\n\n\n # Finds the static shape of a shape node output if possible, otherwise returns None.\n def lower_shape(tensor):\n SHAPE_FOLD_FUNCS = [handle_shape, handle_shape_gather]\n for fold_func in SHAPE_FOLD_FUNCS:\n shape = fold_func(tensor)\n if shape is not None:\n return shape\n\n\n if fold_shapes:\n for tensor in clone_tensors.values():\n shape_of = lower_shape(tensor)\n\n if shape_of is not None:\n G_LOGGER.ultra_verbose(\"Folding shape tensor: {:} to: {:}\".format(tensor.name, shape_of))\n graph_constants[tensor.name] = tensor.to_constant(shape_of)\n graph_constants[tensor.name].inputs.clear()\n\n graph_constants = update_foldable_outputs(graph_constants)\n\n\n def partition_and_infer(subgraph):\n def get_out_node_ids():\n # Gets the final output nodes - producer nodes of graph output tensors without other outputs.\n with subgraph.node_ids():\n out_node_ids = set()\n for out in subgraph.outputs:\n if not out.outputs and not isinstance(out, Constant):\n for n_inp in out.inputs:\n out_node_ids.add(n_inp.id)\n return out_node_ids\n\n # Compute each output node in a separate subgraph.\n out_node_ids = get_out_node_ids()\n constant_values = {}\n\n for index in out_node_ids: # Have to use index since 'node' is not in part\n part = subgraph.copy()\n out_node = part.nodes[index]\n part.outputs = out_node.outputs\n part.name = \"Folding: {:}\".format([out.name for out in part.outputs])\n part.cleanup(remove_unused_graph_inputs=True)\n names = [out.name for out in part.outputs]\n\n try:\n # Determining types is not trivial, and ONNX-RT does its own type inference.\n sess = rt.InferenceSession(export_onnx(part, do_type_check=False).SerializeToString())\n values = sess.run(names, {})\n except Exception as err:\n G_LOGGER.warning(\"Inference failed for subgraph: {:}. 
Note: Error was:\\n{:}\".format(part.name, err))\n if partitioning == \"recursive\":\n G_LOGGER.verbose(\"Attempting to recursively partition subgraph\")\n # Partition failed, peel off last node.\n # We only need to remove one node, so avoid doing an expensive call to cleanup()\n part.outputs = out_node.inputs\n del part.nodes[part.nodes.index(out_node)]\n out_node.outputs.clear()\n out_node.inputs.clear()\n else:\n G_LOGGER.info(\"You may see better results if you set partitioning='recursive'\")\n if not error_ok:\n raise err\n\n constant_values.update(partition_and_infer(part))\n else:\n constant_values.update({name: val for name, val in zip(names, values)})\n\n return constant_values\n\n\n # Next, evaluate the foldable variables with ONNX-Runtime\n graph_clone.outputs = [t for t in graph_constants.values() if not isinstance(t, Constant)]\n graph_clone.cleanup(remove_unused_graph_inputs=True)\n\n # Using ._values avoids a deep copy of the values.\n constant_values = {name: tensor._values for name, tensor in graph_constants.items() if isinstance(tensor, Constant)}\n if graph_clone.outputs:\n if partitioning:\n constant_values.update(partition_and_infer(graph_clone))\n else:\n names = [t.name for t in graph_clone.outputs]\n try:\n sess = rt.InferenceSession(export_onnx(graph_clone, do_type_check=False).SerializeToString())\n values = sess.run(names, {})\n constant_values.update({name: val for name, val in zip(names, values)})\n except Exception as err:\n G_LOGGER.warning(\"Inference failed. You may want to try enabling partitioning to see better results. \"\n \"Note: Error was:\\n{:}\".format(err))\n G_LOGGER.verbose(\"Note: Graph was:\\n{:}\".format(graph_clone))\n if not error_ok:\n raise\n elif not constant_values:\n G_LOGGER.info(\"Could not find any nodes in this graph ({:}) that can be folded. \"\n \"This could mean that constant folding has already been run on this graph. \"\n \"Skipping.\".format(self.name))\n\n # Finally, replace the Variables in the original graph with constants.\n if constant_values:\n graph_tensors = self.tensors()\n for name, values in constant_values.items():\n tensor = graph_tensors[name]\n if not isinstance(tensor, Constant):\n tensor.to_constant(values)\n tensor.inputs.clear() # Constants do not need inputs\n\n\n # Folding subgraphs after the outer graph can lead to better folding.\n def fold_subgraphs():\n for node in self.nodes:\n for attr in node.attrs.values():\n if isinstance(attr, Graph):\n attr.fold_constants(fold_shapes=fold_shapes, partitioning=partitioning)\n\n if recurse_subgraphs:\n fold_subgraphs()\n\n return self\n\n\n def _generate_name(self, prefix):\n name = \"{}_{}\".format(prefix, self.name_idx)\n self.name_idx += 1\n return name\n\n\n def layer(self, inputs=[], outputs=[], *args, **kwargs):\n \"\"\"\n Creates a node, adds it to this graph, and optionally creates its input and output tensors.\n\n The input and output lists can include various different types:\n\n - ``Tensor``: Any Tensors provided will be used as-is in the inputs/outputs of the node created.\n - ``str``:\n If a string is provided, this function will generate a new tensor using\n the string to generate a name. 
It will append an index to the end of the provided string\n to attempt to avoid duplicate tensor names, but since this doesn't guarantee that the name will\n be unique, you should try to ensure that the string provided is as unique as possible.\n - ``numpy.ndarray``:\n If a NumPy array is provided, this function will generate a Constant tensor\n using the name prefix: \"onnx_graphsurgeon_constant\"\n - ``Union[List[Number], Tuple[Number]]``:\n If a list or tuple of numbers (int or float) is provided, this function will\n generate a Constant tensor using the name prefix: \"onnx_graphsurgeon_lst_constant\"\n\n Args:\n inputs (List[Union[Tensor, str, numpy.ndarray]]): The list of inputs\n outputs (List[Union[Tensor, str, numpy.ndarray]]): The list of outputs\n args/kwargs: These are passed directly to the constructor of Node\n\n Returns:\n List[Tensor]: The output tensors of the node\n \"\"\"\n def process_io(io):\n new_io = []\n for elem in io:\n if isinstance(elem, Tensor):\n new_io.append(elem)\n elif isinstance(elem, str):\n tensor = Variable(name=self._generate_name(elem))\n new_io.append(tensor)\n elif isinstance(elem, np.ndarray):\n new_io.append(Constant(name=self._generate_name(\"onnx_graphsurgeon_constant\"), values=elem))\n elif isinstance(elem, list) or isinstance(elem, tuple):\n dtype = np.float32 if any([isinstance(x, float) for x in elem]) else np.int64\n arr = np.array(elem, dtype=dtype)\n new_io.append(Constant(name=self._generate_name(\"onnx_graphsurgeon_lst_constant\"), values=arr))\n else:\n G_LOGGER.critical(\"Unrecognized type passed to Graph.layer: {:}.\\n\"\n \"\\tHint: Did you forget to unpack a list with `*`?\\n\"\n \"\\tPlease use Tensors, strings, or NumPy arrays.\".format(elem))\n return new_io\n\n inputs = process_io(inputs)\n outputs = process_io(outputs)\n\n if \"name\" not in kwargs:\n kwargs[\"name\"] = self._generate_name(\"onnx_graphsurgeon_node\")\n\n node = Node(*args, **kwargs, inputs=inputs, outputs=outputs)\n self.nodes.append(node)\n return node.outputs\n\n\n def copy(self, tensor_map: \"OrderedDict[str, Tensor]\"=None):\n \"\"\"\n Copy the graph.\n\n This makes copies of all nodes and tensors in the graph, but will not\n do a deep-copy of weights or attributes (with the exception of ``Graph``\n attributes, which will be copied using their ``copy`` method).\n\n Args:\n tensor_map (OrderedDict[str, Tensor]):\n A mapping of tensor names to tensors from the outer graph.\n This should be ``None`` if this is the outer-most graph.\n\n Returns:\n Graph: A copy of the graph.\n \"\"\"\n # First, reconstruct each tensor in the graph, but with no inputs or outputs\n tensor_map = copy.copy(misc.default_value(tensor_map, {}))\n\n local_tensors = self.tensors()\n local_tensor_copies = {name: tensor.copy() for name, tensor in local_tensors.items()}\n local_tensor_copies.update(tensor_map)\n\n def get_tensor(name):\n if not name:\n return Variable.empty()\n return local_tensor_copies[name]\n\n\n # Next, copy nodes, and update inputs/outputs\n new_nodes = []\n for node in self.nodes:\n new_node = node.copy(inputs=[get_tensor(inp.name) for inp in node.inputs],\n outputs=[get_tensor(out.name) for out in node.outputs],\n tensor_map=local_tensor_copies)\n new_nodes.append(new_node)\n\n new_graph_inputs = [get_tensor(inp.name) for inp in self.inputs]\n new_graph_outputs = [get_tensor(out.name) for out in self.outputs]\n return Graph(nodes=new_nodes, inputs=new_graph_inputs, outputs=new_graph_outputs,\n name=copy.copy(self.name), doc_string=copy.copy(self.doc_string),\n 
opset=copy.copy(self.opset))\n\n\n def __str__(self):\n nodes_str = \"\\n\".join([str(node) for node in self.nodes])\n return \"Graph {:} (Opset: {:})\\nInputs: {:}\\nNodes:\\n{:}\\nOutputs: {:}\".format(\n self.name, self.opset, self.inputs, nodes_str, self.outputs)\n\n\n def __repr__(self):\n return self.__str__()\n"
]
| [
[
"numpy.array"
]
]
|
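The `register()` / `layer()` pair in the `Graph` class above is what makes the `graph.add(a, b)` style from its docstring work: `__getattr__` falls through to the opset-specific or global function maps. A small end-to-end sketch under stated assumptions follows; it assumes the `onnx_graphsurgeon` package is importable, and the tensor names and shapes are illustrative.

```python
# Sketch of the Graph.register() / Graph.layer() pattern documented above:
# register an "Add" helper, build a tiny graph, then sort and clean it.
# Assumes onnx_graphsurgeon is installed; names/shapes are illustrative.
import numpy as np
import onnx_graphsurgeon as gs

@gs.Graph.register()
def add(self, a, b):
    # layer() creates the Node, appends it to self.nodes, and returns
    # the node's output tensors.
    return self.layer(op="Add", inputs=[a, b], outputs=["add_out_gs"])

x = gs.Variable(name="x", dtype=np.float32, shape=(1, 3))
graph = gs.Graph(inputs=[x])

# Via __getattr__, the registered function now acts like a member function.
# The raw ndarray is converted to a Constant by layer()'s process_io().
(y,) = graph.add(x, np.ones((1, 3), dtype=np.float32))
graph.outputs = [y]
y.dtype = np.float32  # output dtypes are needed if the graph is later exported to ONNX

graph.toposort().cleanup()  # both return self, so they chain
print(graph)
```

Registering for specific opsets (`@gs.Graph.register(opsets=[11])`) lets two helpers share a name while only the one matching `graph.opset` is dispatched, which is exactly the priority order `__getattr__` implements above.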
david8862/keras-YOLOv3-model-se | [
"8822619f8f241b4b8ee34b881e3fd5c5233cb558"
]
| [
"eval.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCalculate mAP for YOLO model on some annotation dataset\n\"\"\"\nimport os, argparse, time\nimport numpy as np\nimport operator\nfrom operator import mul\nfrom functools import reduce\nfrom PIL import Image\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nfrom tensorflow.keras.models import load_model\nimport tensorflow.keras.backend as K\nimport tensorflow as tf\nimport MNN\nimport onnxruntime\n\nfrom yolo5.postprocess_np import yolo5_postprocess_np\nfrom yolo3.postprocess_np import yolo3_postprocess_np\nfrom yolo2.postprocess_np import yolo2_postprocess_np\nfrom common.data_utils import preprocess_image\nfrom common.utils import get_dataset, get_classes, get_anchors, get_colors, draw_boxes, optimize_tf_gpu, get_custom_objects\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\noptimize_tf_gpu(tf, K)\n\n\ndef annotation_parse(annotation_lines, class_names):\n '''\n parse annotation lines to get image dict and ground truth class dict\n\n image dict would be like:\n annotation_records = {\n '/path/to/000001.jpg': {'100,120,200,235':'dog', '85,63,156,128':'car', ...},\n ...\n }\n\n ground truth class dict would be like:\n classes_records = {\n 'car': [\n ['000001.jpg','100,120,200,235'],\n ['000002.jpg','85,63,156,128'],\n ...\n ],\n ...\n }\n '''\n annotation_records = OrderedDict()\n classes_records = OrderedDict({class_name: [] for class_name in class_names})\n\n for line in annotation_lines:\n box_records = {}\n image_name = line.split(' ')[0]\n boxes = line.split(' ')[1:]\n for box in boxes:\n #strip box coordinate and class\n class_name = class_names[int(box.split(',')[-1])]\n coordinate = ','.join(box.split(',')[:-1])\n box_records[coordinate] = class_name\n #append or add ground truth class item\n record = [os.path.basename(image_name), coordinate]\n if class_name in classes_records:\n classes_records[class_name].append(record)\n else:\n classes_records[class_name] = list([record])\n annotation_records[image_name] = box_records\n\n return annotation_records, classes_records\n\n\ndef transform_gt_record(gt_records, class_names):\n '''\n Transform the Ground Truth records of a image to prediction format, in\n order to show & compare in result pic.\n\n Ground Truth records is a dict with format:\n {'100,120,200,235':'dog', '85,63,156,128':'car', ...}\n\n Prediction format:\n (boxes, classes, scores)\n '''\n if gt_records is None or len(gt_records) == 0:\n return [], [], []\n\n gt_boxes = []\n gt_classes = []\n gt_scores = []\n for (coordinate, class_name) in gt_records.items():\n gt_box = [int(x) for x in coordinate.split(',')]\n gt_class = class_names.index(class_name)\n\n gt_boxes.append(gt_box)\n gt_classes.append(gt_class)\n gt_scores.append(1.0)\n\n return np.array(gt_boxes), np.array(gt_classes), np.array(gt_scores)\n\n\n\ndef yolo_predict_tflite(interpreter, image, anchors, num_classes, conf_threshold, elim_grid_sense, v5_decode):\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # check the type of the input tensor\n #if input_details[0]['dtype'] == np.float32:\n #floating_model = True\n\n height = input_details[0]['shape'][1]\n width = input_details[0]['shape'][2]\n model_input_shape = (height, width)\n\n image_data = preprocess_image(image, model_input_shape)\n #origin image shape, in (height, width) format\n image_shape = image.size[::-1]\n\n interpreter.set_tensor(input_details[0]['index'], image_data)\n 
interpreter.invoke()\n\n prediction = []\n for output_detail in output_details:\n output_data = interpreter.get_tensor(output_detail['index'])\n prediction.append(output_data)\n\n prediction.sort(key=lambda x: len(x[0]))\n if len(anchors) == 5:\n # YOLOv2 use 5 anchors and have only 1 prediction\n assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'\n pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n else:\n if v5_decode:\n pred_boxes, pred_classes, pred_scores = yolo5_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=True) #enable \"elim_grid_sense\" by default\n else:\n pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n\n return pred_boxes, pred_classes, pred_scores\n\n\ndef yolo_predict_mnn(interpreter, session, image, anchors, num_classes, conf_threshold, elim_grid_sense, v5_decode):\n # assume only 1 input tensor for image\n input_tensor = interpreter.getSessionInput(session)\n # get input shape\n input_shape = input_tensor.getShape()\n if input_tensor.getDimensionType() == MNN.Tensor_DimensionType_Tensorflow:\n batch, height, width, channel = input_shape\n elif input_tensor.getDimensionType() == MNN.Tensor_DimensionType_Caffe:\n batch, channel, height, width = input_shape\n else:\n # should be MNN.Tensor_DimensionType_Caffe_C4, unsupported now\n raise ValueError('unsupported input tensor dimension type')\n\n model_input_shape = (height, width)\n\n # prepare input image\n image_data = preprocess_image(image, model_input_shape)\n #origin image shape, in (height, width) format\n image_shape = image.size[::-1]\n\n # create a temp tensor to copy data\n # use TF NHWC layout to align with image data array\n # TODO: currently MNN python binding have mem leak when creating MNN.Tensor\n # from numpy array, only from tuple is good. 
So we convert input image to tuple\n tmp_input_shape = (batch, height, width, channel)\n input_elementsize = reduce(mul, tmp_input_shape)\n tmp_input = MNN.Tensor(tmp_input_shape, input_tensor.getDataType(),\\\n tuple(image_data.reshape(input_elementsize, -1)), MNN.Tensor_DimensionType_Tensorflow)\n\n input_tensor.copyFrom(tmp_input)\n interpreter.runSession(session)\n\n def get_tensor_list(output_tensors):\n # transform the output tensor dict to ordered tensor list, for further postprocess\n #\n # output tensor list should be like (for YOLOv3):\n # [\n # (name, tensor) for (13, 13, 3, num_classes+5),\n # (name, tensor) for (26, 26, 3, num_classes+5),\n # (name, tensor) for (52, 52, 3, num_classes+5)\n # ]\n output_list = []\n\n for (output_tensor_name, output_tensor) in output_tensors.items():\n tensor_shape = output_tensor.getShape()\n dim_type = output_tensor.getDimensionType()\n tensor_height, tensor_width = tensor_shape[2:4] if dim_type == MNN.Tensor_DimensionType_Caffe else tensor_shape[1:3]\n\n if len(anchors) == 6:\n # Tiny YOLOv3\n if tensor_height == height//32:\n output_list.insert(0, (output_tensor_name, output_tensor))\n elif tensor_height == height//16:\n output_list.insert(1, (output_tensor_name, output_tensor))\n else:\n raise ValueError('invalid tensor shape')\n elif len(anchors) == 9:\n # YOLOv3\n if tensor_height == height//32:\n output_list.insert(0, (output_tensor_name, output_tensor))\n elif tensor_height == height//16:\n output_list.insert(1, (output_tensor_name, output_tensor))\n elif tensor_height == height//8:\n output_list.insert(2, (output_tensor_name, output_tensor))\n else:\n raise ValueError('invalid tensor shape')\n elif len(anchors) == 5:\n # YOLOv2 use 5 anchors and have only 1 prediction\n assert len(output_tensors) == 1, 'YOLOv2 model should have only 1 output tensor.'\n output_list.insert(0, (output_tensor_name, output_tensor))\n else:\n raise ValueError('invalid anchor number')\n\n return output_list\n\n output_tensors = interpreter.getSessionOutputAll(session)\n output_tensor_list = get_tensor_list(output_tensors)\n\n prediction = []\n for (output_tensor_name, output_tensor) in output_tensor_list:\n output_shape = output_tensor.getShape()\n output_elementsize = reduce(mul, output_shape)\n\n assert output_tensor.getDataType() == MNN.Halide_Type_Float\n\n # copy output tensor to host, for further postprocess\n tmp_output = MNN.Tensor(output_shape, output_tensor.getDataType(),\\\n #np.zeros(output_shape, dtype=float), output_tensor.getDimensionType())\n tuple(np.zeros(output_shape, dtype=float).reshape(output_elementsize, -1)), output_tensor.getDimensionType())\n\n output_tensor.copyToHostTensor(tmp_output)\n #tmp_output.printTensorData()\n\n output_data = np.array(tmp_output.getData(), dtype=float).reshape(output_shape)\n # our postprocess code based on TF NHWC layout, so if the output format\n # doesn't match, we need to transpose\n if output_tensor.getDimensionType() == MNN.Tensor_DimensionType_Caffe:\n output_data = output_data.transpose((0,2,3,1))\n elif output_tensor.getDimensionType() == MNN.Tensor_DimensionType_Caffe_C4:\n raise ValueError('unsupported output tensor dimension type')\n\n prediction.append(output_data)\n\n prediction.sort(key=lambda x: len(x[0]))\n if len(anchors) == 5:\n # YOLOv2 use 5 anchors and have only 1 prediction\n assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'\n pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_input_shape, max_boxes=100, 
confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n else:\n if v5_decode:\n pred_boxes, pred_classes, pred_scores = yolo5_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=True) #enable \"elim_grid_sense\" by default\n else:\n pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n\n return pred_boxes, pred_classes, pred_scores\n\n\ndef yolo_predict_pb(model, image, anchors, num_classes, model_input_shape, conf_threshold, elim_grid_sense, v5_decode):\n # NOTE: TF 1.x frozen pb graph need to specify input/output tensor name\n # so we hardcode the input/output tensor names here to get them from model\n if len(anchors) == 6:\n output_tensor_names = ['graph/predict_conv_1/BiasAdd:0', 'graph/predict_conv_2/BiasAdd:0']\n elif len(anchors) == 9:\n output_tensor_names = ['graph/predict_conv_1/BiasAdd:0', 'graph/predict_conv_2/BiasAdd:0', 'graph/predict_conv_3/BiasAdd:0']\n elif len(anchors) == 5:\n # YOLOv2 use 5 anchors and have only 1 prediction\n output_tensor_names = ['graph/predict_conv/BiasAdd:0']\n else:\n raise ValueError('invalid anchor number')\n\n # assume only 1 input tensor for image\n input_tensor_name = 'graph/image_input:0'\n\n # get input/output tensors\n image_input = model.get_tensor_by_name(input_tensor_name)\n output_tensors = [model.get_tensor_by_name(output_tensor_name) for output_tensor_name in output_tensor_names]\n\n batch, height, width, channel = image_input.shape\n model_input_shape = (int(height), int(width))\n\n # prepare input image\n image_data = preprocess_image(image, model_input_shape)\n #origin image shape, in (height, width) format\n image_shape = image.size[::-1]\n\n with tf.Session(graph=model) as sess:\n prediction = sess.run(output_tensors, feed_dict={\n image_input: image_data\n })\n\n prediction.sort(key=lambda x: len(x[0]))\n if len(anchors) == 5:\n # YOLOv2 use 5 anchors and have only 1 prediction\n assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'\n pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n else:\n if v5_decode:\n pred_boxes, pred_classes, pred_scores = yolo5_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=True) #enable \"elim_grid_sense\" by default\n else:\n pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n\n return pred_boxes, pred_classes, pred_scores\n\n\ndef yolo_predict_onnx(model, image, anchors, num_classes, conf_threshold, elim_grid_sense, v5_decode):\n input_tensors = []\n for i, input_tensor in enumerate(model.get_inputs()):\n input_tensors.append(input_tensor)\n\n # assume only 1 input tensor for image\n assert len(input_tensors) == 1, 'invalid input tensor number.'\n\n # check if input layout is NHWC or NCHW\n if input_tensors[0].shape[1] == 3:\n batch, channel, height, width = input_tensors[0].shape #NCHW\n else:\n batch, height, width, channel = input_tensors[0].shape #NHWC\n\n model_input_shape = (height, width)\n\n # prepare input image\n image_data = preprocess_image(image, 
model_input_shape)\n #origin image shape, in (height, width) format\n image_shape = image.size[::-1]\n\n if input_tensors[0].shape[1] == 3:\n # transpose image for NCHW layout\n image_data = image_data.transpose((0,3,1,2))\n\n feed = {input_tensors[0].name: image_data}\n prediction = model.run(None, feed)\n\n prediction.sort(key=lambda x: len(x[0]))\n if len(anchors) == 5:\n # YOLOv2 use 5 anchors and have only 1 prediction\n assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'\n pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n else:\n if v5_decode:\n pred_boxes, pred_classes, pred_scores = yolo5_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=True) #enable \"elim_grid_sense\" by default\n else:\n pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n\n return pred_boxes, pred_classes, pred_scores\n\n\ndef yolo_predict_keras(model, image, anchors, num_classes, model_input_shape, conf_threshold, elim_grid_sense, v5_decode):\n image_data = preprocess_image(image, model_input_shape)\n #origin image shape, in (height, width) format\n image_shape = image.size[::-1]\n\n prediction = model.predict([image_data])\n if len(anchors) == 5:\n # YOLOv2 use 5 anchors\n pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n else:\n if v5_decode:\n pred_boxes, pred_classes, pred_scores = yolo5_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=True) #enable \"elim_grid_sense\" by default\n else:\n pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_input_shape, max_boxes=100, confidence=conf_threshold, elim_grid_sense=elim_grid_sense)\n\n return pred_boxes, pred_classes, pred_scores\n\n\ndef get_prediction_class_records(model, model_format, annotation_records, anchors, class_names, model_input_shape, conf_threshold, elim_grid_sense, v5_decode, save_result):\n '''\n Do the predict with YOLO model on annotation images to get predict class dict\n\n predict class dict would contain image_name, coordinary and score, and\n sorted by score:\n pred_classes_records = {\n 'car': [\n ['000001.jpg','94,115,203,232',0.98],\n ['000002.jpg','82,64,154,128',0.93],\n ...\n ],\n ...\n }\n '''\n if model_format == 'MNN':\n #MNN inference engine need create session\n session = model.createSession()\n\n # create txt file to save prediction result, with\n # save format as annotation file but adding score, like:\n #\n # path/to/img1.jpg 50,100,150,200,0,0.86 30,50,200,120,3,0.95\n #\n os.makedirs('result', exist_ok=True)\n result_file = open(os.path.join('result','detection_result.txt'), 'w')\n\n pred_classes_records = OrderedDict()\n pbar = tqdm(total=len(annotation_records), desc='Eval model')\n for (image_name, gt_records) in annotation_records.items():\n image = Image.open(image_name)\n if image.mode != 'RGB':\n image = image.convert('RGB')\n image_array = np.array(image, dtype='uint8')\n\n # support of tflite model\n if 
model_format == 'TFLITE':\n pred_boxes, pred_classes, pred_scores = yolo_predict_tflite(model, image, anchors, len(class_names), conf_threshold, elim_grid_sense, v5_decode)\n # support of MNN model\n elif model_format == 'MNN':\n pred_boxes, pred_classes, pred_scores = yolo_predict_mnn(model, session, image, anchors, len(class_names), conf_threshold, elim_grid_sense, v5_decode)\n # support of TF 1.x frozen pb model\n elif model_format == 'PB':\n pred_boxes, pred_classes, pred_scores = yolo_predict_pb(model, image, anchors, len(class_names), model_input_shape, conf_threshold, elim_grid_sense, v5_decode)\n # support of ONNX model\n elif model_format == 'ONNX':\n pred_boxes, pred_classes, pred_scores = yolo_predict_onnx(model, image, anchors, len(class_names), conf_threshold, elim_grid_sense, v5_decode)\n # normal keras h5 model\n elif model_format == 'H5':\n pred_boxes, pred_classes, pred_scores = yolo_predict_keras(model, image, anchors, len(class_names), model_input_shape, conf_threshold, elim_grid_sense, v5_decode)\n else:\n raise ValueError('invalid model format')\n\n #print('Found {} boxes for {}'.format(len(pred_boxes), image_name))\n image.close()\n pbar.update(1)\n\n # save prediction result to txt\n result_file.write(image_name)\n for box, cls, score in zip(pred_boxes, pred_classes, pred_scores):\n xmin, ymin, xmax, ymax = box\n box_annotation = \" %d,%d,%d,%d,%d,%f\" % (\n xmin, ymin, xmax, ymax, cls, score)\n result_file.write(box_annotation)\n result_file.write('\\n')\n result_file.flush()\n\n if save_result:\n\n gt_boxes, gt_classes, gt_scores = transform_gt_record(gt_records, class_names)\n\n result_dir=os.path.join('result','detection')\n os.makedirs(result_dir, exist_ok=True)\n colors = get_colors(len(class_names))\n image_array = draw_boxes(image_array, gt_boxes, gt_classes, gt_scores, class_names, colors=None, show_score=False)\n image_array = draw_boxes(image_array, pred_boxes, pred_classes, pred_scores, class_names, colors)\n image = Image.fromarray(image_array)\n # here we handle the RGBA image\n if(len(image.split()) == 4):\n r, g, b, a = image.split()\n image = Image.merge(\"RGB\", (r, g, b))\n image.save(os.path.join(result_dir, image_name.split(os.path.sep)[-1]))\n\n # Nothing detected\n if pred_boxes is None or len(pred_boxes) == 0:\n continue\n\n for box, cls, score in zip(pred_boxes, pred_classes, pred_scores):\n pred_class_name = class_names[cls]\n xmin, ymin, xmax, ymax = box\n coordinate = \"{},{},{},{}\".format(xmin, ymin, xmax, ymax)\n\n #append or add predict class item\n record = [os.path.basename(image_name), coordinate, score]\n if pred_class_name in pred_classes_records:\n pred_classes_records[pred_class_name].append(record)\n else:\n pred_classes_records[pred_class_name] = list([record])\n\n # sort pred_classes_records for each class according to score\n for pred_class_list in pred_classes_records.values():\n pred_class_list.sort(key=lambda ele: ele[2], reverse=True)\n\n pbar.close()\n result_file.close()\n return pred_classes_records\n\n\ndef box_iou(pred_box, gt_box):\n '''\n Calculate iou for predict box and ground truth box\n Param\n pred_box: predict box coordinate\n (xmin,ymin,xmax,ymax) format\n gt_box: ground truth box coordinate\n (xmin,ymin,xmax,ymax) format\n Return\n iou value\n '''\n # get intersection box\n inter_box = [max(pred_box[0], gt_box[0]), max(pred_box[1], gt_box[1]), min(pred_box[2], gt_box[2]), min(pred_box[3], gt_box[3])]\n inter_w = max(0.0, inter_box[2] - inter_box[0] + 1)\n inter_h = max(0.0, inter_box[3] - inter_box[1] + 
1)\n\n # compute overlap (IoU) = area of intersection / area of union\n pred_area = (pred_box[2] - pred_box[0] + 1) * (pred_box[3] - pred_box[1] + 1)\n gt_area = (gt_box[2] - gt_box[0] + 1) * (gt_box[3] - gt_box[1] + 1)\n inter_area = inter_w * inter_h\n union_area = pred_area + gt_area - inter_area\n return 0 if union_area == 0 else float(inter_area) / float(union_area)\n\n\ndef match_gt_box(pred_record, gt_records, iou_threshold=0.5):\n '''\n Search gt_records list and try to find a matching box for the predict box\n\n Param\n pred_record: with format ['image_file', 'xmin,ymin,xmax,ymax', score]\n gt_records: record list with format\n [\n ['image_file', 'xmin,ymin,xmax,ymax', 'usage'],\n ['image_file', 'xmin,ymin,xmax,ymax', 'usage'],\n ...\n ]\n iou_threshold:\n\n pred_record and gt_records should be from same annotation image file\n\n Return\n matching gt_record index. -1 when there's no matching gt\n '''\n max_iou = 0.0\n max_index = -1\n #get predict box coordinate\n pred_box = [float(x) for x in pred_record[1].split(',')]\n\n for i, gt_record in enumerate(gt_records):\n #get ground truth box coordinate\n gt_box = [float(x) for x in gt_record[1].split(',')]\n iou = box_iou(pred_box, gt_box)\n\n # if the ground truth has been assigned to other\n # prediction, we couldn't reuse it\n if iou > max_iou and gt_record[2] == 'unused' and pred_record[0] == gt_record[0]:\n max_iou = iou\n max_index = i\n\n # drop the prediction if couldn't match iou threshold\n if max_iou < iou_threshold:\n max_index = -1\n\n return max_index\n\ndef voc_ap(rec, prec):\n \"\"\"\n --- Official matlab code VOC2012---\n mrec=[0 ; rec ; 1];\n mpre=[0 ; prec ; 0];\n for i=numel(mpre)-1:-1:1\n mpre(i)=max(mpre(i),mpre(i+1));\n end\n i=find(mrec(2:end)~=mrec(1:end-1))+1;\n ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n \"\"\"\n rec.insert(0, 0.0) # insert 0.0 at begining of list\n rec.append(1.0) # insert 1.0 at end of list\n mrec = rec[:]\n prec.insert(0, 0.0) # insert 0.0 at begining of list\n prec.append(0.0) # insert 0.0 at end of list\n mpre = prec[:]\n \"\"\"\n This part makes the precision monotonically decreasing\n (goes from the end to the beginning)\n \"\"\"\n # matlab indexes start in 1 but python in 0, so I have to do:\n # range(start=(len(mpre) - 2), end=0, step=-1)\n # also the python function range excludes the end, resulting in:\n # range(start=(len(mpre) - 2), end=-1, step=-1)\n for i in range(len(mpre) - 2, -1, -1):\n mpre[i] = max(mpre[i], mpre[i + 1])\n \"\"\"\n This part creates a list of indexes where the recall changes\n \"\"\"\n # matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;\n i_list = []\n for i in range(1, len(mrec)):\n if mrec[i] != mrec[i - 1]:\n i_list.append(i) # if it was matlab would be i + 1\n \"\"\"\n The Average Precision (AP) is the area under the curve\n (numerical integration)\n \"\"\"\n # matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n ap = 0.0\n for i in i_list:\n ap += ((mrec[i] - mrec[i - 1]) * mpre[i])\n return ap, mrec, mpre\n\n'''\ndef voc_ap(rec, prec, use_07_metric=False):\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n i = np.where(mrec[1:] != mrec[:-1])[0]\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap, mrec, mpre\n'''\n\n\ndef 
get_rec_prec(true_positive, false_positive, gt_records):\n '''\n Calculate precision/recall based on true_positive, false_positive\n result.\n '''\n cumsum = 0\n for idx, val in enumerate(false_positive):\n false_positive[idx] += cumsum\n cumsum += val\n\n cumsum = 0\n for idx, val in enumerate(true_positive):\n true_positive[idx] += cumsum\n cumsum += val\n\n rec = true_positive[:]\n for idx, val in enumerate(true_positive):\n rec[idx] = (float(true_positive[idx]) / len(gt_records)) if len(gt_records) != 0 else 0\n\n prec = true_positive[:]\n for idx, val in enumerate(true_positive):\n prec[idx] = float(true_positive[idx]) / (false_positive[idx] + true_positive[idx])\n\n return rec, prec\n\n\ndef draw_rec_prec(rec, prec, mrec, mprec, class_name, ap):\n \"\"\"\n Draw plot\n \"\"\"\n plt.plot(rec, prec, '-o')\n # add a new penultimate point to the list (mrec[-2], 0.0)\n # since the last line segment (and respective area) do not affect the AP value\n area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]\n area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]\n plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')\n # set window title\n fig = plt.gcf() # gcf - get current figure\n fig.canvas.set_window_title('AP ' + class_name)\n # set plot title\n plt.title('class: ' + class_name + ' AP = {}%'.format(ap*100))\n #plt.suptitle('This is a somewhat long figure title', fontsize=16)\n # set axis titles\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n # optional - set axes\n axes = plt.gca() # gca - get current axes\n axes.set_xlim([0.0,1.0])\n axes.set_ylim([0.0,1.05]) # .05 to give some extra space\n # Alternative option -> wait for button to be pressed\n #while not plt.waitforbuttonpress(): pass # wait for key display\n # Alternative option -> normal display\n #plt.show()\n # save the plot\n rec_prec_plot_path = os.path.join('result','classes')\n os.makedirs(rec_prec_plot_path, exist_ok=True)\n fig.savefig(os.path.join(rec_prec_plot_path, class_name + \".png\"))\n plt.cla() # clear axes for next plot\n\n\nimport bokeh\nimport bokeh.io as bokeh_io\nimport bokeh.plotting as bokeh_plotting\ndef generate_rec_prec_html(mrec, mprec, scores, class_name, ap):\n \"\"\"\n generate dynamic P-R curve HTML page for each class\n \"\"\"\n # bypass invalid class\n if len(mrec) == 0 or len(mprec) == 0 or len(scores) == 0:\n return\n\n rec_prec_plot_path = os.path.join('result' ,'classes')\n os.makedirs(rec_prec_plot_path, exist_ok=True)\n bokeh_io.output_file(os.path.join(rec_prec_plot_path, class_name + '.html'), title='P-R curve for ' + class_name)\n\n # prepare curve data\n area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]\n area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]\n score_on_curve = [0.0] + scores[:-1] + [0.0] + [scores[-1]] + [1.0]\n source = bokeh.models.ColumnDataSource(data={\n 'rec' : area_under_curve_x,\n 'prec' : area_under_curve_y,\n 'score' : score_on_curve,\n })\n\n # prepare plot figure\n plt_title = 'class: ' + class_name + ' AP = {}%'.format(ap*100)\n plt = bokeh_plotting.figure(plot_height=200 ,plot_width=200, tools=\"\", toolbar_location=None,\n title=plt_title, sizing_mode=\"scale_width\")\n plt.background_fill_color = \"#f5f5f5\"\n plt.grid.grid_line_color = \"white\"\n plt.xaxis.axis_label = 'Recall'\n plt.yaxis.axis_label = 'Precision'\n plt.axis.axis_line_color = None\n\n # draw curve data\n plt.line(x='rec', y='prec', line_width=2, color='#ebbd5b', source=source)\n plt.add_tools(bokeh.models.HoverTool(\n tooltips=[\n ( 'score', 
'@score{0.0000 a}'),\n ( 'Prec', '@prec'),\n ( 'Recall', '@rec'),\n ],\n formatters={\n 'rec' : 'printf',\n 'prec' : 'printf',\n },\n mode='vline'\n ))\n bokeh_io.save(plt)\n return\n\n\ndef adjust_axes(r, t, fig, axes):\n \"\"\"\n Plot - adjust axes\n \"\"\"\n # get text width for re-scaling\n bb = t.get_window_extent(renderer=r)\n text_width_inches = bb.width / fig.dpi\n # get axis width in inches\n current_fig_width = fig.get_figwidth()\n new_fig_width = current_fig_width + text_width_inches\n propotion = new_fig_width / current_fig_width\n # get axis limit\n x_lim = axes.get_xlim()\n axes.set_xlim([x_lim[0], x_lim[1]*propotion])\n\n\ndef draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):\n \"\"\"\n Draw plot using Matplotlib\n \"\"\"\n # sort the dictionary by decreasing value, into a list of tuples\n sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))\n # unpacking the list of tuples into two lists\n sorted_keys, sorted_values = zip(*sorted_dic_by_value)\n #\n if true_p_bar != \"\":\n \"\"\"\n Special case to draw in (green=true predictions) & (red=false predictions)\n \"\"\"\n fp_sorted = []\n tp_sorted = []\n for key in sorted_keys:\n fp_sorted.append(dictionary[key] - true_p_bar[key])\n tp_sorted.append(true_p_bar[key])\n plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')\n plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)\n # add legend\n plt.legend(loc='lower right')\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for i, val in enumerate(sorted_values):\n fp_val = fp_sorted[i]\n tp_val = tp_sorted[i]\n fp_str_val = \" \" + str(fp_val)\n tp_str_val = fp_str_val + \" \" + str(tp_val)\n # trick to paint multicolor with offset:\n # first paint everything and then repaint the first number\n t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')\n plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')\n if i == (len(sorted_values)-1): # largest bar\n adjust_axes(r, t, fig, axes)\n else:\n plt.barh(range(n_classes), sorted_values, color=plot_color)\n \"\"\"\n Write number on side of bar\n \"\"\"\n fig = plt.gcf() # gcf - get current figure\n axes = plt.gca()\n r = fig.canvas.get_renderer()\n for i, val in enumerate(sorted_values):\n str_val = \" \" + str(val) # add a space before\n if val < 1.0:\n str_val = \" {0:.2f}\".format(val)\n t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')\n # re-set axes to show number inside the figure\n if i == (len(sorted_values)-1): # largest bar\n adjust_axes(r, t, fig, axes)\n # set window title\n fig.canvas.set_window_title(window_title)\n # write classes in y axis\n tick_font_size = 12\n plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)\n \"\"\"\n Re-scale height accordingly\n \"\"\"\n init_height = fig.get_figheight()\n # comput the matrix height in points and inches\n dpi = fig.dpi\n height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)\n height_in = height_pt / dpi\n # compute the required figure height\n top_margin = 0.15 # in percentage of the figure height\n bottom_margin = 0.05 # in percentage of the figure height\n figure_height = height_in / (1 - top_margin - bottom_margin)\n # set new height\n if figure_height > init_height:\n 
fig.set_figheight(figure_height)\n\n # set plot title\n plt.title(plot_title, fontsize=14)\n # set axis titles\n # plt.xlabel('classes')\n plt.xlabel(x_label, fontsize='large')\n # adjust size of window\n fig.tight_layout()\n # save the plot\n fig.savefig(output_path)\n # show image\n if to_show:\n plt.show()\n # close the plot\n plt.close()\n\n\ndef calc_AP(gt_records, pred_records, class_name, iou_threshold, show_result):\n '''\n Calculate AP value for one class records\n\n Param\n gt_records: ground truth records list for one class, with format:\n [\n ['image_file', 'xmin,ymin,xmax,ymax'],\n ['image_file', 'xmin,ymin,xmax,ymax'],\n ...\n ]\n pred_records: predict records for one class, with format (in score descending order):\n [\n ['image_file', 'xmin,ymin,xmax,ymax', score],\n ['image_file', 'xmin,ymin,xmax,ymax', score],\n ...\n ]\n Return\n AP value for the class\n '''\n # append usage flag in gt_records for matching gt search\n gt_records = [gt_record + ['unused'] for gt_record in gt_records]\n\n # prepare score list for generating P-R html page\n scores = [pred_record[2] for pred_record in pred_records]\n\n # init true_positive and false_positive list\n nd = len(pred_records) # number of predict data\n true_positive = [0] * nd\n false_positive = [0] * nd\n true_positive_count = 0\n # assign predictions to ground truth objects\n for idx, pred_record in enumerate(pred_records):\n # filter out gt record from same image\n image_gt_records = [ gt_record for gt_record in gt_records if gt_record[0] == pred_record[0]]\n\n i = match_gt_box(pred_record, image_gt_records, iou_threshold=iou_threshold)\n if i != -1:\n # find a valid gt obj to assign, set\n # true_positive list and mark image_gt_records.\n #\n # trick: gt_records will also be marked\n # as 'used', since image_gt_records is a\n # reference list\n image_gt_records[i][2] = 'used'\n true_positive[idx] = 1\n true_positive_count += 1\n else:\n false_positive[idx] = 1\n\n # compute precision/recall\n rec, prec = get_rec_prec(true_positive, false_positive, gt_records)\n ap, mrec, mprec = voc_ap(rec, prec)\n if show_result:\n draw_rec_prec(rec, prec, mrec, mprec, class_name, ap)\n generate_rec_prec_html(mrec, mprec, scores, class_name, ap)\n\n return ap, true_positive_count\n\n\ndef plot_Pascal_AP_result(count_images, count_true_positives, num_classes,\n gt_counter_per_class, pred_counter_per_class,\n precision_dict, recall_dict, mPrec, mRec,\n APs, mAP, iou_threshold):\n '''\n Plot the total number of occurences of each class in the ground-truth\n '''\n window_title = \"Ground-Truth Info\"\n plot_title = \"Ground-Truth\\n\" + \"(\" + str(count_images) + \" files and \" + str(num_classes) + \" classes)\"\n x_label = \"Number of objects per class\"\n output_path = os.path.join('result','Ground-Truth_Info.png')\n draw_plot_func(gt_counter_per_class, num_classes, window_title, plot_title, x_label, output_path, to_show=False, plot_color='forestgreen', true_p_bar='')\n\n '''\n Plot the total number of occurences of each class in the \"predicted\" folder\n '''\n window_title = \"Predicted Objects Info\"\n # Plot title\n plot_title = \"Predicted Objects\\n\" + \"(\" + str(count_images) + \" files and \"\n count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(pred_counter_per_class.values()))\n plot_title += str(count_non_zero_values_in_dictionary) + \" detected classes)\"\n # end Plot title\n x_label = \"Number of objects per class\"\n output_path = os.path.join('result','Predicted_Objects_Info.png')\n 
draw_plot_func(pred_counter_per_class, len(pred_counter_per_class), window_title, plot_title, x_label, output_path, to_show=False, plot_color='forestgreen', true_p_bar=count_true_positives)\n\n '''\n Draw mAP plot (Show AP's of all classes in decreasing order)\n '''\n window_title = \"mAP\"\n plot_title = \"mAP@IoU={0}: {1:.2f}%\".format(iou_threshold, mAP)\n x_label = \"Average Precision\"\n output_path = os.path.join('result','mAP.png')\n draw_plot_func(APs, num_classes, window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')\n\n '''\n Draw Precision plot (Show Precision of all classes in decreasing order)\n '''\n window_title = \"Precision\"\n plot_title = \"mPrec@IoU={0}: {1:.2f}%\".format(iou_threshold, mPrec)\n x_label = \"Precision rate\"\n output_path = os.path.join('result','Precision.png')\n draw_plot_func(precision_dict, len(precision_dict), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')\n\n '''\n Draw Recall plot (Show Recall of all classes in decreasing order)\n '''\n window_title = \"Recall\"\n plot_title = \"mRec@IoU={0}: {1:.2f}%\".format(iou_threshold, mRec)\n x_label = \"Recall rate\"\n output_path = os.path.join('result','Recall.png')\n draw_plot_func(recall_dict, len(recall_dict), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')\n\n\ndef get_mean_metric(metric_records, gt_classes_records):\n '''\n Calculate mean metric, but only count classes which have ground truth object\n\n Param\n metric_records: metric dict like:\n metric_records = {\n 'aeroplane': 0.79,\n 'bicycle': 0.79,\n ...\n 'tvmonitor': 0.71,\n }\n gt_classes_records: ground truth class dict like:\n gt_classes_records = {\n 'car': [\n ['000001.jpg','100,120,200,235'],\n ['000002.jpg','85,63,156,128'],\n ...\n ],\n ...\n }\n Return\n mean_metric: float value of mean metric\n '''\n mean_metric = 0.0\n count = 0\n for (class_name, metric) in metric_records.items():\n if (class_name in gt_classes_records) and (len(gt_classes_records[class_name]) != 0):\n mean_metric += metric\n count += 1\n mean_metric = (mean_metric/count)*100 if count != 0 else 0.0\n return mean_metric\n\n\ndef compute_mAP_PascalVOC(annotation_records, gt_classes_records, pred_classes_records, class_names, iou_threshold, show_result=True):\n '''\n Compute PascalVOC style mAP\n '''\n APs = {}\n count_true_positives = {class_name: 0 for class_name in list(gt_classes_records.keys())}\n #get AP value for each of the ground truth classes\n for _, class_name in enumerate(class_names):\n #if there's no gt obj for a class, record 0\n if class_name not in gt_classes_records:\n APs[class_name] = 0.\n continue\n gt_records = gt_classes_records[class_name]\n #if we didn't detect any obj for a class, record 0\n if class_name not in pred_classes_records:\n APs[class_name] = 0.\n continue\n pred_records = pred_classes_records[class_name]\n ap, true_positive_count = calc_AP(gt_records, pred_records, class_name, iou_threshold, show_result)\n APs[class_name] = ap\n count_true_positives[class_name] = true_positive_count\n\n #sort AP result by value, in descending order\n APs = OrderedDict(sorted(APs.items(), key=operator.itemgetter(1), reverse=True))\n\n #get mAP percentage value\n #mAP = np.mean(list(APs.values()))*100\n mAP = get_mean_metric(APs, gt_classes_records)\n\n #get GroundTruth count per class\n gt_counter_per_class = {}\n for (class_name, info_list) in gt_classes_records.items():\n 
gt_counter_per_class[class_name] = len(info_list)\n\n #get Precision count per class\n pred_counter_per_class = {class_name: 0 for class_name in list(gt_classes_records.keys())}\n for (class_name, info_list) in pred_classes_records.items():\n pred_counter_per_class[class_name] = len(info_list)\n\n\n #get the precision & recall\n precision_dict = {}\n recall_dict = {}\n for (class_name, gt_count) in gt_counter_per_class.items():\n if (class_name not in pred_counter_per_class) or (class_name not in count_true_positives) or pred_counter_per_class[class_name] == 0:\n precision_dict[class_name] = 0.\n else:\n precision_dict[class_name] = float(count_true_positives[class_name]) / pred_counter_per_class[class_name]\n\n if class_name not in count_true_positives or gt_count == 0:\n recall_dict[class_name] = 0.\n else:\n recall_dict[class_name] = float(count_true_positives[class_name]) / gt_count\n\n #get mPrec, mRec\n #mPrec = np.mean(list(precision_dict.values()))*100\n #mRec = np.mean(list(recall_dict.values()))*100\n mPrec = get_mean_metric(precision_dict, gt_classes_records)\n mRec = get_mean_metric(recall_dict, gt_classes_records)\n\n\n if show_result:\n plot_Pascal_AP_result(len(annotation_records), count_true_positives, len(gt_classes_records),\n gt_counter_per_class, pred_counter_per_class,\n precision_dict, recall_dict, mPrec, mRec,\n APs, mAP, iou_threshold)\n #show result\n print('\\nPascal VOC AP evaluation')\n for (class_name, AP) in APs.items():\n print('%s: AP %.4f, precision %.4f, recall %.4f' % (class_name, AP, precision_dict[class_name], recall_dict[class_name]))\n print('mAP@IoU=%.2f result: %f' % (iou_threshold, mAP))\n print('mPrec@IoU=%.2f result: %f' % (iou_threshold, mPrec))\n print('mRec@IoU=%.2f result: %f' % (iou_threshold, mRec))\n\n #return mAP percentage value\n return mAP, APs\n\n\n\ndef compute_AP_COCO(annotation_records, gt_classes_records, pred_classes_records, class_names, class_filter=None, show_result=True):\n '''\n Compute MSCOCO AP list on AP 0.5:0.05:0.95\n '''\n iou_threshold_list = [x/100 for x in range(50, 100, 5)]\n APs = {}\n pbar = tqdm(total=len(iou_threshold_list), desc='Eval COCO')\n for iou_threshold in iou_threshold_list:\n iou_threshold = round(iou_threshold, 2)\n mAP, mAPs = compute_mAP_PascalVOC(annotation_records, gt_classes_records, pred_classes_records, class_names, iou_threshold, show_result=False)\n\n if class_filter is not None:\n mAP = get_filter_class_mAP(mAPs, class_filter, show_result=False)\n\n APs[iou_threshold] = round(mAP, 6)\n pbar.update(1)\n\n pbar.close()\n\n #sort AP result by value, in descending order\n APs = OrderedDict(sorted(APs.items(), key=operator.itemgetter(1), reverse=True))\n\n #get overall AP percentage value\n AP = np.mean(list(APs.values()))\n\n if show_result:\n '''\n Draw MS COCO AP plot\n '''\n os.makedirs('result', exist_ok=True)\n window_title = \"MSCOCO AP on different IOU\"\n plot_title = \"COCO AP = {0:.2f}%\".format(AP)\n x_label = \"Average Precision\"\n output_path = os.path.join('result','COCO_AP.png')\n draw_plot_func(APs, len(APs), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')\n\n print('\\nMS COCO AP evaluation')\n for (iou_threshold, AP_value) in APs.items():\n print('IOU %.2f: AP %f' % (iou_threshold, AP_value))\n print('total AP: %f' % (AP))\n\n #return AP percentage value\n return AP, APs\n\n\ndef compute_AP_COCO_Scale(annotation_records, scale_gt_classes_records, pred_classes_records, class_names):\n '''\n Compute MSCOCO AP on different 
scale object: small, medium, large\n '''\n scale_APs = {}\n for scale_key in ['small','medium','large']:\n gt_classes_records = scale_gt_classes_records[scale_key]\n scale_AP, _ = compute_AP_COCO(annotation_records, gt_classes_records, pred_classes_records, class_names, show_result=False)\n scale_APs[scale_key] = round(scale_AP, 4)\n\n #get overall AP percentage value\n scale_mAP = np.mean(list(scale_APs.values()))\n\n '''\n Draw Scale AP plot\n '''\n os.makedirs('result', exist_ok=True)\n window_title = \"MSCOCO AP on different scale\"\n plot_title = \"scale mAP = {0:.2f}%\".format(scale_mAP)\n x_label = \"Average Precision\"\n output_path = os.path.join('result','COCO_scale_AP.png')\n draw_plot_func(scale_APs, len(scale_APs), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')\n\n '''\n Draw Scale Object Sum plot\n '''\n for scale_key in ['small','medium','large']:\n gt_classes_records = scale_gt_classes_records[scale_key]\n gt_classes_sum = {}\n\n for _, class_name in enumerate(class_names):\n # summarize the gt object number for every class on different scale\n gt_classes_sum[class_name] = np.sum(len(gt_classes_records[class_name])) if class_name in gt_classes_records else 0\n\n total_sum = np.sum(list(gt_classes_sum.values()))\n\n window_title = \"{} object number\".format(scale_key)\n plot_title = \"total {} object number = {}\".format(scale_key, total_sum)\n x_label = \"Object Number\"\n output_path = os.path.join('result','{}_object_number.png'.format(scale_key))\n draw_plot_func(gt_classes_sum, len(gt_classes_sum), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')\n\n print('\\nMS COCO AP evaluation on different scale')\n for (scale, AP_value) in scale_APs.items():\n print('%s scale: AP %f' % (scale, AP_value))\n print('total AP: %f' % (scale_mAP))\n\n\ndef add_gt_record(gt_records, gt_record, class_name):\n # append or add ground truth class item\n if class_name in gt_records:\n gt_records[class_name].append(gt_record)\n else:\n gt_records[class_name] = list([gt_record])\n\n return gt_records\n\n\ndef get_scale_gt_dict(gt_classes_records, class_names):\n '''\n Get ground truth class dict on different object scales, according to MS COCO metrics definition:\n small objects: area < 32^2\n medium objects: 32^2 < area < 96^2\n large objects: area > 96^2\n\n input gt_classes_records would be like:\n gt_classes_records = {\n 'car': [\n ['000001.jpg','100,120,200,235'],\n ['000002.jpg','85,63,156,128'],\n ...\n ],\n ...\n }\n return a record dict with following format, for AP/AR eval on different scale:\n scale_gt_classes_records = {\n 'small': {\n 'car': [\n ['000001.jpg','100,120,200,235'],\n ['000002.jpg','85,63,156,128'],\n ...\n ],\n ...\n },\n\n 'medium': {\n 'car': [\n ['000003.jpg','100,120,200,235'],\n ['000004.jpg','85,63,156,128'],\n ...\n ],\n ...\n },\n\n 'large': {\n 'car': [\n ['000005.jpg','100,120,200,235'],\n ['000006.jpg','85,63,156,128'],\n ...\n ],\n ...\n }\n }\n '''\n scale_gt_classes_records = {}\n small_gt_records = {}\n medium_gt_records = {}\n large_gt_records = {}\n\n for _, class_name in enumerate(class_names):\n gt_records = gt_classes_records[class_name]\n\n for (image_file, box) in gt_records:\n # get box area based on coordinate\n box_coord = [int(p) for p in box.split(',')]\n box_area = (box_coord[2] - box_coord[0]) * (box_coord[3] - box_coord[1])\n\n # add to corresponding gt records dict according to area size\n if box_area <= 32*32:\n small_gt_records 
= add_gt_record(small_gt_records, [image_file, box], class_name)\n elif box_area > 32*32 and box_area <= 96*96:\n medium_gt_records = add_gt_record(medium_gt_records, [image_file, box], class_name)\n elif box_area > 96*96:\n large_gt_records = add_gt_record(large_gt_records, [image_file, box], class_name)\n\n # form up scale_gt_classes_records\n scale_gt_classes_records['small'] = small_gt_records\n scale_gt_classes_records['medium'] = medium_gt_records\n scale_gt_classes_records['large'] = large_gt_records\n\n return scale_gt_classes_records\n\n\ndef get_filter_class_mAP(APs, class_filter, show_result=True):\n filtered_mAP = 0.0\n filtered_APs = OrderedDict()\n\n for (class_name, AP) in APs.items():\n if class_name in class_filter:\n filtered_APs[class_name] = AP\n\n filtered_mAP = np.mean(list(filtered_APs.values()))*100\n\n if show_result:\n print('\\nfiltered classes AP')\n for (class_name, AP) in filtered_APs.items():\n print('%s: AP %.4f' % (class_name, AP))\n print('mAP:', filtered_mAP, '\\n')\n return filtered_mAP\n\n\ndef eval_AP(model, model_format, annotation_lines, anchors, class_names, model_input_shape, eval_type, iou_threshold, conf_threshold, elim_grid_sense, v5_decode, save_result, class_filter=None):\n '''\n Compute AP for detection model on annotation dataset\n '''\n annotation_records, gt_classes_records = annotation_parse(annotation_lines, class_names)\n pred_classes_records = get_prediction_class_records(model, model_format, annotation_records, anchors, class_names, model_input_shape, conf_threshold, elim_grid_sense, v5_decode, save_result)\n AP = 0.0\n\n if eval_type == 'VOC':\n AP, APs = compute_mAP_PascalVOC(annotation_records, gt_classes_records, pred_classes_records, class_names, iou_threshold)\n\n if class_filter is not None:\n get_filter_class_mAP(APs, class_filter)\n\n elif eval_type == 'COCO':\n AP, _ = compute_AP_COCO(annotation_records, gt_classes_records, pred_classes_records, class_names, class_filter)\n # get AP for different scale: small, medium, large\n scale_gt_classes_records = get_scale_gt_dict(gt_classes_records, class_names)\n compute_AP_COCO_Scale(annotation_records, scale_gt_classes_records, pred_classes_records, class_names)\n else:\n raise ValueError('Unsupported evaluation type')\n\n return AP\n\n\n#load TF 1.x frozen pb graph\ndef load_graph(model_path):\n # check tf version to be compatible with TF 2.x\n global tf\n if tf.__version__.startswith('2'):\n import tensorflow.compat.v1 as tf\n tf.disable_eager_execution()\n\n # We parse the graph_def file\n with tf.gfile.GFile(model_path, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n # We load the graph_def in the default graph\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(\n graph_def,\n input_map=None,\n return_elements=None,\n name=\"graph\",\n op_dict=None,\n producer_op_list=None\n )\n return graph\n\n\ndef load_eval_model(model_path):\n # support of tflite model\n if model_path.endswith('.tflite'):\n from tensorflow.lite.python import interpreter as interpreter_wrapper\n model = interpreter_wrapper.Interpreter(model_path=model_path)\n model.allocate_tensors()\n model_format = 'TFLITE'\n\n # support of MNN model\n elif model_path.endswith('.mnn'):\n model = MNN.Interpreter(model_path)\n model_format = 'MNN'\n\n # support of TF 1.x frozen pb model\n elif model_path.endswith('.pb'):\n model = load_graph(model_path)\n model_format = 'PB'\n\n # support of ONNX model\n elif model_path.endswith('.onnx'):\n model = 
onnxruntime.InferenceSession(model_path)\n model_format = 'ONNX'\n\n # normal keras h5 model\n elif model_path.endswith('.h5'):\n custom_object_dict = get_custom_objects()\n\n model = load_model(model_path, compile=False, custom_objects=custom_object_dict)\n model_format = 'H5'\n K.set_learning_phase(0)\n else:\n raise ValueError('invalid model file')\n\n return model, model_format\n\n\ndef main():\n # class YOLO defines the default value, so suppress any default here\n parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, description='evaluate YOLO model (h5/pb/onnx/tflite/mnn) with test dataset')\n '''\n Command line options\n '''\n parser.add_argument(\n '--model_path', type=str, required=True,\n help='path to model file')\n\n parser.add_argument(\n '--anchors_path', type=str, required=True,\n help='path to anchor definitions')\n\n parser.add_argument(\n '--classes_path', type=str, required=False,\n help='path to class definitions, default=%(default)s', default=os.path.join('configs' , 'voc_classes.txt'))\n\n parser.add_argument(\n '--classes_filter_path', type=str, required=False,\n help='path to class filter definitions, default=%(default)s', default=None)\n\n parser.add_argument(\n '--annotation_file', type=str, required=True,\n help='test annotation txt file')\n\n parser.add_argument(\n '--eval_type', type=str, required=False, choices=['VOC', 'COCO'],\n help='evaluation type (VOC/COCO), default=%(default)s', default='VOC')\n\n parser.add_argument(\n '--iou_threshold', type=float,\n help='IOU threshold for PascalVOC mAP, default=%(default)s', default=0.5)\n\n parser.add_argument(\n '--conf_threshold', type=float,\n help='confidence threshold for filtering box in postprocess, default=%(default)s', default=0.001)\n\n parser.add_argument(\n '--model_input_shape', type=str,\n help='model image input shape as <height>x<width>, default=%(default)s', default='416x416')\n\n parser.add_argument(\n '--elim_grid_sense', default=False, action=\"store_true\",\n help = \"Eliminate grid sensitivity\")\n\n parser.add_argument(\n '--v5_decode', default=False, action=\"store_true\",\n help = \"Use YOLOv5 prediction decode\")\n\n parser.add_argument(\n '--save_result', default=False, action=\"store_true\",\n help='Save the detection result image in result/detection dir'\n )\n\n args = parser.parse_args()\n\n # param parse\n anchors = get_anchors(args.anchors_path)\n class_names = get_classes(args.classes_path)\n height, width = args.model_input_shape.split('x')\n model_input_shape = (int(height), int(width))\n assert (model_input_shape[0]%32 == 0 and model_input_shape[1]%32 == 0), 'model_input_shape should be multiples of 32'\n\n # class filter parse\n if args.classes_filter_path is not None:\n class_filter = get_classes(args.classes_filter_path)\n else:\n class_filter = None\n\n annotation_lines = get_dataset(args.annotation_file, shuffle=False)\n model, model_format = load_eval_model(args.model_path)\n\n start = time.time()\n eval_AP(model, model_format, annotation_lines, anchors, class_names, model_input_shape, args.eval_type, args.iou_threshold, args.conf_threshold, args.elim_grid_sense, args.v5_decode, args.save_result, class_filter=class_filter)\n end = time.time()\n print(\"Evaluation time cost: {:.6f}s\".format(end - start))\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.array",
"tensorflow.compat.v1.__version__.startswith",
"numpy.zeros",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.GraphDef",
"tensorflow.compat.v1.Session",
"tensorflow.lite.python.interpreter.Interpreter",
"tensorflow.keras.models.load_model",
"tensorflow.compat.v1.import_graph_def",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.compat.v1.gfile.GFile"
]
]
|
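The evaluation pipeline in the script above hinges on two small routines: pixel-convention IoU for matching predictions to ground truth, and the VOC-style interpolated AP integral over the precision/recall curve. A minimal self-contained sketch of both, checked on toy data (an illustrative re-implementation, not an import from the script itself):

# Illustrative sketch of the two core metric routines used above.

def box_area(box):
    # box is (xmin, ymin, xmax, ymax); the +1 follows the PascalVOC pixel convention
    return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)

def box_iou(pred_box, gt_box):
    # intersection-over-union of two boxes in (xmin, ymin, xmax, ymax) format
    inter_w = max(0.0, min(pred_box[2], gt_box[2]) - max(pred_box[0], gt_box[0]) + 1)
    inter_h = max(0.0, min(pred_box[3], gt_box[3]) - max(pred_box[1], gt_box[1]) + 1)
    inter_area = inter_w * inter_h
    union_area = box_area(pred_box) + box_area(gt_box) - inter_area
    return inter_area / union_area if union_area else 0.0

def voc_ap(rec, prec):
    # pad the curve, force precision to be monotonically decreasing,
    # then integrate precision over the points where recall changes
    mrec = [0.0] + list(rec) + [1.0]
    mpre = [0.0] + list(prec) + [0.0]
    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])
    return sum((mrec[i] - mrec[i - 1]) * mpre[i]
               for i in range(1, len(mrec)) if mrec[i] != mrec[i - 1])

print(box_iou((0, 0, 10, 10), (5, 5, 15, 15)))  # partial overlap: ~0.17
print(voc_ap([0.5, 0.5], [1.0, 0.5]))           # 1 TP then 1 FP over 2 GT boxes: 0.5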
mtsnel006/covid19za | [
"5db79ecb616041ff7980230d5995d90d6dbc86f5"
]
| [
"scripts/realtime_r0.py"
]
| [
"# Python script version of the Realtime R0 notebook\n# Used for automated processing\n\n# Originally by Kevin Systrom - April 17\n# Adapted for South Africa - Vukosi Marivate & Schalk van Heerden 29 April\n\nimport pandas as pd\nimport numpy as np\n\nfrom scipy import stats as sps\nfrom scipy.interpolate import interp1d\n\n\nremote_run = True\n\n\nR_T_MAX = 12\nr_t_range = np.linspace(0, R_T_MAX, R_T_MAX*100+1)\n\nGAMMA = 1/7\n\n\ndef highest_density_interval(pmf, p=.9, debug=False):\n # If we pass a DataFrame, just call this recursively on the columns\n if(isinstance(pmf, pd.DataFrame)):\n return pd.DataFrame([highest_density_interval(pmf[col], p=p) for col in pmf],\n index=pmf.columns)\n \n cumsum = np.cumsum(pmf.values)\n \n # N x N matrix of total probability mass for each low, high\n total_p = cumsum - cumsum[:, None]\n \n # Return all indices with total_p > p\n lows, highs = (total_p > p).nonzero()\n \n # Find the smallest range (highest density)\n best = (highs - lows).argmin()\n \n low = pmf.index[lows[best]]\n high = pmf.index[highs[best]]\n \n return pd.Series([low, high],\n index=[f'Low_{p*100:.0f}',\n f'High_{p*100:.0f}'])\n\n\nif remote_run:\n # better results for remote branches when Action scripts are run\n base_url = \"../\"\nelse:\n # get data directly from source for easy local analysis\n base_url = \"https://raw.githubusercontent.com/dsfsi/covid19za/master/\"\n\n\nurl = base_url + 'data/covid19za_provincial_cumulative_timeline_confirmed.csv'\nstates_all = pd.read_csv(url,\n parse_dates=['date'], dayfirst=True,\n squeeze=True).sort_index()\nstates_all = states_all.rename(columns={'total':'Total RSA'})\n\n# ZA: single plot\nstate_name = 'Total RSA'\ncutoff = 25\n\n# filter data if required\n#state_filter = states_all[:-1];\nstates = states_all\n\n\ndef prepare_cases(cases, cutoff=25):\n new_cases = cases.diff()\n\n smoothed = new_cases.rolling(7,\n win_type='gaussian',\n min_periods=1,\n center=True).mean(std=2).round()\n \n idx_start = np.searchsorted(smoothed, cutoff)\n \n smoothed = smoothed.iloc[idx_start:]\n original = new_cases.loc[smoothed.index]\n \n return original, smoothed\n\ncases = pd.Series(states[state_name].values,index=states['date'])\n\noriginal, smoothed = prepare_cases(cases, cutoff)\n\n\ndef get_posteriors(sr, sigma=0.15):\n\n # (1) Calculate Lambda\n lam = sr[:-1].values * np.exp(GAMMA * (r_t_range[:, None] - 1))\n\n \n # (2) Calculate each day's likelihood\n likelihoods = pd.DataFrame(\n data = sps.poisson.pmf(sr[1:].values, lam),\n index = r_t_range,\n columns = sr.index[1:])\n \n # (3) Create the Gaussian Matrix\n process_matrix = sps.norm(loc=r_t_range,\n scale=sigma\n ).pdf(r_t_range[:, None]) \n\n # (3a) Normalize all rows to sum to 1\n process_matrix /= process_matrix.sum(axis=0)\n \n # (4) Calculate the initial prior\n #prior0 = sps.gamma(a=4).pdf(r_t_range)\n prior0 = np.ones_like(r_t_range)/len(r_t_range)\n prior0 /= prior0.sum()\n\n # Create a DataFrame that will hold our posteriors for each day\n # Insert our prior as the first posterior.\n posteriors = pd.DataFrame(\n index=r_t_range,\n columns=sr.index,\n data={sr.index[0]: prior0}\n )\n \n # We said we'd keep track of the sum of the log of the probability\n # of the data for maximum likelihood calculation.\n log_likelihood = 0.0\n\n # (5) Iteratively apply Bayes' rule\n for previous_day, current_day in zip(sr.index[:-1], sr.index[1:]):\n\n #(5a) Calculate the new prior\n current_prior = process_matrix @ posteriors[previous_day]\n \n #(5b) Calculate the numerator of Bayes' Rule: 
P(k|R_t)P(R_t)\n numerator = likelihoods[current_day] * current_prior\n \n #(5c) Calcluate the denominator of Bayes' Rule P(k)\n denominator = np.sum(numerator)\n \n # Execute full Bayes' Rule\n posteriors[current_day] = numerator/denominator\n \n # Add to the running sum of log likelihoods\n log_likelihood += np.log(denominator)\n \n return posteriors, log_likelihood\n\n# Note that we're fixing sigma to a value just for the example\nposteriors, log_likelihood = get_posteriors(smoothed, sigma=.25)\n\n\n# Note that this takes a while to execute - it's not the most efficient algorithm\n\n## ERROR! Please review for South Africa country data range\n## The rest of the data ranges does not crash\n## But the statistical significance of any of the results are highly in doubt\n## > At Box [8], Line 13 of highest_density_interval func\n## > best = (highs - lows).argmin()\n## > attempt to get argmin of an empty sequence\n## Removing the confidence interval for now\n#hdis = highest_density_interval(posteriors, p=.9)\nmost_likely = posteriors.idxmax().rename('ML')\n\n# Look into why you shift -1\n#result = pd.concat([most_likely, hdis], axis=1)\nresult = pd.concat([most_likely], axis=1)\n\n# US: Since we now use a uniform prior, the first datapoint is pretty bogus, so just truncating it here\n# ZA: rename to single_result to add to final province plots again\nsingle_result = result.drop(result.index[0])\n\n## add dumpy data for confidence columns\nsingle_result['High_90'] = 0\nsingle_result['Low_90'] = 0\n\n\nsigmas = np.linspace(1/20, 1, 20)\n\n# ZA: only consider the official 9 provinces\nstates_to_process = list(states.columns.values[2:11])\n# ZA: do not think the total RSA sigma needs to be included to find max later\n# states_to_process.append('Total RSA') \n\nresults = {}\n\nfor state_name in states_to_process:\n \n print(state_name)\n \n # --> ZA prepare data\n # ZA: Rt is very small for some provinces\n cases = pd.Series(states[state_name].values,index=states['date'])\n cut = 10\n new, smoothed = prepare_cases(cases, cutoff=cut)\n \n # Rt for ZA is very small for some provinces\n # set threshold for smoothed data length at 3 to ensure posteriors can be calculated\n if len(smoothed) < 3:\n cut = 5\n new, smoothed = prepare_cases(cases, cutoff=cut)\n \n if len(smoothed) < 3:\n cut = 3\n new, smoothed = prepare_cases(cases, cutoff=cut)\n \n ## ignore Rt further for slow growth provinces\n if len(smoothed) < 3:\n print('BREAK')\n continue\n \n print(cut)\n ## <-- ZA prepare data\n \n result = {}\n \n # Holds all posteriors with every given value of sigma\n result['posteriors'] = []\n \n # Holds the log likelihood across all k for each value of sigma\n result['log_likelihoods'] = []\n \n for sigma in sigmas:\n posteriors, log_likelihood = get_posteriors(smoothed, sigma=sigma)\n result['posteriors'].append(posteriors)\n result['log_likelihoods'].append(log_likelihood)\n \n # Store all results keyed off of state name\n results[state_name] = result\n\nprint('Done.')\n\n\n# Each index of this array holds the total of the log likelihoods for\n# the corresponding index of the sigmas array.\ntotal_log_likelihoods = np.zeros_like(sigmas)\n\n# Loop through each state's results and add the log likelihoods to the running total.\nfor state_name, result in results.items():\n total_log_likelihoods += result['log_likelihoods']\n\n# Select the index with the largest log likelihood total\nmax_likelihood_index = total_log_likelihoods.argmax()\n\n# Select the value that has the highest log likelihood\nsigma = 
sigmas[max_likelihood_index]\nprint(sigma)\n\n\nfinal_results = None\n\nfor state_name, result in results.items():\n try:\n print(state_name)\n posteriors = result['posteriors'][max_likelihood_index]\n hdis_90 = highest_density_interval(posteriors, p=.9)\n hdis_50 = highest_density_interval(posteriors, p=.5)\n most_likely = posteriors.idxmax().rename('ML')\n result = pd.concat([most_likely, hdis_90, hdis_50], axis=1)\n\n # ZA: add province index\n result.index = pd.MultiIndex.from_product([[state_name], result.index], names=['state','date'])\n\n if final_results is None:\n final_results = result\n else:\n final_results = pd.concat([final_results, result])\n \n except:\n print('Fatal crash on final results routine: ' + state_name)\n continue\n \n if final_results is None:\n print('NO RESULTS')\n\nprint('Done.')\n\n\n# US: This can be moved before the plots\n# Since we now use a uniform prior, the first datapoint is pretty bogus, so just truncating it here\nfinal_results = final_results.groupby('state').apply(lambda x: x.iloc[1:].droplevel(0))\n\n\n# ZA: include Total RSA in export results\nsingle_result.index = pd.MultiIndex.from_product([['Total RSA'], single_result.index], names=['state','date'])\nfinal_results = pd.concat([final_results, single_result])\n\n\n# Uncomment the following line if you'd like to export the data\nexport_results = final_results[['ML', 'High_90', 'Low_90']]\n\nexport_results.to_csv('../data/calc/calculated_rt_sa_provincial_cumulative.csv', float_format='%.2f')\n\n\nurl = base_url + 'data/district_data/combined_district_keys.csv'\ndistrict_keys = pd.read_csv(url, index_col=[0,1,3,8,7]).sort_index()\ndistrict_keys\n\n\ndef calculate_district_rt(state_title, data_file, export):\n \n if (export == False & remote_run == True):\n # Do not even calculate further\n return []\n \n # Download latest district data\n # Data file names are no longer following standards\n #data_file = 'provincial_' + state_title + '_cumulative.csv'\n url = base_url + 'data/district_data/' + data_file + '.csv'\n states = pd.read_csv(url,\n parse_dates=['date'], dayfirst=True,\n squeeze=True).sort_index()\n \n # TODO: \"PerformanceWarning indexing past lexsort depth may impact performance\"\n # warning with this type of filter. 
Possibly due to index that is not sorted.\n # Consider another filter or query method to solve this issue.\n district_records = district_keys.loc[(state_title.upper(),1,'Case',data_file)]\n district_titles = np.array(district_records[['Data_title','Friendly_title']])\n \n states_to_process = []\n for district in district_titles:\n key = district[0]\n title = district[1]\n if title.find('Unknown') >= 0:\n continue\n states = states.rename(columns={key:title})\n states_to_process.append(title)\n \n ## Get all sigmas\n\n sigmas = np.linspace(1/20, 1, 20)\n\n results = {}\n\n for state_name in states_to_process:\n \n try:\n\n print(state_name)\n\n cases = pd.Series(states[state_name].values,index=states['date'])\n cut = 10\n new, smoothed = prepare_cases(cases, cutoff=cut)\n\n # Rt for ZA is very small for some provinces\n # set threshold for smoothed data length at 3 to ensure posteriors can be calculated\n if len(smoothed) < 3:\n cut = 5\n new, smoothed = prepare_cases(cases, cutoff=cut)\n\n ## ignore Rt further for slow growth provinces\n if len(smoothed) < 3:\n print('BREAK')\n continue\n\n print(cut)\n\n result = {}\n\n # Holds all posteriors with every given value of sigma\n result['posteriors'] = []\n\n # Holds the log likelihood across all k for each value of sigma\n result['log_likelihoods'] = []\n\n for sigma in sigmas:\n posteriors, log_likelihood = get_posteriors(smoothed, sigma=sigma)\n result['posteriors'].append(posteriors)\n result['log_likelihoods'].append(log_likelihood)\n\n # Store all results keyed off of state name\n results[state_name] = result\n \n except:\n print('Fatal crash on sigmas routine: ' + state_name)\n continue\n\n print('Done')\n\n\n ## Get sigma for max likelihood\n\n # Each index of this array holds the total of the log likelihoods for\n # the corresponding index of the sigmas array.\n total_log_likelihoods = np.zeros_like(sigmas)\n\n # Loop through each state's results and add the log likelihoods to the running total.\n for state_name, result in results.items():\n total_log_likelihoods += result['log_likelihoods']\n\n # Select the index with the largest log likelihood total\n max_likelihood_index = total_log_likelihoods.argmax()\n\n # Select the value that has the highest log likelihood\n sigma = sigmas[max_likelihood_index]\n\n\n ## Compile final results\n\n final_results = None\n\n for state_name, result in results.items():\n \n try:\n print(state_name)\n posteriors = result['posteriors'][max_likelihood_index]\n hdis_90 = highest_density_interval(posteriors, p=.9)\n hdis_50 = highest_density_interval(posteriors, p=.5)\n most_likely = posteriors.idxmax().rename('ML')\n result = pd.concat([most_likely, hdis_90, hdis_50], axis=1)\n\n result.index = pd.MultiIndex.from_product([[state_name], result.index], names=['state','date'])\n\n if final_results is None:\n final_results = result\n else:\n final_results = pd.concat([final_results, result])\n \n except:\n print('Fatal crash on final results routine: ' + state_name)\n continue\n \n if final_results is None:\n print('NO RESULTS')\n return []\n\n final_results = final_results.groupby('state').apply(lambda x: x.iloc[1:].droplevel(0))\n\n\n ## Print max calculated Gaussian\n\n print('Max Sigma: ' + str(sigma))\n\n # Note: Not plotting anymore with this method! 
Focussing on results, not optimising matplotlib.\n    # Create your own district plots in another notebook with the result data\n\n    ## Plot all\n\n    #ncols = 2\n    #nrows = int(np.ceil(len(results) / ncols))\n\n    #fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, nrows*3))\n\n    #for i, (state_name, result) in enumerate(final_results.groupby('state')):\n    #    plot_rt(result, axes.flat[i], state_name)\n\n    #fig.tight_layout()\n\n    #fig.suptitle('Real-time $R_t$ for ' + header, size=14)\n    #fig.subplots_adjust(top=plotscale)\n\n\n    ## Export results\n\n    export_results = final_results[['ML', 'High_90', 'Low_90']]\n\n    if export:\n        filename = 'calculated_rt_' + state_title + '_district_cumulative.csv'\n        export_results.to_csv('../data/calc/' + filename, float_format='%.2f')\n\n    # Return latest rt results\n    return export_results.groupby(level=0).last()\n\n\nresults_ec = calculate_district_rt('ec','provincial_ec_cumulative', export=False)\n\n\nresults_fs = calculate_district_rt('fs','provincial_fs_cumulative', export=False)\n\n\nresults_gp = calculate_district_rt('gp','provincial_gp_cumulative', export=True)\n\n\nresults_kzn = calculate_district_rt('kzn','provincial_kzn_cumulative', export=True)\n\n\nresults_lp = calculate_district_rt('lp','provincial_lp_cumulative', export=True)\n\n\nresults_mp = calculate_district_rt('mp','provincial_mp_cumulative', export=False)\n\n\nresults_nw = calculate_district_rt('nw','provincial_nw_cumulative', export=True)\n\n\nresults_wc = calculate_district_rt('wc','provincial_wc_cumulative', export=True)"
]
| [
[
"numpy.zeros_like",
"numpy.array",
"numpy.ones_like",
"scipy.stats.norm",
"numpy.log",
"pandas.DataFrame",
"numpy.sum",
"scipy.stats.poisson.pmf",
"numpy.exp",
"pandas.MultiIndex.from_product",
"pandas.concat",
"numpy.cumsum",
"numpy.searchsorted",
"numpy.linspace",
"pandas.Series",
"pandas.read_csv"
]
]
|
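The core of the realtime R_t script above is an iterative Bayes update over a discretized R_t grid: each day's Poisson likelihood multiplies a prior diffused through a Gaussian random-walk matrix. A condensed sketch of just that update on synthetic counts (same model as the script; the case numbers and sigma=0.25 are made up for illustration):

import numpy as np
from scipy import stats as sps

GAMMA = 1 / 7                                   # 1 / serial interval, as in the script
r_t_range = np.linspace(0, 12, 1201)            # discretized R_t grid
cases = np.array([25, 30, 38, 45, 60, 74])      # synthetic smoothed daily counts

# expected cases: k_t ~ Poisson(k_{t-1} * exp(GAMMA * (R_t - 1)))
lam = cases[:-1] * np.exp(GAMMA * (r_t_range[:, None] - 1))
likelihoods = sps.poisson.pmf(cases[1:], lam)   # shape: (grid, days - 1)

# Gaussian random-walk transition over R_t, normalized column-wise
process = sps.norm(loc=r_t_range, scale=0.25).pdf(r_t_range[:, None])
process /= process.sum(axis=0)

posterior = np.ones_like(r_t_range) / len(r_t_range)   # uniform prior
for t in range(likelihoods.shape[1]):
    numerator = likelihoods[:, t] * (process @ posterior)
    posterior = numerator / numerator.sum()

print('most likely R_t on the last day:', r_t_range[posterior.argmax()])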
scutyuanzhi/gin-config | [
"263624840fd9f9494e20926ad9e8b1a1d15a6853"
]
| [
"tests/torch/external_configurables_test.py"
]
| [
"# coding=utf-8\n# Copyright 2018 The Gin-Config Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=utf-8\n# Copyright 2019 The Gin-Config Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import absltest\n\nfrom gin import config\nfrom gin.torch import external_configurables # pylint: disable=unused-import\n\nimport torch\n\n\[email protected]\ndef fake_train_model(optimizer, scheduler=None):\n # pylint: disable=E1101\n opt = optimizer([torch.nn.Parameter(torch.rand(10))])\n # pylint: enable=E1101\n sch = None\n if scheduler:\n sch = scheduler(opt)\n return opt, sch\n\n\[email protected]\ndef configurable(**kwargs):\n return kwargs\n\n\nclass PyTorchConfigTest(absltest.TestCase):\n\n def assertAlmostEqualList(self, xs, ys):\n for i, (x, y) in enumerate(zip(xs, ys)):\n print(i)\n self.assertAlmostEqual(x, y)\n\n def tearDown(self):\n config.clear_config()\n super(PyTorchConfigTest, self).tearDown()\n\n def testConfigureOptimizerAndLearningRate(self):\n config_str = \"\"\"\n fake_train_model.optimizer = @Adam\n torch.optim.Adam.lr = 0.001\n torch.optim.Adam.betas = (0.8, 0.888)\n fake_train_model.scheduler = @StepLR\n StepLR.step_size = 10\n \"\"\"\n config.parse_config(config_str)\n\n opt, sch = fake_train_model() # pylint: disable=no-value-for-parameter\n\n self.assertIsInstance(opt, torch.optim.Adam)\n self.assertAlmostEqual(opt.param_groups[0]['betas'][0], 0.8)\n self.assertAlmostEqual(opt.param_groups[0]['betas'][1], 0.888)\n self.assertAlmostEqual(opt.defaults['betas'][0], 0.8)\n self.assertAlmostEqual(opt.defaults['betas'][1], 0.888)\n self.assertAlmostEqual(sch.step_size, 10)\n\n lrs = []\n for _ in range(15):\n lrs.append(opt.param_groups[0]['lr'])\n opt.step()\n sch.step()\n\n # Divide lr in tenth epoch by 10\n target_lrs = [0.001] * 10 + [0.0001] * 5\n\n self.assertAlmostEqualList(lrs, target_lrs)\n\n def testOptimizersWithDefaults(self):\n optimizers = [\n torch.optim.Adadelta,\n torch.optim.Adagrad,\n torch.optim.Adam,\n torch.optim.SparseAdam,\n torch.optim.Adamax,\n torch.optim.ASGD,\n torch.optim.LBFGS,\n torch.optim.RMSprop,\n torch.optim.Rprop,\n torch.optim.SGD,\n ]\n for optimizer in optimizers:\n config.clear_config()\n config_str = \"\"\"\n fake_train_model.optimizer = @{optimizer}\n {optimizer}.lr = 0.001\n \"\"\"\n config.parse_config(config_str.format(optimizer=optimizer.__name__))\n configed_optimizer, _ = 
fake_train_model(config.REQUIRED)\n      self.assertIsInstance(configed_optimizer, optimizer)\n\n  def testDtypes(self):\n    # Spot check a few.\n    config_str = \"\"\"\n      # Test without torch prefix, but using the\n      # prefix is strongly recommended!\n      configurable.float32 = %float32\n      # Test with torch prefix.\n      configurable.int8 = %torch.int8\n      configurable.float16 = %torch.float16\n    \"\"\"\n    config.parse_config(config_str)\n\n    vals = configurable()\n    # pylint: disable=E1101\n    self.assertIs(vals['float32'], torch.float32)\n    self.assertIs(vals['int8'], torch.int8)\n    self.assertIs(vals['float16'], torch.float16)\n    # pylint: enable=E1101\n\n\nif __name__ == '__main__':\n  absltest.main()\n"
]
| [
[
"torch.rand"
]
]
|
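The gin pattern the test above exercises can be reduced to a few lines: decorate a function as configurable, bind its arguments from a config string, then call it without passing them. A minimal sketch (the function name and bound values here are illustrative; gin.parse_config is the same entry point the test reaches via config.parse_config):

import gin

@gin.configurable
def build_optimizer_settings(lr, betas=(0.9, 0.999)):
    # lr has no default, but gin injects it once the binding below is parsed
    return {'lr': lr, 'betas': betas}

gin.parse_config("""
build_optimizer_settings.lr = 0.001
build_optimizer_settings.betas = (0.8, 0.888)
""")

print(build_optimizer_settings())  # {'lr': 0.001, 'betas': (0.8, 0.888)}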
renjithbaby23/tf2.0_examples | [
"79f8f0b018536e5f011fc7e413039e933f786b2e"
]
| [
"13_guide_to_activations_and_initializations.py"
]
| [
"\"\"\"\n* This script just lists GENERALLY BEST activation functions and\nthe best kernel initializers associated with them.\n\nGlorot and Bengio propose a way to significantly alleviate the problem with vanishing gradients.\nWe need the signal to flow properly in both directions: in the forward direction when making predictions, \nand in the reverse direction when backpropagating gradients. \nWe don’t want the signal to die out, nor do we want it to explode and saturate.\nFor the signal to flow properly, we need the variance of the\noutputs of each layer to be equal to the variance of its inputs, and we also need the\ngradients to have equal variance before and after flowing through a layer in the\nreverse direction. It is actually not possible to guarantee both unless the layer has an equal\nnumber of inputs and neurons (these numbers are called the fan_in and fan_out of the\nlayer), but they proposed a good compromise that has proven to work very well in\npractice: the connection weights of each layer must be initialized as a uniform on normal di\nsribution with specific mean and variance.\n\nfan_avg = fan_in + fan_out /2\n\n|Initialization | Activation functions | σ2 (Normal)|\n|-----------------------------------------------------------------|\n|Glorot | None, Tanh, Logistic, Softmax | 1 / fan_avg|\n|He | ReLU & variants | 2 / fan_in |\n|LeCun | SELU | 1 / fan_in |\n -----------------------------------------------------------------\n\n\n###############################################################\nSo which activation function should you use for the hidden layers of your \ndeep neural networks? Although your mileage will vary, in general \nSELU > ELU > leaky ReLU (and its variants) > ReLU > tanh > logistic. \nIf the network’s architecture prevents it from self-normalizing, \nthen ELU may perform better than SELU (since SELUis not smooth at z = 0). \nIf you care a lot about runtime latency, then you may prefer leaky ReLU. \nIf you don’t want to tweak yet another hyperparameter, you may just use \nthe default α values used by Keras (e.g., 0.3 for the leaky ReLU). \nIf you have spare time and computing power, you can use cross-validation \nto evaluate other activation functions, in particular RReLU if your network \nis over‐fitting, or PReLU if you have a huge training set.\n###############################################################\n\"\"\"\nimport tensorflow as tf\n\n# ReLU -> Best use with He Normal initialization\ntf.keras.layers.Dense(10, \n activation=\"relu\", \n kernel_initializer=\"he_normal\")\n\n# Leaky ReLU -> Best use with He Normal initialization\nleaky_relu = tf.keras.layers.LeakyReLU(alpha=0.2)\nlayer = tf.keras.layers.Dense(10, \n activation=leaky_relu, \n kernel_initializer=\"he_normal\")\n\n# SELU -> Best use with LeCunn Normal initialization\nlayer = tf.keras.layers.Dense(10, \n activation=\"selu\", \n kernel_initializer=\"lecun_normal\")\n\n# If you want He initialization with a uniform distribution, but based on fan avg rather\n# than fan_in , you can use the VarianceScaling initializer like this:\n\nhe_avg_init = tf.keras.initializers.VarianceScaling(scale=2., \n mode='fan_avg', \n distribution='uniform')\ntf.keras.layers.Dense(10, \n activation=\"sigmoid\", \n kernel_initializer=he_avg_init)\n\n\n# Although using He initialization along with ELU (or any variant of ReLU) can significantly reduce \n# the vanishing/exploding gradients problems at the beginning of training, it doesn’t guarantee \n# that they won’t come back during training. 
That's where batch normalization comes into the picture.\n"
]
| [
[
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.initializers.VarianceScaling",
"tensorflow.keras.layers.Dense"
]
]
|
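The initializer table in the script above is easy to check empirically: sample a weight matrix from each initializer and compare its variance to the nominal σ². A small sketch (the fan sizes are arbitrary; TF's *_normal initializers draw from a variance-corrected truncated normal, so the sampled variance should land close to nominal):

import numpy as np
import tensorflow as tf

fan_in, fan_out = 300, 100
fan_avg = (fan_in + fan_out) / 2

checks = [
    ('he_normal', 2 / fan_in),        # ReLU & variants
    ('glorot_normal', 1 / fan_avg),   # None, tanh, logistic, softmax
    ('lecun_normal', 1 / fan_in),     # SELU
]
for name, nominal_var in checks:
    init = tf.keras.initializers.get(name)
    weights = init(shape=(fan_in, fan_out)).numpy()
    print(f'{name}: sampled var={np.var(weights):.5f}, nominal={nominal_var:.5f}')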
joshuous/lisa | [
"4f6e3bf0ed051aaaf36eb7fa8eb4e5ba20fa1bb7"
]
| [
"libs/utils/energy_model.py"
]
| [
"# SPDX-License-Identifier: Apache-2.0\n#\n# Copyright (C) 2016, ARM Limited and contributors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom collections import namedtuple, OrderedDict\nfrom itertools import product\nimport logging\nimport operator\nimport re\n\nimport pandas as pd\nimport numpy as np\n\nfrom devlib.utils.misc import memoized, mask_to_list, ranges_to_list\nfrom devlib import TargetError\nfrom trappy.stats.grammar import Parser\n\n\"\"\"Classes for modeling and estimating energy usage of CPU systems\"\"\"\n\ndef read_multiple_oneline_files(target, glob_patterns):\n \"\"\"\n Quickly read many single-line files that match a glob pattern\n\n Finds all the files that match any of the glob patterns and, assuming that\n they each contain exactly 1 line of text, read them all at once. When the\n target or connection is slow this saves a lot of time when reading a large\n number of files.\n\n This will only work safely on stationary files, don't try to use it where\n the glob expansion will change often - for example /proc/**/autogroup would\n not work because /proc/ entries will likely appear & disappear while we're\n reading them.\n\n :param target: devlib target object to read from\n :param glob_pattern: Unix glob pattern matching the files to read\n :returns: A dictionary mapping matched paths to the values read. ``{}`` if\n no paths matched the globs.\n \"\"\"\n find_cmd = 'find ' + ' '.join(glob_patterns)\n try:\n paths = target.execute(find_cmd, as_root=True).split()\n except TargetError:\n return {}\n\n cmd = '{} | {} xargs cat'.format(find_cmd, target.busybox)\n contents = target.execute(cmd, as_root=True).splitlines()\n\n if len(contents) != len(paths):\n raise RuntimeError('File count mismatch while reading multiple files')\n\n return dict(zip(paths, contents))\n\nclass EnergyModelCapacityError(Exception):\n \"\"\"Used by :meth:`EnergyModel.get_optimal_placements`\"\"\"\n pass\n\nclass ActiveState(namedtuple('ActiveState', ['capacity', 'power'])):\n \"\"\"Represents power and compute capacity at a given frequency\n\n :param capacity: Relative compute capacity at frequency\n :param power: Power usage at frequency\n \"\"\"\n def __new__(cls, capacity=None, power=None):\n return super(ActiveState, cls).__new__(cls, capacity, power)\n\nclass _CpuTree(object):\n \"\"\"Internal class. 
Abstract representation of a CPU topology.\n\n    Each node contains either a single CPU or a set of child nodes.\n    \"\"\"\n    def __init__(self, cpu, children):\n        if (cpu is None) == (children is None):\n            raise ValueError('Provide exactly one of: cpu or children')\n\n        self.parent = None\n        self.cpu = cpu\n\n        if cpu is not None:\n            self.cpus = (cpu,)\n            self.children = []\n        else:\n            if len(children) == 0:\n                raise ValueError('children cannot be empty')\n            self.cpus = tuple(sorted(set().union(*[n.cpus for n in children])))\n            self.children = children\n            for child in children:\n                child.parent = self\n\n        self.name = None\n\n    def __repr__(self):\n        name_bit = ''\n        if self.name:\n            name_bit = 'name=\"{}\", '.format(self.name)\n\n        if self.children:\n            return '{}({}children={})'.format(\n                self.__class__.__name__, name_bit, self.children)\n        else:\n            return '{}({}cpus={})'.format(\n                self.__class__.__name__, name_bit, self.cpus)\n\n    def _iter(self, include_non_leaves):\n        for child in self.children:\n            for child_i in child._iter(include_non_leaves):\n                yield child_i\n        if include_non_leaves or not self.children:\n            yield self\n\n    def iter_nodes(self):\n        \"\"\"Iterate over nodes depth-first, post-order\"\"\"\n        return self._iter(True)\n\n    def iter_leaves(self):\n        \"\"\"Iterate over leaves\"\"\"\n        return self._iter(False)\n\nclass EnergyModelNode(_CpuTree):\n    \"\"\"Describes topology and energy data for an EnergyModel.\n\n    Represents a CPU topology with energy data. The active and idle state data\n    represents the power usage of just the hardware resources of this topology\n    level, not its children. e.g. If the node represents a cluster, the power\n    numbers should not include power used by the CPU - that power should be\n    included in the data of the child nodes.\n\n    Exactly one of ``cpu`` and ``children`` must be given.\n\n    :param active_states: Dict mapping frequencies to :class:`ActiveState`\n                          values. Compute capacity data is optional for\n                          non-leaf nodes.\n    :param idle_states: Dict mapping idle state names to power usage values\n    :param cpu: The CPU this node represents. If provided, this is a leaf node.\n    :type cpu: int\n    :param children: Non-empty list of child :class:`EnergyModelNode` objects\n    :param name: Optional human-readable name for this node. Leaf (CPU) nodes\n                 have a default name of \"cpuN\" where N is the cpu number.\n\n    :ivar cpus: CPUs contained in this node. Includes those of child nodes.\n    :ivar cpu: For convenience, this holds the single CPU contained by leaf\n               nodes. ``None`` for non-leaf nodes.\n    \"\"\"\n    def __init__(self, active_states, idle_states,\n                 cpu=None, children=None, name=None):\n        super(EnergyModelNode, self).__init__(cpu, children)\n\n        self._log = logging.getLogger('EnergyModel')\n\n        def is_monotonic(l, decreasing=False):\n            op = operator.ge if decreasing else operator.le\n            return all(op(a, b) for a, b in zip(l, l[1:]))\n\n        if active_states:\n            # Sanity check for active_states's frequencies\n            freqs = active_states.keys()\n            if not is_monotonic(freqs):\n                self._log.warning(\n                    'Active states frequencies are expected to be '\n                    'monotonically increasing. Freqs: {}'.format(freqs))\n\n            # Sanity check for active_states's powers\n            power_vals = [s.power for s in active_states.values()]\n            if not is_monotonic(power_vals):\n                self._log.warning(\n                    'Active states powers are expected to be '\n                    'monotonically increasing. 
Values: {}'.format(power_vals))\n\n        if idle_states:\n            # This is needed for idle_state_by_idx to work.\n            if not isinstance(idle_states, OrderedDict):\n                f = 'idle_states is {}, must be collections.OrderedDict'\n                raise ValueError(f.format(type(idle_states)))\n\n            # Sanity check for idle_states powers\n            power_vals = idle_states.values()\n            if not is_monotonic(power_vals, decreasing=True):\n                self._log.warning(\n                    'Idle states powers are expected to be '\n                    'monotonically decreasing. Values: {}'.format(power_vals))\n\n        if cpu is not None and not name:\n            name = 'cpu' + str(cpu)\n\n        self.name = name\n        self.active_states = active_states\n        self.idle_states = idle_states\n\n    @property\n    def max_capacity(self):\n        \"\"\"Compute capacity at highest frequency\"\"\"\n        return max(s.capacity for s in self.active_states.values())\n\n    def idle_state_by_idx(self, idx):\n        \"\"\"Return the idle state with index ``idx``\"\"\"\n        # NB self.idle_states must be ordered for this to work. __init__\n        # enforces that it is an OrderedDict\n        if self.idle_states and idx < len(self.idle_states):\n            return self.idle_states.keys()[idx]\n\n        raise KeyError('No idle state with index {}'.format(idx))\n\nclass EnergyModelRoot(EnergyModelNode):\n    \"\"\"\n    Convenience class for root of an EnergyModelNode tree.\n\n    Just like EnergyModelNode except that ``active_states`` and ``idle_states``\n    aren't required.\n    \"\"\"\n    def __init__(self, active_states=None, idle_states=None,\n                 cpu=None, children=None, name=None):\n        return super(EnergyModelRoot, self).__init__(\n            active_states, idle_states, cpu, children, name)\n\nclass PowerDomain(_CpuTree):\n    \"\"\"Describes the power domain hierarchy for an EnergyModel.\n\n    Power domains are a description of the topological dependencies in hardware\n    for entering idle states. \"Composite\" states such as cluster-sleep states\n    require a set of CPUs to all be idle before that state can be entered. In\n    that case those CPUs can be grouped into a power domain, and that composite\n    state attached to the power domain. Note that cpuidle is not aware of these\n    dependencies; they are typically handled by the platform firmware.\n\n    Exactly one of ``cpu`` and ``children`` must be given. That is, leaves of\n    the PowerDomain tree always contain exactly one CPU - each CPU is\n    represented as being in a power domain of its own. This represents the\n    assumption that all CPUs have at least one idle state (such as ARM WFI) that\n    they can enter independently of other CPUs.\n\n    :param idle_states: List of names of idle states for this power domain. Does\n                        not store power data - these names are used as keys into\n                        the ``idle_states`` field of :class:`EnergyModelNode`\n                        objects.\n    :type idle_states: list(str)\n    :param cpu: The CPU this node represents. If provided, this is a leaf node.\n    :type cpu: int\n    :param children: Non-empty list of child :class:`PowerDomain` objects\n    :type children: list(PowerDomain)\n\n    :ivar cpus: CPUs contained in this node. Includes those of child nodes.\n    :type cpus: tuple(int)\n    \"\"\"\n    def __init__(self, idle_states, cpu=None, children=None):\n        if idle_states is None:\n            raise ValueError('idle_states cannot be None (but may be empty)')\n        super(PowerDomain, self).__init__(cpu, children)\n        self.idle_states = idle_states\n\nclass EnergyModel(object):\n    \"\"\"Represents hierarchical CPU topology with power and capacity data\n\n    An energy model consists of\n\n    - A CPU topology, representing the physical (cache/interconnect) topology of\n      the CPUs. 
Each node stores the energy usage of that node's hardware when\n      it is in each active or idle state. They also store a compute capacity at\n      each frequency, but this is only meaningful for leaf nodes (CPUs) and may\n      be None at higher levels. These capacity values are relative; the maximum\n      capacity would usually be 1024, the value of SCHED_CAPACITY_SCALE in the\n      Linux kernel scheduler. Use EnergyModelNodes to describe this.\n\n    - A power domain topology, representing the hierarchy of areas that can be\n      powered down (idled).\n      The power domains are a single tree. Leaf nodes must contain exactly one\n      CPU and the root node must indirectly contain every CPU. Each power domain\n      has a list (maybe empty) of names of idle states that that domain can\n      enter.\n      Use PowerDomains to describe this.\n\n    - A set of frequency domains, representing groups of CPUs whose clock\n      frequencies must be equal (probably because they share a clock). The\n      frequency domains must be a partition of the CPUs.\n\n    :ivar cpu_nodes: List of leaf (CPU) :class:`EnergyModelNode`\n    :ivar cpus: List of logical CPU numbers in the system\n\n    :param root_node: Root of :class:`EnergyModelNode` tree\n    :param root_power_domain: Root of :class:`PowerDomain` tree\n    :param freq_domains: Collection of collections of logical CPU numbers\n                         representing frequency (clock) domains.\n\n    .. note::\n      The most significant shortcomings of the model are:\n\n        1. Voltage domains are assumed to be congruent to frequency domains\n\n        2. Idle state power is assumed to be independent of voltage\n\n        3. Temperature is ignored entirely\n\n    .. _cpu-utils:\n\n    .. admonition:: ``cpu_utils``: CPU util distributions\n\n        Used throughout this module: A ``cpu_utils`` is a list ``u`` where\n        ``u[N]`` is the sum of the frequency-invariant, capacity-invariant\n        utilization of tasks placed on CPU N. That is, the quantity represented\n        by a CPU runqueue's util_avg in the Linux kernel scheduler's\n        load-tracking system with EAS features enabled.\n\n        The range of utilization values is 0 -\n        :attr:`EnergyModel.capacity_scale`.\n\n        This represents a static utilization, assuming that tasks don't change\n        in size (for example representing a set of fixed periodic RT-App\n        workloads). 
For workloads that change over time, a series of\n ``cpu_utils`` items would be needed to describe the utilization, with a\n distinct estimation for each item in the series.\n \"\"\"\n\n capacity_scale = 1024\n \"\"\"The relative computational capacity of the most powerful CPU at its\n highest available frequency.\n \"\"\"\n\n def __init__(self, root_node, root_power_domain, freq_domains):\n self.cpus = root_node.cpus\n if self.cpus != tuple(range(len(self.cpus))):\n raise ValueError('CPU IDs [{}] are sparse'.format(self.cpus))\n\n # Check that freq_domains is a partition of the CPUs\n fd_intersection = set().intersection(*freq_domains)\n if fd_intersection:\n raise ValueError('CPUs {} exist in multiple freq domains'.format(\n fd_intersection))\n fd_difference = set(self.cpus) - set().union(*freq_domains)\n if fd_difference:\n raise ValueError('CPUs {} not in any frequency domain'.format(\n fd_difference))\n self.freq_domains = freq_domains\n\n # Check that nodes with energy data are all within a frequency domain\n for node in root_node.iter_nodes():\n if not node.active_states or node.idle_states:\n continue\n cpu_freq_doms = []\n for cpu in node.cpus:\n [cpu_freq_dom] = [d for d in freq_domains if cpu in d]\n cpu_freq_doms.append(cpu_freq_dom)\n if not all(d == cpu_freq_doms[0] for d in cpu_freq_doms[1:]):\n raise ValueError(\n 'Node {} (CPUs {}) '\n 'has energy data and overlaps freq domains'.format(\n node.name, node.cpus))\n\n def sorted_leaves(root):\n # Get a list of the leaf (cpu) nodes of a _CpuTree in order of the\n # CPU ID\n ret = sorted(list(root.iter_leaves()), key=lambda n: n.cpus[0])\n assert all(len(n.cpus) == 1 for n in ret)\n return ret\n\n self.root = root_node\n self.cpu_nodes = sorted_leaves(root_node)\n self.cpu_pds = sorted_leaves(root_power_domain)\n assert len(self.cpu_pds) == len(self.cpu_nodes)\n\n self._log = logging.getLogger('EnergyModel')\n\n max_cap = max(n.max_capacity for n in self.cpu_nodes)\n if max_cap != self.capacity_scale:\n self._log.debug(\n 'Unusual max capacity (%s), overriding capacity_scale', max_cap)\n self.capacity_scale = max_cap\n\n def _cpus_with_capacity(self, cap):\n \"\"\"\n Helper method to find the CPUs whose max capacity equals cap\n \"\"\"\n return [c for c in self.cpus\n if self.cpu_nodes[c].max_capacity == cap]\n\n @property\n @memoized\n def biggest_cpus(self):\n \"\"\"\n The CPUs with the highest compute capacity at their highest frequency\n \"\"\"\n return self._cpus_with_capacity(self.capacity_scale)\n\n @property\n @memoized\n def littlest_cpus(self):\n \"\"\"\n The CPUs with the lowest compute capacity at their highest frequency\n \"\"\"\n min_cap = min(n.max_capacity for n in self.cpu_nodes)\n return self._cpus_with_capacity(min_cap)\n\n @property\n @memoized\n def is_heterogeneous(self):\n \"\"\"\n True iff CPUs do not all have the same efficiency and OPP range\n \"\"\"\n states = self.cpu_nodes[0].active_states\n return any(c.active_states != states for c in self.cpu_nodes[1:])\n\n @property\n @memoized\n def cpu_groups(self):\n \"\"\"\n List of lists of CPUs who share the same active state values\n \"\"\"\n groups = []\n for node in self.cpu_nodes:\n for group in groups:\n group_states = self.cpu_nodes[group[0]].active_states\n if node.active_states == group_states:\n group.append(node.cpu)\n break\n else:\n groups.append([node.cpu])\n return groups\n\n def _deepest_idle_idxs(self, cpus_active):\n def find_deepest(pd):\n if any(cpus_active[c] for c in pd.cpus):\n return -1\n if pd.parent:\n parent_idx = 
find_deepest(pd.parent)\n            else:\n                parent_idx = -1\n            ret = parent_idx + len(pd.idle_states)\n            return ret\n        return [find_deepest(pd) for pd in self.cpu_pds]\n\n    def _guess_idle_states(self, cpus_active):\n        idxs = self._deepest_idle_idxs(cpus_active)\n        return [n.idle_state_by_idx(max(i, 0)) for n, i in zip(self.cpu_nodes, idxs)]\n\n    def get_cpu_capacity(self, cpu, freq=None):\n        \"\"\"Convenience method to get the capacity of a CPU at a given frequency\n\n        :param cpu: CPU to get capacity for\n        :param freq: Frequency to get the CPU capacity at. Default is max\n                     capacity.\n        \"\"\"\n        if freq is None:\n            return self.cpu_nodes[cpu].max_capacity\n        return self.cpu_nodes[cpu].active_states[freq].capacity\n\n    def guess_idle_states(self, cpus_active):\n        \"\"\"Pessimistically guess the idle states that each CPU may enter\n\n        If a CPU has any tasks it is estimated that it may only enter its\n        shallowest idle state in between task activations. If all the CPUs\n        within a power domain have no tasks, they will all be judged able to\n        enter that domain's deepest idle state. If any CPU in a domain has work,\n        no CPUs in that domain are assumed to enter any domain shared state.\n\n        e.g. Consider a system with\n\n        - two power domains PD0 and PD1\n\n        - 4 CPUs, with CPUs [0, 1] in PD0 and CPUs [2, 3] in PD1\n\n        - 4 idle states: \"WFI\", \"cpu-sleep\", \"cluster-sleep-0\" and\n          \"cluster-sleep-1\", where the \"cluster-sleep-*\" states are domain states,\n          i.e. a CPU can only enter those states when both CPUs in the domain\n          are idle.\n\n        Then here are some example inputs and outputs:\n\n        ::\n\n          # All CPUs idle:\n          [0, 0, 0, 0] -> [\"cluster-sleep-1\", \"cluster-sleep-1\",\n                           \"cluster-sleep-1\", \"cluster-sleep-1\"]\n\n          # All CPUs have work\n          [1, 1, 1, 1] -> [\"WFI\",\"WFI\",\"WFI\", \"WFI\"]\n\n          # One power domain active, the other idle\n          [0, 0, 1, 1] -> [\"cluster-sleep-1\", \"cluster-sleep-1\", \"WFI\",\"WFI\"]\n\n          # One CPU active.\n          # Note that CPU 2 has no work but is assumed to never be able to enter\n          # any \"cluster\" state.\n          [0, 0, 0, 1] -> [\"cluster-sleep-1\", \"cluster-sleep-1\",\n                           \"cpu-sleep\",\"WFI\"]\n\n        :param cpus_active: list where bool(cpus_active[N]) is False iff no\n                            tasks will run on CPU N.\n        :returns: List ``ret`` where ``ret[N]`` is the name of the estimated\n                  idle state that CPU N can enter during idle periods.\n\n        \"\"\"\n        states = self._guess_idle_states(cpus_active)\n        return [s or c.idle_states.keys()[0]\n                for s, c in zip(states, self.cpu_nodes)]\n\n    def _guess_freqs(self, cpu_utils):\n        overutilized = False\n        # Find what frequency each CPU would need if it was alone in its\n        # frequency domain\n        ideal_freqs = [0 for _ in self.cpus]\n        for node in self.cpu_nodes:\n            [cpu] = node.cpus\n            required_cap = cpu_utils[cpu]\n\n            possible_freqs = [f for f, s in node.active_states.iteritems()\n                              if s.capacity >= required_cap]\n\n            if possible_freqs:\n                ideal_freqs[cpu] = min(possible_freqs)\n            else:\n                # CPU cannot provide required capacity, use max freq\n                ideal_freqs[cpu] = max(node.active_states.keys())\n                overutilized = True\n\n        # Rectify the frequencies among domains\n        freqs = [0 for _ in ideal_freqs]\n        for domain in self.freq_domains:\n            domain_freq = max(ideal_freqs[c] for c in domain)\n            for cpu in domain:\n                freqs[cpu] = domain_freq\n\n        return freqs, overutilized\n\n    def guess_freqs(self, cpu_utils):\n        \"\"\"Work out CPU frequencies required to execute a workload\n\n        Find the lowest possible frequency for each CPU that provides enough\n        capacity to satisfy the utilization, taking into account frequency\n        
domains.\n\n        :param cpu_utils: Utilization distribution, see\n                          :ref:`cpu_utils <cpu-utils>`\n        :returns: List ``ret`` where ``ret[N]`` is the frequency that CPU N must\n                  run at\n        \"\"\"\n        freqs, _ = self._guess_freqs(cpu_utils)\n        return freqs\n\n    def _estimate_from_active_time(self, cpu_active_time, freqs, idle_states,\n                                   combine):\n        \"\"\"Helper for estimate_from_cpu_util\n\n        Like estimate_from_cpu_util but uses active time i.e. proportion of time\n        spent not-idle in the range 0.0 - 1.0.\n\n        If combine=False, return idle and active power as separate components.\n        \"\"\"\n        power = 0\n        ret = {}\n\n        assert all(0.0 <= a <= 1.0 for a in cpu_active_time)\n\n        for node in self.root.iter_nodes():\n            # Some nodes might not have energy model data, they could just be\n            # used to group other nodes (likely the root node, for example).\n            if not node.active_states or not node.idle_states:\n                continue\n\n            cpus = tuple(node.cpus)\n            # For now we assume topology nodes with energy models do not overlap\n            # with frequency domains\n            freq = freqs[cpus[0]]\n\n            # The active time of a node is estimated as the max of the active\n            # times of its children.\n            # This works great for the synthetic periodic workloads we use in\n            # LISA (where all threads wake up at the same time) but is probably\n            # no good for real workloads.\n            active_time = max(cpu_active_time[c] for c in cpus)\n            active_power = node.active_states[freq].power * active_time\n\n            _idle_power = max(node.idle_states[idle_states[c]] for c in cpus)\n            idle_power = _idle_power * (1 - active_time)\n\n            if combine:\n                ret[cpus] = active_power + idle_power\n            else:\n                ret[cpus] = {}\n                ret[cpus][\"active\"] = active_power\n                ret[cpus][\"idle\"] = idle_power\n\n        return ret\n\n    def estimate_from_cpu_util(self, cpu_utils, freqs=None, idle_states=None):\n        \"\"\"\n        Estimate the energy usage of the system under a utilization distribution\n\n        Optionally also takes freqs, a list of frequencies at which each CPU is\n        assumed to run, and idle_states, the idle states that each CPU can enter\n        between activations. If not provided, they will be estimated assuming an\n        ideal selection system (i.e. perfect cpufreq & cpuidle governors).\n\n        :param cpu_utils: Utilization distribution, see\n                          :ref:`cpu_utils <cpu-utils>`\n        :param freqs: List of CPU frequencies. Got from :meth:`guess_freqs` by\n                      default.\n        :param idle_states: List of CPU idle state names. Got from\n                            :meth:`guess_idle_states` by default.\n\n        :returns: Dict with power in bogo-Watts (bW), with contributions from\n                  each system component keyed with a tuple of the CPUs\n                  comprising that component (i.e. 
:attr:`EnergyModelNode.cpus`)\n\n                  ::\n\n                    {\n                        (0,) : 10,\n                        (1,) : 10,\n                        (0, 1) : 5,\n                    }\n\n                  This represents CPUs 0 and 1 each using 10bW and their shared\n                  resources using 5bW for a total of 25bW.\n        \"\"\"\n        if len(cpu_utils) != len(self.cpus):\n            raise ValueError(\n                'cpu_utils length ({}) must equal CPU count ({})'.format(\n                    len(cpu_utils), len(self.cpus)))\n\n        if freqs is None:\n            freqs = self.guess_freqs(cpu_utils)\n        if idle_states is None:\n            idle_states = self.guess_idle_states(cpu_utils)\n\n        cpu_active_time = []\n        for cpu, node in enumerate(self.cpu_nodes):\n            assert (cpu,) == node.cpus\n            cap = node.active_states[freqs[cpu]].capacity\n            cpu_active_time.append(min(float(cpu_utils[cpu]) / cap, 1.0))\n\n        return self._estimate_from_active_time(cpu_active_time,\n                                               freqs, idle_states, combine=True)\n\n    def get_optimal_placements(self, capacities):\n        \"\"\"Find the optimal distribution of work for a set of tasks\n\n        Find a list of candidates which are estimated to be optimal in terms of\n        power consumption, but that do not result in any CPU becoming\n        over-utilized.\n\n        If no such candidates exist, i.e. the system being modeled cannot\n        satisfy the workload's throughput requirements, an\n        :class:`EnergyModelCapacityError` is raised. For example, if e was an\n        EnergyModel modeling two CPUs with capacity 1024, this error would be\n        raised by:\n\n        ::\n\n          e.get_optimal_placements({\"t1\": 800, \"t2\": 800, \"t3\": 800})\n\n        This estimation assumes an ideal system of selecting OPPs and idle\n        states for CPUs.\n\n        .. note::\n            This is a brute force search taking time exponential wrt. the number\n            of tasks.\n\n        :param capacities: Dict mapping tasks to expected utilization\n                           values. These tasks are assumed not to change; they\n                           have a single static utilization value. A set of\n                           single-phase periodic RT-App tasks is an example of a\n                           suitable workload for this model.\n        :returns: List of ``cpu_utils`` items representing distributions of work\n                  under optimal task placements, see\n                  :ref:`cpu_utils <cpu-utils>`. 
Multiple task placements\n that result in the same CPU utilizations are considered\n equivalent.\n \"\"\"\n tasks = capacities.keys()\n\n num_candidates = len(self.cpus) ** len(tasks)\n self._log.debug(\n '%14s - Searching %d configurations for optimal task placement...',\n 'EnergyModel', num_candidates)\n\n candidates = {}\n excluded = []\n for cpus in product(self.cpus, repeat=len(tasks)):\n placement = {task: cpu for task, cpu in zip(tasks, cpus)}\n\n util = [0 for _ in self.cpus]\n for task, cpu in placement.items():\n util[cpu] += capacities[task]\n util = tuple(util)\n\n # Filter out candidate placements that have tasks greater than max\n # or that we have already determined that we cannot place.\n if (any(u > self.capacity_scale for u in util) or util in excluded):\n continue\n\n if util not in candidates:\n freqs, overutilized = self._guess_freqs(util)\n if overutilized:\n # This isn't a valid placement\n excluded.append(util)\n else:\n power = self.estimate_from_cpu_util(util, freqs=freqs)\n candidates[util] = sum(power.values())\n\n if not candidates:\n # The system can't provide full throughput to this workload.\n raise EnergyModelCapacityError(\n \"Can't handle workload - total cap = {}\".format(\n sum(capacities.values())))\n\n # Whittle down to those that give the lowest energy estimate\n min_power = min(p for p in candidates.itervalues())\n ret = [u for u, p in candidates.iteritems() if p == min_power]\n\n self._log.debug('%14s - Done', 'EnergyModel')\n return ret\n\n @classmethod\n def _find_core_groups(cls, target):\n \"\"\"\n Read the core_siblings masks for each CPU from sysfs\n\n :param target: Devlib Target object to read masks from\n :returns: A list of tuples of ints, representing the partition of core\n siblings\n \"\"\"\n cpus = range(target.number_of_cpus)\n\n topology_base = '/sys/devices/system/cpu/'\n\n # We only care about core_siblings, but let's check *_siblings, so we\n # can throw an error if a CPU's thread_siblings isn't just itself, or if\n # there's a topology level we don't understand.\n\n # Since we might have to read a lot of files, read everything we need in\n # one go to avoid taking too long.\n mask_glob = topology_base + 'cpu**/topology/*_siblings'\n file_values = read_multiple_oneline_files(target, [mask_glob])\n\n regex = re.compile(\n topology_base + r'cpu([0-9]+)/topology/([a-z]+)_siblings')\n\n ret = set()\n\n for path, mask_str in file_values.iteritems():\n match = regex.match(path)\n cpu = int(match.groups()[0])\n level = match.groups()[1]\n # mask_to_list returns the values in descending order, so we'll sort\n # them ascending. This isn't strictly necessary but it's nicer.\n siblings = tuple(sorted(mask_to_list(int(mask_str, 16))))\n\n if level == 'thread':\n if siblings != (cpu,):\n # SMT systems aren't supported\n raise RuntimeError('CPU{} thread_siblings is {}. 
'\n                                   'expected {}'.format(cpu, siblings, [cpu]))\n                continue\n            if level != 'core':\n                # The only other levels we should expect to find are 'book' and\n                # 'shelf', which are not used by architectures we support.\n                raise RuntimeError(\n                    'Unrecognised topology level \"{}\"'.format(level))\n\n            ret.add(siblings)\n\n        # Sort core groups so that the lowest-numbered cores are first\n        # Again, not strictly necessary, just more pleasant.\n        return sorted(ret, key=lambda x: x[0])\n\n    @classmethod\n    def from_simplifiedEM_target(cls, target,\n                                 directory='/sys/devices/system/cpu/energy_model'):\n        \"\"\"\n        Create an EnergyModel by reading a target filesystem on a device with\n        the new Simplified Energy Model present.\n\n        This uses the energy_model sysctl added by EAS patches to expose\n        the frequency domains, together with a tuple of capacity, frequency\n        and active power for each CPU. This feature is not upstream in mainline\n        Linux (as of v4.17), and only exists in Android kernels later than\n        android-4.14.\n\n        Wrt. idle states - the EnergyModel constructed won't be aware of\n        any power data or topological dependencies for entering \"cluster\"\n        idle states since the simplified model has no such concept.\n\n        Initialises only Active States for CPUs and clears all other levels.\n\n        :param target: Devlib target object to read filesystem from. Must have\n                       cpufreq and cpuidle modules enabled.\n        :returns: Constructed EnergyModel object based on the parameters\n                  reported by the target.\n        \"\"\"\n        if 'cpuidle' not in target.modules:\n            raise TargetError('Requires cpuidle devlib module. Please ensure '\n                              '\"cpuidle\" is listed in your target/test modules')\n\n        # Simplified EM on-disk format (for each frequency domain):\n        #    /sys/devices/system/cpu/energy_model/<frequency_domain>/..\n        #        ../capacity\n        #            contains a space-separated list of capacities in increasing order\n        #        ../cpus\n        #            cpulist-formatted representation of the cpus in the frequency domain\n        #        ../frequency\n        #            space-separated list of frequencies in corresponding order to capacities\n        #        ../power\n        #            space-separated list of power consumption in corresponding order to capacities\n        # taken together, the contents of capacity, frequency and power give you the required\n        # tuple for ActiveStates.\n        # hence, domain should be supplied as a glob, and fields should be\n        # capacity, cpus, frequency, power\n\n        sysfs_em = target.read_tree_values(directory, depth=3)\n\n        if not sysfs_em:\n            raise TargetError('Simplified Energy Model not exposed '\n                              'at {} in sysfs.'.format(directory))\n\n        cpu_to_fdom = {}\n        for fd, fields in sysfs_em.iteritems():\n            cpus = ranges_to_list(fields[\"cpus\"])\n            for cpu in cpus:\n                cpu_to_fdom[cpu] = fd\n            sysfs_em[fd]['cpus'] = cpus\n            sysfs_em[fd]['frequency'] = map(int, sysfs_em[fd]['frequency'].split(' '))\n            sysfs_em[fd]['power'] = map(int, sysfs_em[fd]['power'].split(' '))\n\n            # Compute the capacity of the CPUs at each OPP with a linear\n            # mapping to the frequencies\n            sysfs = '/sys/devices/system/cpu/cpu{}/cpu_capacity'\n            cap = target.read_value(sysfs.format(cpus[0]), int)\n            max_freq = max(sysfs_em[fd]['frequency'])\n            caps = [f * cap / max_freq for f in sysfs_em[fd]['frequency']]\n            sysfs_em[fd]['capacity'] = caps\n\n        def read_active_states(cpu):\n            fd = sysfs_em[cpu_to_fdom[cpu]]\n            cstates = zip(fd['capacity'], fd['power'])\n            active_states = [ActiveState(c, p) for c, p in cstates]\n            return OrderedDict(zip(fd['frequency'], active_states))\n\n        def read_idle_states(cpu):\n            # idle states are not supported in the new model\n            # record 0 power for 
them all, but name them according to target\n            names = [s.name for s in target.cpuidle.get_states(cpu)]\n            return OrderedDict((name, 0) for name in names)\n\n        # Read the CPU-level data\n        cpus = range(target.number_of_cpus)\n        cpu_nodes = []\n        for cpu in cpus:\n            node = EnergyModelNode(\n                cpu=cpu,\n                active_states=read_active_states(cpu),\n                idle_states=read_idle_states(cpu))\n            cpu_nodes.append(node)\n\n        root = EnergyModelRoot(children=cpu_nodes)\n        freq_domains = [sysfs_em[fdom]['cpus'] for fdom in sysfs_em]\n\n        # We don't have a way to read the idle power domains from sysfs (the kernel\n        # isn't even aware of them) so we'll just have to assume each CPU is its\n        # own power domain and all idle states are independent of each other.\n        cpu_pds = []\n        for cpu in cpus:\n            names = [s.name for s in target.cpuidle.get_states(cpu)]\n            cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names))\n\n        root_pd = PowerDomain(children=cpu_pds, idle_states=[])\n        return cls(root_node=root,\n                   root_power_domain=root_pd,\n                   freq_domains=freq_domains)\n\n    @classmethod\n    def from_sd_target(cls, target, filename=\n                       '/proc/sys/kernel/sched_domain/cpu{}/domain{}/group{}/energy/{}'):\n        \"\"\"\n        Create an EnergyModel by reading a target filesystem\n\n        This uses the sysctl added by EAS patches to expose the cap_states and\n        idle_states fields for each sched_group. This feature depends on\n        CONFIG_SCHED_DEBUG, and is not upstream in mainline Linux (as of v4.11),\n        so this method is only tested with Android kernels.\n\n        The kernel doesn't have any power domain data, so this method assumes\n        that all CPUs are totally independent wrt. idle states - the EnergyModel\n        constructed won't be aware of the topological dependencies for entering\n        \"cluster\" idle states.\n\n        Assumes the energy model has two levels (plus the root) - a level for\n        CPUs and a level for 'clusters'.\n\n        :param target: Devlib target object to read filesystem from. Must have\n                       cpufreq and cpuidle modules enabled.\n        :returns: Constructed EnergyModel object based on the parameters\n                  reported by the target.\n        \"\"\"\n        if 'cpufreq' not in target.modules:\n            raise TargetError('Requires cpufreq devlib module. Please ensure '\n                              '\"cpufreq\" is listed in your target/test modules')\n        if 'cpuidle' not in target.modules:\n            raise TargetError('Requires cpuidle devlib module. Please ensure '\n                              '\"cpuidle\" is listed in your target/test modules')\n\n        def sge_path(cpu, domain, group, field):\n            return filename.format(cpu, domain, group, field)\n\n        # Read all the files we might need in one go, otherwise this will take\n        # ages.\n        sge_globs = [sge_path('**', '**', '**', 'cap_states'),\n                     sge_path('**', '**', '**', 'idle_states')]\n        sge_file_values = read_multiple_oneline_files(target, sge_globs)\n\n        if not sge_file_values:\n            raise TargetError('Energy Model not exposed in sysfs. '\n                              'Check CONFIG_SCHED_DEBUG is enabled.')\n\n        # These functions read the cap_states and idle_states vectors for the\n        # first sched_group in the sched_domain for a given CPU at a given\n        # level. That first group will include the given CPU. 
So\n # read_active_states(0, 0) will give the CPU-level active_states for\n # CPU0 and read_active_states(0, 1) will give the \"cluster\"-level\n # active_states for the \"cluster\" that contains CPU0.\n\n def read_sge_file(path):\n try:\n return sge_file_values[path]\n except KeyError as e:\n raise TargetError('No such file: {}'.format(e))\n\n def read_active_states(cpu, domain_level):\n cap_states_path = sge_path(cpu, domain_level, 0, 'cap_states')\n cap_states_strs = read_sge_file(cap_states_path).split()\n\n # cap_states lists the capacity of each state followed by its power,\n # in increasing order. The `zip` call does this:\n # [c0, p0, c1, p1, c2, p2] -> [(c0, p0), (c1, p1), (c2, p2)]\n\n # joshuous: This needs to be modified to handle the weird EM cap_states_strs node in sdm845.\n # It currently has three values per line: capacity, frequency and power\n cap_states = [ActiveState(capacity=int(c), power=int(p))\n # for c, p in zip(cap_states_strs[0::2],\n # cap_states_strs[1::2])]\n for c, p in zip(cap_states_strs[0::3], # capacity: start at 0 and use steps of 3\n cap_states_strs[2::3])] # power: start at index 2 and use steps of 3\n freqs = target.cpufreq.list_frequencies(cpu)\n return OrderedDict(zip(sorted(freqs), cap_states))\n\n def read_idle_states(cpu, domain_level):\n idle_states_path = sge_path(cpu, domain_level, 0, 'idle_states')\n idle_states_strs = read_sge_file(idle_states_path).split()\n\n # get_states should return the state names in increasing depth order\n names = [s.name for s in target.cpuidle.get_states(cpu)]\n # idle_states is a list of power values in increasing order of\n # idle-depth/decreasing order of power.\n return OrderedDict(zip(names, [int(p) for p in idle_states_strs]))\n\n # Read the CPU-level data from sched_domain level 0\n cpus = range(target.number_of_cpus)\n cpu_nodes = []\n for cpu in cpus:\n node = EnergyModelNode(\n cpu=cpu,\n active_states=read_active_states(cpu, 0),\n idle_states=read_idle_states(cpu, 0))\n cpu_nodes.append(node)\n\n # Read the \"cluster\" level data from sched_domain level 1\n core_group_nodes = []\n for core_group in cls._find_core_groups(target):\n node=EnergyModelNode(\n children=[cpu_nodes[c] for c in core_group],\n active_states=read_active_states(core_group[0], 1),\n idle_states=read_idle_states(core_group[0], 1))\n core_group_nodes.append(node)\n\n root = EnergyModelRoot(children=core_group_nodes)\n\n # Use cpufreq to figure out the frequency domains\n freq_domains = []\n remaining_cpus = set(cpus)\n while remaining_cpus:\n cpu = next(iter(remaining_cpus))\n dom = target.cpufreq.get_related_cpus(cpu)\n freq_domains.append(dom)\n remaining_cpus = remaining_cpus.difference(dom)\n\n # We don't have a way to read the power domains from sysfs (the kernel\n # isn't even aware of them) so we'll just have to assume each CPU is its\n # own power domain and all idle states are independent of each other.\n cpu_pds = []\n for cpu in cpus:\n names = [s.name for s in target.cpuidle.get_states(cpu)]\n cpu_pds.append(PowerDomain(cpu=cpu, idle_states=names))\n\n root_pd=PowerDomain(children=cpu_pds, idle_states=[])\n\n return cls(root_node=root,\n root_power_domain=root_pd,\n freq_domains=freq_domains)\n\n @classmethod\n def from_target(cls, target):\n \"\"\"\n Create an EnergyModel by reading a target filesystem\n\n If present, load an EM provided via dt using from_sd_target, since these\n devices make the full EM available via the sched domain in sysfs. 
If\n        there is no EM at this location, attempt to load the simplified EM\n        made available via dedicated sysfs files.\n\n        :param target: Devlib target object to read filesystem from. Must have\n                       cpufreq and cpuidle modules enabled.\n        :returns: Constructed EnergyModel object based on the parameters\n                  reported by the target.\n        \"\"\"\n        _log = logging.getLogger('EMReader')\n\n        # To add a new EM reader type, the following is required:\n        # 1. Create an inline function to test for EM presence which takes a\n        #    target as the first parameter. Any exceptions raised here will\n        #    be caught in the loader loop.\n        # 2. Create a function which returns an EnergyModel as a member of this\n        #    class, also with a target as the first parameter.\n        # 3. Add an entry to the em_loaders dict where 'check' contains the\n        #    inline function and 'load' contains the class member function\n        # 4. If you need any additional data, add it to the em_loaders dict - any\n        #    additional keys will be passed to both 'check' and 'load' functions\n        #    as named parameters.\n\n        # Utility functions to determine if we should try to use a particular\n        # EM loader function.\n        def em_present_in_sd(target, filename=None):\n            cpu = target.list_online_cpus()[0]\n            f = filename.format(cpu, 0, 0, 'cap_states')\n            return target.file_exists(f)\n        def simplified_em_present_in_cpusysfs(target, directory=None):\n            return target.directory_exists(directory)\n\n        # em_loaders dictionary joins EM loaders and the identifying functions\n        # with any associated metadata\n        em_loaders = {\n            'sd' : { 'check': em_present_in_sd,\n                     'load': cls.from_sd_target,\n                     'filename': '/proc/sys/kernel/sched_domain/cpu{}/domain{}/group{}/energy/{}' },\n            'sysfs' : { 'check': simplified_em_present_in_cpusysfs,\n                        'load': cls.from_simplifiedEM_target,\n                        'directory': '/sys/devices/system/cpu/energy_model' }\n        }\n\n        for loader_type in em_loaders:\n            args = dict(em_loaders[loader_type])\n            check = args.pop('check')\n            load = args.pop('load')\n            try:\n                em_present = check(target, **args)\n            except Exception:\n                em_present = False\n            if em_present:\n                _log.info('Attempting to load EM using {}'.format(load.__name__))\n                return load(target, **args)\n\n        raise TargetError('Unable to probe for energy model on target.')\n\n    def estimate_from_trace(self, trace):\n        \"\"\"\n        Estimate the energy consumption of the system by looking at a trace\n\n        Uses the EAS energy model data, and the idle and DVFS conditions\n        reported in the trace, to estimate the energy usage of the system at\n        every given moment.\n\n        Takes into account knowledge of power domains - where cpuidle makes\n        impossible claims about idle states (e.g. a CPU in 'cluster sleep' while\n        its cluster siblings are running), the states will be minimised.\n\n        The accuracy of this is otherwise totally dependent on the accuracy of\n        the EAS energy model and the kernel's information. This does not take\n        into account cost of idle state or DVFS transitions, nor any other\n        conditions that are invisible to the kernel. The effect of any power\n        decisions that the platform makes independently of the kernel cannot be\n        seen in this data. 
Examples of this _might_ include firmware thermal\n management invisibly restricting CPU frequencies, or secure-world\n software with real-time constraints preventing deep CPU idle states.\n\n :param trace: The trace\n :type trace: Trace\n\n :returns: A DataFrame with a column for each node in the energy model,\n labelled with the CPU members of the node joined by '-'s.\n Shows the energy use by each node at each given moment.\n If you don't care about those details, call ``.sum(axis=1)` on\n the returned DataFrame to get a Series that shows overall\n estimated power usage over time.\n \"\"\"\n if not trace.hasEvents('cpu_idle') or not trace.hasEvents('cpu_frequency'):\n raise ValueError('Requires cpu_idle and cpu_frequency trace events')\n\n idle = Parser(trace.ftrace).solve('cpu_idle:state')\n freqs = Parser(trace.ftrace).solve('cpu_frequency:frequency')\n\n columns = ['-'.join(str(c) for c in n.cpus)\n for n in self.root.iter_nodes()\n if n.active_states and n.idle_states]\n\n inputs = pd.concat([idle, freqs], axis=1, keys=['idle', 'freq']).ffill()\n\n # Drop stuff at the beginning where we don't have the inputs\n # (e.g. where we have had our first cpu_idle event but no cpu_frequency)\n inputs = inputs.dropna()\n # Convert to int wholesale so we can do things like use the values in\n # the inputs DataFrame as list indexes. The only reason we had floats\n # was to make room for NaN, but we've just dropped all the NaNs, so\n # that's fine.\n inputs = inputs.astype(int)\n # Drop consecutive duplicates (optimisation)\n inputs = inputs[(inputs.shift() != inputs).any(axis=1)]\n\n memo_cache = {}\n\n def f(input_row):\n # The code in this module is slow. Try not to call it too much.\n memo_key = tuple(input_row)\n if memo_key in memo_cache:\n return memo_cache[memo_key]\n\n # cpuidle doesn't understand shared resources so it will claim to\n # put a CPU into e.g. 'cluster sleep' while its cluster siblings are\n # active. Rectify those false claims.\n cpus_active = input_row['idle'] == -1\n deepest_possible = self._deepest_idle_idxs(cpus_active)\n idle_idxs = [min(i, j) for i, j in zip(deepest_possible,\n input_row['idle'])]\n\n # Convert indexes to state names\n idle_states = [n.idle_state_by_idx(max(i, 0))\n for n, i in zip(self.cpu_nodes, idle_idxs)]\n\n # We don't use tracked load, we just treat a CPU as active or idle,\n # so set util to 0 or 100%.\n utils = cpus_active * self.capacity_scale\n\n nrg = self.estimate_from_cpu_util(cpu_utils=utils,\n idle_states=idle_states,\n freqs=input_row['freq'])\n\n # nrg is a dict mapping CPU group tuples to energy values.\n # Unfortunately tuples don't play nicely as pandas column labels\n # because parts of its API treat that as nested indexing\n # (i.e. df[(0, 1)] sometimes means df[0][1]). So we'll give them\n # awkward names.\n\n nrg = {'-'.join(str(c) for c in k): v for k, v in nrg.iteritems()}\n\n ret = pd.Series(nrg)\n memo_cache[memo_key] = ret\n return ret\n\n return inputs.apply(f, axis=1)\n"
]
| [
[
"pandas.concat",
"pandas.Series"
]
]
|
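The classes above are enough to build a toy model by hand. A minimal sketch under made-up numbers (two identical CPUs, one shared frequency domain, per-CPU power domains); it follows the file's Python 2 idioms (`iteritems`, `keys()[0]`), and every figure here is invented for illustration:

```python
from collections import OrderedDict

# Hypothetical per-CPU energy data: frequency -> ActiveState(capacity, power).
active = OrderedDict([
    (500000,  ActiveState(capacity=512,  power=100)),
    (1000000, ActiveState(capacity=1024, power=300)),
])
idle = OrderedDict([("WFI", 5), ("cpu-sleep", 1)])

cpu_nodes = [EnergyModelNode(cpu=i, active_states=active, idle_states=idle)
             for i in [0, 1]]

em = EnergyModel(
    root_node=EnergyModelRoot(children=cpu_nodes),
    root_power_domain=PowerDomain(
        idle_states=[],
        children=[PowerDomain(cpu=i, idle_states=["WFI", "cpu-sleep"])
                  for i in [0, 1]]),
    freq_domains=[[0, 1]])

# Power estimate for a 25%/75% utilization split (0..capacity_scale units).
print(em.estimate_from_cpu_util([256, 768]))
```

Because the two CPUs share a frequency domain, the model clamps both to the frequency the busier CPU needs before estimating active and idle power per node.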
sunjaeyoon/LearningPytorch | [
"aaee0418929e7ec1d8e8b21afa582cbb42e39de2"
]
| [
"GettingStarted/linear_regressionPT.py"
]
| [
"# -*- coding: utf-8 -*- Using Torch\n\nimport torch\nimport math\n\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # Uncomment this to run on GPU\n\n# Create random input and output data\nx = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)\ny = torch.sin(x)\n\n# Randomly initialize weights\na = torch.randn((), device=device, dtype=dtype)\nb = torch.randn((), device=device, dtype=dtype)\nc = torch.randn((), device=device, dtype=dtype)\nd = torch.randn((), device=device, dtype=dtype)\n\nlearning_rate = 1e-6\nfor t in range(2000):\n # Forward pass: compute predicted y\n y_pred = a + b * x + c * x ** 2 + d * x ** 3\n\n # Compute and print loss\n loss = (y_pred - y).pow(2).sum().item()\n if t % 100 == 99:\n print(t, loss)\n\n # Backprop to compute gradients of a, b, c, d with respect to loss\n grad_y_pred = 2.0 * (y_pred - y)\n grad_a = grad_y_pred.sum()\n grad_b = (grad_y_pred * x).sum()\n grad_c = (grad_y_pred * x ** 2).sum()\n grad_d = (grad_y_pred * x ** 3).sum()\n\n # Update weights using gradient descent\n a -= learning_rate * grad_a\n b -= learning_rate * grad_b\n c -= learning_rate * grad_c\n d -= learning_rate * grad_d\n\n\nprint(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')"
]
| [
[
"torch.device",
"torch.randn",
"torch.linspace",
"torch.sin"
]
]
|
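The snippet above derives the four gradients by hand. A natural variant (following the standard PyTorch tutorial progression this file mirrors) lets autograd do that work instead; a sketch with the same made-up model and learning rate:

```python
import math
import torch

x = torch.linspace(-math.pi, math.pi, 2000)
y = torch.sin(x)

# requires_grad=True tells autograd to track operations on these tensors.
a, b, c, d = (torch.randn((), requires_grad=True) for _ in range(4))

learning_rate = 1e-6
for t in range(2000):
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    loss = (y_pred - y).pow(2).sum()
    loss.backward()               # fills a.grad, b.grad, c.grad, d.grad
    with torch.no_grad():         # update weights without recording a graph
        for p in (a, b, c, d):
            p -= learning_rate * p.grad
            p.grad = None         # reset the accumulated gradient

print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
```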
lmriccardo/SBML2Modelica | [
"b16705e26346de1780cea9b8bc08b17ee2cb1073"
]
| [
"tests/biomodels/biomd190/BIOMD190_MPGOS/plot.py"
]
| [
"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\nfilename=sys.argv[-1]\nvalues = []\nwith open(filename, mode=\"r\", encoding=\"utf-8\") as f:\n start = False\n while (line := f.readline()):\n if start:\n new_line = line.strip().replace('\\n', '')\n values.append([float(x.strip()) for x in new_line.split(\",\")])\n if \"Time series\" in line:\n start = True\n\nnpvalues = np.array(values)\ntimes = npvalues[:, 0]\nvars = [npvalues[:, i] for i in range(1,16) if i not in [2, 4, 7, 6]]\n\nplt.figure(figsize=[15.0, 8.0])\nfor i in range(0, len(vars)):\n\tplt.plot(times, vars[i], marker=\"\", label=f\"x{i}\")\n \nplt.xlabel(\"Time [s]\")\nplt.legend(loc=\"upper left\")\nplt.savefig(f\"MPGOSPlot\")\nplt.close()\n"
]
| [
[
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
]
]
|
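The plotting script assumes a very specific input layout: everything up to a line containing "Time series" is skipped, and each following line holds 16 comma-separated floats with time in the first column (the walrus operator also means it needs Python 3.8+). A small sketch that fabricates such a file, under an assumed file name, so the script can be smoke-tested with `python plot.py mpgos_output.txt`:

```python
# Writes a tiny file in the format plot.py expects: a "Time series" marker,
# then rows of 16 comma-separated floats (time first). All values are dummies.
with open("mpgos_output.txt", "w", encoding="utf-8") as f:
    f.write("solver header\nTime series\n")
    for t in range(10):
        row = [0.1 * t] + [float(t + k) for k in range(15)]
        f.write(",".join(str(v) for v in row) + "\n")
```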
ArneKramerSunderbrink/adaptivetuning | [
"80dce0c8d031918a9d45dc84fdd6cd64f6df7a8a"
]
| [
"tests/test_dissonancereduction.py"
]
| [
"from adaptivetuning import Dissonancereduction\nimport numpy as np\n\ndef approx_equal(a, b, epsilon = 0.01):\n if isinstance(a, list):\n return all([approx_equal(a[i], b[i], epsilon) for i in range(len(a))])\n return abs((a - b) / (0.5 * (a + b))) < epsilon\n\ndef test_reduction():\n ji_intervals = [1, 16/15, 9/8, 6/5, 5/4, 4/3, 45/32, 3/2, 8/5, 5/3, 9/5, 15/8, 2]\n partials_vol_piano = np.array([3.7, 5.4, 1.2, 1.1, 0.95, 0.6, 0.5, 0.65, 0.001, 0.1, 0.2]) / 5.4\n\n # A major chord in closed position with added octave\n notes = [0, 4, 7, 12]\n et_fundamentals = [440 * 2**(i/12) for i in notes] # equal tempered version of that chord\n ji_fundamentals = [440 * ji_intervals[i] for i in notes] # just version\n fundamentals_vol = np.ones(len(notes))\n partials_pos = list(range(1, len(partials_vol_piano) + 1))\n partials_vol = partials_vol_piano\n\n # The partials of the tonic are used as fixed positions\n fixed_freq = [et_fundamentals[0] * p for p in partials_pos]\n fixed_vol = [fundamentals_vol[0] * v for v in partials_vol]\n\n dissonancereduction = Dissonancereduction()\n\n relevant_pairs, critical_bandwidths, volume_factors = dissonancereduction.quasi_constants(\n np.array(et_fundamentals[1:]), np.array(fundamentals_vol[1:]),\n np.array(partials_pos), np.array(partials_vol),\n np.array(fixed_freq), np.array(fixed_vol))\n\n # droping pairs with v = 0 or h > 1.46 reduces the amount of relevant pairs from 1210 to 137\n assert len(relevant_pairs) == 140\n\n # The second overtone of the tonic (2, -1) and the first overtone of the fifths (1, 1) are close -> relevant\n assert [1, 1, 2, -1] in relevant_pairs.tolist()\n\n # The fundamental of the tonic (0, -1) and the first overtone of the fifths (1, 1) are not close -> irrelevant\n assert [1, 1, 0, -1] not in relevant_pairs.tolist()\n\n # The fifth overtones of the fifth and nineth overtone of the tonic are close -> relevant\n assert [0, 8, 1, 8] in relevant_pairs.tolist()\n\n # But since the eighth overtones of out timbre are very weak, their volume factor is small\n assert approx_equal(volume_factors[relevant_pairs.tolist().index([0, 8, 1, 8])], 0.8608141448259226)\n\n # The first overtones are strong -> big volume factor\n assert approx_equal(volume_factors[relevant_pairs.tolist().index([0, 1, 1, 1])], 4.5313189239810825)\n\n # First overtones of the third and the fifths ([0, 1, 1, 1]) are approximately at 1200 HZ where the critical\n # bandwidth is approximately 200 Hz, our approximation is very rough of course\n assert approx_equal(critical_bandwidths[relevant_pairs.tolist().index([0, 1, 1, 1])], 187.33314724834622)\n\n # Third overtones of the third and the fifths ([0, 3, 1, 3]) are approximately at 2400 HZ where the critical\n # bandwidth is approximately 380 Hz\n assert approx_equal(critical_bandwidths[relevant_pairs.tolist().index([0, 3, 1, 3])], 373.04659900888373)\n\n dissonance, gradient = dissonancereduction.dissonance_and_gradient(\n np.array(et_fundamentals[1:]), np.array(partials_pos), np.array(fixed_freq),\n np.array(critical_bandwidths), np.array(volume_factors), np.array(relevant_pairs))\n\n # The most dissonant note of an equal tempered major chord is the major third which is to sharp\n # -> the biggest value of the gradient is the one corresponding to the major third and the negative gradient is\n # pointing in the negative direction, corresponding to a down-tuning of the third\n assert max(abs(gradient)) == gradient[0]\n\n result = dissonancereduction.tune(np.array(et_fundamentals[1:]), np.array(fundamentals_vol[1:]),\n 
np.array(partials_pos), np.array(partials_vol),\n                                      np.array(fixed_freq), np.array(fixed_vol))\n    assert result['success']\n\n    # The resulting chord is more similar to a just major chord than to an equal tempered major chord:\n    assert not approx_equal([et_fundamentals[0]] + result['x'].tolist(), et_fundamentals, epsilon=0.001)\n    assert approx_equal([et_fundamentals[0]] + result['x'].tolist(), ji_fundamentals, epsilon=0.001)\n\n\ndef test_zero_cases():\n    ji_intervals = [1, 16/15, 9/8, 6/5, 5/4, 4/3, 45/32, 3/2, 8/5, 5/3, 9/5, 15/8, 2]\n    partials_vol_piano = np.array([3.7, 5.4, 1.2, 1.1, 0.95, 0.6, 0.5, 0.65, 0.001, 0.1, 0.2]) / 5.4\n\n    # A major chord in closed position with added octave\n    notes = [0, 4, 7, 12]\n    et_fundamentals = [440 * 2**(i/12) for i in notes]  # equal tempered version of that chord\n    ji_fundamentals = [440 * ji_intervals[i] for i in notes]  # just version\n    fundamentals_vol = np.ones(len(notes))\n    partials_pos = list(range(1, len(partials_vol_piano) + 1))\n    partials_vol = partials_vol_piano\n\n    # The partials of the tonic are used as fixed positions\n    fixed_freq = [et_fundamentals[0] * p for p in partials_pos]\n    fixed_vol = [fundamentals_vol[0] * v for v in partials_vol]\n\n    dissonancereduction = Dissonancereduction()\n\n    # We are only interested in the dissonance relative to the fundamentals\n    # If there are no fundamentals, dissonance is 0\n    relevant_pairs, critical_bandwidths, volume_factors = dissonancereduction.quasi_constants(\n        np.array([]), np.array([]), np.array([]), np.array([]), np.array([]), np.array([]))\n    assert len(relevant_pairs) == 0\n\n    dissonance, gradient = dissonancereduction.dissonance_and_gradient(\n        np.array([]), np.array([]), np.array([]),\n        np.array([]), np.array([]), np.array([]))\n    assert dissonance == 0\n    assert len(gradient) == 0\n\n    result = dissonancereduction.tune(np.array([]), np.array([]),\n                                      np.array([]), np.array([]),\n                                      np.array([]), np.array([]))\n    assert len(result['x']) == 0\n\n    # If there is only one fundamental and no fixed frequencies, dissonance is 0\n    relevant_pairs, critical_bandwidths, volume_factors = dissonancereduction.quasi_constants(\n        np.array([et_fundamentals[0]]), np.array([fundamentals_vol[0]]),\n        np.array(partials_pos), np.array(partials_vol),\n        np.array([]), np.array([]))\n    assert len(relevant_pairs) == 0\n\n    dissonance, gradient = dissonancereduction.dissonance_and_gradient(\n        np.array([et_fundamentals[0]]), np.array(partials_pos), np.array([]),\n        np.array([]), np.array([]), np.array([]))\n    assert dissonance == 0\n    assert gradient == np.array([0.])\n\n    result = dissonancereduction.tune(np.array([et_fundamentals[0]]), np.array([fundamentals_vol[0]]),\n                                      np.array(partials_pos), np.array(partials_vol),\n                                      np.array([]), np.array([]))\n    assert len(result['x']) == 1\n    assert result['x'][0] == 440"
]
| [
[
"numpy.array"
]
]
|
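The helper `approx_equal` in the tests above compares the relative difference against `epsilon` (default 1%), recursing over lists. A few concrete checks of that behaviour (values chosen here purely for illustration):

```python
assert approx_equal(440.0, 440.3)                # ~0.07% apart -> within tolerance
assert not approx_equal(440.0, 466.16)           # ~5.8% apart (a semitone) -> not equal
assert approx_equal([1.0, 2.0], [1.001, 2.001])  # element-wise on lists
```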
astrophysicist87/PHripser | [
"7a124f6fe66fb802f63a2f6f548f134fc265133f"
]
| [
"pers_hom.py"
]
| [
"import numpy as np\nimport os\nimport scipy.stats as st\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance as ssd \nimport itertools as it\n\nclass ph:\n def __init__(self,my_data,my_hom_dim,my_thresh,my_dist_type=None,my_dist_mat = None,my_dist_max=None):\n self.data= my_data\n self.hom_dim = my_hom_dim\n self.thresh = my_thresh\n self.dist_type = my_dist_type\n self.dist_mat = my_dist_mat\n self.pp = None\n self.dist_max = my_dist_max\n self.birth=None\n self.death=None\n self.dims = None\n\n def build_distance(self,p=2):\n method = getattr(self,self.dist_type,lambda:\"invalid distance type\")\n self.dist_mat = method(p)\n self.dist_max = np.max(self.dist_mat)\n \n def pnorm(self,my_p):\n return ssd.squareform(ssd.pdist(self.data,metric='minkowski', p = my_p))\n\n def spherical_dist(self,p):\n diffMat = lambda x:x[:,np.newaxis]-x\n multMat = lambda x: x[:,np.newaxis]*x\n hs = lambda x: (1-np.cos(x))/2\n halpha = lambda x,y,z: x + y*z\n dis = lambda x: np.arcsin(np.sqrt(x))\n costheta = self.data[:,0]\n pol = np.arccos(costheta)\t \n az = self.data[:,1]\n\n hpol,haz = map(hs,list(map(diffMat,[pol,az])))\n cosmult = multMat(costheta)\n ha=halpha(hpol,cosmult, haz)\n my_dist_mat = dis(ha)\n return my_dist_mat\n\n def unit_circle_dist(self,p):\n return np.mod(np.abs(self.data[:,np.newaxis] - self.data),np.pi)\n\n def mat_pert_dist(self,data):\n dist_mat = np.zeros_like(data)\n for i in np.arange(dist_mat.shape[0]):\n for j in np.arange(i+1,dist_mat.shape[1]):\n dist_mat[j,i] = np.abs(data[i,j]/(data[i,i]-data[j,j]))\n return dist_mat + dist_mat.T\n\n\n def betti(self,radii_list):\n tmp = []\n curr_dim = 0\n for i in radii_list:\n tmp.append(curr_dim)\n if i == True:\n curr_dim +=1\n tmp.pop()\n return np.array(tmp)\n\n def pers_pairs(self,dat):\n radii_list = dat[:,1] == -1.0\n my_dims = self.betti(radii_list)\n birth_tmp = np.array(dat[:,0]) \n my_birth = np.delete(birth_tmp, np.where(birth_tmp==-1.0))\n death_tmp = np.array(dat[:,1])\n death_tmp[death_tmp > 1000000] = self.dist_max\n my_death = np.delete(death_tmp, np.where(death_tmp==-1.0))\n self.birth, self.death, self.dims = my_birth, my_death,my_dims\n \n def run_ripser(self,input_str,output_str):\n np.savetxt(input_str, self.dist_mat, delimiter=\",\")\n if self.thresh > 0:\n os.system(\"ripser.exe {} --format distance --dim {} --threshold {} > {}\".format(input_str,self.hom_dim,self.thresh,output_str))\n ripser_output=np.loadtxt(output_str,delimiter=\",\",skiprows= 1)\n else:\n os.system(\"ripser.exe {} --format distance --dim {} > {}\".format(input_str,self.hom_dim,output_str))\n ripser_output=np.loadtxt(output_str,delimiter=\",\")\n self.pers_pairs(ripser_output)\n\n\n def plot_pp(self,title_str):\n tmp = np.unique(self.dims)\n for dim in tmp:\n b = self.birth[self.dims == dim]\n d = self.death[self.dims==dim]\n plt.scatter(b,d,label = \"dim {}\".format(dim))\n\n plt.xlabel(\"birth radius\")\n plt.ylabel(\"death radius\")\n x = np.arange(np.max(self.death))\n plt.plot(x,x,color='r',linestyle=\"dashed\")\n plt.title(title_str)\n plt.legend()\n plt.ylim([0,5])\n plt.xlim([-.01,np.max(self.birth)])\n plt.show()\n\n def hist_pp(self,title_str):\n tmp = np.unique(self.dims)\n for dim in tmp:\n b = self.birth[self.dims == dim]\n d = self.death[self.dims==dim]\n plt.hist(d-b,label = \"dim {}\".format(dim),alpha=.3,density=True,bins=30)\n plt.xlabel(\"lifetime sum\")\n plt.ylabel(\"density\")\n plt.title(title_str)\n plt.legend()\n plt.show()\n\n def fractal_dim(self, alpha):\n un_dims = np.unique(self.dims)\n result = []\n 
for dim in un_dims:\n b= self.birth[self.dims == dim]\n d = self.death[self.dims == dim]\n result.append(np.sum((d-b)**alpha))\n return np.array(result)\n\n\n\n def pers_entropy(self,alpha):\n un_dims = np.unique(self.dims)\n result = []\n for dim in un_dims:\n b= self.birth[self.dims == dim]\n d = self.death[self.dims == dim]\n s = d-b\n prob = s/np.sum(s)\n nor = np.log(len(b))\n if alpha == 1:\n entropy = -np.sum(prob*np.log(prob))/nor \n else:\n entropy = np.log(np.sum(prob**alpha))/(1-alpha)\n result.append(entropy)\n return np.array(result)\n\n\n '''\n given persistence diagram P, the rank function r(a,b) is the sum of persistence points to the north-west of (a,b); i.e., the number of homology groups born before a and die after b. We have to decide a consistent gridding, or do a linear interpolation.\n '''\n def rank_pp(self):\n return 0\n\n '''\n calculate euler integral from the distance matrix. If hom_dim = i, then we sum up through i+1 simplices.\n '''\n def euler_integral(self):\n result = 0\n result += self.dist_mat.shape[0]*self.dist_max \n result -= np.sum(self.dist_mat)/2.0\n if self.hom_dim == 0:\n return result\n count = 2\n return 0\n # come back, implement a max over combinations lambda function\n # for np.arange(2,self.hom_dim+2)\n\n def inter_pcd(self, ph1):\n new_ph = ph(None,self.hom_dim,0,self.dist_type)\n if self.data.ndim == 1:\n new_ph.data = np.concatenate((self.data,ph1.data))\n tmp = len(self.data)\n else:\n new_ph.data = np.vstack((self.dist_mat,ph1.dist_mat))\n tmp = self.data.shape[0]\n\n new_ph.build_distance()\n new_ph.dist_max = np.max(new_ph.dist_mat[:tmp,tmp:])\n new_ph.dist_mat[:tmp,:tmp] = new_ph.dist_max + 1 \n new_ph.dist_mat[tmp:, tmp:] = new_ph.dist_max + 1\n new_ph.thresh = new_ph.dist_max \n\n return new_ph\n\n def get_simplicity(self, clusters):\n n = clusters.size\n den = np.sum(np.log(np.arange(1,n+1)))\n num = 0.0\n for cluster in clusters:\n num += np.sum(np.log(np.arange(1,cluster.size+1)))\n return num/den\n\n def simplicities(self):\n result = []\n #den = np.sum(np.log(np.arange(1,len(self.dims)+1)))\n # set birth and death radii for all connected components\n b = self.birth[self.dims == 0]\n d = self.death[self.dims == 0]\n pairs = np.array([b,d]).T\n pairs[pairs[:,1].argsort()]\n clusters = np.arange(1,len(pairs)+1).reshape([len(pairs),1])\n for i, pair in enumerate(pairs):\n # locate cluster containing pair[0]\n pair0InCluster = np.array(list(map(lambda y: np.isin(pair[0],y),clusters))).tolist()\n # locate cluster containing pair[1]\n pair1InCluster = np.array(list(map(lambda y: np.isin(pair[1],y),clusters))).tolist()\n # merge both clusters\n merged = np.concatenate(( clusters[pair0InCluster], clusters[pair1InCluster] ))\n # remove both unmerged clusters from list and append single merged cluster\n clusters = np.append( clusters[ (np.logical_not(pair0InCluster))\n & (np.logical_not(pair1InCluster))], merged )\n # compute \"simplicity\" for this configuration and append to results\n # (together with merge step and death radius)\n result.append([i, pair[1], self.get_simplicity(clusters)])\n return np.array(result)\n \n\n\n\n\n\n\n\n"
]
| [
[
"numpy.arccos",
"numpy.where",
"numpy.cos",
"numpy.max",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.log",
"numpy.arange",
"numpy.sqrt",
"numpy.vstack",
"numpy.array",
"numpy.savetxt",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.show",
"numpy.logical_not",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"numpy.unique",
"numpy.isin"
]
]
|
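A sketch of how the `ph` class above is meant to be driven, using the built-in `pnorm` metric on random planar points. Note that `run_ripser` shells out to a `ripser.exe` binary, which must be available on the PATH; the file names here are arbitrary:

```python
import numpy as np

points = np.random.rand(100, 2)  # 100 random points in the plane
diagram = ph(my_data=points, my_hom_dim=1, my_thresh=0, my_dist_type="pnorm")
diagram.build_distance(p=2)      # fills dist_mat and dist_max via pnorm()
diagram.run_ripser("dist.csv", "ripser_out.txt")  # requires ripser.exe
diagram.plot_pp("100 random points in the plane")
```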
itremel/twitchchess | [
"dcf7fc0b26dd863f8fd4cde729fcf47e866d6a6b"
]
| [
"MCTSNode.py"
]
| [
"import numpy as np\r\nimport chess\r\nfrom collections import defaultdict\r\nfrom state import State\r\nimport copy\r\n\r\n\r\nclass MCTSNode(object):\r\n\r\n def __init__(self, state, parent=None, previous_move=None):\r\n self.state = state\r\n self.parent = parent\r\n self.children = []\r\n self._number_of_visits = 0.\r\n self._results = defaultdict(int)\r\n self._untried_actions = None\r\n self.move_from_previous_state_to_this_position = previous_move\r\n \r\n def is_fully_expanded(self):\r\n return len(self.untried_actions()) == 0\r\n\r\n def best_child(self, c_param=1.4):\r\n choices_weights = [\r\n (c.q() / c.n()) + c_param * np.sqrt((np.log(self.n()) / c.n()))\r\n for c in self.children\r\n ]\r\n return self.children[np.argmax(choices_weights)]\r\n\r\n def rollout_policy(self, possible_moves, s):\r\n #---------------------------------------------------\r\n #neural net rollout policy\r\n #from play import Valuator\r\n #netvaluator = Valuator()\r\n #isort = []\r\n #for e in possible_moves:\r\n # s.board.push(e)\r\n # isort.append((netvaluator(s), e))\r\n # s.board.pop()\r\n #move = sorted(isort, key=lambda x: x[0], reverse=s.board.turn)\r\n #print(move)\r\n #return move[0][1] \r\n #--------------------------------------------------------- \r\n #random rollouts\r\n return possible_moves[np.random.randint(len(possible_moves))]\r\n\r\n def child_most_simulation(self):\r\n children_visits = [c.n() for c in self.children]\r\n return self.children[np.argmax(children_visits)]\r\n\r\n def untried_actions(self):\r\n if self._untried_actions is None:\r\n isort = []\r\n for e in self.state.board.legal_moves:\r\n isort.append(e)\r\n self._untried_actions = isort\r\n return self._untried_actions\r\n \r\n def q(self):\r\n turn = self.state.board.turn\r\n #chess.BLACK because perspective for wins is switched from the parent layer\r\n if turn == chess.BLACK: \r\n wins = self._results[1]\r\n loses = self._results[-1]\r\n return wins - loses\r\n else:\r\n wins = self._results[-1]\r\n loses = self._results[1]\r\n return wins - loses\r\n \r\n def n(self):\r\n return self._number_of_visits\r\n \r\n def expand(self):\r\n #print(self._untried_actions)\r\n action = self._untried_actions.pop()\r\n #print(action)\r\n self.state.board.push(action)\r\n #print(self.state.board)\r\n pushed_move_state = copy.deepcopy(self.state)\r\n #print(\"xxxxxxxxxxxxxxxx\")\r\n #print(pushed_move_state.board)\r\n child_node = MCTSNode(pushed_move_state, parent=self, previous_move=action)\r\n #print(\"child node\")\r\n #print(child_node.state.board)\r\n self.children.append(child_node)\r\n self.state.board.pop()\r\n #print(self.state.board)\r\n return child_node\r\n\r\n def is_terminal_node(self):\r\n return self.state.board.is_game_over()\r\n\r\n def rollout(self):\r\n #current_rollout_state = State(self.state.board)\r\n #print(current_rollout_state.board)\r\n #print(\"child rollout\")\r\n #print(self.state.board.is_game_over())\r\n #print(self.state.board)\r\n i = 0\r\n while not (self.state.board.is_game_over()):\r\n isort = []\r\n for e in self.state.board.legal_moves:\r\n isort.append(e)\r\n possible_moves = isort\r\n action = self.rollout_policy(possible_moves, self.state)\r\n self.state.board.push(action)\r\n i = i + 1\r\n #print(self.state.board)\r\n b = self.state.board\r\n # game over values\r\n if b.is_game_over():\r\n if b.result() == \"1-0\":\r\n for x in range(0, i):\r\n self.state.board.pop()\r\n return 1\r\n elif b.result() == \"0-1\":\r\n for x in range(0, i):\r\n self.state.board.pop()\r\n return -1\r\n 
else:\r\n for x in range(0, i):\r\n self.state.board.pop()\r\n return 0\r\n \r\n def backpropagate(self, result):\r\n self._number_of_visits += 1.\r\n self._results[result] += 1.\r\n if self.parent:\r\n self.parent.backpropagate(result)"
]
| [
[
"numpy.argmax"
]
]
|
amsclark/docassemble | [
"ae5c194831faabb52681a6c827ec30c106273eb7"
]
| [
"docassemble_webapp/docassemble/webapp/machinelearning.py"
]
| [
"from six import string_types, text_type, PY2\nfrom docassemble.webapp.core.models import MachineLearning\nfrom docassemble.base.core import DAObject, DAList, DADict\nfrom docassemble.webapp.db_object import db\nfrom sqlalchemy import or_, and_\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype\nimport numpy as np\nimport re\nimport random\nimport codecs\nfrom io import open\nif PY2:\n import cPickle as pickle\nelse:\n import pickle\nimport datetime\nimport os\nimport yaml\nimport json\nimport sys\nfrom pattern.vector import count, KNN, SVM, stem, PORTER, words, Document\nfrom docassemble.base.logger import logmessage\nfrom docassemble.webapp.backend import get_info_from_file_reference\nfrom docassemble.webapp.fixpickle import fix_pickle_obj\nimport docassemble.base.functions\n\nlearners = dict()\nsvms = dict()\nlastmodtime = dict()\nreset_counter = dict()\n\nclass MachineLearningEntry(DAObject):\n \"\"\"An entry in the machine learning system\"\"\"\n def classify(self, dependent=None):\n \"\"\"Sets the dependent variable of the machine learning entry\"\"\"\n if dependent is not None:\n self.dependent = dependent\n self.ml.set_dependent_by_id(self.id, self.dependent)\n return self\n def save(self):\n \"\"\"Saves the entry to the data set. The independent variable must be\n defined in order to save.\"\"\"\n args = dict(independent=self.independent)\n if hasattr(self, 'dependent'):\n args['dependent'] = self.dependent\n if hasattr(self, 'key'):\n args['key'] = self.key\n if hasattr(self, 'id'):\n args['id'] = self.id\n if hasattr(self, 'info') and self.info is not None:\n args['info'] = self.info\n self.ml._save_entry(**args)\n return self\n def predict(self, probabilities=False):\n \"\"\"Returns predictions for this entry's independent variable.\"\"\"\n return self.ml.predict(self.independent, probabilities=probabilities)\n\nclass MachineLearner(object):\n \"\"\"Base class for machine learning objects\"\"\"\n def __init__(self, *pargs, **kwargs):\n if len(pargs) > 0:\n if ':' in pargs[0]:\n raise Exception(\"MachineLearner: you cannot use a colon in a machine learning name\")\n question = docassemble.base.functions.get_current_question()\n if question is not None:\n self.group_id = question.interview.get_ml_store() + ':' + pargs[0]\n else:\n self.group_id = pargs[0]\n if len(pargs) > 1:\n self.initial_file = pargs[1]\n if 'group_id' in kwargs:\n self.group_id = kwargs['group_id']\n if 'initial_file' in kwargs:\n self.initial_file = kwargs['initial_file']\n if kwargs.get('use_initial_file', False):\n question = docassemble.base.functions.get_current_question()\n if question is not None:\n self.initial_file = question.interview.get_ml_store()\n self.reset_counter = 0\n def reset(self):\n self.reset_counter += 1\n def _initialize(self, reset=False):\n if hasattr(self, 'initial_file'):\n self.start_from_file(self.initial_file)\n if hasattr(self, 'group_id') and (self.group_id not in lastmodtime or reset):\n lastmodtime[self.group_id] = datetime.datetime(year=1970, month=1, day=1)\n reset_counter = self.reset_counter\n def export_training_set(self, output_format='json', key=None):\n self._initialize()\n output = list()\n for entry in self.classified_entries(key=key):\n the_entry = dict(independent=entry.independent, dependent=entry.dependent)\n if entry.info is not None:\n the_entry['info'] = entry.info\n output.append(the_entry)\n if output_format == 'json':\n return json.dumps(output, 
sort_keys=True, indent=4)\n elif output_format == 'yaml':\n return yaml.safe_dump(output, default_flow_style=False)\n else:\n raise Exception(\"Unknown output format \" + str(output_format))\n def dependent_in_use(self, key=None):\n in_use = set()\n if key is None:\n query = db.session.query(MachineLearning.dependent).filter(MachineLearning.group_id == self.group_id).group_by(MachineLearning.dependent)\n else:\n query = db.session.query(MachineLearning.dependent).filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.key == key)).group_by(MachineLearning.dependent)\n for record in query:\n if record.dependent is not None:\n in_use.add(fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))\n return sorted(in_use)\n def is_empty(self):\n existing_entry = MachineLearning.query.filter_by(group_id=self.group_id).first()\n if existing_entry is None:\n return True\n return False\n def start_from_file(self, fileref):\n #logmessage(\"Starting from file \" + str(fileref))\n existing_entry = MachineLearning.query.filter_by(group_id=self.group_id).first()\n if existing_entry is not None:\n return\n file_info = get_info_from_file_reference(fileref, folder='sources')\n if 'fullpath' not in file_info or file_info['fullpath'] is None or not os.path.exists(file_info['fullpath']):\n return\n #raise Exception(\"File reference \" + str(fileref) + \" is invalid\")\n with open(file_info['fullpath'], 'rU', encoding='utf-8') as fp:\n content = fp.read()\n if 'mimetype' in file_info and file_info['mimetype'] == 'application/json':\n aref = json.loads(content)\n elif 'extension' in file_info and file_info['extension'].lower() in ['yaml', 'yml']:\n aref = yaml.load(content)\n if type(aref) is dict and hasattr(self, 'group_id'):\n the_group_id = re.sub(r'.*:', '', self.group_id)\n if the_group_id in aref:\n aref = aref[the_group_id]\n if type(aref) is list:\n nowtime = datetime.datetime.utcnow()\n for entry in aref:\n if 'independent' in entry:\n new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), dependent=codecs.encode(pickle.dumps(entry.get('dependent', None)), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=True, key=entry.get('key', None), info=codecs.encode(pickle.dumps(entry['info']), 'base64').decode() if entry.get('info', None) is not None else None)\n db.session.add(new_entry)\n db.session.commit()\n def add_to_training_set(self, independent, dependent, key=None, info=None):\n self._initialize()\n nowtime = datetime.datetime.utcnow()\n new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(independent), 'base64').decode(), dependent=codecs.encode(pickle.dumps(dependent), 'base64').decode(), info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None, create_time=nowtime, modtime=nowtime, active=True, key=key)\n db.session.add(new_entry)\n db.session.commit()\n return new_entry.id\n def save_for_classification(self, indep, key=None, info=None):\n self._initialize()\n if key is None:\n existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, dependent=None, independent=codecs.encode(pickle.dumps(indep), 'base64').decode()).first()\n else:\n existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, key=key, independent=codecs.encode(pickle.dumps(indep), 'base64').decode()).first()\n if existing_entry is not None:\n logmessage(\"entry is already there\")\n return 
existing_entry.id\n new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(indep), 'base64').decode(), create_time=datetime.datetime.utcnow(), active=False, key=key, info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None)\n db.session.add(new_entry)\n db.session.commit()\n return new_entry.id\n def retrieve_by_id(self, the_id):\n self._initialize()\n existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()\n if existing_entry is None:\n raise Exception(\"There was no entry in the database for id \" + str(the_id) + \" with group id \" + str(self.group_id))\n if existing_entry.dependent:\n dependent = fix_pickle_obj(codecs.decode(bytearray(existing_entry.dependent, encoding='utf-8'), 'base64'))\n return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), dependent=dependent, create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)\n else:\n return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)\n def one_unclassified_entry(self, key=None):\n self._initialize()\n if key is None:\n entry = MachineLearning.query.filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id).first()\n else:\n entry = MachineLearning.query.filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id).first()\n if entry is None:\n return None\n return MachineLearningEntry(ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)._set_instance_name_for_method()\n def new_entry(self, **kwargs):\n return MachineLearningEntry(ml=self, **kwargs)._set_instance_name_for_method()\n def unclassified_entries(self, key=None):\n self._initialize()\n results = DAList()._set_instance_name_for_method()\n results.gathered = True\n if key is None:\n query = MachineLearning.query.filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id).all()\n else:\n query = MachineLearning.query.filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id).all()\n for entry in query:\n results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)\n return results\n def classified_entries(self, key=None):\n self._initialize()\n results = DAList()\n results.gathered = True\n results.set_random_instance_name()\n if key is None:\n query = MachineLearning.query.filter_by(group_id=self.group_id, active=True).order_by(MachineLearning.id).all()\n else:\n query = 
MachineLearning.query.filter_by(group_id=self.group_id, active=True, key=key).order_by(MachineLearning.id).all()\n for entry in query:\n results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), dependent=fix_pickle_obj(codecs.decode(bytearray(entry.dependent, encoding='utf-8'), 'base64')), info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None, create_time=entry.create_time, key=entry.key)\n return results\n def _save_entry(self, **kwargs):\n self._initialize()\n the_id = kwargs.get('id', None)\n need_to_reset = False\n if the_id is None:\n the_entry = MachineLearning(group_id=self.group_id)\n existing = False\n else:\n the_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()\n existing = True\n if the_entry is None:\n raise Exception(\"There was no entry in the database for id \" + str(the_id) + \" with group id \" + str(self.group_id))\n if 'dependent' in kwargs:\n if existing and the_entry.dependent is not None and the_entry.dependent != kwargs['dependent']:\n need_to_reset = True\n the_entry.dependent = codecs.encode(pickle.dumps(kwargs['dependent']), 'base64').decode()\n the_entry.active = True\n if 'independent' in kwargs:\n if existing and the_entry.independent is not None and the_entry.independent != kwargs['independent']:\n need_to_reset = True\n the_entry.independent = codecs.encode(pickle.dumps(kwargs['independent']), 'base64').decode()\n if 'key' in kwargs:\n the_entry.key = kwargs['key']\n if 'info' in kwargs:\n the_entry.info = codecs.encode(pickle.dumps(kwargs['info']), 'base64').decode()\n the_entry.modtime = datetime.datetime.utcnow()\n if not existing:\n db.session.add(the_entry)\n db.session.commit()\n if need_to_reset:\n self.reset()\n def set_dependent_by_id(self, the_id, the_dependent):\n self._initialize()\n existing_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()\n if existing_entry is None:\n raise Exception(\"There was no entry in the database for id \" + str(the_id) + \" with group id \" + str(self.group_id))\n existing_entry.dependent = codecs.encode(pickle.dumps(the_dependent), 'base64').decode()\n existing_entry.modtime = datetime.datetime.utcnow()\n existing_entry.active = True\n db.session.commit()\n def delete_by_id(self, the_id):\n self._initialize()\n MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).delete()\n db.session.commit()\n self.reset()\n def delete_by_key(self, key):\n self._initialize()\n MachineLearning.query.filter_by(group_id=self.group_id, key=key).delete()\n db.session.commit()\n self.reset()\n def save(self):\n db.session.commit()\n def _train_from_db(self):\n #logmessage(\"Doing train_from_db\")\n self._initialize()\n nowtime = datetime.datetime.utcnow()\n success = False\n for record in MachineLearning.query.filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id])).all():\n #logmessage(\"Training...\")\n self._train(fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64')), fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))\n success = True\n lastmodtime[self.group_id] = nowtime\n return success\n def delete_training_set(self):\n self._initialize()\n MachineLearning.query.filter_by(group_id=self.group_id).all().delete()\n 
db.session.commit()\n def _train(self, indep, depend):\n pass\n def _predict(self, indep):\n pass\n\nclass SimpleTextMachineLearner(MachineLearner):\n \"\"\"A class used to interact with the machine learning system, using the K Nearest Neighbors method\"\"\"\n def _learner(self):\n return KNN()\n def _initialize(self):\n \"\"\"Initializes a fresh machine learner.\"\"\"\n if self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]:\n need_to_reset = True\n if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):\n learners[self.group_id] = self._learner()\n return super(SimpleTextMachineLearner, self)._initialize(reset=need_to_reset)\n def _train(self, indep, depend):\n \"\"\"Trains the machine learner given an independent variable and a corresponding dependent variable.\"\"\"\n if indep is None:\n return\n the_text = re.sub(r'[\\n\\r]+', r' ', indep).lower()\n learners[self.group_id].train(Document(the_text.lower(), stemmer=PORTER), depend)\n def predict(self, indep, probabilities=False):\n \"\"\"Returns a list of predicted dependent variables for a given independent variable.\"\"\"\n indep = re.sub(r'[\\n\\r]+', r' ', indep).lower()\n if not self._train_from_db():\n return list()\n probs = dict()\n for key, value in learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER), discrete=False).items():\n probs[key] = value\n if not len(probs):\n single_result = learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER))\n if single_result is not None:\n probs[single_result] = 1.0\n if probabilities:\n return [(x, probs[x]) for x in sorted(probs.keys(), key=probs.get, reverse=True)]\n else:\n return sorted(probs.keys(), key=probs.get, reverse=True)\n def confusion_matrix(self, key=None, output_format=None, split=False):\n \"\"\"Returns a confusion matrix for the model based on splitting the data set randomly into two pieces, training on one and testing on the other\"\"\"\n if split:\n list_of_dependent = self.dependent_in_use(key=key)\n else:\n list_of_dependent = [None]\n output = ''\n matrices = dict()\n for current_dep in list_of_dependent:\n testing_set = list()\n model = self._learner()\n for record in self.classified_entries(key=key):\n if split:\n dep_result = str(record.dependent == current_dep)\n else:\n dep_result = record.dependent\n if random.random() < 0.5:\n model.train(Document(record.independent.lower(), stemmer=PORTER), dep_result)\n else:\n testing_set.append((Document(record.independent.lower(), stemmer=PORTER), dep_result))\n matrix = model.confusion_matrix(documents=testing_set)\n matrices[current_dep] = matrix\n if output_format == 'html':\n if split:\n output += '<h4>' + current_dep + \"</h4>\"\n vals = matrix.keys()\n output += '<table class=\"table table-bordered\"><thead><tr><td></td><td></td><td style=\"text-align: center\" colspan=\"' + str(len(vals)) + '\">Actual</td></tr><tr><th></th><th></th>'\n first = True\n for val in vals:\n output += '<th>' + val + '</th>'\n output += '</tr></thead><tbody>'\n for val_a in vals:\n output += '<tr>' \n if first:\n output += '<td style=\"text-align: right; vertical-align: middle;\" rowspan=\"' + str(len(vals)) + '\">Predicted</td>'\n first = False\n output += '<th>' + val_a + '</th>'\n for val_b in vals:\n output += '<td>' + str(matrix[val_b].get(val_a, 0)) + '</td>'\n output += '</tr>'\n output += '</tbody></table>'\n #output += \"\\n\\n`\" + str(matrix) + \"`\"\n # output += '<ul>'\n # for document, actual in testing_set:\n # predicted = 
model.classify(document)\n # output += '<li>Predicted: ' + predicted + '; Actual: ' + actual + '</li>'\n # output += '</ul>'\n if output_format == 'html':\n return output\n if split:\n ret_val = matrices\n else:\n ret_val = matrices[None]\n if output_format == 'json':\n return json.dumps(ret_val, sort_keys=True, indent=4)\n if output_format == 'yaml':\n return yaml.safe_dump(ret_val, default_flow_style=False)\n if output_format is None:\n return ret_val\n return ret_val\n def reset(self):\n \"\"\"Clears the cache of the machine learner\"\"\"\n return super(SimpleTextMachineLearner, self).reset()\n def delete_training_set(self):\n \"\"\"Deletes all of the training data in the database\"\"\"\n return super(SimpleTextMachineLearner, self).delete_training_set()\n def delete_by_key(self, key):\n \"\"\"Deletes all of the training data in the database that was added with a given key\"\"\"\n return super(SimpleTextMachineLearner, self).delete_training_set(key)\n def delete_by_id(self, the_id):\n \"\"\"Deletes the entry in the training data with the given ID\"\"\"\n return super(SimpleTextMachineLearner, self).delete_by_id(the_id)\n def set_dependent_by_id(self, the_id, depend):\n \"\"\"Sets the dependent variable for the entry in the training data with the given ID\"\"\"\n return super(SimpleTextMachineLearner, self).set_dependent_by_id(the_id, depend)\n def classified_entries(self, key=None):\n \"\"\"Returns a list of entries in the data that have been classified.\"\"\"\n return super(SimpleTextMachineLearner, self).classified_entries(key=key)\n def unclassified_entries(self, key=None):\n \"\"\"Returns a list of entries in the data that have not yet been classified.\"\"\"\n return super(SimpleTextMachineLearner, self).unclassified_entries(key=key)\n def one_unclassified_entry(self, key=None):\n \"\"\"Returns the first entry in the data that has not yet been classified, or None if all entries have been classified.\"\"\"\n return super(SimpleTextMachineLearner, self).one_unclassified_entry(key=key)\n def retrieve_by_id(self, the_id):\n \"\"\"Returns the entry in the data that has the given ID.\"\"\"\n return super(SimpleTextMachineLearner, self).retrieve_by_id(the_id)\n def save_for_classification(self, indep, key=None, info=None):\n \"\"\"Creates a not-yet-classified entry in the data for the given independent variable and returns the ID of the entry.\"\"\"\n return super(SimpleTextMachineLearner, self).save_for_classification(indep, key=key, info=info)\n def add_to_training_set(self, indep, depend, key=None, info=None):\n \"\"\"Creates an entry in the data for the given independent and dependent variable and returns the ID of the entry.\"\"\"\n return super(SimpleTextMachineLearner, self).add_to_training_set(indep, depend, key=key, info=info)\n def is_empty(self):\n \"\"\"Returns True if no data have been defined, otherwise returns False.\"\"\"\n return super(SimpleTextMachineLearner, self).is_empty()\n def dependent_in_use(self, key=None):\n \"\"\"Returns a sorted list of unique dependent variables in the data.\"\"\"\n return super(SimpleTextMachineLearner, self).dependent_in_use(key=key)\n def export_training_set(self, output_format='json'):\n \"\"\"Returns the classified entries in the data as JSON or YAML.\"\"\"\n return super(SimpleTextMachineLearner, self).export_training_set(output_format=output_format)\n def new_entry(self, **kwargs):\n \"\"\"Creates a new entry in the data.\"\"\"\n return super(SimpleTextMachineLearner, self).new_entry(**kwargs)\n \nclass 
SVMMachineLearner(SimpleTextMachineLearner):\n \"\"\"Machine Learning object using the Symmetric Vector Machine method\"\"\"\n def _learner(self):\n return SVM(extension='libsvm')\n\nclass RandomForestMachineLearner(MachineLearner):\n def _learner(self):\n return RandomForestClassifier(n_jobs=2)\n def feature_importances(self):\n \"\"\"Returns the importances of each of the features\"\"\"\n if not self._train_from_db():\n return list()\n return learners[self.group_id]['learner'].feature_importances_\n def _initialize(self):\n \"\"\"Initializes a fresh machine learner.\"\"\"\n if self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]:\n need_to_reset = True\n if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):\n learners[self.group_id] = dict(learner=self._learner(), dep_type=None, indep_type=dict(), indep_categories=dict(), dep_categories=None)\n return super(RandomForestMachineLearner, self)._initialize(reset=need_to_reset)\n def _train_from_db(self):\n #logmessage(\"Doing train_from_db\")\n self._initialize()\n nowtime = datetime.datetime.utcnow()\n success = False\n data = list()\n depend_data = list()\n for record in MachineLearning.query.filter(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id])).all():\n indep_var = fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64'))\n depend_var = fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64'))\n if type(depend_var) is str:\n depend_var = text_type(depend_var)\n if learners[self.group_id]['dep_type'] is not None:\n if type(depend_var) is not learners[self.group_id]['dep_type']:\n if type(depend_var) is int and learners[self.group_id]['dep_type'] is float:\n depend_var = float(depend_var)\n elif type(depend_var) is float and learners[self.group_id]['dep_type'] is int:\n learners[self.group_id]['dep_type'] = float\n else:\n raise Exception(\"RandomForestMachineLearner: dependent variable type was not consistent\")\n else:\n if not isinstance(depend_var, (string_types, int, bool, float)):\n raise Exception(\"RandomForestMachineLearner: dependent variable type for key \" + repr(key) + \" was not a standard variable type\")\n learners[self.group_id]['dep_type'] = type(depend_var)\n depend_data.append(depend_var)\n if isinstance(indep_var, DADict):\n indep_var = indep_var.elements\n if type(indep_var) is not dict:\n raise Exception(\"RandomForestMachineLearner: independent variable was not a dictionary\")\n for key, val in indep_var.items():\n if type(val) is str:\n val = text_type(val)\n if key in learners[self.group_id]['indep_type']:\n if type(val) is not learners[self.group_id]['indep_type'][key]:\n if type(val) is int and learners[self.group_id]['indep_type'][key] is float:\n val = float(val)\n elif type(val) is float and learners[self.group_id]['indep_type'][key] is int:\n learners[self.group_id]['indep_type'][key] = float\n else:\n raise Exception(\"RandomForestMachineLearner: independent variable type for key \" + repr(key) + \" was not consistent\")\n else:\n if not isinstance(val, (string_types, int, bool, float)):\n raise Exception(\"RandomForestMachineLearner: independent variable type for key \" + repr(key) + \" was not a standard variable type\")\n learners[self.group_id]['indep_type'][key] = type(val)\n data.append(indep_var)\n success = True\n if success:\n df = pd.DataFrame(data)\n for key, val in 
learners[self.group_id]['indep_type'].items():\n if val is text_type:\n df[key] = pd.Series(df[key], dtype=\"category\")\n learners[self.group_id]['indep_categories'][key] = df[key].cat.categories\n df = pd.get_dummies(df, dummy_na=True)\n if learners[self.group_id]['dep_type'] is text_type:\n y = pd.Series(depend_data, dtype=\"category\")\n learners[self.group_id]['dep_categories'] = y.cat.categories\n else:\n y = pd.Series(depend_data)\n learners[self.group_id]['learner'].fit(df, list(y))\n lastmodtime[self.group_id] = nowtime\n return success\n def predict(self, indep, probabilities=False):\n \"\"\"Returns a list of predicted dependent variables for a given independent variable.\"\"\"\n if not self._train_from_db():\n return list()\n if isinstance(indep, DADict):\n indep = indep.elements\n if type(indep) is not dict:\n raise Exception(\"RandomForestMachineLearner: independent variable was not a dictionary\")\n indep = process_independent_data(indep)\n indep_to_use = dict()\n for key, val in indep.items():\n if key in learners[self.group_id]['indep_type']:\n if type(val) is str:\n val = text_type(val)\n if type(val) is not learners[self.group_id]['indep_type'][key]:\n if type(val) is int and learners[self.group_id]['indep_type'][key] is float:\n val = float(val)\n elif type(val) is float and learners[self.group_id]['indep_type'][key] is int:\n learners[self.group_id]['indep_type'][key] = float\n else:\n raise Exception(\"RandomForestMachineLearner: the independent variable type for key \" + repr(key) + \" was not consistent. Stored was \" + str(learners[self.group_id]['indep_type'][key]) + \" and type was \" + str(type(val)))\n else:\n raise Exception(\"RandomForestMachineLearner: independent variable key \" + repr(key) + \" was not recognized\")\n if isinstance(val, string_types):\n if val not in learners[self.group_id]['indep_categories'][key]:\n val = np.nan\n indep_to_use[key] = val\n df = pd.DataFrame([indep_to_use])\n for key, val in indep_to_use.items():\n if learners[self.group_id]['indep_type'][key] is text_type:\n #df[key] = pd.Series(df[key]).astype('category', categories=learners[self.group_id]['indep_categories'][key])\n df[key] = pd.Series(df[key]).astype(CategoricalDtype(learners[self.group_id]['indep_categories'][key]))\n df = pd.get_dummies(df, dummy_na=True)\n pred = learners[self.group_id]['learner'].predict_proba(df)\n indexno = 0\n result = list()\n for x in pred[0]:\n result.append((learners[self.group_id]['dep_categories'][indexno], x))\n indexno += 1\n result = sorted(result, key=lambda x: x[1], reverse=True)\n if probabilities:\n return result\n return [x[0] for x in result]\n def reset(self):\n \"\"\"Clears the cache of the machine learner\"\"\"\n return super(RandomForestMachineLearner, self).reset()\n def delete_training_set(self):\n \"\"\"Deletes all of the training data in the database\"\"\"\n return super(RandomForestMachineLearner, self).delete_training_set()\n def delete_by_key(self, key):\n \"\"\"Deletes all of the training data in the database that was added with a given key\"\"\"\n return super(RandomForestMachineLearner, self).delete_training_set(key)\n def delete_by_id(self, the_id):\n \"\"\"Deletes the entry in the training data with the given ID\"\"\"\n return super(RandomForestMachineLearner, self).delete_by_id(the_id)\n def set_dependent_by_id(self, the_id, depend):\n \"\"\"Sets the dependent variable for the entry in the training data with the given ID\"\"\"\n return super(RandomForestMachineLearner, self).set_dependent_by_id(the_id, depend)\n 
def classified_entries(self, key=None):\n        \"\"\"Returns a list of entries in the data that have been classified.\"\"\"\n        return super(RandomForestMachineLearner, self).classified_entries(key=key)\n    def unclassified_entries(self, key=None):\n        \"\"\"Returns a list of entries in the data that have not yet been classified.\"\"\"\n        return super(RandomForestMachineLearner, self).unclassified_entries(key=key)\n    def one_unclassified_entry(self, key=None):\n        \"\"\"Returns the first entry in the data that has not yet been classified, or None if all entries have been classified.\"\"\"\n        return super(RandomForestMachineLearner, self).one_unclassified_entry(key=key)\n    def retrieve_by_id(self, the_id):\n        \"\"\"Returns the entry in the data that has the given ID.\"\"\"\n        return super(RandomForestMachineLearner, self).retrieve_by_id(the_id)\n    def save_for_classification(self, indep, key=None, info=None):\n        \"\"\"Creates a not-yet-classified entry in the data for the given independent variable and returns the ID of the entry.\"\"\"\n        indep = process_independent_data(indep)\n        return super(RandomForestMachineLearner, self).save_for_classification(indep, key=key, info=info)\n    def add_to_training_set(self, indep, depend, key=None, info=None):\n        \"\"\"Creates an entry in the data for the given independent and dependent variable and returns the ID of the entry.\"\"\"\n        indep = process_independent_data(indep)\n        return super(RandomForestMachineLearner, self).add_to_training_set(indep, depend, key=key, info=info)\n    def is_empty(self):\n        \"\"\"Returns True if no data have been defined, otherwise returns False.\"\"\"\n        return super(RandomForestMachineLearner, self).is_empty()\n    def dependent_in_use(self, key=None):\n        \"\"\"Returns a sorted list of unique dependent variables in the data.\"\"\"\n        return super(RandomForestMachineLearner, self).dependent_in_use(key=key)\n    def export_training_set(self, output_format='json'):\n        \"\"\"Returns the classified entries in the data as JSON or YAML.\"\"\"\n        return super(RandomForestMachineLearner, self).export_training_set(output_format=output_format)\n    def new_entry(self, **kwargs):\n        \"\"\"Creates a new entry in the data.\"\"\"\n        return super(RandomForestMachineLearner, self).new_entry(**kwargs)\n    \n    \n# def export_training_sets(prefix, output_format='json'):\n#     output = dict()\n#     re_prefix = re.compile(r'^' + prefix + ':')\n#     for record in db.session.query(MachineLearning).filter(MachineLearning.group_id.like(prefix + '%')).group_by(MachineLearning.group_id):\n#         the_group_id = re_prefix.sub('', record.group_id)\n#         output[the_group_id].append(dict(independent=record.independent, dependent=record.dependent))\n#     if output_format == 'json':\n#         return json.dumps(output, sort_keys=True, indent=4)\n#     elif output_format == 'yaml':\n#         return yaml.safe_dump(output, default_flow_style=False)\n#     else:\n#         raise Exception(\"Unknown output format \" + str(output_format))\n\ndef process_independent_data(data):\n    result = dict()\n    for key, val in data.items():\n        if isinstance(val, DADict) or type(val) is dict:\n            for subkey, subval in val.items():\n                if not isinstance(subval, (string_types, bool, int, float)):\n                    raise Exception('RandomForestMachineLearner: invalid data type ' + subval.__class__.__name__ + ' in data')\n                result[key + '_' + subkey] = subval\n        else:\n            if not isinstance(val, (string_types, bool, int, float)):\n                raise Exception('RandomForestMachineLearner: invalid data type ' + val.__class__.__name__ + ' in data')\n            result[key] = val\n    return result\n"
]
| [
[
"sklearn.ensemble.RandomForestClassifier",
"pandas.DataFrame",
"pandas.api.types.CategoricalDtype",
"pandas.Series",
"pandas.get_dummies"
]
]
|
ArloZ/Tacotron-2 | [
"08bf8e2d60925b1cd47ca69e2bb1a9447fd13b62"
]
| [
"tacotron/models/modules.py"
]
| [
"import tensorflow as tf \n\n\ndef conv1d(inputs, kernel_size, channels, activation, is_training, drop_rate, scope):\n\twith tf.variable_scope(scope):\n\t\tconv1d_output = tf.layers.conv1d(\n\t\t\tinputs,\n\t\t\tfilters=channels,\n\t\t\tkernel_size=kernel_size,\n\t\t\tactivation=None,\n\t\t\tpadding='same')\n\t\tbatched = tf.layers.batch_normalization(conv1d_output, training=is_training)\n\t\tactivated = activation(batched)\n\t\treturn tf.layers.dropout(activated, rate=drop_rate, training=is_training,\n\t\t\t\t\t\t\t\tname='dropout_{}'.format(scope))\n\n\nclass ZoneoutLSTMCell(tf.nn.rnn_cell.RNNCell):\n\t'''Wrapper for tf LSTM to create Zoneout LSTM Cell\n\n\tinspired by:\n\thttps://github.com/teganmaharaj/zoneout/blob/master/zoneout_tensorflow.py\n\n\tPublished by one of 'https://arxiv.org/pdf/1606.01305.pdf' paper writers.\n\n\tMany thanks to @Ondal90 for pointing this out. You sir are a hero!\n\t'''\n\tdef __init__(self, num_units, is_training, zoneout_factor_cell=0., zoneout_factor_output=0., state_is_tuple=True, name=None):\n\t\t'''Initializer with possibility to set different zoneout values for cell/hidden states.\n\t\t'''\n\t\tzm = min(zoneout_factor_output, zoneout_factor_cell)\n\t\tzs = max(zoneout_factor_output, zoneout_factor_cell)\n\n\t\tif zm < 0. or zs > 1.:\n\t\t\traise ValueError('One/both provided Zoneout factors are not in [0, 1]')\n\n\t\tself._cell = tf.nn.rnn_cell.LSTMCell(num_units, state_is_tuple=state_is_tuple, name=name)\n\t\tself._zoneout_cell = zoneout_factor_cell\n\t\tself._zoneout_outputs = zoneout_factor_output\n\t\tself.is_training = is_training\n\t\tself.state_is_tuple = state_is_tuple\n\n\t@property\n\tdef state_size(self):\n\t\treturn self._cell.state_size\n\n\t@property\n\tdef output_size(self):\n\t\treturn self._cell.output_size\n\n\tdef __call__(self, inputs, state, scope=None):\n\t\t'''Runs vanilla LSTM Cell and applies zoneout.\n\t\t'''\n\t\t#Apply vanilla LSTM\n\t\toutput, new_state = self._cell(inputs, state, scope)\n\n\t\tif self.state_is_tuple:\n\t\t\t(prev_c, prev_h) = state\n\t\t\t(new_c, new_h) = new_state\n\t\telse:\n\t\t\tnum_proj = self._cell._num_units if self._cell._num_proj is None else self._cell._num_proj\n\t\t\tprev_c = tf.slice(state, [0, 0], [-1, self._cell._num_units])\n\t\t\tprev_h = tf.slice(state, [0, self._cell._num_units], [-1, num_proj])\n\t\t\tnew_c = tf.slice(new_state, [0, 0], [-1, self._cell._num_units])\n\t\t\tnew_h = tf.slice(new_state, [0, self._cell._num_units], [-1, num_proj])\n\n\t\t#Apply zoneout\n\t\tif self.is_training:\n\t\t\t#nn.dropout takes keep_prob (probability to keep activations) not drop_prob (probability to mask activations)!\n\t\t\tc = (1 - self._zoneout_cell) * tf.nn.dropout(new_c - prev_c, (1 - self._zoneout_cell)) + prev_c\n\t\t\th = (1 - self._zoneout_outputs) * tf.nn.dropout(new_h - prev_h, (1 - self._zoneout_outputs)) + prev_h\n\n\t\telse:\n\t\t\tc = (1 - self._zoneout_cell) * new_c + self._zoneout_cell * prev_c\n\t\t\th = (1 - self._zoneout_outputs) * new_h + self._zoneout_outputs * prev_h\n\n\t\tnew_state = tf.nn.rnn_cell.LSTMStateTuple(c, h) if self.state_is_tuple else tf.concat(1, [c, h])\n\n\t\treturn output, new_state\n\n\nclass EncoderConvolutions:\n\t\"\"\"Encoder convolutional layers used to find local dependencies in inputs characters.\n\t\"\"\"\n\tdef __init__(self, is_training, hparams, activation=tf.nn.relu, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is training or in inference to control dropout\n\t\t\tkernel_size: tuple or integer, The 
size of convolution kernels\n\t\t\tchannels: integer, number of convolutional kernels\n\t\t\tactivation: callable, postnet activation function for each convolutional layer\n\t\t\tscope: Postnet scope.\n\t\t\"\"\"\n\t\tsuper(EncoderConvolutions, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.kernel_size = hparams.enc_conv_kernel_size\n\t\tself.channels = hparams.enc_conv_channels\n\t\tself.activation = activation\n\t\tself.scope = 'enc_conv_layers' if scope is None else scope\n\t\tself.drop_rate = hparams.tacotron_dropout_rate\n\t\tself.enc_conv_num_layers = hparams.enc_conv_num_layers\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\tx = inputs\n\t\t\tfor i in range(self.enc_conv_num_layers):\n\t\t\t\tx = conv1d(x, self.kernel_size, self.channels, self.activation,\n\t\t\t\t\tself.is_training, self.drop_rate, 'conv_layer_{}_'.format(i + 1)+self.scope)\n\t\treturn x\n\n\nclass EncoderRNN:\n\t\"\"\"Encoder bidirectional one layer LSTM\n\t\"\"\"\n\tdef __init__(self, is_training, size=256, zoneout=0.1, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is training or in inference to control zoneout\n\t\t\tsize: integer, the number of LSTM units for each direction\n\t\t\tzoneout: the zoneout factor\n\t\t\tscope: EncoderRNN scope.\n\t\t\"\"\"\n\t\tsuper(EncoderRNN, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.size = size\n\t\tself.zoneout = zoneout\n\t\tself.scope = 'encoder_LSTM' if scope is None else scope\n\n\t\t#Create forward LSTM Cell\n\t\tself._fw_cell = ZoneoutLSTMCell(size, is_training,\n\t\t\tzoneout_factor_cell=zoneout,\n\t\t\tzoneout_factor_output=zoneout,\n\t\t\tname='encoder_fw_LSTM')\n\n\t\t#Create backward LSTM Cell\n\t\tself._bw_cell = ZoneoutLSTMCell(size, is_training,\n\t\t\tzoneout_factor_cell=zoneout,\n\t\t\tzoneout_factor_output=zoneout,\n\t\t\tname='encoder_bw_LSTM')\n\n\tdef __call__(self, inputs, input_lengths):\n\t\twith tf.variable_scope(self.scope):\n\t\t\toutputs, (fw_state, bw_state) = tf.nn.bidirectional_dynamic_rnn(\n\t\t\t\tself._fw_cell,\n\t\t\t\tself._bw_cell,\n\t\t\t\tinputs,\n\t\t\t\tsequence_length=input_lengths,\n\t\t\t\tdtype=tf.float32,\n\t\t\t\tswap_memory=True)\n\n\t\t\treturn tf.concat(outputs, axis=2) # Concat and return forward + backward outputs\n\n\nclass Prenet:\n\t\"\"\"Two fully connected layers used as an information bottleneck for the attention.\n\t\"\"\"\n\tdef __init__(self, is_training, layers_sizes=[256, 256], drop_rate=0.5, activation=tf.nn.relu, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tlayers_sizes: list of integers, the length of the list represents the number of pre-net\n\t\t\t\tlayers and the list values represent the layers number of units\n\t\t\tactivation: callable, activation functions of the prenet layers.\n\t\t\tscope: Prenet scope.\n\t\t\"\"\"\n\t\tsuper(Prenet, self).__init__()\n\t\tself.drop_rate = drop_rate\n\n\t\tself.layers_sizes = layers_sizes\n\t\tself.activation = activation\n\t\tself.is_training = is_training\n\t\t\n\t\tself.scope = 'prenet' if scope is None else scope\n\n\tdef __call__(self, inputs):\n\t\tx = inputs\n\n\t\twith tf.variable_scope(self.scope):\n\t\t\tfor i, size in enumerate(self.layers_sizes):\n\t\t\t\tdense = tf.layers.dense(x, units=size, activation=self.activation,\n\t\t\t\t\tname='dense_{}'.format(i + 1))\n\t\t\t\t#The paper discussed introducing diversity in generation at inference time\n\t\t\t\t#by using a dropout of 0.5 only in prenet layers (in both training and inference).\n\t\t\t\tx = 
tf.layers.dropout(dense, rate=self.drop_rate, training=True,\n\t\t\t\t\tname='dropout_{}'.format(i + 1) + self.scope)\n\t\treturn x\n\n\nclass DecoderRNN:\n\t\"\"\"Decoder two uni directional LSTM Cells\n\t\"\"\"\n\tdef __init__(self, is_training, layers=2, size=1024, zoneout=0.1, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is in training or inference to control zoneout\n\t\t\tlayers: integer, the number of LSTM layers in the decoder\n\t\t\tsize: integer, the number of LSTM units in each layer\n\t\t\tzoneout: the zoneout factor\n\t\t\"\"\"\n\t\tsuper(DecoderRNN, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.layers = layers\n\t\tself.size = size\n\t\tself.zoneout = zoneout\n\t\tself.scope = 'decoder_rnn' if scope is None else scope\n\n\t\t#Create a set of LSTM layers\n\t\tself.rnn_layers = [ZoneoutLSTMCell(size, is_training, \n\t\t\tzoneout_factor_cell=zoneout,\n\t\t\tzoneout_factor_output=zoneout,\n\t\t\tname='decoder_LSTM_{}'.format(i+1)) for i in range(layers)]\n\n\t\tself._cell = tf.contrib.rnn.MultiRNNCell(self.rnn_layers, state_is_tuple=True)\n\n\tdef __call__(self, inputs, states):\n\t\twith tf.variable_scope(self.scope):\n\t\t\treturn self._cell(inputs, states)\n\n\nclass FrameProjection:\n\t\"\"\"Projection layer to r * num_mels dimensions or num_mels dimensions\n\t\"\"\"\n\tdef __init__(self, shape=80, activation=None, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tshape: integer, dimensionality of output space (r*n_mels for decoder or n_mels for postnet)\n\t\t\tactivation: callable, activation function\n\t\t\tscope: FrameProjection scope.\n\t\t\"\"\"\n\t\tsuper(FrameProjection, self).__init__()\n\n\t\tself.shape = shape\n\t\tself.activation = activation\n\t\t\n\t\tself.scope = 'Linear_projection' if scope is None else scope\n\t\tself.dense = tf.layers.Dense(units=shape, activation=activation, name='projection_{}'.format(self.scope))\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\t#If activation==None, this returns a simple Linear projection\n\t\t\t#else the projection will be passed through an activation function\n\t\t\t# output = tf.layers.dense(inputs, units=self.shape, activation=self.activation,\n\t\t\t# \tname='projection_{}'.format(self.scope))\n\t\t\toutput = self.dense(inputs)\n\n\t\t\treturn output\n\n\nclass StopProjection:\n\t\"\"\"Projection to a scalar and through a sigmoid activation\n\t\"\"\"\n\tdef __init__(self, is_training, shape=1, activation=tf.nn.sigmoid, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, to control the use of sigmoid function as it is useless to use it\n\t\t\t\tduring training since it is integrate inside the sigmoid_crossentropy loss\n\t\t\tshape: integer, dimensionality of output space. Defaults to 1 (scalar)\n\t\t\tactivation: callable, activation function. 
only used during inference\n\t\t\tscope: StopProjection scope.\n\t\t\"\"\"\n\t\tsuper(StopProjection, self).__init__()\n\t\tself.is_training = is_training\n\t\t\n\t\tself.shape = shape\n\t\tself.activation = activation\n\t\tself.scope = 'stop_token_projection' if scope is None else scope\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\toutput = tf.layers.dense(inputs, units=self.shape,\n\t\t\t\tactivation=None, name='projection_{}'.format(self.scope))\n\n\t\t\t#During training, don't use activation as it is integrated inside the sigmoid_cross_entropy loss function\n\t\t\tif self.is_training:\n\t\t\t\treturn output\n\t\t\treturn self.activation(output)\n\n\nclass Postnet:\n\t\"\"\"Postnet that takes final decoder output and fine tunes it (using vision on past and future frames)\n\t\"\"\"\n\tdef __init__(self, is_training, hparams, activation=tf.nn.tanh, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is training or in inference to control dropout\n\t\t\tkernel_size: tuple or integer, The size of convolution kernels\n\t\t\tchannels: integer, number of convolutional kernels\n\t\t\tactivation: callable, postnet activation function for each convolutional layer\n\t\t\tscope: Postnet scope.\n\t\t\"\"\"\n\t\tsuper(Postnet, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.kernel_size = hparams.postnet_kernel_size\n\t\tself.channels = hparams.postnet_channels\n\t\tself.activation = activation\n\t\tself.scope = 'postnet_convolutions' if scope is None else scope\n\t\tself.postnet_num_layers = hparams.postnet_num_layers\n\t\tself.drop_rate = hparams.tacotron_dropout_rate\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\tx = inputs\n\t\t\tfor i in range(self.postnet_num_layers - 1):\n\t\t\t\tx = conv1d(x, self.kernel_size, self.channels, self.activation,\n\t\t\t\t\tself.is_training, self.drop_rate, 'conv_layer_{}_'.format(i + 1)+self.scope)\n\t\t\tx = conv1d(x, self.kernel_size, self.channels, lambda _: _, self.is_training, self.drop_rate,\n\t\t\t\t'conv_layer_{}_'.format(5)+self.scope)\n\t\treturn x\n\ndef _round_up_tf(x, multiple):\n\t# Tf version of remainder = x % multiple\n\tremainder = tf.mod(x, multiple)\n\t# Tf version of return x if remainder == 0 else x + multiple - remainder\n\tx_round = tf.cond(tf.equal(remainder, tf.zeros(tf.shape(remainder), dtype=tf.int32)),\n\t\tlambda: x,\n\t\tlambda: x + multiple - remainder)\n\n\treturn x_round\n\ndef sequence_mask(lengths, r, expand=True):\n\t'''Returns a 2-D or 3-D tensorflow sequence mask depending on the argument 'expand'\n\t'''\n\tmax_len = tf.reduce_max(lengths)\n\tmax_len = _round_up_tf(max_len, tf.convert_to_tensor(r))\n\tif expand:\n\t\treturn tf.expand_dims(tf.sequence_mask(lengths, maxlen=max_len, dtype=tf.float32), axis=-1)\n\treturn tf.sequence_mask(lengths, maxlen=max_len, dtype=tf.float32)\n\ndef MaskedMSE(targets, outputs, targets_lengths, hparams, mask=None):\n\t'''Computes a masked Mean Squared Error\n\t'''\n\n\t#[batch_size, time_dimension, 1]\n\t#example:\n\t#sequence_mask([1, 3, 2], 5) = [[[1., 0., 0., 0., 0.]],\n\t#\t\t\t\t\t\t\t [[1., 1., 1., 0., 0.]],\n\t#\t\t\t\t\t\t\t [[1., 1., 0., 0., 0.]]]\n\t#Note the maxlen argument that ensures mask shape is compatible with r>1\n\t#This will by default mask the extra paddings caused by r>1\n\tif mask is None:\n\t\tmask = sequence_mask(targets_lengths, hparams.outputs_per_step, True)\n\n\t#[batch_size, time_dimension, channel_dimension(mels)]\n\tones = 
tf.ones(shape=[tf.shape(mask)[0], tf.shape(mask)[1], tf.shape(targets)[-1]], dtype=tf.float32)\n\tmask_ = mask * ones\n\n\twith tf.control_dependencies([tf.assert_equal(tf.shape(targets), tf.shape(mask_))]):\n\t\treturn tf.losses.mean_squared_error(labels=targets, predictions=outputs, weights=mask_)\n\ndef MaskedSigmoidCrossEntropy(targets, outputs, targets_lengths, hparams, mask=None):\n\t'''Computes a masked SigmoidCrossEntropy with logits\n\t'''\n\n\t#[batch_size, time_dimension]\n\t#example:\n\t#sequence_mask([1, 3, 2], 5) = [[1., 0., 0., 0., 0.],\n\t#\t\t\t\t\t\t\t [1., 1., 1., 0., 0.],\n\t#\t\t\t\t\t\t\t [1., 1., 0., 0., 0.]]\n\t#Note the maxlen argument that ensures mask shape is compatible with r>1\n\t#This will by default mask the extra paddings caused by r>1\n\tif mask is None:\n\t\tmask = sequence_mask(targets_lengths, hparams.outputs_per_step, False)\n\n\twith tf.control_dependencies([tf.assert_equal(tf.shape(targets), tf.shape(mask))]):\n\t\t#Use a weighted sigmoid cross entropy to measure the <stop_token> loss. Set hparams.cross_entropy_pos_weight to 1\n\t\t#will have the same effect as vanilla tf.nn.sigmoid_cross_entropy_with_logits.\n\t\tlosses = tf.nn.weighted_cross_entropy_with_logits(targets=targets, logits=outputs, pos_weight=hparams.cross_entropy_pos_weight)\n\n\twith tf.control_dependencies([tf.assert_equal(tf.shape(mask), tf.shape(losses))]):\n\t\tmasked_loss = losses * mask\n\n\treturn tf.reduce_sum(masked_loss) / tf.count_nonzero(masked_loss, dtype=tf.float32)"
]
| [
[
"tensorflow.contrib.rnn.MultiRNNCell",
"tensorflow.losses.mean_squared_error",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.layers.batch_normalization",
"tensorflow.layers.conv1d",
"tensorflow.variable_scope",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.nn.dropout",
"tensorflow.nn.weighted_cross_entropy_with_logits",
"tensorflow.mod",
"tensorflow.count_nonzero",
"tensorflow.reduce_sum",
"tensorflow.sequence_mask",
"tensorflow.convert_to_tensor",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reduce_max",
"tensorflow.slice"
]
]
|
steveli/mogp | [
"d142e7b9e5b7dbc67cfae4760c837cafd9691a51"
]
| [
"pyPQN/polyinterp.py"
]
| [
"from __future__ import division\nimport numpy as np\nfrom scipy.linalg import solve\n\n\ndef polyinterp(points):\n \"\"\"Minimum of interpolating polynomial based on function and derivative\n values\n\n In can also be used for extrapolation if {xmin,xmax} are outside\n the domain of the points.\n\n Input:\n points(pointNum,[x f g])\n xmin: min value that brackets minimum (default: min of points)\n xmax: max value that brackets maximum (default: max of points)\n\n set f or g to sqrt(-1) if they are not known\n the order of the polynomial is the number of known f and g values minus 1\n \"\"\"\n\n nPoints = points.shape[0]\n order = (np.isreal(points[:, 1:3])).sum() - 1\n\n # Code for most common case:\n # - cubic interpolation of 2 points\n # w/ function and derivative values for both\n # - no xminBound/xmaxBound\n\n if nPoints == 2 and order == 3:\n # Solution in this case (where x2 is the farthest point):\n # d1 = g1 + g2 - 3*(f1-f2)/(x1-x2)\n # d2 = sqrt(d1^2 - g1*g2)\n # minPos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2))\n # t_new = min(max(minPos,x1),x2)\n if points[0, 1] < points[1, 1]:\n x_lo, x_hi = points[0, 0], points[1, 0]\n f_lo, f_hi = points[0, 1], points[1, 1]\n g_lo, g_hi = points[0, 2], points[1, 2]\n else:\n x_lo, x_hi = points[1, 0], points[0, 0]\n f_lo, f_hi = points[1, 1], points[0, 1]\n g_lo, g_hi = points[1, 2], points[0, 2]\n d1 = g_lo + g_hi - 3 * (f_lo - f_hi) / (x_lo - x_hi)\n d2 = np.sqrt(d1 * d1 - g_lo * g_hi)\n if np.isreal(d2):\n t = x_hi - (x_hi - x_lo) * ((g_hi + d2 - d1) /\n (g_hi - g_lo + 2 * d2))\n minPos = min(max(t, x_lo), x_hi)\n else:\n minPos = (x_lo + x_hi) / 2\n return minPos\n\n xmin = min(points[:, 0])\n xmax = max(points[:, 0])\n\n # Compute Bounds of Interpolation Area\n\n xminBound = xmin\n xmaxBound = xmax\n\n # Constraints Based on available Function Values\n A = np.zeros((0, order + 1))\n b = []\n\n for i in xrange(nPoints):\n if np.isreal(points[i, 1]):\n constraint = np.zeros(order + 1)\n for j in xrange(order + 1):\n constraint[order - j] = points[i, 0]**j\n A = np.vstack((A, constraint))\n b = np.append(b, points[i, 1])\n\n # Constraints based on available Derivatives\n for i in xrange(nPoints):\n if np.isreal(points[i, 2]):\n constraint = np.zeros(order + 1)\n for j in xrange(order):\n constraint[j] = (order - j) * points[i, 0]**(order - j - 1)\n A = np.vstack((A, constraint))\n b = np.append(b, points[i, 2])\n\n # Find interpolating polynomial\n params = solve(A, b)\n\n # Compute Critical Points\n dParams = np.zeros(order)\n for i in xrange(len(params) - 1):\n dParams[i] = params[i] * (order - i)\n\n if np.any(np.isinf(dParams)):\n cp = np.concatenate((np.array([xminBound, xmaxBound]),\n points[:, 0]))\n else:\n cp = np.concatenate((np.array([xminBound, xmaxBound]),\n points[:, 0]),\n np.roots(dParams))\n\n # Test Critical Points\n fmin = np.inf\n # Default to Bisection if no critical points valid\n minPos = (xminBound + xmaxBound) / 2\n for xCP in cp:\n if np.isreal(xCP) and xCP >= xminBound and xCP <= xmaxBound:\n fCP = np.polyval(params, xCP)\n if np.isreal(fCP) and fCP < fmin:\n minPos = np.real(xCP)\n fmin = np.real(fCP)\n\n return minPos\n"
]
| [
[
"numpy.isinf",
"numpy.array",
"numpy.zeros",
"numpy.roots",
"numpy.real",
"numpy.polyval",
"numpy.isreal",
"numpy.vstack",
"numpy.sqrt",
"numpy.append",
"scipy.linalg.solve"
]
]
|
Columbine21/TFR-Net | [
"1da01577542e7f477fdf7323ec0696aebc632357"
]
| [
"models/baselines/MISA.py"
]
| [
"\"\"\"\nFrom: https://github.com/declare-lab/MISA\nPaper: MISA: Modality-Invariant and -Specific Representations for Multimodal Sentiment Analysis\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Function\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nfrom models.subNets.BertTextEncoder import BertTextEncoder\n\n__all__ = ['MISA']\n\nclass ReverseLayerF(Function):\n \"\"\"\n Adapted from https://github.com/fungtion/DSN/blob/master/functions.py\n \"\"\"\n @staticmethod\n def forward(ctx, x, p):\n ctx.p = p\n\n return x.view_as(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n output = grad_output.neg() * ctx.p\n\n return output, None\n\n# let's define a simple model that can deal with multimodal variable length sequence\nclass MISA(nn.Module):\n def __init__(self, config):\n super(MISA, self).__init__()\n\n assert config.use_bert == True\n\n self.config = config\n self.text_size = config.feature_dims[0]\n self.visual_size = config.feature_dims[2]\n self.acoustic_size = config.feature_dims[1]\n\n\n self.input_sizes = input_sizes = [self.text_size, self.visual_size, self.acoustic_size]\n self.hidden_sizes = hidden_sizes = [int(self.text_size), int(self.visual_size), int(self.acoustic_size)]\n self.output_size = output_size = config.num_classes if config.train_mode == \"classification\" else 1\n self.dropout_rate = dropout_rate = config.dropout\n self.activation = nn.ReLU()\n self.tanh = nn.Tanh()\n \n \n rnn = nn.LSTM if self.config.rnncell == \"lstm\" else nn.GRU\n # defining modules - two layer bidirectional LSTM with layer norm in between\n \n if config.use_bert:\n # text subnets\n self.bertmodel = BertTextEncoder(language=config.language, use_finetune=config.use_finetune)\n\n self.vrnn1 = rnn(input_sizes[1], hidden_sizes[1], bidirectional=True)\n self.vrnn2 = rnn(2*hidden_sizes[1], hidden_sizes[1], bidirectional=True)\n \n self.arnn1 = rnn(input_sizes[2], hidden_sizes[2], bidirectional=True)\n self.arnn2 = rnn(2*hidden_sizes[2], hidden_sizes[2], bidirectional=True)\n\n\n\n ##########################################\n # mapping modalities to same sized space\n ##########################################\n if self.config.use_bert:\n self.project_t = nn.Sequential()\n self.project_t.add_module('project_t', nn.Linear(in_features=768, out_features=config.hidden_size))\n self.project_t.add_module('project_t_activation', self.activation)\n self.project_t.add_module('project_t_layer_norm', nn.LayerNorm(config.hidden_size))\n else:\n self.project_t = nn.Sequential()\n self.project_t.add_module('project_t', nn.Linear(in_features=hidden_sizes[0]*4, out_features=config.hidden_size))\n self.project_t.add_module('project_t_activation', self.activation)\n self.project_t.add_module('project_t_layer_norm', nn.LayerNorm(config.hidden_size))\n\n self.project_v = nn.Sequential()\n self.project_v.add_module('project_v', nn.Linear(in_features=hidden_sizes[1]*4, out_features=config.hidden_size))\n self.project_v.add_module('project_v_activation', self.activation)\n self.project_v.add_module('project_v_layer_norm', nn.LayerNorm(config.hidden_size))\n\n self.project_a = nn.Sequential()\n self.project_a.add_module('project_a', nn.Linear(in_features=hidden_sizes[2]*4, out_features=config.hidden_size))\n self.project_a.add_module('project_a_activation', self.activation)\n self.project_a.add_module('project_a_layer_norm', nn.LayerNorm(config.hidden_size))\n\n\n ##########################################\n # private encoders\n 
##########################################\n self.private_t = nn.Sequential()\n self.private_t.add_module('private_t_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n self.private_t.add_module('private_t_activation_1', nn.Sigmoid())\n \n self.private_v = nn.Sequential()\n self.private_v.add_module('private_v_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n self.private_v.add_module('private_v_activation_1', nn.Sigmoid())\n \n self.private_a = nn.Sequential()\n self.private_a.add_module('private_a_3', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n self.private_a.add_module('private_a_activation_3', nn.Sigmoid())\n \n\n ##########################################\n # shared encoder\n ##########################################\n self.shared = nn.Sequential()\n self.shared.add_module('shared_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n self.shared.add_module('shared_activation_1', nn.Sigmoid())\n\n\n ##########################################\n # reconstruct\n ##########################################\n self.recon_t = nn.Sequential()\n self.recon_t.add_module('recon_t_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n self.recon_v = nn.Sequential()\n self.recon_v.add_module('recon_v_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n self.recon_a = nn.Sequential()\n self.recon_a.add_module('recon_a_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n\n\n\n ##########################################\n # shared space adversarial discriminator\n ##########################################\n if not self.config.use_cmd_sim:\n self.discriminator = nn.Sequential()\n self.discriminator.add_module('discriminator_layer_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))\n self.discriminator.add_module('discriminator_layer_1_activation', self.activation)\n self.discriminator.add_module('discriminator_layer_1_dropout', nn.Dropout(dropout_rate))\n self.discriminator.add_module('discriminator_layer_2', nn.Linear(in_features=config.hidden_size, out_features=len(hidden_sizes)))\n\n ##########################################\n # shared-private collaborative discriminator\n ##########################################\n\n self.sp_discriminator = nn.Sequential()\n self.sp_discriminator.add_module('sp_discriminator_layer_1', nn.Linear(in_features=config.hidden_size, out_features=4))\n\n\n\n self.fusion = nn.Sequential()\n self.fusion.add_module('fusion_layer_1', nn.Linear(in_features=self.config.hidden_size*6, out_features=self.config.hidden_size*3))\n self.fusion.add_module('fusion_layer_1_dropout', nn.Dropout(dropout_rate))\n self.fusion.add_module('fusion_layer_1_activation', self.activation)\n self.fusion.add_module('fusion_layer_3', nn.Linear(in_features=self.config.hidden_size*3, out_features= output_size))\n\n self.tlayer_norm = nn.LayerNorm((hidden_sizes[0]*2,))\n self.vlayer_norm = nn.LayerNorm((hidden_sizes[1]*2,))\n self.alayer_norm = nn.LayerNorm((hidden_sizes[2]*2,))\n\n\n encoder_layer = nn.TransformerEncoderLayer(d_model=self.config.hidden_size, nhead=2)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)\n\n \n\n \n def extract_features(self, sequence, lengths, rnn1, rnn2, layer_norm):\n packed_sequence = pack_padded_sequence(sequence, lengths, batch_first=True, enforce_sorted=False)\n\n if self.config.rnncell == \"lstm\":\n 
packed_h1, (final_h1, _) = rnn1(packed_sequence)\n        else:\n            packed_h1, final_h1 = rnn1(packed_sequence)\n\n        padded_h1, _ = pad_packed_sequence(packed_h1)\n        padded_h1 = padded_h1.permute(1, 0, 2)\n        normed_h1 = layer_norm(padded_h1)\n        packed_normed_h1 = pack_padded_sequence(normed_h1, lengths, batch_first=True, enforce_sorted=False)\n\n        if self.config.rnncell == \"lstm\":\n            _, (final_h2, _) = rnn2(packed_normed_h1)\n        else:\n            _, final_h2 = rnn2(packed_normed_h1)\n\n        return final_h1, final_h2\n\n    def alignment(self, text, acoustic, visual):\n        # bert_sent_mask : consists of seq_len of 1, followed by padding of 0.\n        bert_sent, bert_sent_mask, bert_sent_type = text[:,0,:], text[:,1,:], text[:,2,:]\n\n        batch_size = text.size(0)\n\n        if self.config.use_bert:\n            bert_output = self.bertmodel(text) # [batch_size, seq_len, 768]\n\n            # Use the mean of the BERT outputs over the real (unpadded) sentence length as the final text representation.\n            masked_output = torch.mul(bert_sent_mask.unsqueeze(2), bert_output)\n            mask_len = torch.sum(bert_sent_mask, dim=1, keepdim=True)\n            bert_output = torch.sum(masked_output, dim=1, keepdim=False) / mask_len\n\n            utterance_text = bert_output\n\n\n        lengths = mask_len.squeeze().int().detach().cpu().view(-1)\n        # extract features from visual modality\n        final_h1v, final_h2v = self.extract_features(visual, lengths, self.vrnn1, self.vrnn2, self.vlayer_norm)\n        utterance_video = torch.cat((final_h1v, final_h2v), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)\n\n        # extract features from acoustic modality\n        final_h1a, final_h2a = self.extract_features(acoustic, lengths, self.arnn1, self.arnn2, self.alayer_norm)\n        utterance_audio = torch.cat((final_h1a, final_h2a), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)\n\n        # Shared-private encoders\n        self.shared_private(utterance_text, utterance_video, utterance_audio)\n\n\n        if not self.config.use_cmd_sim:\n            # discriminator\n            reversed_shared_code_t = ReverseLayerF.apply(self.utt_shared_t, self.config.reverse_grad_weight)\n            reversed_shared_code_v = ReverseLayerF.apply(self.utt_shared_v, self.config.reverse_grad_weight)\n            reversed_shared_code_a = ReverseLayerF.apply(self.utt_shared_a, self.config.reverse_grad_weight)\n\n            self.domain_label_t = self.discriminator(reversed_shared_code_t)\n            self.domain_label_v = self.discriminator(reversed_shared_code_v)\n            self.domain_label_a = self.discriminator(reversed_shared_code_a)\n        else:\n            self.domain_label_t = None\n            self.domain_label_v = None\n            self.domain_label_a = None\n\n\n        self.shared_or_private_p_t = self.sp_discriminator(self.utt_private_t)\n        self.shared_or_private_p_v = self.sp_discriminator(self.utt_private_v)\n        self.shared_or_private_p_a = self.sp_discriminator(self.utt_private_a)\n        self.shared_or_private_s = self.sp_discriminator( (self.utt_shared_t + self.utt_shared_v + self.utt_shared_a)/3.0 )\n\n        # For reconstruction\n        self.reconstruct()\n\n        # 1-LAYER TRANSFORMER FUSION\n        h = torch.stack((self.utt_private_t, self.utt_private_v, self.utt_private_a, self.utt_shared_t, self.utt_shared_v, self.utt_shared_a), dim=0)\n        h = self.transformer_encoder(h)\n        h = torch.cat((h[0], h[1], h[2], h[3], h[4], h[5]), dim=1)\n        o = self.fusion(h)\n        return o\n\n    def reconstruct(self):\n\n        self.utt_t = (self.utt_private_t + self.utt_shared_t)\n        self.utt_v = (self.utt_private_v + self.utt_shared_v)\n        self.utt_a = (self.utt_private_a + self.utt_shared_a)\n\n        self.utt_t_recon = self.recon_t(self.utt_t)\n        self.utt_v_recon = self.recon_v(self.utt_v)\n        self.utt_a_recon = self.recon_a(self.utt_a)\n\n\n    def 
shared_private(self, utterance_t, utterance_v, utterance_a):\n \n # Projecting to same sized space\n self.utt_t_orig = utterance_t = self.project_t(utterance_t)\n self.utt_v_orig = utterance_v = self.project_v(utterance_v)\n self.utt_a_orig = utterance_a = self.project_a(utterance_a)\n\n\n # Private-shared components\n self.utt_private_t = self.private_t(utterance_t)\n self.utt_private_v = self.private_v(utterance_v)\n self.utt_private_a = self.private_a(utterance_a)\n\n self.utt_shared_t = self.shared(utterance_t)\n self.utt_shared_v = self.shared(utterance_v)\n self.utt_shared_a = self.shared(utterance_a)\n\n\n def forward(self, text, audio, video):\n output = self.alignment(text, audio, video)\n tmp = {\n \"M\": output\n }\n return tmp"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.LayerNorm",
"torch.stack",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.TransformerEncoderLayer",
"torch.nn.TransformerEncoder",
"torch.sum"
]
]
|
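The MISA row above hinges on a gradient-reversal autograd Function (ReverseLayerF) to train the shared subspace adversarially. A minimal, self-contained sketch of the same pattern; the tensor shapes and names here are illustrative, not part of the dataset row:

import torch
from torch.autograd import Function

class GradReverse(Function):
    # Identity on the forward pass; gradient negated and scaled by p on the backward pass.
    @staticmethod
    def forward(ctx, x, p):
        ctx.p = p
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.neg() * ctx.p, None  # no gradient w.r.t. p

feats = torch.randn(4, 8, requires_grad=True)  # assumed batch of features
GradReverse.apply(feats, 1.0).sum().backward()
assert torch.allclose(feats.grad, -torch.ones_like(feats))  # gradients come back flipped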
saraahsimon/landlab | [
"1cf809b685efbccaaa149b5899a600c3ccedf30f"
]
| [
"landlab/graph/ugrid.py"
]
| [
"import numpy as np\nimport xarray as xr\n\nfrom ..utils.jaggedarray import flatten_jagged_array\n\nMESH_ATTRS = {\n \"cf_role\": \"mesh_topology\",\n \"long_name\": \"Topology data of 2D unstructured mesh\",\n \"topology_dimension\": 2,\n \"node_coordinates\": \"x_of_node y_of_node\",\n \"face_node_connectivity\": \"nodes_at_patch\",\n \"face_dimension\": \"patch\",\n \"face_edge_connectivity\": \"links_at_patch\",\n \"edge_node_connectivity\": \"nodes_at_link\",\n \"edge_dimension\": \"link\",\n}\n\nDUAL_MESH_ATTRS = {\n \"cf_role\": \"mesh_topology\",\n \"long_name\": \"Topology data of 2D unstructured mesh\",\n \"topology_dimension\": 2,\n \"node_coordinates\": \"x_of_corner y_of_corner\",\n \"face_node_connectivity\": \"corners_at_cell\",\n \"face_dimension\": \"cell\",\n \"face_edge_connectivity\": \"faces_at_cell\",\n \"edge_node_connectivity\": \"corners_at_face\",\n \"edge_dimension\": \"face\",\n}\n\n\ndef ugrid_from_structured_quad(coords, shape=None):\n from .structured_quad.structured_quad import (\n setup_nodes_at_link,\n setup_links_at_patch,\n setup_node_coords_structured,\n )\n\n node_y_and_x = setup_node_coords_structured(coords, shape=shape)\n shape = node_y_and_x[0].shape\n\n return ugrid_from_unstructured(\n node_y_and_x, setup_nodes_at_link(shape), setup_links_at_patch(shape)\n )\n\n\ndef ugrid_from_rectilinear(coords):\n from .structured_quad.structured_quad import setup_node_coords_rectilinear\n\n node_y_and_x = setup_node_coords_rectilinear(coords)\n return ugrid_from_structured_quad(node_y_and_x)\n\n\ndef ugrid_from_uniform_rectilinear(shape, spacing=1., origin=0.):\n from .structured_quad.structured_quad import setup_node_coords\n\n node_y_and_x = setup_node_coords(shape, spacing, origin)\n return ugrid_from_structured_quad(node_y_and_x)\n\n\ndef load_ugrid(ugrid):\n if not isinstance(ugrid, xr.Dataset):\n raise AssertionError(\"not an instance of xarray.Dataset\")\n\n ds = xr.Dataset({\"mesh\": xr.DataArray(data=\"a\", attrs=MESH_ATTRS)})\n\n meta = ugrid.a.attrs\n\n node_coords = meta[\"node_coordinates\"].split()\n ds.update({\"x_of_node\": ugrid[node_coords[0]], \"y_of_node\": ugrid[node_coords[1]]})\n ds = ds.rename({ds.x_of_node.dims[0]: \"node\"})\n\n ds.update({\"nodes_at_link\": ugrid[meta[\"edge_node_connectivity\"]]})\n ds = ds.rename({ds.nodes_at_link.dims[0]: \"link\", ds.nodes_at_link.dims[1]: \"Two\"})\n\n ds.update({\"links_at_patch\": ugrid[meta[\"face_edge_connectivity\"]]})\n ds = ds.rename(\n {\n ds.links_at_patch.dims[0]: \"patch\",\n ds.links_at_patch.dims[1]: \"max_patch_links\",\n }\n )\n\n return ds\n\n\ndef ugrid_as_dual(ugrid):\n rename = {\n \"node\": \"corner\",\n \"link\": \"face\",\n \"patch\": \"cell\",\n \"x_of_node\": \"x_of_corner\",\n \"y_of_node\": \"y_of_corner\",\n \"nodes_at_link\": \"corners_at_face\",\n \"links_at_patch\": \"faces_at_cell\",\n \"max_patch_links\": \"max_cell_faces\",\n }\n return ugrid.rename(rename)\n\n\ndef ugrid_from_unstructured(node_y_and_x, links=None, patches=None):\n ugrid = xr.Dataset({\"mesh\": xr.DataArray(data=\"a\", attrs=MESH_ATTRS)})\n\n update_node_coords(ugrid, node_y_and_x)\n\n if links is not None:\n update_nodes_at_link(ugrid, links)\n\n if patches is not None and \"nodes_at_link\" in ugrid:\n update_links_at_patch(ugrid, patches)\n\n return ugrid\n\n\ndef update_node_coords(ugrid, node_y_and_x):\n node_y, node_x = (\n np.asarray(node_y_and_x[0], dtype=float),\n np.asarray(node_y_and_x[1], dtype=float),\n )\n y_of_node = xr.DataArray(\n data=node_y.reshape((-1,)),\n coords={\"node\": 
np.arange(node_y.size)},\n        dims=(\"node\",),\n        attrs={\"long_name\": \"y\", \"units\": \"m\"},\n    )\n    x_of_node = xr.DataArray(\n        data=node_x.reshape((-1,)),\n        coords={\"node\": np.arange(node_x.size)},\n        dims=(\"node\",),\n        attrs={\"long_name\": \"x\", \"units\": \"m\"},\n    )\n    ugrid.update({\"y_of_node\": y_of_node, \"x_of_node\": x_of_node})\n\n    return ugrid\n\n\ndef update_nodes_at_link(ugrid, node_links):\n    node_links = np.asarray(node_links, dtype=int).reshape((-1, 2))\n    nodes_at_link = xr.DataArray(\n        data=node_links,\n        dims=(\"link\", \"Two\"),\n        attrs={\n            \"cf_role\": \"edge_node_connectivity\",\n            \"long_name\": \"nodes at link tail and head\",\n            \"start_index\": 0,\n        },\n    )\n    ugrid.update({\"nodes_at_link\": nodes_at_link})\n\n\ndef update_links_at_patch(ugrid, patches):\n    from .matrix.at_patch import links_at_patch\n\n    if len(patches) > 0:\n        patches = flatten_jagged_array(patches, dtype=int)\n    patch_links = links_at_patch(patches)\n    links_at_patch = xr.DataArray(\n        data=patch_links,\n        dims=(\"patch\", \"max_patch_links\"),\n        attrs={\n            \"cf_role\": \"face_edge_connectivity\",\n            \"long_name\": \"Maps every face to its edges\",\n            \"start_index\": 0,\n        },\n    )\n    ugrid.update({\"links_at_patch\": links_at_patch})\n\n\ndef update_nodes_at_patch(ugrid, nodes_at_patch):\n    nodes_at_patch = xr.DataArray(\n        data=nodes_at_patch,\n        dims=(\"patch\", \"max_patch_links\"),\n        attrs={\n            \"cf_role\": \"patch_node_connectivity\",\n            \"long_name\": \"nodes defining patches\",\n            \"start_index\": 0,\n        },\n    )\n    ugrid.update({\"nodes_at_patch\": nodes_at_patch})\n\n\ndef update_node_at_cell(ugrid, node_at_cell):\n    node_at_cell = xr.DataArray(\n        data=node_at_cell,\n        dims=(\"cell\",),\n        attrs={\n            \"cf_role\": \"node_cell_connectivity\",\n            \"long_name\": \"node at cell\",\n            \"start_index\": 0,\n        },\n    )\n    ugrid.update({\"node_at_cell\": node_at_cell})\n\n\ndef update_nodes_at_face(ugrid, nodes_at_face):\n    nodes_at_face = xr.DataArray(\n        data=nodes_at_face,\n        dims=(\"face\", \"Two\"),\n        attrs={\n            \"cf_role\": \"node_face_connectivity\",\n            \"long_name\": \"nodes at face\",\n            \"start_index\": 0,\n        },\n    )\n    ugrid.update({\"nodes_at_face\": nodes_at_face})\n"
]
| [
[
"numpy.arange",
"numpy.asarray"
]
]
|
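The landlab row above stores mesh topology as xarray variables with UGRID-style dimensions. A sketch of the layout that update_node_coords and update_nodes_at_link produce, for an assumed one-triangle mesh:

import numpy as np
import xarray as xr

# Three nodes, and the (tail, head) node pair of each of the three links.
ds = xr.Dataset(
    {
        "y_of_node": xr.DataArray(np.array([0.0, 0.0, 1.0]), dims=("node",)),
        "x_of_node": xr.DataArray(np.array([0.0, 1.0, 0.5]), dims=("node",)),
        "nodes_at_link": xr.DataArray(np.array([[0, 1], [1, 2], [2, 0]]), dims=("link", "Two")),
    }
)
print(ds.sizes)  # node: 3, link: 3, Two: 2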
Asurada2015/TFAPI_translation | [
"1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf"
]
| [
"array_ops/tf_rank.py"
]
| [
"\"\"\"tf.rank(input, name = None)\n解释:这个函数是返回Tensor的秩。\n注意:Tensor的秩和矩阵的秩是不一样的,Tensor的秩指的是元素维度索引的数目,\n这个概念也被成为order, degree或者ndims。比如,一个Tensor的维度是[1, 28, 28, 1],那么它的秩就是4。\"\"\"\n\nimport tensorflow as tf\n\nsess = tf.Session()\ndata = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\nprint(sess.run(data))\nd = tf.rank(data)\nprint(sess.run(tf.shape(data))) # [2 2 3]\nprint(sess.run(d)) # 3\n\"\"\"输入参数:\n ● input: 一个Tensor。\n ● name:(可选)为这个操作取一个名字。\n输出参数:\n ● 一个Tensor,数据类型是int32。\"\"\"\n"
]
| [
[
"tensorflow.rank",
"tensorflow.constant",
"tensorflow.Session",
"tensorflow.shape"
]
]
|
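The snippet above targets TensorFlow 1.x and runs through a Session. Under TensorFlow 2.x eager execution the same rank/shape check needs no Session; a sketch assuming TF 2.x:

import tensorflow as tf

data = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
print(tf.shape(data).numpy())  # [2 2 3]
print(tf.rank(data).numpy())   # 3, i.e. the number of dimensions, not matrix rank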
debbiemarkslab/variational-synthesis | [
"75c1edbe0aeb369d6985a68f5e4c6c63825cec2b"
]
| [
"VariationalSynthesis/model.py"
]
| [
"import argparse\nimport configparser\nfrom datetime import datetime\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pickle\n\nimport torch\nfrom torch.distributions import OneHotCategorical\nfrom torch import optim\nfrom torch.utils.data import DataLoader\n\nfrom pyro.contrib.mue.dataloaders import BiosequenceDataset\nfrom VariationalSynthesis import bio_utils as bu\n\n\nclass SynthesisModel:\n\n def __init__(self, K, M, Ls, assembly='fixed', alph_unit='codon',\n alph_constraint='arbitrary', alphabet_size=None,\n enzyme='mutazymeII', lr=0.01, grad_steps=5, tau_max=10,\n epsilon=1e-300, pin_memory=False, cuda=False):\n\n self.K = K\n self.M = M\n self.Ls = Ls\n self.L = sum(Ls)\n self.D = len(bu.alphabets['aa'])\n self.B = len(bu.alphabets['dna'])\n self.indx = [slice(sum(Ls[:j]), sum(Ls[:(j+1)]))\n for j in range(len(Ls))]\n assert assembly in ['fixed', 'combinatorial']\n self.assembly = assembly\n assert alph_unit in ['nuc', 'codon']\n self.alph_unit = alph_unit\n assert alph_constraint in ['enzymatic', 'finite', 'arbitrary']\n assert not (alph_constraint == 'arbitrary' and alph_unit == 'nuc'), (\n 'Not yet implemented.')\n assert not (alph_unit == 'codon' and alph_constraint == 'enzymatic'), (\n 'Not available.')\n self.alph_constraint = alph_constraint\n self.lr = lr\n self.grad_steps = grad_steps\n self.pin_memory = pin_memory\n self.cuda = cuda\n if cuda:\n self.gen_device = torch.device('cuda')\n else:\n self.gen_device = torch.device('cpu')\n self.transfer = bu.transfer.to(self.gen_device)\n self.mask = bu.mask.to(self.gen_device)\n\n if alph_constraint == 'enzymatic':\n self.A = self.B\n submat = bu.substitution_mat[enzyme].to(self.gen_device)\n self.Stauln = torch.cat([torch.log(torch.matrix_power(\n submat, tau))[None, :, :]\n for tau in range(1, tau_max+1)], axis=0)\n self.tau_max = tau_max\n else:\n self.A = alphabet_size\n if alph_unit == 'nuc':\n self._make_rho(self.A)\n\n self.epsilon = epsilon\n self.params = dict()\n\n def _make_rho(self, A):\n \"\"\"Construct the constant rho for converting codon representations.\"\"\"\n rho = torch.zeros((A**3, 3, A))\n for n in range(rho.shape[0]):\n for l in range(3):\n for a in range(A):\n rho[n, l, a] = torch.tensor(\n float(math.floor((n % (A**(l+1)))/(A**l)) == a))\n self.rho = rho\n\n def _make_alpha(self, x):\n \"\"\"Construct log(u . T . x)\"\"\"\n if self.alph_unit == 'codon':\n if self.alph_constraint == 'arbitrary':\n return torch.einsum('cjd,ijd->icj', self.params['uln'], x)\n elif self.alph_constraint == 'finite':\n return torch.einsum('cja,ad,ijd->icj', self.params['xt'],\n self.params['vln'], x)\n if self.alph_unit == 'nuc':\n if self.alph_constraint == 'finite':\n vtilde = self.params['vtilde']\n vln = vtilde - torch.logsumexp(vtilde, axis=1, keepdim=True)\n elif self.alph_constraint == 'enzymatic':\n vln = self.Stauln[self.params['tau']-1]\n nuc_lp = torch.einsum('cjla,ab->cjlb', self.params['xt'], vln)\n aa_lp = bu.codon_to_aa_lp(nuc_lp, self.transfer, self.mask)\n return torch.einsum('cjd,ijd->icj', aa_lp, x)\n\n def _make_c_marg_mat(self):\n \"\"\"Construct log(u . 
T)\"\"\"\n if self.alph_unit == 'codon':\n if self.alph_constraint == 'arbitrary':\n return self.params['uln']\n elif self.alph_constraint == 'finite':\n return torch.einsum('cja,ad->cjd', self.params['xt'],\n self.params['vln'])\n if self.alph_unit == 'nuc':\n if self.alph_constraint == 'finite':\n vtilde = self.params['vtilde']\n vln = vtilde - torch.logsumexp(vtilde, axis=1, keepdim=True)\n elif self.alph_constraint == 'enzymatic':\n vln = self.Stauln[self.params['tau']-1]\n nuc_lp = torch.einsum('cjla,ab->cjlb', self.params['xt'], vln)\n aa_lp = bu.codon_to_aa_lp(nuc_lp, self.transfer, self.mask)\n return aa_lp\n\n def get_seq_marg(self):\n \"\"\"Get marginals at each sequence of synthesis model.\"\"\"\n with torch.no_grad():\n per_well_marg = torch.exp(self._make_c_marg_mat())\n if self.assembly == 'fixed':\n return torch.einsum('cjd,c->jd',\n per_well_marg, self.params['w'])\n elif self.assembly == 'combinatorial':\n margs = []\n for k in range(self.K):\n margs.append(torch.einsum('cjd,c->jd',\n per_well_marg[:, self.indx[k]],\n self.params['w'][:, k]))\n return torch.cat(margs, 0)\n\n def get_cross_cov(self):\n \"\"\"Get L2 norm of cross covariance of synthesis model.\"\"\"\n with torch.no_grad():\n per_well_marg = torch.exp(self._make_c_marg_mat())\n diag_ind = (torch.eye(per_well_marg.shape[1])[:, :, None, None] *\n torch.eye(per_well_marg.shape[2])[None, None, :, :]\n ).to(self.gen_device)\n if self.assembly == 'fixed':\n total_marg = torch.einsum('cjd,c->jd', per_well_marg,\n self.params['w'])\n indep_probs = torch.einsum('jd,ef->jedf', total_marg,\n total_marg)\n dep_probs = torch.einsum('c,cjd,cef->jedf', self.params['w'],\n per_well_marg, per_well_marg)\n elif self.assembly == 'combinatorial':\n indep_probs = torch.zeros_like(diag_ind)\n dep_probs = torch.zeros_like(diag_ind)\n margs = []\n for k in range(self.K):\n k_marg_mat = per_well_marg[:, self.indx[k]]\n total_marg = torch.einsum('cjd,c->jd',\n k_marg_mat,\n self.params['w'][:, k])\n indep_probs[self.indx[k], self.indx[k]] = torch.einsum(\n 'jd,ef->jedf', total_marg, total_marg)\n dep_probs[self.indx[k], self.indx[k]] = torch.einsum(\n 'c,cjd,cef->jedf',\n self.params['w'][:, k],\n k_marg_mat, k_marg_mat)\n margs.append(total_marg)\n total_marg = torch.cat(margs, 0)\n dep_probs = dep_probs - dep_probs * diag_ind\n dep_probs = dep_probs + diag_ind * total_marg[:, None, :, None]\n return torch.sqrt(torch.square(dep_probs-indep_probs).sum((2, 3)))\n\n def get_samples(self, n):\n \"\"\"Sample from the synthesis model.\"\"\"\n with torch.no_grad():\n per_well_marg = torch.exp(self._make_c_marg_mat())\n if self.assembly == 'fixed':\n well_dists = [torch.distributions.OneHotCategorical(ps)\n for ps in per_well_marg]\n ws_dist = torch.distributions.Multinomial(n, self.params['w'])\n num_ws = ws_dist.sample().to(torch.long)\n return torch.cat([dist.sample((ni,))\n for dist, ni in zip(well_dists, num_ws)\n if ni > 0], 0)[torch.randperm(n)]\n elif self.assembly == 'combinatorial':\n seqs = []\n for k in range(self.K):\n well_dists = [torch.distributions.OneHotCategorical(ps)\n for ps in per_well_marg[:, self.indx[k]]]\n ws_dist = torch.distributions.Multinomial(\n n, self.params['w'][:, k])\n num_ws = ws_dist.sample().to(torch.long)\n segs = torch.cat([\n dist.sample((ni,))\n for dist, ni in zip(well_dists, num_ws)\n if ni > 0], 0)\n # Randomize for independence across segments.\n seqs.append(segs[torch.randperm(n)])\n return torch.cat(tuple(seqs), 1)\n\n def get_log_probs(self, x):\n \"\"\"Get log probabilities for sequences 
x\"\"\"\n with torch.no_grad():\n alpha = self._make_alpha(x)\n if self.assembly == 'fixed':\n rtilde = (torch.sum(alpha, axis=2) +\n torch.log(self.params['w'] + self.epsilon)[None, :])\n rtilde_norm = torch.logsumexp(rtilde, axis=1)\n elif self.assembly == 'combinatorial':\n rtilde_norm = torch.zeros(x.shape[0])\n for k in range(self.K):\n rtilde = (torch.sum(alpha[:, :, self.indx[k]], axis=2) +\n torch.log(self.params['w']\n + self.epsilon)[None, :, k])\n rtilde_norm += torch.logsumexp(rtilde, axis=1)\n return rtilde_norm\n\n def online_update(self, new, prev, step, decay=-0.6):\n \"\"\"Smoothing update for stochastic optimization.\"\"\"\n if step == 0:\n return new\n else:\n gamma = step**decay\n return prev + gamma*(new - prev)\n\n def E_step(self, x, step, decay=-0.6):\n \"\"\"E step in EM algorithm.\"\"\"\n with torch.no_grad():\n alpha = self._make_alpha(x)\n if self.assembly == 'fixed':\n rtilde = (torch.sum(alpha, axis=2) +\n torch.log(self.params['w'] + self.epsilon)[None, :])\n rtilde_norm = torch.logsumexp(rtilde, axis=1, keepdim=True)\n logp = torch.sum(rtilde_norm)\n r = torch.exp(rtilde - rtilde_norm)\n rmn = r.mean(axis=0)\n rxmn = torch.einsum('ic,ijd->cjd', r, x) / x.shape[0]\n elif self.assembly == 'combinatorial':\n logp = torch.tensor(0.)\n r = torch.zeros([x.shape[0], self.M, self.K])\n rxmn = torch.zeros([self.M, self.K, self.L, self.D])\n for k in range(self.K):\n rtilde = (torch.sum(alpha[:, :, self.indx[k]], axis=2) +\n torch.log(self.params['w']\n + self.epsilon)[None, :, k])\n rtilde_norm = torch.logsumexp(rtilde, axis=1, keepdim=True)\n logp += torch.sum(rtilde_norm)\n r[:, :, k] = torch.exp(rtilde - rtilde_norm)\n rxmn[:, k, self.indx[k], :] = torch.einsum(\n 'ic,ijd->cjd', r[:, :, k],\n x[:, self.indx[k], :]) / x.shape[0]\n rmn = r.mean(axis=0)\n self.params['rmn'] = self.online_update(rmn, self.params['rmn'],\n step, decay=decay)\n self.params['rxmn'] = self.online_update(rxmn, self.params['rxmn'],\n step, decay=decay)\n return logp\n\n def M_step(self):\n \"\"\"M step in EM algorithm.\"\"\"\n # w parameter.\n self.params['w'] = self.params['rmn']\n # u parameter.\n dtype = self.params['w'].dtype\n if self.alph_constraint == 'arbitrary' and self.alph_unit == 'codon':\n if self.assembly == 'fixed':\n utilde = torch.log(self.params['rxmn'] + self.epsilon)\n self.params['uln'] = (\n utilde - torch.logsumexp(utilde, axis=2, keepdim=True))\n elif self.assembly == 'combinatorial':\n for k in range(self.K):\n utilde = torch.log(self.params['rxmn'][:, k, self.indx[k]]\n + self.epsilon)\n self.params['uln'][:, self.indx[k], :] = (\n utilde - torch.logsumexp(utilde, axis=2, keepdim=True))\n elif self.alph_constraint == 'finite' and self.alph_unit == 'codon':\n if self.assembly == 'fixed':\n amax = torch.argmax(\n torch.einsum('ad,cjd->cja', self.params['vln'],\n self.params['rxmn']), axis=2, keepdim=True)\n self.params['xt'] = (\n amax == torch.arange(self.A)[None, None, :]).to(dtype)\n vtilde = torch.log(torch.einsum(\n 'cja,cjd->ad', self.params['xt'], self.params['rxmn'])\n + self.epsilon)\n self.params['vln'] = (\n vtilde - torch.logsumexp(vtilde, axis=1, keepdim=True))\n elif self.assembly == 'combinatorial':\n vtilde = torch.zeros((self.A, self.D))\n for k in range(self.K):\n amax = torch.argmax(\n torch.einsum('ad,cjd->cja', self.params['vln'],\n self.params['rxmn'][:, k, self.indx[k]]),\n axis=2, keepdim=True)\n self.params['xt'][:, self.indx[k], :] = (\n amax == torch.arange(self.A)[None, None, :]).to(dtype)\n vtilde += torch.einsum(\n 'cja,cjd->ad',\n 
self.params['xt'][:, self.indx[k]],\n self.params['rxmn'][:, k, self.indx[k]])\n vtilde = torch.log(vtilde + self.epsilon)\n self.params['vln'] = (\n vtilde - torch.logsumexp(vtilde, axis=1, keepdim=True))\n elif (self.alph_constraint in ['finite', 'enzymatic']\n and self.alph_unit == 'nuc'):\n with torch.no_grad():\n if self.alph_constraint == 'finite':\n vtilde = self.params['vtilde']\n vln = vtilde - torch.logsumexp(vtilde, axis=1,\n keepdim=True)\n elif self.alph_constraint == 'enzymatic':\n vln = self.Stauln[self.params['tau']-1]\n aa_lp = bu.codon_to_aa_lp(torch.einsum('nla,ab->nlb',\n self.rho, vln),\n self.transfer, self.mask)\n if self.assembly == 'fixed':\n rxlp = torch.einsum('nd,cjd->cjn', aa_lp,\n self.params['rxmn'])\n amax = torch.argmax(rxlp, axis=2, keepdim=True)\n amax_oh = (amax == torch.arange(self.A**3)[None, None, :]\n ).to(dtype)\n self.params['xt'] = torch.einsum('nla,ijn->ijla', self.rho,\n amax_oh)\n elif self.assembly == 'combinatorial':\n for k in range(self.K):\n rxlp = torch.einsum(\n 'nd,cjd->cjn', aa_lp,\n self.params['rxmn'][:, k, self.indx[k]])\n amax = torch.argmax(rxlp, axis=2, keepdim=True)\n amax_oh = (amax ==\n torch.arange(self.A**3)[None, None, :]\n ).to(dtype)\n self.params['xt'][:, self.indx[k], :] = torch.einsum(\n 'nla,ijn->ijla', self.rho, amax_oh)\n if self.alph_constraint == 'finite':\n for gstep in range(self.grad_steps):\n self.optimizer.zero_grad()\n vtilde = self.params['vtilde']\n vln = vtilde - torch.logsumexp(vtilde, axis=1,\n keepdim=True)\n nuc_lp = torch.einsum('cjla,ab->cjlb', self.params['xt'],\n vln)\n aa_lp = bu.codon_to_aa_lp(nuc_lp, self.transfer, self.mask)\n if self.assembly == 'fixed':\n loss = -torch.sum(aa_lp * self.params['rxmn'])\n elif self.assembly == 'combinatorial':\n loss = torch.tensor(0.)\n for k in range(self.K):\n loss -= torch.sum(\n aa_lp[:, self.indx[k]] *\n self.params['rxmn'][:, k, self.indx[k]])\n loss.backward()\n self.optimizer.step()\n elif self.alph_constraint == 'enzymatic':\n t_Elp = torch.zeros(self.tau_max)\n # Iterate over t to avoid memory overflow.\n for t in range(self.tau_max):\n nuc_lp = torch.einsum('cjla,ab->cjlb', self.params['xt'],\n self.Stauln[t])\n aa_lp = bu.codon_to_aa_lp(nuc_lp, self.transfer, self.mask)\n if self.assembly == 'fixed':\n t_Elp[t] = torch.einsum('cjd,cjd->', aa_lp,\n self.params['rxmn'])\n elif self.assembly == 'combinatorial':\n for k in range(self.K):\n t_Elp[t] += torch.einsum(\n 'cjd,cjd->', aa_lp[:, self.indx[k]],\n self.params['rxmn'][:, k, self.indx[k]])\n self.params['tau'] = torch.argmax(t_Elp) + 1\n\n def initialize_EM(self, x):\n \"\"\"Initialize EM parameters.\"\"\"\n N = x.shape[0]\n if self.alph_constraint == 'arbitrary' and self.alph_unit == 'codon':\n utilde = torch.randn((self.M, self.L, self.D))\n self.params['uln'] = utilde - torch.logsumexp(\n utilde, axis=2, keepdim=True)\n elif self.alph_constraint == 'finite' and self.alph_unit == 'codon':\n self.params['xt'] = OneHotCategorical(\n probs=(1/self.A)*torch.ones(self.A)).sample((self.M, self.L))\n vtilde = torch.randn((self.A, self.D))\n self.params['vln'] = (\n vtilde - torch.logsumexp(vtilde, axis=1, keepdim=True))\n elif self.alph_constraint == 'finite' and self.alph_unit == 'nuc':\n self.params['xt'] = OneHotCategorical(\n probs=(1/self.A)*torch.ones(self.A)\n ).sample((self.M, self.L, 3))\n self.params['vtilde'] = torch.randn((self.A, self.B),\n requires_grad=True)\n self.optimizer = optim.Adam([self.params['vtilde']], lr=self.lr)\n elif self.alph_constraint == 'enzymatic' and self.alph_unit 
== 'nuc':\n # Get device.\n device = torch.tensor(1.).device\n # Initialize cluster means with subsample of data\n xsub = x[torch.randperm(N)[:self.M]].clone().to(device)\n # Probability of each codon.\n collapse_codon = torch.sum(self.transfer, axis=(0, 1))\n codon_to_aa_prob = collapse_codon / torch.sum(collapse_codon,\n axis=0)\n xtcodon_prob = torch.einsum('cjd,nd->cjn', xsub,\n codon_to_aa_prob)\n # For padded zeros use a uniform distribution.\n xtcodon_prob = xtcodon_prob + (\n torch.sum(xtcodon_prob, axis=-1, keepdim=True) < 0.0001\n ) / xtcodon_prob.shape[-1]\n # Sample codons.\n xtcodon = OneHotCategorical(probs=xtcodon_prob).sample()\n self.params['xt'] = torch.einsum('cjn,land->cjla', xtcodon,\n self.transfer)\n self.params['tau'] = 5\n\n # Initialize mixture component weights.\n if self.assembly == 'fixed':\n self.params['w'] = (1/self.M) * torch.ones(self.M)\n elif self.assembly == 'combinatorial':\n self.params['w'] = (1/self.M) * torch.ones((self.M, self.K))\n\n # Initialize summary statistics.\n self.params['rmn'] = None\n self.params['rxmn'] = None\n\n def train(self, dataset, epochs, batch_size=None, decay=-0.6,\n polyak=True, shuffle=True, initialize=True):\n\n if batch_size is None:\n batch_size = dataset.data_size\n dataload = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,\n pin_memory=self.pin_memory,\n generator=torch.Generator(\n device=self.gen_device))\n\n if initialize:\n self.initialize_EM(dataset.seq_data)\n logp = []\n n = 0\n tot_steps = int(epochs * math.ceil(dataset.data_size / batch_size))\n polyak_thresh = int(math.ceil(tot_steps / 2))\n for epoch in range(epochs):\n for seq_data, L_data in dataload:\n if self.cuda:\n seq_data = seq_data.cuda()\n\n # E step.\n logp_batch = self.E_step(seq_data, n, decay=decay).cpu()\n n += 1\n\n # Batch estimate of log probability.\n logp.append((dataset.data_size/seq_data.shape[0]) * logp_batch)\n\n # Polyak-Ruppert averaging.\n if polyak:\n if n == polyak_thresh:\n rmn, rxmn = self.params['rmn'], self.params['rxmn']\n elif n > polyak_thresh:\n rmn += self.params['rmn']\n rxmn += self.params['rxmn']\n if n == tot_steps:\n self.params['rmn'] = rmn/(n - polyak_thresh + 1)\n self.params['rxmn'] = rxmn/(n - polyak_thresh + 1)\n\n # M step.\n self.M_step()\n\n return torch.tensor(logp, device=torch.device('cpu'))\n\n def evaluate(self, dataset, batch_size=None):\n \"\"\"Compute total log probability and per residue logp.\"\"\"\n if batch_size is None:\n batch_size = dataset.data_size\n dataload = DataLoader(dataset, batch_size=batch_size, shuffle=False,\n pin_memory=self.pin_memory,\n generator=torch.Generator(\n device=self.gen_device))\n logps = []\n logpres = []\n for seq_data, L_data in dataload:\n if self.cuda:\n seq_data = seq_data.cuda()\n batch_logps = self.get_log_probs(seq_data).cpu()\n logps.append(batch_logps)\n logpres.append(batch_logps/L_data)\n return torch.cat(logps), torch.cat(logpres)\n\n\ndef generate_example_data(small_test):\n \"\"\"Generate mini example dataset.\"\"\"\n if small_test:\n mult_dat = 1\n else:\n mult_dat = 10\n\n seqs = ['CACCA']*mult_dat + ['CAAC']*mult_dat + ['CACCC']*mult_dat\n dataset = BiosequenceDataset(seqs, 'list', 'amino-acid',\n include_stop=True)\n\n return dataset\n\n\ndef main(config):\n\n # Configure.\n test = config['general']['test'] == 'True'\n small = config['general']['small'] == 'True'\n rng_seed = int(config['general']['rng_seed'])\n file = config['general']['target_samples']\n out_folder = config['general']['out_folder']\n no_save = 
config['general']['no_save'] == 'True'\n cuda = config['general']['cuda'] == 'True'\n if cuda:\n torch.set_default_tensor_type(torch.cuda.DoubleTensor)\n pin_memory = True\n else:\n torch.set_default_dtype(torch.float64)\n pin_memory = False\n cpu_data = config['general']['cpu_data'] == 'True'\n if cpu_data or not cuda:\n device = torch.device('cpu')\n else:\n device = torch.device('cuda')\n include_stop = config['general']['include_stop'] == 'True'\n\n ntemplates = int(config['model']['ntemplates'])\n npools = int(config['model']['npools'])\n assembly = config['model']['assembly']\n unit = config['model']['unit']\n constraint = config['model']['constraint']\n alph_size = int(config['model']['alph_size'])\n enzyme = config['model']['enzyme']\n\n lr = float(config['train']['lr'])\n grad_steps = int(config['train']['grad_steps'])\n tau_max = int(config['train']['tau_max'])\n epochs = int(config['train']['epochs'])\n batch_size = config['train']['batch_size']\n if batch_size == 'None':\n batch_size = None\n else:\n batch_size = int(batch_size)\n polyak = config['train']['polyak'] == 'True'\n\n torch.set_default_dtype(torch.float64)\n\n # Load dataset\n if test:\n dataset = generate_example_data(small)\n else:\n alph = ''.join(bu.alphabets['aa'])\n if include_stop:\n alph = alph[:-1] # Stop will be automatically added by loader.\n dataset = BiosequenceDataset(file, 'fasta', alph,\n include_stop=include_stop, device=device)\n\n # Seed.\n torch.manual_seed(rng_seed)\n\n # Construct model.\n K = npools\n M = ntemplates\n L = dataset.max_length\n # split evenly for now.\n Ls = [len(elem) for elem in torch.split(torch.arange(L),\n torch.ceil(torch.tensor(L/K)))]\n model = SynthesisModel(\n K, M, Ls, assembly=assembly, alph_unit=unit,\n alph_constraint=constraint,\n alphabet_size=alph_size, enzyme=enzyme,\n lr=lr, grad_steps=grad_steps,\n tau_max=tau_max, pin_memory=pin_memory, cuda=cuda)\n\n # Fit synthesis model.\n logp_trains = model.train(dataset, epochs, batch_size=batch_size,\n polyak=polyak)\n\n # Evaluation.\n logps, logpres = model.evaluate(dataset, batch_size)\n\n # Plot.\n time_stamp = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n out_folder = os.path.join(out_folder, 'logs', time_stamp)\n os.mkdir(out_folder)\n if not no_save:\n # Training curve.\n plt.figure(figsize=(8, 6))\n plt.plot(logp_trains)\n plt.xlabel('iteration', fontsize=18)\n plt.ylabel(r'$\\log p$', fontsize=18)\n plt.savefig(os.path.join(out_folder, 'logp.pdf'))\n\n # Save parameters.\n params_file = os.path.join(out_folder, 'params.pkl')\n with open(params_file, 'wb') as pw:\n pickle.dump(model, pw)\n pickle.dump(logps, pw)\n pickle.dump(logpres, pw)\n\n # Evaluation results.\n logp_trains_file = os.path.join(out_folder, 'logp_trains.npy')\n np.save(logp_trains_file, logp_trains)\n config['results']['logp_trains'] = logp_trains_file\n config['results']['logp_per_seq'] = str(\n (logps.mean()).numpy())\n config['results']['logp_per_res'] = str(logpres.mean().numpy())\n config['results']['perpl_per_res'] = str(\n torch.exp(-logpres.mean()).numpy())\n config['results']['params_file'] = params_file\n with open(os.path.join(out_folder, 'config.cfg'), 'w') as cw:\n config.write(cw)\n\n return config, logp_trains\n\n\nif __name__ == '__main__':\n # Parse command line arguments.\n parser = argparse.ArgumentParser(description=\"Synthesis model.\")\n parser.add_argument('configPath')\n args = parser.parse_args()\n config = configparser.ConfigParser()\n config.read(args.configPath)\n\n main(config)\n"
]
| [
[
"torch.cat",
"torch.einsum",
"torch.Generator",
"torch.randperm",
"torch.ones",
"torch.eye",
"torch.set_default_dtype",
"torch.exp",
"torch.sum",
"torch.matrix_power",
"numpy.save",
"torch.manual_seed",
"torch.distributions.OneHotCategorical",
"torch.tensor",
"torch.zeros_like",
"torch.distributions.Multinomial",
"torch.zeros",
"torch.device",
"torch.square",
"torch.set_default_tensor_type",
"matplotlib.pyplot.figure",
"torch.log",
"torch.argmax",
"torch.arange",
"matplotlib.pyplot.xlabel",
"torch.no_grad",
"matplotlib.pyplot.plot",
"torch.optim.Adam",
"torch.logsumexp",
"matplotlib.pyplot.ylabel",
"torch.randn"
]
]
|
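The training loop in the row above smooths its E-step sufficient statistics with online_update, a Robbins-Monro style running average whose step size is step**decay. A standalone sketch of that rule on scalar batch means; the input values are illustrative:

def online_update(new, prev, step, decay=-0.6):
    if step == 0:
        return new
    gamma = step ** decay  # shrinks toward 0, so later batches move the estimate less
    return prev + gamma * (new - prev)

est = None
for step, batch_mean in enumerate([0.0, 0.25, 0.5, 0.75, 1.0]):
    est = online_update(batch_mean, est, step)
print(round(est, 3))  # ~0.767: a smoothed average weighted toward recent batches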
szmark001/orbit | [
"ad13b094d59c16e15159d658f8b8ce9383f52b13"
]
| [
"orbit/utils/simulation.py"
]
| [
"import pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nimport math\nfrom orbit.exceptions import IllegalArgument\n\n\ndef make_trend(series_len, rw_loc=0.001, rw_scale=0.1, type='rw', seed=1):\n \"\"\" Module to generate time-series trend with different methods\n\n Parameters\n ----------\n series_len: int\n total length of series\n type: str ['arma', 'rw']\n rw_loc: float\n mean of random walk\n rw_scale: float\n scale of random walk\n seed: int\n Returns\n -------\n np.array-llike with length equals `series_len`\n \"\"\"\n # make trend\n if type == \"rw\":\n rw = np.random.default_rng(seed).normal(rw_loc, rw_scale, series_len)\n trend = np.cumsum(rw)\n elif type == \"arma\":\n # TODO: consider parameterize this\n arparams = np.array([.25])\n maparams = np.array([.6])\n ar = np.r_[1, -arparams]\n ma = np.r_[1, maparams]\n arma_process = sm.tsa.ArmaProcess(ar, ma)\n trend = arma_process.generate_sample(series_len)\n else:\n raise IllegalArgument(\"Invalid trend_type.\")\n\n return trend\n\n\ndef make_seasonality(series_len, seasonality, order=3, duration=1, scale=.05, type='discrete', seed=1):\n \"\"\" Module to generate time-series seasonality with different methods\n series_len: int\n total length of series\n seasonality: int\n for example, seasonality=52 would be a weekly series\n order: int\n fourier series order to generate seasonality. Used when type = 'fourier' ONLY.\n duration: int\n for example, seasonality=52 and duration=7 would be a daily series with annual seasonality on weeks. Used\n in non-fourier type of seasonality ONLY.\n scale: float\n scale parameter of seasonality generation\n type: str ['discrete', 'fourier']\n seed: int\n Returns\n -------\n np.array-llike with length equals `series_len`\n \"\"\"\n if seasonality > 1:\n if type == 'fourier':\n t = np.arange(0, series_len)\n out = []\n for i in range(1, order + 1):\n x = 2.0 * i * np.pi * t / seasonality\n out.append(np.sin(x))\n out.append(np.cos(x))\n out = np.column_stack(out)\n b = np.random.default_rng(seed).normal(0, scale, order * 2)\n seas = np.matmul(out, b)\n else:\n # initialization\n seas = []\n iterations = math.ceil(series_len / duration)\n # initialize vector to be repeated\n init_seas = np.zeros(seasonality)\n init_seas[:-1] = np.random.default_rng(seed).normal(0, scale, seasonality - 1)\n init_seas[seasonality - 1] = -1 * np.sum(init_seas)\n for idx in range(iterations):\n seas += [init_seas[idx % seasonality]] * duration\n seas = np.array(seas[:series_len])\n else:\n seas = np.zeros(series_len)\n return seas\n\n\ndef make_ts_multiplicative(series_len=200, seasonality=-1, coefs=None, regressor_relevance=0.0,\n regressor_log_loc=0.0, regressor_log_scale=0.2,\n regressor_log_cov=None,\n noise_to_signal_ratio=1.0, regression_sparsity=0.5,\n obs_val_base=1000, regresspr_val_base=1000,\n trend_type='rw', rw_loc=0.001, rw_scale=0.1, seas_scale=.05,\n response_col='y', seed=0):\n \"\"\"\n Parameters\n ----------\n series_len: int\n seasonality: int\n coefs: 1-D array_like for regression coefs\n regressor_relevance: float\n 0 to 1; higher value indicates less number of useful regressors\n regressor_log_loc: float\n regressor_log_scale: float\n regressor_log_cov: 2-D array_like, of shape (num_of_regressors, num_of_regressors)\n covariance of regressors in log unit scale\n noise_to_signal_ratio: float\n regression_sparsity: float\n 0 to 1 to control probability of value > 0 at time t of a regressor\n obs_val_base: float\n positive values\n regresspr_val_base: float\n positive values\n 
trend_type: str\n        ['arma', 'rw']\n    seas_scale: float\n    response_col: str\n    seed: int\n\n    Notes\n    ------\n    Some ideas are from https://scikit-learn.org/stable/auto_examples/linear_model/plot_bayesian_ridge.html\n    and https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.BayesianRidge.html#sklearn.linear_model.BayesianRidge\n    \"\"\"\n    with_regression = False\n    if coefs is not None:\n        with_regression = True\n    # make regression\n    if with_regression:\n        num_of_regressors = len(coefs)\n        num_irrelevant_coefs = int(num_of_regressors * regressor_relevance)\n        if num_irrelevant_coefs >= 1:\n            irrelevant_coef_idx = np.random.choice(num_of_regressors, num_irrelevant_coefs, replace=False)\n            coefs[irrelevant_coef_idx] = 0.0\n        if regressor_log_cov is None:\n            x_log1p = np.random.default_rng(seed).normal(\n                regressor_log_loc, regressor_log_scale, series_len * num_of_regressors).reshape(series_len, -1) + 1\n        else:\n            x_log1p = np.random.default_rng(seed).multivariate_normal(\n                np.array([regressor_log_loc] * num_of_regressors, dtype=np.float64),\n                regressor_log_cov, series_len)\n        # control probability of regression kick-in\n        z = np.random.default_rng(seed).binomial(\n            1, regression_sparsity, series_len * num_of_regressors).reshape(series_len, -1)\n        x_obs = x_log1p * z\n\n    # make trend\n    trend = make_trend(series_len=series_len, rw_loc=rw_loc, rw_scale=rw_scale, type=trend_type, seed=seed)\n\n    # make seasonal component\n    seas = make_seasonality(seasonality=seasonality, series_len=series_len, scale=seas_scale, seed=seed)\n\n    # make noise\n    obs_log_scale = noise_to_signal_ratio * regressor_log_scale\n    noise = np.random.default_rng(seed).normal(0, obs_log_scale, series_len)\n\n    # make observed data\n    if with_regression:\n        y = np.round(obs_val_base * np.exp(trend + seas + np.matmul(x_obs, coefs) + noise)).reshape(-1, 1)\n        X = np.round(np.expm1(x_obs) * regresspr_val_base)\n        observed_matrix = np.concatenate([y, X], axis=1)\n        regressor_cols = [f\"regressor_{x}\" for x in range(1, num_of_regressors + 1)]\n        df_cols = [response_col] + regressor_cols\n    else:\n        y = np.round(obs_val_base * np.exp(trend + seas + noise)).reshape(-1, 1)\n        observed_matrix = y\n        df_cols = [response_col]\n\n    # TODO: right now we hard-coded the frequency; it is not impactful since in orbit we are only using date_col\n    # TODO: as index\n    # datetime index\n    if seasonality == 52:\n        dt = pd.date_range(start='2016-01-04', periods=series_len, freq=\"1W\")\n    elif seasonality == 12:\n        dt = pd.date_range(start='2016-01-04', periods=series_len, freq=\"1M\")\n    else:\n        dt = pd.date_range(start='2016-01-04', periods=series_len, freq=\"1D\")\n\n    df = pd.DataFrame(observed_matrix, columns=df_cols)\n    df['date'] = dt\n\n    
return df, trend, seas, coefs\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.sin",
"numpy.random.choice",
"numpy.matmul",
"numpy.zeros",
"pandas.DataFrame",
"pandas.date_range",
"numpy.sum",
"numpy.random.default_rng",
"numpy.exp",
"numpy.expm1",
"numpy.arange",
"numpy.cos",
"numpy.cumsum",
"numpy.column_stack"
]
]
|
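The 'rw' branch of make_trend in the row above reduces to a seeded random walk: the cumulative sum of i.i.d. normal increments. The same logic in isolation, using the function's own defaults:

import numpy as np

series_len, rw_loc, rw_scale, seed = 200, 0.001, 0.1, 1
rw = np.random.default_rng(seed).normal(rw_loc, rw_scale, series_len)
trend = np.cumsum(rw)  # drift of about rw_loc per step, jitter set by rw_scale
print(trend.shape)     # (200,)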
taylorfturner/data-profiler | [
"da416d1ccaed4b04d2e5b93da41a508de58b642e"
]
| [
"data_profiler/tests/profilers/test_datetime_column_profile.py"
]
| [
"from __future__ import print_function\n\nimport unittest\nfrom unittest import mock\nimport datetime\nimport six\nimport warnings\nfrom collections import defaultdict\n\nimport pandas as pd\nimport numpy as np\n\nfrom . import utils\nfrom .. import test_utils\n\nfrom data_profiler.profilers import DateTimeColumn\n\n\n# This is taken from: https://github.com/rlworkgroup/dowel/pull/36/files\n# undo when cpython#4800 is merged.\nunittest.case._AssertWarnsContext.__enter__ = test_utils.patched_assert_warns\n\n\nclass TestDateTimeColumnProfiler(unittest.TestCase):\n\n def setUp(self):\n utils.set_seed(seed=0)\n\n @staticmethod\n def _generate_datetime_data(date_format):\n\n gen_data = []\n for i in range(50):\n start_date = pd.Timestamp(1950, 7, 14)\n end_date = pd.Timestamp(2020, 7, 14)\n\n date_sample = utils.generate_random_date_sample(\n start_date, end_date, [date_format]\n )\n gen_data.append(date_sample)\n\n return pd.Series(gen_data)\n\n def _test_datetime_detection_helper(self, date_formats):\n\n for date_format in date_formats:\n # generate a few samples for each date format\n gen_data = self._generate_datetime_data(date_format)\n\n # Test to see if the format and col type is detected correctly.\n datetime_profile = DateTimeColumn(gen_data.name)\n datetime_profile.update(gen_data)\n\n self.assertEqual(date_format, datetime_profile.date_formats[0])\n\n def test_base_case(self):\n data = pd.Series([], dtype=object)\n profiler = DateTimeColumn(data.name)\n profiler.update(data)\n profiler.update(data) # intentional to validate no changes if empty\n\n self.assertEqual(profiler.match_count, 0)\n self.assertIsNone(profiler.min)\n self.assertIsNone(profiler.max)\n self.assertListEqual([], profiler.date_formats)\n self.assertIsNone(profiler.data_type_ratio)\n\n def test_profiled_date_time_formats(self):\n \"\"\"\n Checks whether the profiler properly determines all datetime formats.\n :return:\n \"\"\"\n date_formats_1 = [\n \"%Y-%m-%d %H:%M:%S\", # 2013-03-5 15:43:30\n \"%Y-%m-%dT%H:%M:%S\", # 2013-03-6T15:43:30\n \"%Y-%m-%dT%H:%M:%S.%fZ\", # 2013-03-6T15:43:30.123456Z\n \"%m/%d/%y %H:%M\", # 03/10/13 15:43\n \"%m/%d/%Y %H:%M\", # 3/8/2013 15:43\n \"%Y%m%dT%H%M%S\", # 2013036T154330\n \"%H:%M:%S.%f\", # 05:46:30.258509\n ]\n df_1 = pd.Series([], dtype=object)\n for date_format in date_formats_1:\n # generate a few samples for each date format\n df_1 = pd.concat(\n [df_1, self._generate_datetime_data(date_format)]\n )\n\n date_formats_2 = [\n \"%Y-%m-%d\", # 2013-03-7\n \"%m/%d/%Y\", # 3/8/2013\n \"%m/%d/%y\", # 03/10/13\n \"%B %d, %Y\", # March 9, 2013\n \"%b %d, %Y\", # Mar 11, 2013\n \"%d%b%y\", # 12Mar13\n \"%b-%d-%y\", # Mar-13-13\n \"%m%d%Y\", # 03142013\n ]\n df_2 = pd.Series([], dtype=object)\n for date_format in date_formats_2:\n # generate a few samples for each date format\n df_2 = pd.concat(\n [df_2, self._generate_datetime_data(date_format)]\n )\n\n date_formats_all = date_formats_1 + date_formats_2\n df_all = pd.concat([df_1, df_2])\n datetime_profile = DateTimeColumn(df_all.name)\n datetime_profile.update(df_all)\n\n six.assertCountEqual(self,\n date_formats_all,\n set(datetime_profile.date_formats))\n\n # Test chunks\n datetime_profile = DateTimeColumn(df_1.name)\n datetime_profile.update(df_1)\n\n six.assertCountEqual(self,\n date_formats_1,\n set(datetime_profile.date_formats))\n\n datetime_profile.update(df_2)\n six.assertCountEqual(self,\n date_formats_all,\n datetime_profile.date_formats)\n\n def test_profiled_min(self):\n\n def date_linspace(start, end, steps):\n delta 
= (end - start) / steps\n increments = list(range(0, steps)) * np.array([delta] * steps)\n return start + increments\n\n df = pd.core.series.Series(\n date_linspace(datetime.datetime.min, datetime.datetime.max, 11)\n )\n df = df.apply(\n lambda x: x - datetime.timedelta(microseconds=x.microsecond)\n ).apply(str)\n\n datetime_profile = DateTimeColumn(df[1:].name)\n datetime_profile.update(df[1:])\n \n self.assertEqual(datetime_profile.min, df.iloc[1])\n\n datetime_profile.update(df)\n self.assertEqual(datetime_profile.min, df.iloc[0])\n\n datetime_profile.update(pd.Series([np.nan, df.iloc[3]]))\n self.assertEqual(datetime_profile.min, df.iloc[0])\n\n datetime_profile.update(df[1:2]) # only way to keep as df\n self.assertEqual(datetime_profile.min, df.iloc[0])\n\n def test_profiled_max(self):\n\n def date_linspace(start, end, steps):\n delta = (end - start) / steps\n increments = list(range(0, steps)) * np.array([delta] * steps)\n return start + increments\n\n df = pd.core.series.Series(\n date_linspace(datetime.datetime.min, datetime.datetime.max, 11)\n )\n df = df.apply(\n lambda x: x - datetime.timedelta(microseconds=x.microsecond)\n ).apply(str)\n\n datetime_profile = DateTimeColumn(df[:-1].name)\n datetime_profile.update(df[:-1])\n\n self.assertEqual(datetime_profile.max, df.iloc[-2])\n\n datetime_profile.update(df)\n self.assertEqual(datetime_profile.max, df.iloc[-1])\n\n datetime_profile.update(pd.Series([np.nan, df.iloc[3]]))\n self.assertEqual(datetime_profile.max, df.iloc[-1])\n\n datetime_profile.update(df[1:2]) # only way to keep as df\n self.assertEqual(datetime_profile.max, df.iloc[-1])\n\n def test_date_time_detection(self):\n \"\"\"\n Tests if get_datetime_params is able to detect the date time cols\n correctly\n :return:\n \"\"\"\n date_formats = [\n \"%Y-%m-%d %H:%M:%S\", # 2013-03-5 15:43:30\n \"%Y-%m-%dT%H:%M:%S\", # 2013-03-6T15:43:30\n \"%Y-%m-%dT%H:%M:%S.%fZ\", # 2013-03-6T15:43:30.123456Z\n \"%m/%d/%y %H:%M\", # 03/10/13 15:43\n \"%m/%d/%Y %H:%M\", # 3/8/2013 15:43\n \"%Y%m%dT%H%M%S\", # 2013036T154330\n \"%H:%M:%S.%f\" # 05:46:30.258509\n ]\n\n self._test_datetime_detection_helper(date_formats)\n\n def test_date_time_detection_without_time(self):\n \"\"\"\n Tests if get_datetime_params is able to detect the date cols correctly\n :return:\n \"\"\"\n date_formats = [\n \"%Y-%m-%d\", # 2013-03-7\n \"%m/%d/%Y\", # 3/8/2013\n \"%m/%d/%y\", # 03/10/13\n \"%B %d, %Y\", # March 9, 2013\n \"%b %d, %Y\", # Mar 11, 2013\n \"%d%b%y\", # 12Mar13\n \"%b-%d-%y\", # Mar-13-13\n \"%m%d%Y\", # 03142013\n ]\n\n self._test_datetime_detection_helper(date_formats)\n\n def test_data_ratio(self):\n data = [\n 2.5, 12.5, '2013-03-5 15:43:30', 5, '03/10/13 15:43', 'Mar 11, 2013'\n ]\n df = pd.Series(data).apply(str)\n\n profiler = DateTimeColumn(df.name)\n self.assertEqual(profiler.data_type_ratio, None)\n\n profiler.update(df)\n self.assertEqual(profiler.data_type_ratio, 0.5)\n\n profiler.update(pd.Series([None, '10/20/13', 'nan']))\n self.assertEqual(profiler.data_type_ratio, 4/9.0)\n\n def test_profile(self):\n data = [\n 2.5, 12.5, '2013-03-10 15:43:30', 5, '03/10/13 15:43',\n 'Mar 11, 2013'\n ]\n df = pd.Series(data).apply(str)\n profiler = DateTimeColumn(df.name)\n expected_profile = dict(\n min='03/10/13 15:43',\n max='Mar 11, 2013',\n median=None,\n histogram=None,\n format=[\n '%Y-%m-%d %H:%M:%S',\n \"%m/%d/%y %H:%M\",\n \"%b %d, %Y\",\n ],\n times=defaultdict(float, {'datetime': 1.0})\n )\n time_array = [float(i) for i in range(4, 0, -1)]\n with mock.patch('time.time', 
side_effect=lambda: time_array.pop()):\n # Validate that the times dictionary is empty\n self.assertEqual(defaultdict(float), profiler.profile['times'])\n\n # Validate the time in the datetime class has the expected time.\n profiler.update(df)\n expected = defaultdict(float, {'datetime': 1.0})\n self.assertEqual(expected, profiler.profile['times'])\n profile = profiler.profile\n self.assertDictEqual(expected_profile, profile)\n \n # Validate time in datetime class has expected time after second\n # update\n profiler.update(df)\n expected = defaultdict(float, {'datetime': 2.0})\n self.assertEqual(expected, profiler.profile['times'])\n\n def test_warning_for_bad_dates(self):\n\n df = pd.Series(['03/10/2013 15:43'])\n\n profiler = DateTimeColumn(df.name)\n with warnings.catch_warnings(record=True) as w:\n profiler.update(df)\n self.assertEqual(len(w), 0)\n\n df = pd.Series(['03/10/13 15:43'])\n with self.assertWarns(RuntimeWarning) as r_warning:\n profiler.update(df)\n self.assertEqual(\n str(r_warning.warning),\n \"Years provided were in two digit format. As a result, \"\n \"datetime assumes dates < 69 are for 2000s and above \"\n \"are for the 1990s. \"\n \"https://stackoverflow.com/questions/37766353/\"\n \"pandas-to-datetime-parsing-wrong-year\"\n )\n\n def test_add(self):\n # unique format for the first profile\n data1 = [\n \"2013-03-5 15:43:30\",\n \"2013-03-6T15:43:30\",\n \"2013-03-6T15:43:30.123456Z\",\n \"03/10/2013 15:43\",\n \"3/8/2013 15:43\",\n \"%2013036T154330\",\n \"05:46:30.258509\",\n ]\n df = pd.Series(data1).apply(str)\n profile1 = DateTimeColumn(df.name)\n profile1.update(df)\n\n # unique format for second profile\n data2 = [\n 2.5, 12.5, '2013-03-10 15:23:20', 5, '03/10/2013 15:23',\n 'Mar 12, 2013'\n ]\n df = pd.Series(data2).apply(str)\n profile2 = DateTimeColumn(df.name)\n profile2.update(df)\n\n merged_profile = profile1 + profile2\n\n # checks for _dt_objs\n min_dt_obj = datetime.datetime.strptime('05:46:30.258509',\n '%H:%M:%S.%f')\n max_dt_obj = datetime.datetime.strptime('2013-03-12', '%Y-%m-%d')\n self.assertEqual(min_dt_obj, merged_profile._dt_obj_min)\n self.assertEqual(max_dt_obj, merged_profile._dt_obj_max)\n\n # checks for the proper max and min to be merged\n self.assertEqual('05:46:30.258509', merged_profile.min)\n self.assertEqual('Mar 12, 2013', merged_profile.max)\n\n # checks for date format merge\n self.assertEqual(\n ['%Y-%m-%d %H:%M:%S', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%fZ',\n '%m/%d/%Y %H:%M', '%H:%M:%S.%f', '%b %d, %Y'],\n merged_profile.date_formats)\n\n # Checks for DateTimeColumn type for argument\n with self.assertRaises(TypeError) as exc:\n profile2 = \"example_string\"\n profile1 + profile2\n\n self.assertEqual(str(exc.exception),\n \"Unsupported operand type(s) for +: \"\n \"'DateTimeColumn' and '{}'\"\n .format(profile2.__class__.__name__))"
]
| [
[
"pandas.concat",
"pandas.Timestamp",
"numpy.array",
"pandas.Series"
]
]
|
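The datetime-profiler tests above rely on strftime/strptime round-tripping: a sample rendered with a given format string parses back under that same format. A minimal illustration with one format from the test lists:

import datetime

fmt = "%m/%d/%Y %H:%M"  # one of the formats exercised in the tests above
dt = datetime.datetime(2013, 3, 8, 15, 43)
sample = dt.strftime(fmt)  # '03/08/2013 15:43'
assert datetime.datetime.strptime(sample, fmt) == dt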
vishwakarmarhl/SlowFast | [
"91cf75d4404c68c108057bbfd525edf9671799e7"
]
| [
"slowfast/utils/metrics.py"
]
| [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\"\"\"Functions for computing metrics.\"\"\"\n\nimport torch\nimport numpy as np\n\n\ndef topks_correct(preds, labels, ks):\n \"\"\"\n Given the predictions, labels, and a list of top-k values, compute the\n number of correct predictions for each top-k value.\n\n Args:\n preds (array): array of predictions. Dimension is batchsize\n N x ClassNum.\n labels (array): array of labels. Dimension is batchsize N.\n ks (list): list of top-k values. For example, ks = [1, 5] correspods\n to top-1 and top-5.\n\n Returns:\n topks_correct (list): list of numbers, where the `i`-th entry\n corresponds to the number of top-`ks[i]` correct predictions.\n \"\"\"\n assert preds.size(0) == labels.size(\n 0\n ), \"Batch dim of predictions and labels must match\"\n #import pdb; pdb.set_trace()\n \n # Find the top max_k predictions for each sample\n _top_max_k_vals, top_max_k_inds = torch.topk(\n preds, max(ks), dim=1, largest=True, sorted=True\n )\n # (batch_size, max_k) -> (max_k, batch_size).\n top_max_k_inds = top_max_k_inds.t()\n # (batch_size, ) -> (max_k, batch_size).\n rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds)\n # (i, j) = 1 if top i-th prediction for the j-th sample is correct.\n top_max_k_correct = top_max_k_inds.eq(rep_max_k_labels)\n # Compute the number of topk correct predictions for each k.\n topks_correct = [\n top_max_k_correct[:k, :].float().sum() for k in ks\n ] # .contiguous().view(-1)\n return topks_correct\n\n\ndef multitask_topks_correct(preds, labels, ks=(1,)):\n \"\"\"\n Args:\n preds: tuple(torch.FloatTensor), each tensor should be of shape\n [batch_size, class_count], class_count can vary on a per task basis, i.e.\n outputs[i].shape[1] can be different to outputs[j].shape[j].\n labels: tuple(torch.LongTensor), each tensor should be of shape [batch_size]\n ks: tuple(int), compute accuracy at top-k for the values of k specified\n in this parameter.\n Returns:\n tuple(float), same length at topk with the corresponding accuracy@k in.\n \"\"\"\n max_k = int(np.max(ks))\n task_count = len(preds)\n batch_size = labels[0].size(0)\n all_correct = torch.zeros(max_k, batch_size).type(torch.ByteTensor)\n if torch.cuda.is_available():\n all_correct = all_correct.cuda()\n for output, label in zip(preds, labels):\n _, max_k_idx = output.topk(max_k, dim=1, largest=True, sorted=True)\n # Flip batch_size, class_count as .view doesn't work on non-contiguous\n max_k_idx = max_k_idx.t()\n correct_for_task = max_k_idx.eq(label.view(1, -1).expand_as(max_k_idx))\n all_correct.add_(correct_for_task)\n\n multitask_topks_correct = [\n torch.ge(all_correct[:k].float().sum(0), task_count).float().sum(0) for k in ks\n ]\n\n return multitask_topks_correct\n\n\ndef topk_errors(preds, labels, ks):\n \"\"\"\n Computes the top-k error for each k.\n Args:\n preds (array): array of predictions. Dimension is N.\n labels (array): array of labels. Dimension is N.\n ks (list): list of ks to calculate the top accuracies.\n \"\"\"\n num_topks_correct = topks_correct(preds, labels, ks)\n return [(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct]\n\n\ndef topk_accuracies(preds, labels, ks):\n \"\"\"\n Computes the top-k accuracy for each k.\n Args:\n preds (array): array of predictions. Dimension is N.\n labels (array): array of labels. 
Dimension is N.\n ks (list): list of ks to calculate the top accuracies.\n \"\"\"\n num_topks_correct = topks_correct(preds, labels, ks)\n return [(x / preds.size(0)) * 100.0 for x in num_topks_correct]\n\n\ndef multitask_topk_accuracies(preds, labels, ks):\n \"\"\"\n Computes the top-k accuracy for each k.\n Args:\n preds (array): array of predictions. Dimension is N.\n labels (array): array of labels. Dimension is N.\n ks (list): list of ks to calculate the top accuracies.\n \"\"\"\n num_multitask_topks_correct = multitask_topks_correct(preds, labels, ks)\n return [(x / preds[0].size(0)) * 100.0 for x in num_multitask_topks_correct]\n"
]
| [
[
"numpy.max",
"torch.cuda.is_available",
"torch.zeros"
]
]
|
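A hand-checkable instance of the topks_correct logic in the row above: with these logits (values chosen by hand for illustration) the true class is the argmax for only one of the two samples, but lies within the top-2 for both:

import torch

preds = torch.tensor([[0.1, 0.7, 0.2],
                      [0.5, 0.3, 0.2]])
labels = torch.tensor([1, 1])
_, inds = torch.topk(preds, 2, dim=1, largest=True, sorted=True)
inds = inds.t()  # (max_k, batch_size), as in topks_correct
correct = inds.eq(labels.view(1, -1).expand_as(inds))
print([int(correct[:k].float().sum()) for k in (1, 2)])  # [1, 2]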
lsx137946009/pawo | [
"69cef1797fe1971d87c59cb8c5c167089fa66ecd"
]
| [
"parseData/microband.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 11 16:46:06 2019\n\n@author: lsx\n\"\"\"\n\nimport time\nimport pandas as pd\n\n#filepath = './data/micro'\n##savepath = './result'\n#filelist = os.listdir(filepath)\n#\n#\n#HR = [] # Heart Rate No.1\n#ST = [] # Skin Temperature NO.2\n#GS = [] # Galvanic skin reaction No.3\n#AL = [] # Ambient light NO.4\n#Ac = [] # Accelerometer No.5\n#Gy = [] # Gyro No.6\n#PM = [] # 其实是步数 No.7\n#c = [] # 中间变量,分割后的每一行数据\n#temp_time = 0 #临时时间,为了做对比\n#\n# \n#def read_mic(filepath):\n# \n# fodata = open(file=filepath, mode='rt', encoding='utf-8')\n# for line in fodata:\n# aline = fodata.readline()\n# b = aline.split(';')\n# c = []\n# for i in b:\n# try:\n# m = float(i)\n# c.append(m)\n# except ValueError:\n# i = None\n# c.append(i)\n# try:\n# dt = c[0] / 1000\n# dt = time.localtime(dt)\n# c[0] = time.strftime('%Y-%m-%d %H:%M:%S', dt)\n# except:\n# # print('time convert error')\n# # print(c)\n# pass\n#\n# try:\n# if c[1] == 1:\n# HR.append(c)\n# elif c[1] == 2:\n# ST.append(c)\n# elif c[1] == 3:\n# GS.append(c)\n# elif c[1] == 4:\n# AL.append(c)\n# elif c[1] == 5:\n# Ac.append(c)\n# elif c[1] == 6:\n# Gy.append(c)\n# elif c[1] == 7:\n# PM.append(c)\n# else:\n# tempid = c[1]\n# except IndexError as e:\n# # print('index error')\n# pass\n# print('%s, OK' %filename)\n# return HR\n#\n#for filename in filelist[0:1]:\n# print(filename) \n# datapath = os.path.join(filepath, filename)\n# hr = read_mic(datapath)\n# if not len(hr) == 0:\n# dfHR = pd.DataFrame(hr)\n# dfHR = dfHR.drop_duplicates()\n# dfHR = dfHR.iloc[:, 0:3] # 只取前三列\n# dfHR.columns = ['time', 'id', 'hr']\n# dfHR['name'] = filename\n# dfHR['time'] = pd.to_datetime(dfHR['time']) # print(dfHR)\n# else:\n# print('HR of %s is empty'%filename)\n \n \ndef parser(filepath):\n \n #时间戳转换\n def parse_time(stamp):\n stamp = stamp/1000 #why /1000\n dtime = time.localtime(stamp)\n dtime = time.strftime('%Y-%m-%d %H:%M:%S', dtime)\n return dtime\n \n \n hr = list() # Heart Rate No.1\n st = list() # Skin Temperature NO.2\n gs = list() # Galvanic skin reaction No.3\n al = list() # Ambient light NO.4\n ac = list() # Accelerometer No.5\n gy = list() # Gyro No.6\n pm = list() # Step No.7\n uk = list() # Unknown\n \n fodata = open(file=filepath, mode='rt', encoding='utf-8')\n for line in fodata:\n steam = fodata.readline()\n frame = steam.split(';')\n parse = list()\n for i in frame:\n try:\n parse.append(float(i))\n except ValueError:\n parse.append(None)\n try:\n parse[0] = parse_time(parse[0])\n except:\n pass\n \n try:\n flag = parse[1]\n except:\n pass\n \n try:\n if flag == 1:\n hr.append(parse)\n elif flag == 2:\n st.append(parse)\n elif flag == 3:\n gs.append(parse)\n elif flag == 4:\n al.append(parse)\n elif flag == 5:\n ac.append(parse)\n elif flag == 6:\n gy.append(parse)\n elif flag == 7:\n pm.append(parse)\n else:\n uk.append(parse)\n except IndexError as e:\n pass\n \n print('%s, OK' %filepath)\n if not len(hr) == 0:\n hr = pd.DataFrame(hr)\n hr = hr.drop_duplicates()\n hr = hr.iloc[:, :3] # 只取前三列\n hr.columns = ['time', 'id', 'hr']\n hr['name'] = filepath\n hr['time'] = pd.to_datetime(hr['time'])\n return hr\n if not len(st) == 0:\n st = pd.DataFrame(st)\n st = st.drop_duplicates()\n st = st.iloc[:, :3] # 只取前三列\n st.columns = ['time', 'id', 'hr']\n st['name'] = filepath\n st['time'] = pd.to_datetime(st['time'])\n return st\n if not len(ac) == 0:\n ac = pd.DataFrame(ac)\n ac = ac.drop_duplicates()\n ac = ac.iloc[:, :5]\n ac.columns = ['time', 'id', 'ac1', 'ac2', 'ac3']\n ac['name'] = filepath\n ac['time'] = 
pd.to_datetime(ac['time'])\n return ac\n if not len(gy) == 0:\n gy = pd.DataFrame(gy)\n gy = gy.drop_duplicates()\n gy = gy.iloc[:, :5]\n gy.columns = ['time', 'id', 'gy1', 'gy2', 'gy3']\n gy['name'] = filepath\n gy['time'] = pd.to_datetime(gy['time'])\n return gy\n \n \n \n \n \n \n \n "
]
| [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
]
|
abhishek9594/nl_to_pl | [
"09c6ad979e3b8d7b4432a4f701875d959adaa972"
]
| [
"node.py"
]
| [
"#!/usr/bin/env python\n\"\"\"\nnode.py: Map all the node types of the PL grammar to node_id\n\nUsage:\n node.py --lang=<str> NODE_FILE [options]\n\nOptions:\n -h --help Show this screen.\n --lang=<str> target language\n\"\"\"\n\nfrom docopt import docopt\nimport pickle\nimport torch\n\nfrom utils import pad_sents\n\nclass Node(object):\n def __init__(self, node2id=None):\n \"\"\"\n @param node2id (dict): dictionary mapping nodes -> indices\n \"\"\"\n if node2id:\n self.node2id = node2id\n else:\n self.node2id = dict()\n self.node2id['<pad>'] = 0 #pad token\n self.node2id['<start>'] = 1 #start token\n self.pad_id = self.node2id['<pad>']\n self.id2node = {v: k for k, v in self.node2id.items()}\n\n def __getitem__(self, node):\n \"\"\" Retrieve node's index.\n @param node (str): node to look up.\n @returns index (int): index of the node \n \"\"\"\n return self.node2id.get(node)\n\n def __contains__(self, node):\n \"\"\" Check if node is captured by Node.\n @param node (str): node to look up\n @returns contains (bool): whether node is contained \n \"\"\"\n return node in self.node2id\n\n def __setitem__(self, key, value):\n \"\"\" Raise error, if one tries to edit the Node.\n \"\"\"\n raise ValueError('Node dictionary is readonly')\n\n def __len__(self):\n \"\"\" Compute number of nodes in Node.\n @returns len (int): number of nodes in Node\n \"\"\"\n return len(self.node2id)\n\n def __repr__(self):\n \"\"\" Representation of Node to be used\n when printing the object.\n \"\"\"\n return 'Node[size=%d]' % len(self)\n\n def id2node(self, n_id):\n \"\"\" Return mapping of index to node.\n @param n_id (int): node index\n @returns node (str): node corresponding to index\n \"\"\"\n return self.id2node[n_id]\n\n def add(self, node):\n \"\"\" Add node to Node, if it is previously unseen.\n @param node (str): node to add to Node\n @return index (int): index that the node has been assigned\n \"\"\"\n if node not in self:\n n_id = self.node2id[node] = len(self)\n self.id2node[n_id] = node\n return n_id\n else:\n return self[node]\n\n def nodes2indices(self, sents):\n \"\"\" Convert list of tokens or list of sentences of tokens\n into list or list of list of indices.\n @param sents (list[str] or list[list[str]]): sentence(s) containing either node or GenToken toks\n @return node_ids (list[int] or list[list[int]]): sentence(s) in indices\n \"\"\"\n if type(sents[0]) == list:\n return [[self[node] for node in sent] for sent in sents]\n else:\n sent = sents\n return [self[node] for node in sent]\n\n def indices2nodes(self, node_ids):\n \"\"\" Convert list of indices into nodes.\n @param node_ids (list[int]): list of node ids\n @return sents (list[str]): list of nodes\n \"\"\"\n return [self.id2node[n_id] for n_id in node_ids]\n\n def nodes2Tensor(self, sents):\n \"\"\"\n Convert list of tgt nodes tensor by padding required sents\n where tgt sents contain nodes\n @param sents (list[list[str]]): batch of tgt sents\n @return node_tensor (torch.tensor (max_sent_len, batch_size))\n \"\"\"\n node_ids = self.nodes2indices(sents)\n nodes_padded = pad_sents(node_ids, self.pad_id)\n return torch.tensor(nodes_padded, dtype=torch.long)\n\n @staticmethod\n def build(grammar):\n \"\"\" Given a grammar (ASDL) description of language, extract all node types\n @param grammar (ASDLGrammar): grammar object described in the asdl file for the target language\n @returns nodes (Node): Node instance produced from the grammar\n \"\"\"\n nodes = Node()\n \n for field in grammar.fields: #field: Field(name, type, cardinality)\n 
node_name = field.type.name #ASDLType(type_name)\n node_cardinality = field.cardinality\n if node_cardinality == 'optional':\n node_name += '?'\n elif node_cardinality == 'multiple':\n node_name += '*'\n \n nodes.add(node_name)\n\n return nodes\n\n def save(self, file_path):\n \"\"\" Save Node to file as pickle dump.\n @param file_path (str): file path to node file\n \"\"\"\n pickle.dump(self.node2id, open(file_path, 'wb'))\n\n @staticmethod\n def load(file_path):\n \"\"\"\n @param file_path (str): file path to node file\n @returns Node object loaded from pickle dump\n \"\"\"\n node2id = pickle.load(open(file_path, 'rb'))\n\n return Node(node2id)\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n\n lang = args['--lang']\n if lang == 'lambda':\n from lang.Lambda.asdl import ASDLGrammar\n\n asdl_desc = open('lang/Lambda/lambda_asdl.txt').read()\n grammar = ASDLGrammar.from_text(asdl_desc)\n\n nodes = Node.build(grammar)\n print('generated nodes: %d' % (len(nodes)))\n\n nodes.save(args['NODE_FILE'])\n print('nodes saved to %s' % args['NODE_FILE'])\n else:\n print('language: %s currently not supported' % (lang))\n"
]
| [
[
"torch.tensor"
]
]
|
fusecloud/fusetools | [
"4352d0beebd4d676b8578a1f96977553d3ebf28c"
]
| [
"fusetools/social_tools.py"
]
| [
"import pandas as pd\nimport tweepy\nimport os\n\n\nclass Twitter:\n\n @classmethod\n def pull_user_likes(cls, screen_name, twtr_api_key, twtr_api_secret, **kwargs):\n # TWITTER AUTH\n print(\"Authenticating to Twitter\")\n\n screen_name = screen_name\n auth = tweepy.AppAuthHandler(twtr_api_key, twtr_api_secret)\n api = tweepy.API(auth)\n alltweets = api.favorites(screen_name=screen_name, **kwargs)\n\n tweet_df = \\\n pd.DataFrame({\n \"id\": [x._json.get('id') for x in alltweets],\n \"datetime\": [x._json.get('created_at') for x in alltweets],\n \"text\": [x._json.get('text') for x in alltweets],\n \"tweet_source\": [x._json.get('source') for x in alltweets],\n \"symbols\": [x._json.get('entities').get('symbols')\n for x in alltweets],\n \"rt_count\": [x._json.get('retweet_count') for x in alltweets],\n \"fav_count\": [x._json.get('favorite_count') for x in alltweets]\n })\n\n return tweet_df\n\n @classmethod\n def pull_user_tweets(cls, screen_name, twtr_api_key, twtr_api_secret):\n # TWITTER AUTH\n print(\"Authenticating to Twitter\")\n\n screen_name = screen_name\n auth = tweepy.AppAuthHandler(twtr_api_key, twtr_api_secret)\n api = tweepy.API(auth)\n\n # initialize a list to hold all the tweepy Tweets\n alltweets = []\n\n print(f\"Grabbing user: {screen_name} tweets\")\n # make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n print(f\"getting tweets before {oldest}\")\n\n # all subsequent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n print(f\"...{len(alltweets)} tweets downloaded so far\")\n\n tweet_df = \\\n pd.DataFrame({\n \"id\": [x._json.get('id') for x in alltweets],\n \"datetime\": [x._json.get('created_at') for x in alltweets],\n \"text\": [x._json.get('text') for x in alltweets],\n \"tweet_source\": [x._json.get('source') for x in alltweets],\n \"symbols\": [x._json.get('entities').get('symbols')\n for x in alltweets],\n \"media\": [x._json.get('entities').get('media')\n for x in alltweets],\n \"rt_count\": [x._json.get('retweet_count') for x in alltweets],\n \"fav_count\": [x._json.get('favorite_count') for x in alltweets]\n })\n\n print(f\"len of tweets: {len(tweet_df)}\")\n tweet_df = tweet_df[tweet_df['symbols'].astype(str).str.contains(\"text\")].reset_index(drop=True)\n\n tweet_df['symbols_flat'] = \\\n (tweet_df\n # .apply(lambdas x: flatten_dicts(x['symbols']), axis=1)\n .apply(lambda x: [d.get(\"text\") for d in x['symbols']], axis=1)\n .astype(str)\n .str.replace(\"[\", \"\")\n .str.replace(\"]\", \"\")\n .str.replace(\"'\", \"\")\n )\n\n tweet_df['tweet_source'] = \\\n tweet_df.apply(lambda x: (x['tweet_source']\n .split(\">\")[1]\n .split(\"<\")[0]\n ), axis=1)\n\n tweet_df['datetime'] = \\\n pd.to_datetime(tweet_df['datetime']) # .dt.tz_localize('UTC')\n\n # tweet_df['datetime'] = \\\n # tweet_df['datetime'].dt.tz_convert('US/Eastern')\n\n tweet_df['datetime'] = \\\n tweet_df['datetime'].astype(str).str[:19]\n\n tweet_df['tweet_link'] = \\\n tweet_df.apply(\n lambda x: \"https://twitter.com/\" + \\\n 
screen_name +\n \"/status/\" + \\\n str(x['id']), axis=1)\n\n tweet_df.drop([\"symbols\"], axis=1, inplace=True)\n tweet_df.rename(columns={\"symbols_flat\": \"symbols\"}, inplace=True)\n return tweet_df\n\n @classmethod\n def pull_tweet_details(cls, twtr_api_key, twtr_api_secret, tweet_id):\n auth = tweepy.AppAuthHandler(twtr_api_key, twtr_api_secret)\n api = tweepy.API(auth)\n tweet = api.get_status(tweet_id)\n return tweet\n"
]
| [
[
"pandas.to_datetime"
]
]
|
vivekparasharr/Challenges-and-Competitions | [
"c99d67838a0bb14762d5f4be4993dbcce6fe0c5a",
"c99d67838a0bb14762d5f4be4993dbcce6fe0c5a"
]
| [
"30DayChartChallenge/20210430-uncertainties-3d.py",
"30DayChartChallenge/20210404-comparisons-magical.py"
]
| [
"\n# https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html\n#!/usr/bin/env python3\nfrom re import X\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Taubin\ndef Taubin_heart_3d(x,y,z):\n return (x**2+(9/4)*y**2+z**2-1)**3-x**2*z**3-(9/80)*y**2*z**3\n# Kuska\ndef Kuska_heart_3d(x,y,z):\n return ((2*(x**2) + y**2 + z**2 - 1)**3 - ((x**2)*(z**3))/10 - ((y**2)*(z**3)))\n# Nordstrand\ndef Nordstrand_heart_3d(x,y,z):\n return ( (32/9)*(x**2) + 2*(y**2) + z**2 - 1 )**3 - (8/45)*(x**2)*(z**3) - (y**2)*(z**3) \n\n\ndef plot_implicit(fn, fig, sub_plot, color, resolution, slices, elevation, azimuth, bbox=(-1.5, 1.5)):\n ''' create a plot of an implicit function\n fn ...implicit function (plot where fn==0)\n bbox ..the x,y,and z limits of plotted interval'''\n xmin, xmax, ymin, ymax, zmin, zmax = bbox*3\n #fig = plt.figure()\n ax = fig.add_subplot(sub_plot, projection='3d')\n A = np.linspace(xmin, xmax, resolution) # resolution of the contour\n B = np.linspace(xmin, xmax, slices) # number of slices\n A1, A2 = np.meshgrid(A, A) # grid on which the contour is plotted\n\n for z in B: # plot contours in the XY plane\n X, Y = A1, A2\n Z = fn(X, Y, z)\n cset = ax.contour(X, Y, Z+z, [z], zdir='z', colors=(color,))\n # [z] defines the only level to plot\n # for this contour for this value of z\n\n for y in B: # plot contours in the XZ plane\n X, Z = A1, A2\n Y = fn(X, y, Z)\n cset = ax.contour(X, Y+y, Z, [y], zdir='y', colors=(color,))\n\n for x in B: # plot contours in the YZ plane\n Y, Z = A1, A2\n X = fn(x, Y, Z)\n cset = ax.contour(X+x, Y, Z, [x], zdir='x',colors=(color,))\n\n # must set plot limits because the contour will likely extend\n # way beyond the displayed level. 
Otherwise matplotlib extends the plot limits\n # to encompass all values in the contour.\n ax.set_zlim3d(zmin, zmax)\n ax.set_xlim3d(xmin, xmax)\n ax.set_ylim3d(ymin, ymax)\n # elevation of 60 degrees (that is, 60 degrees above the x-y plane) and an \n # azimuth of 35 degrees (that is, rotated 35 degrees counter-clockwise about the z-axis)\n ax.view_init(elevation, azimuth) \n #plt.show()\n\nfig = plt.figure(figsize=[12.8, 9.6])\n#fig, ax = plt.subplots(2,2)\nfig.suptitle('Plotting Heart Surface on a 3-Dimentional Axes', fontsize=26, color='dimgray')\nfig.text(0.35, 0.05, '#30DayChartChallenge - 2021/04/30 | uncertainties | 3-dimensional', style = 'italic', fontsize = 10, color = \"dimgray\") \nfig.text(0.35, 0.03, 'Plotting 3 variable implicit equations', style = 'italic', fontsize = 10, color = \"dimgray\") \nfig.text(0.35, 0.01, 'twitter.com/vivekparasharr | github.com/vivekparasharr | vivekparasharr.medium.com', style = 'italic', fontsize = 10, color = \"dimgray\") \n\nfig.text(0.15, 0.87, \"Taubin's heart surface equation (filled)\", style = 'normal', fontsize = 16, color = \"dimgray\") \nfig.text(0.15, 0.84, \"(x^2+(9/4)*y^2+z^2-1)^3-x^2*z^3-(9/80)*y^2*z^3\", style = 'normal', fontsize = 12, color = \"slategray\") \nplot_implicit(Taubin_heart_3d, fig, 221, 'crimson', 100, 40, 20, 70) # Taubin\nfig.text(0.55, 0.87, \"Taubin's heart surface equation (mesh)\", style = 'normal', fontsize = 16, color = \"dimgray\") \nfig.text(0.55, 0.84, \"(x^2+(9/4)*y^2+z^2-1)^3-x^2*z^3-(9/80)*y^2*z^3\", style = 'normal', fontsize = 12, color = \"slategray\") \nplot_implicit(Taubin_heart_3d, fig, 222, 'red', 50, 20, 20, 70)\nfig.text(0.15, 0.47, \"Kuska's heart surface equation\", style = 'normal', fontsize = 16, color = \"dimgray\") \nfig.text(0.15, 0.44, \"((2*(x^2)+y^2+z^2-1)^3-((x^2)*(z^3))/10-((y^2)*(z^3)))\", style = 'normal', fontsize = 12, color = \"slategray\") \nplot_implicit(Kuska_heart_3d, fig, 223, 'darkorchid', 50, 20, 20, 20)\nfig.text(0.55, 0.47, \"Nordstrand's heart surface equation\", style = 'normal', fontsize = 16, color = \"dimgray\") \nfig.text(0.55, 0.44, \"((32/9)*(x^2)+2*(y^2)+z^2-1)^3-(8/45)*(x^2)*(z^3)-(y^2)*(z^3)\", style = 'normal', fontsize = 12, color = \"slategray\") \nplot_implicit(Nordstrand_heart_3d, fig, 224, 'fuchsia', 50, 20, 20, 15)\nplt.savefig('Charts/2021-04-30.png', dpi=300, facecolor='w')\nplt.show()\n\n\n",
"\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('Data/annual-working-hours-per-worker.csv')\nbook = pd.read_csv(\"Data/harrypotter/Book 1 - The Philosopher's Stone.txt\")\n\n# read text file into a list\nwith open(\"Data/harrypotter/Book 1 - The Philosopher's Stone.txt\") as f:\n book = f.readlines()\n\n# convert list to dataframe\ndf = pd.DataFrame(book, columns=['book_lines'])\n\ndf = df.book_lines.str.replace('\\n', '')\ndf = pd.DataFrame(df, columns=['book_lines'])\n\ndf = df.book_lines.replace('', np.nan)\ndf = pd.DataFrame(df, columns=['book_lines'])\n\ndf = df.dropna()\n\ndf = df.reset_index(drop=True)\n\nimport dataprep.eda as eda\neda.plot(df,'book_lines')\neda.plot_correlation(df, 'numeric-column') \neda.plot_missing(df, 'country')\n\nimport matplotlib.pyplot as plt\n\n# wordclouds\nfrom PIL import Image\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n\n# wordcloud of artwork titles\n# combine the text from all rows of the desired column into one big text variable\ndf1 = df\ntext = str(df1.book_lines[0])\nfor i in range(1,len(df1)):\n text = text + ' ' + str(df1.book_lines[i])\n# Create stopword list: These words wont be included in the word cloud\nstopwords = set(STOPWORDS)\nstopwords.update(['page'])\n#stopwords = set(STOPWORDS)\n#stopwords.add(\"said\")\n# Create and generate a word cloud image:\nhogwarts1_mask = np.array(Image.open(\"Data/hogwarts1.jpg\"))\nwc = WordCloud(background_color=\"white\", max_words=2000, mask=hogwarts1_mask,\n stopwords=stopwords, contour_width=3, contour_color='steelblue')\n# generate word cloud\nwc.generate(text)\n# show\ncm = 1/2.54 # centimeters in inches\nplt.figure(figsize=(50*cm, 35*cm))\nplt.imshow(wc, interpolation='bilinear')\nplt.axis(\"off\")\nplt.savefig('hogwarts1_wordcloud.png')\nplt.show()\n\n"
]
| [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.linspace",
"numpy.meshgrid"
],
[
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
]
|
BakerAugust/natelib | [
"7e79c705b0593fdd9348c05f4d8a835abe4f6b30"
]
| [
"src/tests/test_neurons.py"
]
| [
"from torch.nn.functional import mse_loss\nfrom deep_learning.neurons import LogisticRegression, Perceptron, Adaline, AdalineNN\nfrom sklearn.datasets import make_blobs, load_breast_cancer\nfrom sklearn.metrics import roc_auc_score\nimport torch\n\n\ndef test_perceptron():\n # Generate some linearly seperable data\n n_features = 2\n data, target = make_blobs(n_samples=100, centers=2, n_features=2, shuffle=True)\n x = torch.tensor(data, dtype=torch.float)\n y = torch.tensor(target, dtype=torch.float)\n\n # Train perceptron\n perceptron = Perceptron(n_features)\n perceptron.train(x, y, 2)\n\n # Predict\n predictions = perceptron.forward(x)\n\n # Should be perfect fit if linearly seperable\n assert torch.allclose(predictions.to(torch.float), y)\n\n\ndef test_adaline_simple():\n \"\"\"\n Tests adaline on linearly separable data\n \"\"\"\n # Load in test data\n n_features = 2\n data, target = make_blobs(\n n_samples=100, centers=2, n_features=n_features, shuffle=True\n )\n\n x = torch.tensor(data[:, :n_features], dtype=torch.float)\n y = torch.tensor(target, dtype=torch.float)\n\n # Train adaline\n adaline = Adaline(n_features)\n adaline.train(x, y, learning_rate=0.01, epochs=20, batch_size=50, verbose=False)\n\n # Predict\n predictions = adaline.forward(x)\n\n # Asser mse loss below some reasonable threshold\n assert mse_loss(predictions, y) < 0.15\n\n\ndef adaline_complex(Model):\n \"\"\"\n Tests adaline on breast cancer dataset\n \"\"\"\n # Load in test data\n n_features = 10\n data, target = load_breast_cancer(return_X_y=True)\n\n x = torch.tensor(data[:, :n_features], dtype=torch.float)\n y = torch.tensor(target, dtype=torch.float)\n\n # Scale the features\n x_means = torch.mean(x, 0)\n x_stds = torch.std(x, 0)\n x = (x - x_means[None, :]) / x_stds[None, :]\n\n # Train adaline\n adaline = Model(n_features)\n adaline.train(x, y, learning_rate=0.1, epochs=10, batch_size=50, verbose=False)\n\n # Predict\n with torch.no_grad():\n predictions = adaline.forward(x)\n # Assert some reasonable level of performance\n assert roc_auc_score(y, predictions.detach().numpy()) > 0.95\n\n\ndef test_adaline_manual():\n \"\"\"\n Tests manual adaline implementation\n \"\"\"\n adaline_complex(Adaline)\n\n\ndef test_adaline_autograd():\n \"\"\"\n Tests adaline implementation using pytorch magic\n \"\"\"\n adaline_complex(AdalineNN)\n\n\ndef test_logistic_regression():\n \"\"\" \"\"\"\n adaline_complex(LogisticRegression)\n"
]
| [
[
"sklearn.datasets.make_blobs",
"sklearn.datasets.load_breast_cancer",
"torch.no_grad",
"torch.std",
"torch.nn.functional.mse_loss",
"torch.tensor",
"torch.mean"
]
]
|
svmillin/covid-19-canada-gov-data | [
"7b04fe719c394355104262fd113ff525edd73ad6"
]
| [
"archiver.py"
]
| [
"# archiver.py: Automated, daily backups of COVID-19 data from Canadian government sources #\n# https://github.com/jeanpaulrsoucy/covid-19-canada-gov-data #\n# Maintainer: Jean-Paul R. Soucy #\n\n# import modules\n\n## core utilities\nimport sys\nimport time\nimport os\nfrom shutil import copyfile\nfrom datetime import datetime, timedelta\nfrom array import *\nimport json\nimport re\nimport tempfile\nimport csv\nfrom zipfile import ZipFile\n\n## other utilities\nimport pandas as pd # better data processing\nimport pytz # better time zones\nfrom colorit import * # colourful printing\n\n## git\nfrom git import Repo\n\n## web-scraping\nimport requests\nfrom selenium import webdriver # requires ChromeDriver and Chromium/Chrome\nfrom selenium.webdriver.chrome.options import Options\n\n# list of environmental variables used in this script\n## GH_TOKEN: personal access token for the GitHub API (used when mode = prod)\n## GH_NAME: name to use for GitHub commits (used when mode = prod)\n## GH_MAIL: email address to use for GitHub commits (used when mode = prod)\n## GOOGLE_CHROME_BIN: path to binary in heroku-buildpack-google-chrome (used when mode = server)\n## CHROMEDRIVER_PATH: path to binary in heroku-buildpack-chromedriver (used when mode = server)\n\n# set mode (server vs. local and prod vs. test)\n## server: read environmental variables from Heroku config variables\n## local: read environmental variables from local files (in directory .gh/) or from system environmental variables\n## prod: download GitHub repo so that downloaded files can be added via commit\n## test: don't download GitHub repo, just test that files can be successfully downloaded\nif len(sys.argv) == 1 or ((len(sys.argv) == 2) and (sys.argv[1] == 'serverprod')):\n mode = 'serverprod' # server / prod\nelif len(sys.argv) == 2 and sys.argv[1] == 'localprod':\n mode = 'localprod' # local / prod\nelif len(sys.argv) == 2 and sys.argv[1] == 'servertest':\n mode = 'servertest' # server / test\nelif len(sys.argv) == 2 and sys.argv[1] == 'localtest':\n mode = 'localtest' # local / test\nelse:\n sys.exit(\"Error: Invalid arguments.\")\n\n# enable printing with colour\ninit_colorit()\n\n# define date and time in America/Toronto time zone\nt = datetime.now(pytz.timezone('America/Toronto'))\n\n# initialize success and failure counters\nsuccess = 0\nfailure = 0\n\n# access repo\nif mode == 'serverprod' or mode == 'localprod':\n ## access token\n if mode == 'serverprod':\n token = os.environ['GH_TOKEN']\n gh_name = os.environ['GH_NAME']\n gh_mail = os.environ['GH_MAIL']\n elif mode == 'localprod':\n token = open('.gh/token.txt', 'r').readline().rstrip()\n gh_name = open('.gh/gh_name.txt', 'r').readline().rstrip()\n gh_mail = open('.gh/gh_mail.txt', 'r').readline().rstrip()\n ## set repository directory\n repo_dir = 'temp_archive'\n ## shallow clone (minimize download size while still allowing a commit to be made)\n repo_remote = 'https://' + token + ':[email protected]/jeanpaulrsoucy/covid-19-canada-gov-data'\n repo = Repo.clone_from(repo_remote, repo_dir, depth=1)\n origin = repo.remote('origin')\n ### set GitHub identity\n repo.config_writer().set_value(\"user\", \"name\", gh_name).release()\n repo.config_writer().set_value(\"user\", \"email\", gh_mail).release()\n ## initialize file list for commit\n file_list = []\n ## initialize commit message for commit\n commit_message = ''\n\n# define functions\n\ndef prep_file(repo_dir, name, full_name, data = None, fpath=None, copy=False):\n \"\"\"Prepare file for commit.\n \n File is either written 
into the git repository (when copy is False) or copied from a temporary directory into the git repository (when copy is True).\n    \n    Parameters:\n    repo_dir (str): Directory containing the git repository.\n    name (str): Output file name with timestamp and no extension.\n    full_name (str): Output filename with timestamp, extension, and relative path.\n    data (bytes): The file as a bytes object (provide only when copy is False).\n    fpath (str): The path of the file in the temporary directory (provide only when copy is True).\n    copy (bool): Is the file already written and needs to be copied? (Default: False, see fpath and data)\n    \n    \"\"\"\n    global commit_message, success, failure\n    ## define path to save file\n    spath = os.path.join(repo_dir, full_name)\n    ## create directory if necessary\n    os.makedirs(os.path.dirname(spath), exist_ok=True)\n    ## copy is True: downloaded file exists as a file in a temporary directory,\n    ## need to copy it to the save path\n    if copy:\n        copyfile(fpath, spath)\n    ## copy is False: downloaded file exists as an object in Python,\n    ## need to write it to the save path\n    else:\n        with open(spath, mode='wb') as local_file:\n            local_file.write(data)\n    ## append file to the list of files in the commit\n    try:\n        file_list.append(full_name)\n        ## append name of file to the commit message\n        commit_message = commit_message + 'Success: ' + full_name + '\\n'\n        print(color('Copy successful: ' + full_name, Colors.blue))\n        success+=1\n    except Exception:\n        commit_message = commit_message + 'Failure: ' + full_name + '\\n'\n        print(background('Error copying: ' + full_name, Colors.red))\n        failure+=1\n\ndef commit_files(repo, origin, file_list, commit_message, success, failure, t):\n    \"\"\"Commit files to git repository and push to remote.\n    \n    Parameters:\n    repo: Repo object from gitpython.\n    origin: Remote object from gitpython.\n    file_list (list): List of paths to files to commit.\n    commit_message (str): Commit message.\n    success (int): The number of files successfully downloaded.\n    failure (int): The number of files unsuccessfully downloaded.\n    t (datetime): Date and time script began running (America/Toronto).\n    \n    \"\"\"\n    print(\"Committing files...\")\n    try:\n        total_files = str(success + failure)\n        commit_message = 'Successful downloads: ' + str(success) + '/' + total_files + '\\n' + 'Failed downloads: ' + str(failure) + '/' + total_files + '\\n\\n' + commit_message\n        commit_message = 'Nightly update: ' + str(t.date()) + '\\n\\n' + commit_message\n        repo.index.add(file_list)\n        repo.index.commit(commit_message)\n        origin.push()\n        print(color('Commit successful!', Colors.green))\n    except Exception:\n        print(background('Commit failed!', Colors.red))\n\ndef dl_file(url, path, file, user=False, ext='.csv', unzip=False, mb_json_to_csv=False):\n    \"\"\"Download file (generic).\n    \n    Used to download most file types (when Selenium is not required). Some files are handled with file-specific code:\n    \n    - unzip=True and file='13100781' has unique code.\n    - Each instance of mb_json_to_csv=True has unique code.\n    \n    Parameters:\n    url (str): URL to download file from.\n    path (str): Path to output file (excluding file name). Example: 'can/epidemiology-update/'\n    file (str): Output file name (excluding extension). Example: 'covid19'\n    user (bool): Should the request impersonate a normal browser? Needed to access some data. Default: False.\n    ext (str): Extension of the output file. Defaults to '.csv'.\n    unzip (bool): If True, this file requires unzipping. 
Default: False.\n    mb_json_to_csv (bool): If True, this is a Manitoba JSON file that should be converted to CSV. Default: False.\n    \n    \"\"\"\n    global commit_message, success, failure\n    \n    ## set names with timestamp and file ext\n    name = file + '_' + datetime.now(pytz.timezone('America/Toronto')).strftime('%Y-%m-%d_%H-%M')\n    full_name = os.path.join(path, name + ext) \n    \n    ## some websites will reject the request unless you look like a normal web browser\n    ## user is True provides a normal-looking user agent string to bypass this\n    if user is True:\n        headers = {\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:66.0) Gecko/20100101 Firefox/66.0\"}\n        req = requests.get(url, headers=headers)\n    else:\n        req = requests.get(url)\n    \n    ## check if request was successful\n    if not req.ok:\n        ## print failure\n        print(background('Error downloading: ' + full_name, Colors.red))\n        failure+=1\n        ## write failure to commit message if mode == prod\n        if mode == 'serverprod' or mode == 'localprod':\n            commit_message = commit_message + 'Failure: ' + full_name + '\\n'\n    ## successful request: if mode == test, print success and end\n    elif mode == 'servertest' or mode == 'localtest':\n        ## print success\n        print(color('Test download successful: ' + full_name, Colors.green))\n        success+=1\n    ## successful request: mode == prod, prepare files for commit\n    else:\n        if unzip:\n            ## unzip data\n            name = file + '_' + datetime.now(pytz.timezone('America/Toronto')).strftime('%Y-%m-%d_%H-%M')\n            full_name = os.path.join(path, name + ext)\n            tmpdir = tempfile.TemporaryDirectory()\n            zpath = os.path.join(tmpdir.name, 'zip_file.zip')\n            with open(zpath, mode='wb') as local_file:\n                local_file.write(req.content) \n            with ZipFile(zpath, 'r') as zip_file:\n                zip_file.extractall(tmpdir.name)\n            fpath = os.path.join(tmpdir.name, file + ext)\n            if file == '13100781':\n                ## read CSV (informative columns only)\n                data = pd.read_csv(fpath, usecols=['REF_DATE', 'Case identifier number', 'Case information', 'VALUE'])\n                ## save original order of column values\n                col_order = data['Case information'].unique()\n                ## pivot long to wide\n                data = data.pivot(index=['REF_DATE', 'Case identifier number'], columns='Case information', values='VALUE').reset_index()\n                ## use original column order\n                data = data[['REF_DATE', 'Case identifier number'] + col_order.tolist()]\n                ## write CSV\n                data.to_csv(fpath, index=None, quoting=csv.QUOTE_NONNUMERIC)\n            ## prepare file for commit\n            prep_file(repo_dir, name=name, full_name=full_name, fpath=fpath, copy=True) \n        elif mb_json_to_csv:\n            ## for Manitoba JSON data only: convert JSON to CSV and save as temporary file\n            name = file + '_' + datetime.now(pytz.timezone('America/Toronto')).strftime('%Y-%m-%d_%H-%M')\n            full_name = os.path.join(path, name + ext) \n            tmpdir = tempfile.TemporaryDirectory()\n            fpath = os.path.join(tmpdir.name, file + ext)\n            data = pd.json_normalize(json.loads(req.content)['features'])\n            data.columns = data.columns.str.lstrip('attributes.') # strip 'attributes.' prefix (note: lstrip removes leading characters from a set, not a literal prefix)\n            ## replace timestamps with actual dates\n            if 'Date' in data.columns:\n                data.Date = pd.to_datetime(data.Date / 1000, unit='s').dt.date\n            data = data.to_csv(fpath, index=None)\n            ## prepare file for commit\n            prep_file(repo_dir, name=name, full_name=full_name, fpath=fpath, copy=True)\n        else:\n            ## all other data: grab content from request as an object\n            data = req.content\n            ## prepare file for commit\n            prep_file(repo_dir, name=name, full_name=full_name, data=data)\n\ndef load_webdriver(mode, tmpdir):\n    \"\"\"Load Chromium headless webdriver for 
Selenium.\n \n Parameters:\n mode (str): One of serverprod, localprod, servertest, localtest. Defines how the webdriver is loaded.\n tmpdir (TemporaryDirectory): A temporary directory for saving files.\n \n \"\"\"\n options = Options()\n if mode == 'serverprod' or mode == 'servertest':\n options.binary_location = os.environ['GOOGLE_CHROME_BIN']\n options.add_argument(\"--headless\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--no-sandbox\")\n prefs = {'download.default_directory' : tmpdir.name}\n options.add_experimental_option('prefs', prefs)\n if mode == 'serverprod' or mode == 'servertest':\n return webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER_PATH'], options=options)\n else:\n return webdriver.Chrome(options=options)\n\ndef dl_ab_cases(url, path, file, ext='.csv', wait=5):\n \"\"\"Download CSV file: AB - \"COVID-19 Alberta statistics\".\n https://www.alberta.ca/stats/covid-19-alberta-statistics.htm\n \n The file requires Selenium to click a tab then click a CSV button.\n \n Parameters:\n url (str): URL to download file from.\n path (str): Path to output file (excluding file name). Example: 'can/epidemiology-update/'\n file (str): Output file name (excluding extension). Example: 'covid19'\n ext (str): Extension of the output file. Defaults to '.csv'.\n wait (int): Time in seconds that the function should wait. Should be > 0 to ensure the download is successful.\n \n \"\"\"\n global commit_message, success, failure\n \n ## set names with timestamp and file ext\n name = file + '_' + datetime.now(pytz.timezone('America/Toronto')).strftime('%Y-%m-%d_%H-%M')\n full_name = os.path.join(path, name + ext) \n \n ## create temporary directory\n tmpdir = tempfile.TemporaryDirectory()\n \n ## load webdriver\n driver = load_webdriver(mode, tmpdir)\n driver.implicitly_wait(10)\n \n ## click to correct tab then click CSV button to export\n driver.get(url)\n elements = driver.find_elements_by_tag_name(\"li\")\n for element in elements:\n if element.text == 'Data export':\n element.click()\n elements = driver.find_elements_by_tag_name(\"button\")\n for element in elements:\n if element.text == 'CSV':\n element.click()\n \n ## verify download\n fpath = os.path.join(tmpdir.name, file + ext)\n time.sleep(wait) # wait for download to finish\n if not os.path.isfile(fpath):\n ## print failure\n print(background('Error downloading: ' + full_name, Colors.red))\n failure+=1\n ## write failure to commit message if mode == prod\n if mode == 'serverprod' or mode == 'localprod':\n commit_message = commit_message + 'Failure: ' + full_name + '\\n'\n ## successful request: if mode == test, print success and end\n elif mode == 'servertest' or mode == 'localtest':\n ## print success\n print(color('Test download successful: ' + full_name, Colors.green))\n success+=1\n ## successful request: mode == prod, prepare files for commit\n else:\n ## prepare file for commit\n prep_file(repo_dir, name=name, full_name=full_name, fpath=fpath, copy=True)\n \n ## quit webdriver\n driver.quit()\n\ndef dl_ab_oneclick(url, path, file, ext='.csv', wait=5):\n \"\"\"Download CSV file: AB - \"COVID-19 relaunch status map\" or AB - \"COVID-19 school status map\"\n https://www.alberta.ca/maps/covid-19-status-map.htm\n https://www.alberta.ca/schools/covid-19-school-status-map.htm\n \n The file requires Selenium to click a CSV button.\n \n Parameters:\n url (str): URL to download file from.\n path (str): Path to output file (excluding file name). 
Example: 'can/epidemiology-update/'\n file (str): Output file name (excluding extension). Example: 'covid19'\n ext (str): Extension of the output file. Defaults to '.csv'.\n wait (int): Time in seconds that the function should wait. Should be > 0 to ensure the download is successful.\n \n \"\"\" \n global commit_message, success, failure\n \n ## set names with timestamp and file ext\n name = file + '_' + datetime.now(pytz.timezone('America/Toronto')).strftime('%Y-%m-%d_%H-%M')\n full_name = os.path.join(path, name + ext) \n \n ## create temporary directory\n tmpdir = tempfile.TemporaryDirectory()\n \n ## load webdriver\n driver = load_webdriver(mode, tmpdir)\n driver.implicitly_wait(10)\n \n ## click CSV button to export\n driver.get(url)\n elements = driver.find_elements_by_tag_name(\"button\")\n for element in elements:\n if element.text == 'CSV':\n element.click()\n \n ## verify download\n fpath = os.path.join(tmpdir.name, file + ext)\n time.sleep(wait) # wait for download to finish\n if not os.path.isfile(fpath):\n ## print failure\n print(background('Error downloading: ' + full_name, Colors.red))\n failure+=1\n ## write failure to commit message if mode == prod\n if mode == 'serverprod' or mode == 'localprod':\n commit_message = commit_message + 'Failure: ' + full_name + '\\n'\n ## successful request: if mode == test, print success and end\n elif mode == 'servertest' or mode == 'localtest':\n ## print success\n print(color('Test download successful: ' + full_name, Colors.green))\n success+=1\n ## successful request: mode == prod, prepare files for commit\n else:\n ## prepare file for commit\n prep_file(repo_dir, name=name, full_name=full_name, fpath=fpath, copy=True)\n \n ## quit webdriver\n driver.quit()\n\ndef html_page(url, path, file, ext='.html', js=False, wait=None):\n \"\"\"Save HTML of a webpage.\n \n Parameters:\n url (str): URL to screenshot.\n path (str): Path to output file (excluding file name). Example: 'can/epidemiology-update/'\n file (str): Output file name (excluding extension). Example: 'covid19'\n ext (str): Extension of the output file. Defaults to '.html'.\n js (bool): Is the HTML source rendered by JavaScript?\n wait (int): Used only if js = True. Time in seconds that the function should wait for the page to render. 
If the time is too short, the source code may not be captured.\n \n \"\"\"\n global commit_message, success, failure\n \n ## set names with timestamp and file ext\n name = file + '_' + datetime.now(pytz.timezone('America/Toronto')).strftime('%Y-%m-%d_%H-%M')\n full_name = os.path.join(path, name + ext) \n \n ## create temporary directory\n tmpdir = tempfile.TemporaryDirectory()\n \n ## load webdriver\n driver = load_webdriver(mode, tmpdir)\n \n ## load page\n driver.get(url)\n \n ## save HTML of webpage\n fpath = os.path.join(tmpdir.name, file + ext)\n if js:\n time.sleep(wait)\n with open(fpath, 'w') as local_file:\n local_file.write(driver.find_element_by_tag_name('html').get_attribute('innerHTML'))\n else:\n with open(fpath, 'w') as local_file:\n local_file.write(driver.page_source)\n \n ## verify download\n if not os.path.isfile(fpath):\n ## print failure\n print(background('Error downloading: ' + full_name, Colors.red))\n failure+=1\n ## write failure to commit message if mode == prod\n if mode == 'serverprod' or mode == 'localprod':\n commit_message = commit_message + 'Failure: ' + full_name + '\\n'\n ## successful request: if mode == test, print success and end\n elif mode == 'servertest' or mode == 'localtest':\n ## print success\n print(color('Test download successful: ' + full_name, Colors.green))\n success+=1\n ## successful request: mode == prod, prepare files for commit\n else:\n ## prepare file for commit\n prep_file(repo_dir, name=name, full_name=full_name, fpath=fpath, copy=True)\n \n ## quit webdriver\n driver.quit() \n\ndef ss_page(url, path, file, ext='.png', wait=5, width=None, height=None):\n \"\"\"Take a screenshot of a webpage.\n \n By default, Selenium attempts to capture the entire page.\n \n Parameters:\n url (str): URL to screenshot.\n path (str): Path to output file (excluding file name). Example: 'can/epidemiology-update/'\n file (str): Output file name (excluding extension). Example: 'covid19'\n ext (str): Extension of the output file. Defaults to '.png'.\n wait (int): Time in seconds that the function should wait. Should be > 0 to ensure the entire page is captured.\n width (int): Width of the output screenshot. Default: None. If not set, the function attempts to detect the maximum width.\n height (int): Height of the output screenshot. Default: None. 
If not set, the function attempts to detect the maximum height.\n \n \"\"\"\n global commit_message, success, failure\n \n ## set names with timestamp and file ext\n name = file + '_' + datetime.now(pytz.timezone('America/Toronto')).strftime('%Y-%m-%d_%H-%M')\n full_name = os.path.join(path, name + ext) \n \n ## create temporary directory\n tmpdir = tempfile.TemporaryDirectory()\n \n ## load webdriver\n driver = load_webdriver(mode, tmpdir)\n \n ## load page and wait\n driver.get(url)\n time.sleep(wait) # wait for page to load \n \n ## take screenshot\n fpath = os.path.join(tmpdir.name, file + ext)\n \n ## get total width of the page if it is not set by the user\n if width is None:\n width = driver.execute_script('return document.body.parentNode.scrollWidth')\n ## get total height of the page if it is not set by the user\n if height is None:\n height = driver.execute_script('return document.body.parentNode.scrollHeight')\n ## set window size\n driver.set_window_size(width, height)\n ## take screenshot (and don't stop the script if it fails)\n try:\n driver.find_element_by_tag_name('body').screenshot(fpath) # remove scrollbar\n \n ## verify screenshot\n if not os.path.isfile(fpath):\n ## print failure\n print(background('Error downloading: ' + full_name, Colors.red))\n failure+=1\n ## write failure to commit message if mode == prod\n if mode == 'serverprod' or mode == 'localprod':\n commit_message = commit_message + 'Failure: ' + full_name + '\\n'\n elif mode == 'servertest' or mode == 'localtest':\n ## print success\n print(color('Test download successful: ' + full_name, Colors.green))\n success+=1\n else:\n ## prepare file for commit\n prep_file(repo_dir, name=name, full_name=full_name, fpath=fpath, copy=True) \n except Exception as e:\n ## print exception\n print(e)\n ## print failure\n print(background('Error downloading: ' + full_name, Colors.red))\n failure+=1\n ## write failure to commit message if mode == prod\n if mode == 'serverprod' or mode == 'localprod':\n commit_message = commit_message + 'Failure: ' + full_name + '\\n'\n \n ## quit webdriver\n driver.quit()\n\n# AB - COVID-19 Alberta statistics\ndl_ab_cases('https://www.alberta.ca/stats/covid-19-alberta-statistics.htm',\n 'ab/cases/',\n 'covid19dataexport')\n\n# AB - COVID-19 relaunch status map\ndl_ab_oneclick('https://www.alberta.ca/maps/covid-19-status-map.htm',\n 'ab/active-cases-by-region/',\n 'covid19dataexport-relaunch')\n\n# AB - COVID-19 school status map\ndl_ab_oneclick('https://www.alberta.ca/schools/covid-19-school-status-map.htm',\n 'ab/school-status-by-region/',\n 'covid19dataexport-schools')\n\n# AB - COVID-19 in Alberta: Current cases by local geographic area (Edmonton)\ndl_file('https://data.edmonton.ca/api/views/ix8f-s9xp/rows.csv?accessType=DOWNLOAD',\n 'ab/edmonton-cases-by-area/',\n 'COVID-19_in_Alberta__Current_cases_by_local_geographic_area')\n\n# BC - BC COVID-19 Data (Case data)\ndl_file('http://www.bccdc.ca/Health-Info-Site/Documents/BCCDC_COVID19_Dashboard_Case_Details.csv',\n 'bc/case-data/',\n 'BCCDC_COVID19_Dashboard_Case_Details')\n\n# BC - BC COVID-19 Data (Laboratory data)\ndl_file('http://www.bccdc.ca/Health-Info-Site/Documents/BCCDC_COVID19_Dashboard_Lab_Information.csv',\n 'bc/laboratory-data/',\n 'BCCDC_COVID19_Dashboard_Lab_Information')\n\n# BC - Public exposures (webpage)\nss_page('http://www.bccdc.ca/health-info/diseases-conditions/covid-19/public-exposures',\n 'bc/public-exposures-webpage/',\n 'public-exposures-screenshot',\n width=1920,\n height=5500) # set height otherwise 
truncated\n\n# BC - Public exposures: flights\ndl_file('http://www.bccdc.ca/Health-Info-Site/Documents/public-exposures-flights-tables-Current.pdf',\n 'bc/public-exposures-flights/',\n 'public-exposures-flights-tables-Current',\n ext = '.pdf')\n\n# BC - Public exposures by setting and regional health authority (webpage)\nbc_exposures = [\n ['https://www.fraserhealth.ca/covid19exposure', 'bc/regional-exposure-events-fraser-webpage', 'regional-exposure-events-fraser-webpage'],\n ['https://news.interiorhealth.ca/news/public-exposures/', 'bc/regional-exposure-events-interior-webpage', 'regional-exposure-events-interior-webpage'],\n ['https://www.islandhealth.ca/learn-about-health/covid-19/outbreaks-and-exposures', 'bc/regional-exposure-events-island-webpage', 'regional-exposure-events-island-webpage'],\n ['https://www.northernhealth.ca/health-topics/public-exposures-and-outbreaks#covid-19-public-exposures#covid-19-communityfacility-outbreaks#non-covid-19-communityfacility-outbreaks', 'bc/regional-exposure-events-northern-webpage', 'regional-exposure-events-northern-webpage'],\n ['http://www.vch.ca/covid-19/public-exposures', 'bc/regional-exposure-events-vancouver-coastal-webpage', 'regional-exposure-events-vancouver-coastal-webpage'],\n ['https://www.fraserhealth.ca/schoolexposures', 'bc/school-exposures-fraser-webpage', 'school-exposures-fraser-webpage'],\n ['https://news.interiorhealth.ca/news/school-exposures/', 'bc/school-exposures-interior-webpage', 'school-exposures-interior-webpage'],\n ['https://www.islandhealth.ca/learn-about-health/covid-19/exposures-schools', 'bc/school-exposures-island-webpage', 'school-exposures-island-webpage'],\n ['https://www.northernhealth.ca/health-topics/public-exposures-and-outbreaks#covid-19-school-exposures', 'bc/school-exposures-northern-webpage', 'school-exposures-northern-webpage'],\n ['http://www.vch.ca/covid-19/school-outbreaks', 'bc/school-exposures-vancouver-coastal-webpage', 'school-exposures-vancouver-coastal-webpage']\n]\nfor i in range(0, len(bc_exposures)):\n html_page(bc_exposures[i][0],\n bc_exposures[i][1],\n bc_exposures[i][2])\n\n# CAN - COVID-19 Situational Awareness Dashboard (Epidemiology update)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19.csv',\n 'can/epidemiology-update/',\n 'covid19')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Epidemiology update - as above but different date format)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-download.csv',\n 'can/epidemiology-update-2/',\n 'covid19-download')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Epidemiology summary statements)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-statements.csv',\n 'can/epidemiology-summary-statements/',\n 'covid19-epiSummary-statements')\n\n# CAN - COVID-19 Situational Awareness Dashboard (NML summary)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-NML.csv',\n 'can/nml-summary/',\n 'covid19-epiSummary-NML')\n\n# CAN - COVID-19 Situational Awareness Dashboard (NML weekly testing)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/NML_weekly_testing.csv',\n 'can/nml-weekly-testing/',\n 'NML_weekly_testing')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Number of cases with detailed case report data)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-nTotal.csv',\n 'can/detailed-case-report-n/',\n 'covid19-nTotal')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Cases and deaths by 
health region time series)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/file_out_v5.csv',\n 'can/cases-and-deaths-by-hr-time-series/',\n 'file_out_v5')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Health region UID table)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-healthregions-hruid.csv',\n 'can/health-region-uid/',\n 'covid19-healthregions-hruid')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Cases by exposure setting time series)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-casesovertime.csv',\n 'can/cases-by-exposure-time-series/',\n 'covid19-epiSummary-casesovertime')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Epidemic curve by date of illness onset by age group)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-epiCurveByAge.csv',\n 'can/epidemic-curve-by-age/',\n 'covid19-epiSummary-epiCurveByAge')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Severity by age group and sex)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-severityUpdate.csv',\n 'can/severity-by-age-and-sex/',\n 'covid19-epiSummary-severityUpdate')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Cases by severity)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-severity.csv',\n 'can/cases-by-severity/',\n 'covid19-epiSummary-severity')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Cases by age group and sex)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-agegroups2.csv',\n 'can/cases-by-age-and-sex/',\n 'covid19-epiSummary-agegroups2')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Cases by probable exposure setting)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-probableexposure2.csv',\n 'can/cases-by-probable-exposure-setting/',\n 'covid19-epiSummary-probableexposure2')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Symptoms summary)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-symptoms.csv',\n 'can/symptoms-summary/',\n 'covid19-epiSummary-symptoms')\n\n# CAN - COVID-19 Situational Awareness Dashboard (Situational awareness dashboard update time)\ndl_file('https://health-infobase.canada.ca/src/data/covidLive/covid19-updateTime.csv',\n 'can/situational-awareness-dashboard-update-time/',\n 'covid19-updateTime')\n\n# CAN - Detailed preliminary information on cases of COVID-19: 6 Dimensions (Aggregated data)\ndl_file('https://www150.statcan.gc.ca/n1/tbl/csv/13100774-eng.zip',\n 'can/detailed-preliminary-case-info-aggregated-6-dimensions/',\n '13100774',\n unzip=True)\n\n# CAN - Detailed preliminary information on cases of COVID-19: 4 Dimensions (Aggregated data)\ndl_file('https://www150.statcan.gc.ca/n1/tbl/csv/13100775-eng.zip',\n 'can/detailed-preliminary-case-info-aggregated-4-dimensions/',\n '13100775',\n unzip=True)\n\n# CAN - Detailed preliminary information on confirmed cases of COVID-19 (Revised)\ndl_file('https://www150.statcan.gc.ca/n1/tbl/csv/13100781-eng.zip',\n 'can/detailed-preliminary-confirmed-case-info-revised/',\n '13100781',\n unzip=True)\n\n# MB - COVID-19 data by RHA and district\ndl_file('https://services.arcgis.com/mMUesHYPkXjaFGfS/arcgis/rest/services/mb_covid_cases_summary_stats_geography/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*',\n 'mb/covid-data-by-rha-and-district/',\n 
'covid-data-by-rha-and-district',\n mb_json_to_csv=True)\n\n# MB - Cases by demographics and RHA\ndl_file('https://services.arcgis.com/mMUesHYPkXjaFGfS/arcgis/rest/services/mb_covid_cases_by_demographics_rha_all/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&groupByFieldsForStatistics=Age_Group%2CGender&orderByFields=Age_Group%20desc',\n 'mb/cases-demographics-by-rha/',\n 'cases-demographics-by-rha',\n mb_json_to_csv=True)\n\n# MB - Cases by status and RHA\ndl_file('https://services.arcgis.com/mMUesHYPkXjaFGfS/arcgis/rest/services/mb_covid_cases_by_status_daily_rha/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&groupByFieldsForStatistics=Date%2CRHA',\n 'mb/cases-by-status-and-rha/',\n 'cases-by-status-and-rha',\n mb_json_to_csv=True)\n\n# MB - Manitoba five-day test positivity rate\ndl_file('https://services.arcgis.com/mMUesHYPkXjaFGfS/arcgis/rest/services/mb_covid_5_day_positivity_rate/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Date%20asc',\n 'mb/five-day-test-positivity/',\n 'five-day-test-positivity',\n mb_json_to_csv=True)\n\n# NS - Coronavirus (COVID-19): case data\ndl_file('https://novascotia.ca/coronavirus/data/ns-covid19-data.csv',\n 'ns/case-data/',\n 'ns-covid19-data')\n\n# ON - How Ontario is responding to COVID-19 (webpage)\nhtml_page('https://www.ontario.ca/page/how-ontario-is-responding-covid-19',\n 'on/ontario-webpage/',\n 'ontario-webpage',\n js=True,\n wait=10)\n\n# ON - Confirmed positive cases of COVID19 in Ontario\ndl_file('https://data.ontario.ca/dataset/f4112442-bdc8-45d2-be3c-12efae72fb27/resource/455fd63b-603d-4608-8216-7d8647f43350/download/conposcovidloc.csv',\n 'on/confirmed-positive-cases/',\n 'conposcovidloc')\n\n# ON - Status of COVID-19 cases in Ontario\ndl_file('https://data.ontario.ca/dataset/f4f86e54-872d-43f8-8a86-3892fd3cb5e6/resource/ed270bb8-340b-41f9-a7c6-e8ef587e6d11/download/covidtesting.csv',\n 'on/status-of-cases/',\n 'covidtesting')\n\n# ON - Testing of inmates in provincial correctional institutions\ndl_file('https://data.ontario.ca/dataset/c4022f0f-6f3d-4e16-bd28-5312333a4bac/resource/d0d6ccc7-fc60-4a18-ac96-7f9493e9f10e/download/inmatetesting.csv',\n 'on/correctional-institutions-inmates-testing/',\n 'inmatetesting')\n\n# ON - Status of cases in provincial correctional institutions\ndl_file('https://data.ontario.ca/dataset/ecb75ea0-8b72-4f46-a14a-9bd54841d6ab/resource/1f95eda9-53b5-448e-abe0-afc0b71581ed/download/correctionsinmatecases.csv',\n 'on/correctional-institutions-status/',\n 'correctionsinmatecases')\n\n# ON - Long term care homes: Summary data\ndl_file('https://data.ontario.ca/dataset/42df36df-04a0-43a9-8ad4-fac5e0e22244/resource/0f8b343e-fc28-4ca5-9aab-c3a1d2c919f1/download/ltccovidsummary.csv',\n 'on/long-term-care-home-summary/',\n 'ltccovidsummary')\n\n# ON - Long term care homes: Active outbreaks\ndl_file('https://data.ontario.ca/dataset/42df36df-04a0-43a9-8ad4-fac5e0e22244/resource/4b64488a-0523-4ebb-811a-fac2f07e6d59/download/activeltcoutbreak.csv',\n 'on/long-term-care-home-active/',\n 'activeltcoutbreak')\n\n# ON - Long term care homes: Resolved outbreaks\ndl_file('https://data.ontario.ca/dataset/42df36df-04a0-43a9-8ad4-fac5e0e22244/resource/0cf2f01e-d4e1-48ed-8027-2133d059ec8b/download/resolvedltc.csv',\n 'on/long-term-care-home-resolved/',\n 'resolvedltc')\n\n# ON - Cases in schools and childcare centres 
(webpage)\nhtml_page('https://www.ontario.ca/page/covid-19-cases-schools-and-child-care-centres',\n 'on/cases-schools-and-child-care-centres-webpage/',\n 'cases-schools-and-child-care-centres-webpage',\n js=True,\n wait=10)\n\n# ON - Schools: Summary of cases in schools\ndl_file('https://data.ontario.ca/dataset/b1fef838-8784-4338-8ef9-ae7cfd405b41/resource/7fbdbb48-d074-45d9-93cb-f7de58950418/download/schoolcovidsummary.csv',\n 'on/schools-summary/',\n 'schoolcovidsummary')\n\n# ON - Schools: Schools with active COVID-19 cases\ndl_file('https://data.ontario.ca/dataset/b1fef838-8784-4338-8ef9-ae7cfd405b41/resource/8b6d22e2-7065-4b0f-966f-02640be366f2/download/schoolsactivecovid.csv',\n 'on/schools-active/',\n 'schoolsactivecovid')\n\n# ON - Schools: Cases in school board partners\ndl_file('https://data.ontario.ca/dataset/b1fef838-8784-4338-8ef9-ae7cfd405b41/resource/245479eb-db0a-4ec4-97af-459d61da0801/download/schoolpartnersactivecovid.csv',\n 'on/school-board-partners/',\n 'schoolpartnersactivecovid')\n\n# ON - Licensed child care settings: Summary of cases in licensed child care settings\ndl_file('https://data.ontario.ca/dataset/5bf54477-6147-413f-bab0-312f06fcb388/resource/74f9ac9f-7ca8-4860-b2c3-189a2c25e30c/download/lccovidsummary.csv',\n 'on/licensed-child-care-settings-summary/',\n 'lccovidsummary')\n\n# ON - Licensed child care settings: Licensed child care centres and agencies with active COVID-19 cases\ndl_file('https://data.ontario.ca/dataset/5bf54477-6147-413f-bab0-312f06fcb388/resource/eee282d3-01e6-43ac-9159-4ba694757aea/download/lccactivecovid.csv',\n 'on/licensed-child-care-settings-active/',\n 'lccactivecovid')\n\n# ON - City of Toronto Daily Status of COVID-19 Cases\ndl_file('https://docs.google.com/spreadsheets/d/11KF1DuN5tntugNc10ogQDzFnW05ruzLH/export?format=xlsx&id=11KF1DuN5tntugNc10ogQDzFnW05ruzLH',\n 'on/toronto-daily-status/',\n 'CityofToronto_COVID-19_Daily_Public_Reporting',\n ext='.xlsx')\n\n# ON - City of Toronto COVID-19 Summary\ndl_file('https://docs.google.com/spreadsheets/d/1euhrML0rkV_hHF1thiA0G5vSSeZCqxHY/export?format=xlsx&id=1euhrML0rkV_hHF1thiA0G5vSSeZCqxHY',\n 'on/toronto-covid-summary/',\n 'CityofToronto_COVID-19_Data',\n ext='.xlsx')\n\n# ON - City of Toronto COVID-19 Neighbourhood Case Data\ndl_file('https://docs.google.com/spreadsheets/d/1jzH64LvFQ-UsDibXO0MOtvjbL2CvnV3N/export?format=xlsx&id=1jzH64LvFQ-UsDibXO0MOtvjbL2CvnV3N',\n 'on/toronto-neighbourhood-data/',\n 'CityofToronto_COVID-19_NeighbourhoodData',\n ext='.xlsx')\n\n# ON - City of Toronto COVID-19 Neighbourhood Testing Data\ndl_file('https://docs.google.com/spreadsheets/d/1xI6ckKQIOt_RNCuI0HXs7WJsgqFP015c/export?format=xlsx&id=1xI6ckKQIOt_RNCuI0HXs7WJsgqFP015c',\n 'on/toronto-neighbourhood-test-data/',\n 'CityofToronto_COVID-19_Testing',\n ext='.xlsx')\n\n# ON - City of Toronto COVID-19 Monitoring Dashboard\ndl_file('https://docs.google.com/spreadsheets/d/1-7j48S_KQY-I-4Qu3N3lsEOALXON2StG/export?format=xlsx&id=1-7j48S_KQY-I-4Qu3N3lsEOALXON2StG',\n 'on/toronto-monitoring-dashboard/',\n 'CityofToronto_COVID-19_RecoveryData',\n ext='.xlsx')\n\n# ON - COVID-19 Cases in Toronto\n# run only on Wednesdays\nif t.weekday() == 2:\n dl_file('https://ckan0.cf.opendata.inter.prod-toronto.ca/download_resource/e5bf35bc-e681-43da-b2ce-0242d00922ad?format=csv',\n 'on/toronto-cases/',\n 'COVID19_cases')\n\n# ON - University of Toronto COVID-19 tracking (webpage)\nhtml_page('https://www.utoronto.ca/utogether2020/covid19-dashboard',\n 'on/u-of-t-covid-tracking-webpage/',\n 
'u-of-t-covid-tracking-webpage')\n\n# ON - Ottawa Demographics and Source of Infection for Cases, Deaths, and Hospitalizations\ndl_file('https://www.arcgis.com/sharing/rest/content/items/6bfe7832017546e5b30c5cc6a201091b/data',\n 'on/ottawa-cases-deaths-hosp-demographics-source-of-infection/',\n 'COVID-19_Cases_and_Deaths_Ottawa_EN')\n\n# ON - Ottawa Outbreaks in Healthcare Institutions, Childcare, Summer Camps, and Educational Establishments\ndl_file('https://www.arcgis.com/sharing/rest/content/items/5b24f70482fe4cf1824331d89483d3d3/data',\n 'on/ottawa-outbreaks-healthcare-childcare-camps-schools/',\n 'COVID-19_Institutional_Outbreaks')\n\n# ON - Ottawa Community Outbreaks\ndl_file('https://opendata.arcgis.com/datasets/0df365456c254fbc942fe3d85c3dbf83_0.csv',\n 'on/ottawa-community-outbreaks/',\n 'COVID-19_Community_Outbreaks_in_Ottawa')\n\n# ON - Ottawa Weekly Rates\ndl_file('https://www.arcgis.com/sharing/rest/content/items/734a327141b14a55b666953c9141abf3/data',\n 'on/ottawa-weekly-rates/',\n 'COVID-19_Weekly_Cases_and_Rates_by_Age_in_Ottawa_EN')\n\n# ON - Ottawa Estimated Reproduction Number in Ottawa\ndl_file('https://www.arcgis.com/sharing/rest/content/items/d010a848b6e54f4990d60a202f2f2f99/data',\n 'on/ottawa-estimated-rt/',\n 'EN_-_Covid-19_Reproduction_Number,_R(t)')\n\n# ON - Ottawa Testing - Ottawa Residents\ndl_file('https://www.arcgis.com/sharing/rest/content/items/26c902bf1da44d3d90b099392b544b81/data',\n 'on/ottawa-residents-tested/',\n 'COVID-19_Ottawa_Residents_Tested_EN')\n\n# QC - Data on COVID-19 in Quebec (webpage EN)\nhtml_page('https://www.quebec.ca/en/health/health-issues/a-z/2019-coronavirus/situation-coronavirus-in-quebec/',\n 'qc/qc-webpage-en/',\n 'qc-webpage-en')\n\n# QC - Données sur la COVID-19 au Québec (webpage FR)\nhtml_page('https://www.quebec.ca/sante/problemes-de-sante/a-z/coronavirus-2019/situation-coronavirus-quebec/',\n 'qc/qc-webpage-fr/',\n 'qc-webpage-fr')\n\n# QC - COVID-19 time series by region and demographics\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/covid19-hist.csv',\n 'qc/covid-time-series-by-region-and-demographics/',\n 'covid19-hist')\n\n# QC - COVID-19 data (charts - summary, time series, and hospitalization by age)\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/manual-data.csv',\n 'qc/covid-data-charts-summary-time-series-hosp-by-age/',\n 'manual-data')\n\n# QC - Summary by region\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/regions.csv',\n 'qc/summary-by-region/',\n 'regions')\n\n# QC - Deaths by RSS (health region) and living environment\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/tableau-rpa-new.csv',\n 'qc/deaths-by-rss-and-living-environment/',\n 'tableau-rpa-new')\n\n# QC - Cases by RSS (health region) and RLS (local service network)\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/tableau-rls-new.csv',\n 'qc/cases-by-rss-and-rls/',\n 'tableau-rls-new')\n\n# QC - Comparisons (provinces)\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/comparaisons_prov.csv',\n 'qc/comparisons-provinces/',\n 'comparaisons_prov')\n\n# QC - Comparisons (countries)\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/comparaisons_pays.csv',\n 'qc/comparisons-countries/',\n 'comparaisons_pays')\n\n# QC - COVID-19 data by age group and sex\ndl_file('https://www.inspq.qc.ca/sites/default/files/covid/donnees/PL_AGE_SEXE.csv',\n 'qc/covid-data-by-age-and-sex/',\n 'PL_AGE_SEXE')\n\n# QC - Deaths time series by 
living environment\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/decesquotidien.csv',\n 'qc/deaths-time-series-by-living-environment/',\n 'decesquotidien')\n\n# QC - Recent daily cases by region\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/cas-region.csv',\n 'qc/recent-daily-cases-by-region/',\n 'cas-region')\n\n# QC - Cumulative deaths by region\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/deces-region.csv',\n 'qc/cumulative-deaths-by-region/',\n 'deces-region')\n\n# QC - Situation in Quebec\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/situation-au-quebec.csv',\n 'qc/situation-in-quebec/',\n 'situation-au-quebec')\n\n# QC - Cases percentage by age group\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/pourcentage-cas-age.csv',\n 'qc/cases-percentage-by-age-group/',\n 'pourcentage-cas-age')\n\n# QC - Deaths percentage by age group\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/pourcentage-deces-age.csv',\n 'qc/deaths-percentage-by-age-group/',\n 'pourcentage-deces-age')\n\n# QC - COVID-19 daily data 7 days\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/synthese-7jours.csv',\n 'qc/covid-data-daily-7-days/',\n 'synthese-7jours')\n\n# QC - Cases by region 7 days\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/csv/apercu/cas-region-7jours.csv',\n 'qc/cases-by-region-7-days/',\n 'cas-region-7jours')\n\n# QC - Status report on confirmed cases and deaths by RPA\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/etat_situation_rpa.pdf',\n 'qc/status-report-cases-and-deaths-by-rpa/',\n 'etat_situation_rpa',\n ext = '.pdf')\n\n# QC - Status report on confirmed cases and deaths by CHSLD\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/sante/documents/Problemes_de_sante/covid-19/etat_situation_chsld.pdf',\n 'qc/status-report-cases-and-deaths-by-chsld/',\n 'etat_situation_chsld',\n ext = '.pdf')\n\n# QC - Highlights - public and private school system\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/adm/min/education/publications-adm/covid-19/reseauScolaire_faitsSaillants.pdf',\n 'qc/schools-highlights/',\n 'reseauScolaire_faitsSaillants',\n ext = '.pdf')\n\n# QC - List of schools - public and private school system\ndl_file('https://cdn-contenu.quebec.ca/cdn-contenu/adm/min/education/publications-adm/covid-19/reseauScolaire_listeEcoles.pdf',\n 'qc/schools-list-of-schools/',\n 'reseauScolaire_listeEcoles',\n ext = '.pdf')\n\n# QC - Montréal cases and deaths by CIUSSS (integrated health and social services centres)\ndl_file('https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/ciusss.csv',\n 'qc/montreal-cases-and-deaths-by-ciusss/',\n 'ciusss',\n user=True)\n\n# QC - Montréal cases by area\ndl_file('https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/municipal.csv',\n 'qc/montreal-cases-by-area/',\n 'municipal',\n user=True)\n\n# QC - Montréal cases and deaths by age group\ndl_file('https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/grage.csv',\n 'qc/montreal-cases-and-deaths-by-age-group/',\n 'grage',\n user=True)\n\n# QC - 
Montréal cases and deaths by sex\ndl_file('https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/sexe.csv',\n 'qc/montreal-cases-and-deaths-by-sex/',\n 'sexe',\n user=True)\n\n# QC - Montréal epidemic curve\ndl_file('https://santemontreal.qc.ca/fileadmin/fichiers/Campagnes/coronavirus/situation-montreal/courbe.csv',\n 'qc/montreal-epidemic-curve/',\n 'courbe',\n user=True)\n\n# SK - Saskatchewan's Dashboard - Total Cases\nsk_url_cases = 'https://dashboard.saskatchewan.ca' + re.search('(?<=href=\\\").*(?=\\\">CSV)', requests.get('https://dashboard.saskatchewan.ca/health-wellness/covid-19/cases').text).group(0)\ndl_file(sk_url_cases,\n 'sk/cases-by-region/',\n 'cases')\n\n# SK - Saskatchewan's Dashboard - Total Cases (webpage)\nhtml_page('https://dashboard.saskatchewan.ca/health-wellness/covid-19/cases',\n 'sk/cases-by-region-webpage/',\n 'cases-webpage')\n\n# SK - Saskatchewan's Dashboard - Total Tests\nsk_url_tests = 'https://dashboard.saskatchewan.ca' + re.search('(?<=href=\\\").*(?=\\\">CSV)', requests.get('https://dashboard.saskatchewan.ca/health-wellness/covid-19/tests').text).group(0)\ndl_file(sk_url_tests,\n 'sk/tests-by-region/',\n 'tests')\n\n# SK - Saskatchewan's Dashboard - Total Tests (webpage)\nhtml_page('https://dashboard.saskatchewan.ca/health-wellness/covid-19/tests',\n 'sk/tests-by-region-webpage/',\n 'tests-webpage')\n\n# Other: CAN - Unofficial COVID Alert Dashboard\ndl_file('https://kappel.cs.uwaterloo.ca/uhengart/diagnosis-keys/estimated_infections_per_day.txt',\n 'other/can/unofficial-covid-alert-dashboard/',\n 'estimated_infections_per_day',\n ext = '.txt')\n\n# Other: QC - Covid Écoles Québec (Excel)\ndl_file('https://drive.google.com/uc?export=download&id=1xOl0uhyx9IuHZfJuRH-OR7BcGFuWYUex',\n 'other/qc/covid-ecoles-quebec-school-list/',\n 'COVIDECOLESQUEBEC',\n ext = '.xlsx')\n\n# Other: CAN - Canada COVID-19 School Case Tracker (KML)\ndl_file('https://www.google.com/maps/d/u/0/kml?mid=1blA_H3Hv5S9Ii_vyudgDk-j6SfJQil9S&forcekml=1',\n 'other/can/canada-covid-19-school-case-tracker/',\n 'Canada_COVID-19_School_Report_Tracker',\n ext = '.kml')\n\n# Summarize successes and failures\ntotal_files = str(success + failure)\nprint(background('Successful downloads: ' + str(success) + '/' + total_files, Colors.blue))\nprint(background('Failed downloads: ' + str(failure) + '/' + total_files, Colors.red))\n\n# Commit files\nif mode == 'serverprod' or mode == 'localprod':\n commit_files(repo, origin, file_list, commit_message, success, failure, t)"
]
| [
[
"pandas.to_datetime",
"pandas.read_csv"
]
]
|
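The API list for this record names only `pandas.to_datetime` and `pandas.read_csv`. As a minimal sketch of how those two calls typically pair up when parsing a downloaded CSV such as the Ontario files above — the inline CSV text and column names are placeholders, not taken from the archived script:

```python
import pandas as pd
from io import StringIO

# Hypothetical CSV payload standing in for one of the record's downloads.
csv_text = "date,cases\n2021-01-01,10\n2021-01-02,12\n"

df = pd.read_csv(StringIO(csv_text))      # parse CSV text into a DataFrame
df["date"] = pd.to_datetime(df["date"])   # coerce the date column to datetime64[ns]
print(df.dtypes)
```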
panvaf/retina_std | [
"d2f86de9419f06fbf908e344f0f97cc6d0f7271b"
]
| [
"classes.py"
]
| [
"\"\"\"\nIncludes classes for all basic elements of the networks.\n\"\"\"\n\n# imports\nimport numpy as np\nfrom scipy import signal\nimport copy as cp\n\n# Global variables\nimage_size = np.array([250, 250]) # in um\npixel = 5 # in um\nimg_size = (image_size/pixel).astype(int) # number\ntemporal_res = 2 # in msec\nt_time = 100 # in sec\n\n###############################################################################\n\n# Generic parent class for anything that is common across the building blocks \n# (elements) of the circuits\n\nclass Element:\n\n # Every element contains the name of its inputs and the corresponding weights\n def __init__(self, inputs, weights, center):\n # Inputs: array of input elements to this element\n # Weights: corresponding weights from these elements\n # Center: tuple indicating position in 2-D neural sheet. It is also center\n # of the receptive field for bipolar cells\n \n self.n_in = len(inputs)\n self.w = weights\n # Inputs is a list of objects. I want to compute their\n # output and use it is the output method\n self.inputs = inputs\n # Needed to compute which elements to connect to this element\n self.center = center\n \n # preallocate matrix of activity for this cell\n self.output = np.nan\n # Can be used to see if the cell has computed its output, to avoid\n # uneccesary computations. Can also be replaced by a time marker if the\n # network cannot be computed all at once\n \n \n############################################################################### \n \n \nclass BipolarCell(Element):\n \n def __init__(self, inputs, weights, center, attributes):\n # Attributes: contains a list of attributes needed to define the cell. Can contain:\n # type: can be \"On\" or \"Off\" \n # separable: determines whether spatiotemporal field is separable, boolean\n # spatiotemporal: contains given spatiotemporal receptive field\n # spatial: contains spatial receptive field or function name to produce it\n # width: width of the spatial receptive field\n # temporal: contains temporal receptive field or function name to produce it\n # duration: \"time constant\" of the temporal receptive field\n # activation: the nonlinear activation function of the output\n # threshold: threshold for the activation function\n # Can also contain other parameters required to define the receptive\n # field, look at respective function for what their name and\n # specification should be\n \n super().__init__(inputs, weights, center)\n \n self.type = attributes[\"type\"]\n self.separable = attributes[\"separable\"]\n if self.separable:\n \n if isinstance(attributes[\"spatial\"],np.ndarray):\n self.spatial = attributes[\"spatial\"]\n else:\n self.spatial = Spatial(self.center,attributes)\n \n if isinstance(attributes[\"temporal\"],np.ndarray):\n self.temporal = attributes[\"temporal\"]\n else:\n self.temporal = Temporal(attributes)\n \n # Spatiotemporal receptive field is spatial * temporal\n temp1 = self.temporal[np.newaxis,:]; temp1 = temp1[np.newaxis,:]\n temp2 = np.expand_dims(self.spatial,2)\n self.spatiotemporal = temp2*temp1\n \n else:\n self.spatiotemporal = attributes[\"spatiotemporal\"]\n \n self.activation = attributes[\"activation\"]\n self.threshold = attributes[\"threshold\"]\n \n def out(self):\n # Since there is no recurrent connectivity involving bipolar cells,\n # we can compute all the output and then sample it from the other cells\n \n if not np.any(np.isnan(self.output)):\n pass\n else:\n # the first element of the list 'inputs' should contain the image\n temp = 
signal.fftconvolve(self.inputs[0],self.spatiotemporal,'full',axes = 2)[:,:,0:np.size(self.inputs[0],2)]\n self.output = activation(np.sum(temp,axis = (0,1)),self.activation,self.threshold)\n \n return self.output\n \n \n############################################################################### \n \n \nclass AmacrineCell(Element):\n \n def __init__(self, inputs, weights, center, attributes):\n # Attributes: contains a list of attributes needed to define the cell. Can contain:\n # temporal: contains temporal receptive fields as matrix or list\n # of function names to produce them. Each input has a different\n # corresponding receptive field\n # duration: array of \"time constants\" of temporal receptive fields\n # activation: the nonlinear activation function of the output\n # threshold: threshold for the activation function\n # Can also contain other parameters required to define the receptive\n # field, look at respective function for what their name should be\n \n super().__init__(inputs, weights, center)\n \n if isinstance(attributes[\"temporal\"],np.ndarray):\n self.temporal = attributes[\"temporal\"]\n else:\n self.temporal = Temporal_multiple(attributes,self.n_in)\n \n self.activation = attributes[\"activation\"]\n self.threshold = attributes[\"threshold\"]\n \n if \"recurrent\" in attributes:\n self.recurrent = attributes[\"recurrent\"]\n else:\n self.recurrent = np.nan\n \n def out(self):\n # Amacrine cells receive recurrent connections\n\n # assuming that inputs is a list of input objects\n if not np.any(np.isnan(self.output)):\n pass\n else:\n values = np.asarray(list(map(lambda x: x.out(),self.inputs)))\n \n for i in range(self.n_in):\n # Different temporal receptive field for each input\n values[i,:] = signal.fftconvolve(values[i,:],self.temporal[i],'full')[0:np.size(values[i,:])]\n \n # Use transpose to do multiplication with np.dot\n temp = np.dot(values.transpose(),self.w).transpose()\n \n try: \n if np.isnan(self.recurrent):\n self.output = activation(temp,self.activation,self.threshold)\n except:\n time_p = np.size(temp,0); l = np.size(self.recurrent)\n self.output = np.zeros((time_p,))\n \n for t in range(time_p):\n if t < l:\n self.output[t] = activation(temp[t],self.activation,self.threshold)\n else:\n temp[t] = temp[t] + np.dot(self.output[t-l:t],self.recurrent)\n self.output[t] = activation(temp[t],self.activation,self.threshold)\n \n return self.output\n\n\n###############################################################################\n\n\nclass GanglionCell(Element):\n \n def __init__(self, inputs, weights, center, attributes):\n # Attributes: contains a list of attributes needed to define the cell. Can contain:\n # temporal: contains temporal receptive fields as matrix or list\n # of function names to produce them. 
Each input has a different\n # corresponding receptive field\n # duration: array of \"time constants\" of temporal receptive fields\n # activation: the nonlinear activation function of the output\n # threshold: threshold for the activation function\n # recurrent: recurrent filter coefficients\n # Can also contain other parameters required to define the receptive\n # field, look at respective function for what their name should be\n \n super().__init__(inputs, weights, center)\n \n if isinstance(attributes[\"temporal\"],np.ndarray):\n self.temporal = attributes[\"temporal\"]\n else:\n self.temporal = Temporal_multiple(attributes,self.n_in)\n \n self.activation = attributes[\"activation\"]\n self.threshold = attributes[\"threshold\"]\n \n if \"recurrent\" in attributes:\n self.recurrent = attributes[\"recurrent\"]\n else:\n self.recurrent = np.nan\n \n def out(self):\n # Ganglion cells receive recurrent connections\n\n # assuming that inputs is a list of input objects\n if not np.any(np.isnan(self.output)):\n pass\n else:\n # care must be taken that the length of all arrays is equal. So do it\n # for values, after this it is ok\n values = np.asarray(list(map(lambda x: x.out(),self.inputs)))\n \n for i in range(self.n_in):\n # Different temporal receptive field for each input\n values[i,:] = signal.fftconvolve(values[i,:],self.temporal[i],'full')[0:np.size(values[i,:])]\n \n # Use transpose to do multiplication with np.dot\n temp = np.dot(values.transpose(),self.w).transpose()\n \n try: \n if np.isnan(self.recurrent):\n self.output = activation(temp,self.activation,self.threshold)\n except:\n time_p = np.size(temp); l = np.size(self.recurrent)\n self.output = np.zeros((time_p,))\n \n for t in range(time_p):\n if t < l:\n self.output[t] = activation(temp[t],self.activation,self.threshold)\n else:\n temp[t] = temp[t] + np.dot(self.output[t-l:t],self.recurrent)\n self.output[t] = activation(temp[t],self.activation,self.threshold)\n \n return self.output\n\n\n###############################################################################\n\n\nclass Delay(Element):\n \n def __init__(self, inputs, weights, center, attributes):\n \n # Attributes can contain:\n # t_delay : time delay the element introduces, in time units\n \n super().__init__(inputs, weights, center)\n # convert time to count\n self.delay = int(attributes[\"t_delay\"])\n \n def out(self):\n \n if not np.any(np.isnan(self.output)):\n pass\n else:\n self.output = np.roll(self.inputs[0].out(),self.delay)\n self.output[0:self.delay] = 0\n \n return self.output\n\n\n###############################################################################\n\n\nclass PresynapticSilencer(Element):\n \n # Used so that amacrine cells can silence bipolar cells before reaching\n # cells. Necessary component for OMS cells\n \n def __init__(self, inputs, weights, center, attributes):\n \n super().__init__(inputs, weights, center)\n \n def out(self):\n \n if not np.any(np.isnan(self.output)):\n pass\n else:\n values = np.asarray(list(map(lambda x: x.out(),self.inputs)))\n self.output = np.dot(values.transpose(),self.w).transpose()\n \n return self.output\n\n\n###############################################################################\n\n# Functions for spatial attributes\n\ndef Spatial(center,attributes):\n # Define spatial receptive fields. 
Options:\n # difference of gaussians: \"spatial\" should be \"DoG\", other parameters\n # needed in \"attributes\": \"width\", \"center\", \"on_off_ratio\"\n \n # Access global variables used throughout\n global img_size\n \n if np.array_equal(attributes[\"spatial\"],'DoG'):\n spatial = DoG(attributes[\"width\"],attributes[\"on_off_ratio\"],center,img_size)\n if np.array_equal(attributes[\"spatial\"],'Gauss'):\n spatial = Gauss(attributes[\"width\"],center,img_size)\n \n spatial = norm(spatial)\n \n return spatial\n\n\ndef DoG(sigmas,ratio,center,img_size):\n # Sigmas contain the standard deviations the positive (center) and negative\n # (surround) part. Ratio is the ratio of the peaks of the gaussians (center/surround)\n \n x = np.arange(0,img_size[0])\n y = np.arange(0,img_size[1])\n X, Y = np.meshgrid(x,y)\n \n norm_dist1 = 1/2*(((X-center[0])**2+(Y-center[1])**2)/sigmas[0]**2)\n norm_dist2 = 1/2*(((X-center[0])**2+(Y-center[1])**2)/sigmas[1]**2)\n gauss1 = ratio/(2*np.pi*sigmas[0]**2)*np.exp(-norm_dist1)\n gauss2 = 1/(2*np.pi*sigmas[1]**2)*np.exp(-norm_dist2)\n # Normalization? Should it not be zero sum? (no reaction to constant input)\n \n return (gauss1 - gauss2)/(1+ratio)\n\ndef Gauss(sigma,center,img_size):\n # Sigma: standard deviation of the receptive field\n \n x = np.arange(0,img_size[0])\n y = np.arange(0,img_size[1])\n X, Y = np.meshgrid(x,y)\n \n norm_dist = 1/2*(((X-center[0])**2+(Y-center[1])**2)/sigma**2)\n gauss =1/(2*np.pi*sigma**2)*np.exp(-norm_dist)\n \n return gauss\n\ndef Gaussian(x,y,sigmax,sigmay,peak):\n norm_dist = 1/2*(x**2/sigmax**2+y**2/sigmay**2)\n return peak*np.exp(-norm_dist)\n\n###############################################################################\n\n# Functions for temporal attributes\n\ndef Temporal_multiple(attributes,n):\n # Unpacks contents of attributes and passes them one at a time to Temporal()\n # Returns list of receptive fields, each one corresponding to one input\n \n temporals = [None]*n\n atts = cp.deepcopy(attributes) # Changes in atts should not affect attributes\n \n for i in range(n):\n atts[\"temporal\"] = attributes[\"temporal\"][i]\n atts[\"duration\"] = attributes[\"duration\"][i]\n atts[\"coeffs\"] = attributes[\"coeffs\"][i]\n temporals[i] = Temporal(atts)\n \n return temporals\n\ndef Temporal(attributes):\n # Define temporal receptive fields. Options:\n # Adelson and Bergen 1985: \"temporal\" should be \"Adelson_Bergen\", other \n # parameters needed in \"attributes\": \"duration\"\n # Stretched sin: \"temporal\" should be \"stretched_sin\", other parameters\n # needed: \"duration\", \"coeffs\"\n \n if np.array_equal(attributes[\"temporal\"],'Adelson_Bergen'):\n temporal = Adelson_Bergen(attributes[\"duration\"])\n elif np.array_equal(attributes[\"temporal\"],'stretched_sin'):\n temporal = stretched_sin(attributes[\"duration\"],attributes[\"coeffs\"])\n \n temporal = norm(temporal)\n \n return temporal\n\n\ndef Adelson_Bergen(duration):\n # Alpha acts as an inverse time constant. 
Equation (2.29) from Dayan & Abbott\n \n alpha = 20/duration\n t = np.arange(duration)\n norm_t = alpha*t\n \n return alpha*np.exp(-norm_t)*(norm_t**5/np.math.factorial(5)-norm_t**7/np.math.factorial(7))\n\ndef stretched_sin(tf,coeffs):\n # Computes equation (5) from Keat et al 2001\n # tf is the maximal length of the filter\n # coeffs should be a numpy array with the corresponding coefficient of the\n # n-th term of (5) in position n-1\n \n filt = 0\n \n for i in range(len(coeffs)):\n filt = filt + coeffs[i]*stretched_sin_basis(tf,i+1)\n \n return filt\n\ndef stretched_sin_basis(tf,order):\n # Equation (6) from Keat et al 2001\n \n t = np.arange(tf)\n norm_t = t/tf\n \n return np.sin(np.pi*order*(2*norm_t-norm_t**2))\n \n\n###############################################################################\n\n# Functions for activations\n\ndef activation(h,function,threshold):\n # Computes output of elements. Options:\n # \"relu\", \"sigmoid\"\n \n if np.array_equal(function,\"relu\"):\n out = relu(h,threshold)\n elif np.array_equal(function,\"sigmoid\"):\n out = sigmoid(h,threshold)\n \n return out\n \n\ndef relu(h,threshold,gain = 1):\n # could define gain for each cell\n out = h-threshold; \n if np.isscalar(out): \n if out<0: \n out = 0 \n else: \n out[out<0] = 0\n return gain*out\n\ndef sigmoid(h,threshold,k=.5,b=5,s=1):\n # could define k, b and s separately for each cell\n return 1/(1+k*np.exp(-b*(h-threshold)))\n\n###############################################################################\n \n# Utils\n \ndef norm(array):\n coeff = np.linalg.norm(array)\n if coeff>0:\n array = array/coeff\n return array"
]
| [
[
"numpy.sin",
"numpy.array",
"numpy.array_equal",
"numpy.linalg.norm",
"numpy.isnan",
"scipy.signal.fftconvolve",
"numpy.zeros",
"numpy.sum",
"numpy.dot",
"numpy.exp",
"numpy.arange",
"numpy.isscalar",
"numpy.math.factorial",
"numpy.size",
"numpy.meshgrid",
"numpy.expand_dims"
]
]
|
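The heart of this record's `classes.py` is the separable receptive-field convolution in `BipolarCell.out()`. Below is a self-contained sketch of that pattern using the same `scipy.signal.fftconvolve(..., axes=2)` call the API list extracts; the stimulus size, Gaussian width, and temporal kernel are illustrative choices, not values from the file:

```python
import numpy as np
from scipy import signal

# Toy stimulus: 50x50 pixels over 200 time steps.
stim = np.random.rand(50, 50, 200)

# Separable receptive field: spatial Gaussian times temporal kernel,
# mirroring how BipolarCell builds `spatiotemporal` from `spatial` and `temporal`.
x = np.arange(50)
X, Y = np.meshgrid(x, x)
spatial = np.exp(-((X - 25) ** 2 + (Y - 25) ** 2) / (2 * 4.0 ** 2))
temporal = np.exp(-np.arange(20) / 5.0)
rf = np.expand_dims(spatial, 2) * temporal[np.newaxis, np.newaxis, :]

# Convolve along time only, then pool over space -- the same shape handling
# (crop back to the input's time length) as in BipolarCell.out().
resp = signal.fftconvolve(stim, rf, mode="full", axes=2)[:, :, :stim.shape[2]]
drive = np.sum(resp, axis=(0, 1))
print(drive.shape)  # (200,)
```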
dfki-asr/morphablegraphs | [
"02c77aab72aa4b58f4067c720f5d124f0be3ea80"
]
| [
"examples/run_construction.py"
]
| [
"import os\nimport json\nimport numpy as np\nimport scipy.interpolate as si\nimport collections\nimport argparse\nimport glob\nfrom anim_utils.animation_data.bvh import BVHReader\nfrom anim_utils.animation_data import SkeletonBuilder, MotionVector\nfrom anim_utils.animation_data.skeleton_models import SKELETON_MODELS\nfrom anim_utils.utilities.io_helper_functions import load_json_file\nfrom morphablegraphs.construction.motion_model_constructor import MotionModelConstructor\nfrom morphablegraphs.motion_model.motion_primitive_wrapper import MotionPrimitiveModelWrapper\nfrom morphablegraphs.construction.utils import get_cubic_b_spline_knots\n\n\nMM_FILE_ENDING = \"_quaternion_mm.json\"\n\n\ndef load_skeleton(file_path, joint_filter=None, scale=1.0):\n target_bvh = BVHReader(file_path)\n bvh_joints = list(target_bvh.get_animated_joints())\n if joint_filter is not None:\n animated_joints = [j for j in bvh_joints if j in joint_filter]\n else:\n print(\"set default joints\")\n animated_joints = bvh_joints\n skeleton = SkeletonBuilder().load_from_bvh(target_bvh, animated_joints)\n skeleton.scale(scale)\n return skeleton\n\n\ndef load_motion_vector_from_bvh_file(bvh_file_path, animated_joints):\n bvh_data = BVHReader(bvh_file_path)\n mv = MotionVector(None)\n mv.from_bvh_reader(bvh_data, filter_joints=False, animated_joints=animated_joints)\n return mv\n\n\ndef load_motion_data(motion_folder, max_count=np.inf, animated_joints=None):\n motions = collections.OrderedDict()\n for root, dirs, files in os.walk(motion_folder):\n for file_name in files:\n if file_name.endswith(\"bvh\"):\n mv = load_motion_vector_from_bvh_file(motion_folder + os.sep + file_name, animated_joints)\n motions[file_name[:-4]] = np.array(mv.frames, dtype=np.float)\n if len(motions) > max_count:\n break\n return motions\n\n\ndef get_standard_config():\n config = dict()\n config[\"n_spatial_basis_factor\"] = 0.2\n config[\"n_basis_functions_spatial\"] = 16\n config[\"fraction\"] = 0.95\n config[\"n_basis_functions_temporal\"] = 8\n config[\"npc_temporal\"] = 3\n config[\"n_components\"] = None\n config[\"precision_temporal\"] = 0.99\n return config\n\n\ndef export_frames_to_bvh(skeleton, frames, filename):\n print(\"export\", len(frames[0]))\n mv = MotionVector()\n mv.frames = np.array([skeleton.add_fixed_joint_parameters_to_frame(f) for f in frames])\n print(mv.frames.shape)\n mv.export(skeleton, filename, add_time_stamp=False)\n\n\ndef export_motions(skeleton, motions):\n for idx, frames in enumerate(motions):\n export_frames_to_bvh(skeleton, frames, \"out\" + str(idx))\n\n\ndef define_sections_from_keyframes(motion_names, keyframes):\n sections = []\n for key in motion_names:\n if key not in keyframes:\n continue\n m_sections = []\n keyframe = keyframes[key]\n section = dict()\n section[\"start_idx\"] = 0\n section[\"end_idx\"] = keyframe\n m_sections.append(section)\n section = dict()\n section[\"start_idx\"] = keyframe\n section[\"end_idx\"] = -1\n m_sections.append(section)\n sections.append(m_sections)\n return sections\n\n\ndef smooth_quaternion_frames(skeleton, frames, reference_frame):\n print(\"smooth\", len(frames[0]), len(reference_frame))\n for frame in frames:\n for idx, node in enumerate(skeleton.animated_joints):\n o = idx*4 + 3\n ref_q = reference_frame[o:o+4]\n q = frame[o:o+4]\n if np.dot(q, ref_q) < 0:\n frame[o:o + 4] = -q\n return frames\n\n\ndef define_sections_from_annotations(motion_folder, motions):\n filtered_motions = collections.OrderedDict()\n sections = collections.OrderedDict()\n for key in 
motions.keys():\n annotations_file = motion_folder + os.sep + key + \"_sections.json\"\n if os.path.isfile(annotations_file):\n data = load_json_file(annotations_file)\n annotations = data[\"semantic_annotation\"]\n motion_sections = dict()\n for label in annotations:\n annotations[label].sort()\n section = dict()\n section[\"start_idx\"] = annotations[label][0]\n section[\"end_idx\"] = annotations[label][-1]\n motion_sections[section[\"start_idx\"]] = section\n motion_sections = collections.OrderedDict(sorted(motion_sections.items()))\n sections[key] = motion_sections.values()\n filtered_motions[key] = motions[key]\n\n if len(sections) > 0:\n motions = filtered_motions\n return motions, sections\n else:\n return motions, None\n\n\ndef convert_motion_to_static_motion_primitive(name, motion, skeleton, n_basis=7, degree=3):\n \"\"\"\n Represent motion data as functional data, motion data should be narray<2d> n_frames * n_dims,\n the functional data has the shape n_basis * n_dims\n \"\"\"\n\n motion_data = np.asarray(motion)\n n_frames, n_dims = motion_data.shape\n knots = get_cubic_b_spline_knots(n_basis, n_frames)\n x = list(range(n_frames))\n coeffs = [si.splrep(x, motion_data[:, i], k=degree, t=knots[degree + 1: -(degree + 1)])[1][:-4] for i in range(n_dims)]\n coeffs = np.asarray(coeffs).T\n\n data = dict()\n data[\"name\"] = name\n data[\"spatial_coeffs\"] = coeffs.tolist()\n data[\"knots\"] = knots.tolist()\n data[\"n_canonical_frames\"] = len(motion)\n data[\"skeleton\"] = skeleton.to_json()\n return data\n\n\ndef train_model(name, motion_folder, output_folder, skeleton, max_training_samples=100, animated_joints=None, save_skeleton=False, use_multi_processing=True, temp_data_dir=None, pre_aligned=False):\n print(\"train model\",name, motion_folder, use_multi_processing)\n motions = load_motion_data(motion_folder, max_count=max_training_samples, animated_joints=animated_joints)\n ref_frame = None\n for key, m in motions.items():\n if ref_frame is None:\n ref_frame = m[0]\n motions[key] = smooth_quaternion_frames(skeleton, m, ref_frame)\n\n keyframes_filename = motion_folder+os.sep+\"keyframes.json\"\n if os.path.isfile(keyframes_filename):\n keyframes = load_json_file(keyframes_filename)\n sections = define_sections_from_keyframes(motions.keys(), keyframes)\n filtered_motions = collections.OrderedDict()\n for key in motions.keys():\n if key in keyframes:\n filtered_motions[key] = motions[key]\n motions = filtered_motions\n else:\n motions, sections = define_sections_from_annotations(motion_folder, motions)\n\n out_filename = output_folder + os.sep + name + MM_FILE_ENDING\n if len(motions) > 1:\n config = get_standard_config()\n config[\"use_multi_processing\"] = use_multi_processing\n config[\"temp_data_dir\"] = temp_data_dir\n constructor = MotionModelConstructor(skeleton, config)\n align_frames = True\n if not pre_aligned or not os.path.isfile(motion_folder+ os.sep+ \"temporal_data.npy\"):\n constructor.set_motions(motions)\n constructor.set_dtw_sections(sections)\n else:\n constructor.set_aligned_frames(motions)\n temporal_data = np.load(motion_folder+ os.sep+ \"temporal_data.npy\",allow_pickle=True)\n constructor.set_timewarping(temporal_data)\n align_frames = False\n model_data = constructor.construct_model(name, version=3, save_skeleton=save_skeleton, align_frames=align_frames)\n \n with open(out_filename, 'w') as outfile:\n json.dump(model_data, outfile)\n\n elif len(motions) == 1:\n keys = list(motions.keys())\n model_data = convert_motion_to_static_motion_primitive(name, 
motions[keys[0]], skeleton)\n with open(out_filename, 'w') as outfile:\n json.dump(model_data, outfile)\n else:\n print(\"Error: Did not find any BVH files in the directory\", motion_folder)\n model_data = dict()\n model_data[\"n_motions\"] = len(motions)\n model_data[\"n_files\"] = len(glob.glob(motion_folder+\"*\"))\n out_filename = output_folder + os.sep + \"MODELING_FAILED\"\n with open(out_filename, 'w') as outfile:\n json.dump(model_data, outfile)\n\n\ndef load_model(filename, skeleton):\n with open(filename, 'r') as infile:\n model_data = json.load(infile)\n model = MotionPrimitiveModelWrapper()\n model._initialize_from_json(skeleton.convert_to_mgrd_skeleton(), model_data)\n motion_spline = model.sample(False)\n frames = motion_spline.get_motion_vector()\n print(frames.shape)\n export_frames_to_bvh(skeleton, frames, \"sample\")\n\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Creat motion model.')\n parser.add_argument('--name', help='name')\n parser.add_argument('--skel_filename', help='skeleton filename')\n parser.add_argument('--input_folder', help='folder containing BVH files')\n parser.add_argument('--output_folder', help='folder containing the resulting model')\n parser.add_argument('--scale', nargs='?', default=1.0, help='scale')\n parser.add_argument('-n','--n_max_samples', nargs='?', default=1000, help='Maximum number of samples per primitive')\n parser.add_argument('-s','--save_skeleton', nargs='?', default=\"True\", help='stores skeleton in the model file')\n parser.add_argument('--single_process', action=\"store_true\", help='deactivates multiple processes')\n parser.add_argument('--pre_aligned', action=\"store_true\", help='deactivates time warping')\n parser.add_argument('-t', \"--temp_data_dir\", nargs='?',default=None, help='directory for temp data export')\n parser.add_argument( \"--joint_filter\", nargs='+',default=None, help='Sequence of joint names to model')\n parser.add_argument( \"--user\", nargs='+',default=None, help='User for server access')\n parser.add_argument( \"--password\", nargs='+',default=None, help='Password for server access')\n parser.add_argument( \"--token\", nargs='+',default=None, help='Encrypted user password for server access')\n args = parser.parse_args()\n if args.name and args.skel_filename and args.input_folder and args.output_folder:\n joint_filter = args.joint_filter\n skeleton = load_skeleton(args.skel_filename, joint_filter, args.scale)\n animated_joints = skeleton.animated_joints\n if not os.path.isdir(args.output_folder):\n os.makedirs(args.output_folder)\n train_model(args.name, args.input_folder, args.output_folder, \n skeleton, args.n_max_samples, \n animated_joints, save_skeleton=args.save_skeleton, \n use_multi_processing=not args.single_process, temp_data_dir=args.temp_data_dir, pre_aligned=args.pre_aligned)\n else:\n print(\"Not enough arguments\")\n\nif __name__ == \"__main__\":\n main()\n \n\n\n"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.asarray",
"scipy.interpolate.splrep",
"numpy.load"
]
]
|
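`convert_motion_to_static_motion_primitive` in this record fits one B-spline per motion dimension with `scipy.interpolate.splrep`. A reduced, single-channel sketch of that fitting step follows; the trajectory, knot count, and knot placement are illustrative, and the repo's `get_cubic_b_spline_knots` helper is not reproduced here:

```python
import numpy as np
import scipy.interpolate as si

# Toy 1-D "motion channel": 120 frames of a smooth trajectory.
n_frames = 120
x = np.arange(n_frames)
y = np.sin(x / 20.0)

# Fit a cubic (k=3) B-spline with a handful of interior knots, as splrep is
# applied per dimension in convert_motion_to_static_motion_primitive.
interior_knots = np.linspace(10, 110, 5)
knots, coeffs, degree = si.splrep(x, y, k=3, t=interior_knots)
print(len(knots), len(coeffs), degree)  # 13 13 3
```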
sangeetsu/LenslessLearning | [
"751efc614eff5616a229972620192478af2c39c1"
]
| [
"models/admm_model.py"
]
| [
"import torch\nimport torch.nn as nn\nfrom admm_helper_functions_torch import *\nfrom admm_rgb_pytorch import *\nimport admm_filters_no_soft as admm_s\n\nclass ADMM_Net(nn.Module):\n def __init__(self, batch_size, h, iterations, learning_options = {'learned_vars': []}, \n cuda_device = torch.device('cpu'), le_admm_s = False, denoise_model = []):\n super(ADMM_Net, self).__init__()\n \n self.iterations = iterations # Number of unrolled iterations\n self.batch_size = batch_size # Batch size \n self.autotune = False # Using autotune (True or False)\n self.realdata = True # Real Data or Simulated Measurements\n self.printstats = False # Print ADMM Variables\n \n self.addnoise = False # Add noise (only if using simulated data)\n self.noise_std = 0.05 # Noise standard deviation \n self.cuda_device = cuda_device\n \n self.l_admm_s = le_admm_s # Turn on if using Le-ADMM*, otherwise should be set to False\n if le_admm_s == True:\n self.Denoiser = denoise_model.to(cuda_device)\n \n # Leared structure options \n self.learning_options = learning_options\n print(learning_options['learned_vars'])\n\n \n ## Initialize constants \n self.DIMS0 = h.shape[0] # Image Dimensions\n self.DIMS1 = h.shape[1] # Image Dimensions\n \n self.PAD_SIZE0 = int((self.DIMS0)//2) # Pad size\n self.PAD_SIZE1 = int((self.DIMS1)//2) # Pad size\n \n # Initialize Variables \n self.initialize_learned_variables(learning_options)\n \n\n # PSF\n self.h_var = torch.nn.Parameter(torch.tensor(h, dtype=torch.float32, device=self.cuda_device),\n requires_grad=False)\n \n self.h_zeros = torch.nn.Parameter(torch.zeros(self.DIMS0*2, self.DIMS1*2, dtype=torch.float32, device=self.cuda_device),\n requires_grad=False)\n\n \n self.h_complex = torch.stack((pad_zeros_torch(self, self.h_var), self.h_zeros),2).unsqueeze(0)\n \n self.H = torch.fft(batch_ifftshift2d(self.h_complex).squeeze(), 2) \n self.Hconj = self.H* torch.tensor([1,-1], dtype = torch.float32, device=self.cuda_device) \n self.HtH = complex_abs(complex_multiplication(self.H, self.Hconj))\n \n\n self.LtL = torch.nn.Parameter(torch.tensor(make_laplacian(self), dtype=torch.float32, device=self.cuda_device),\n requires_grad=False)\n \n self.resid_tol = torch.tensor(1.5, dtype= torch.float32, device=self.cuda_device)\n self.mu_inc = torch.tensor(1.2, dtype = torch.float32, device=self.cuda_device)\n self.mu_dec = torch.tensor(1.2, dtype = torch.float32, device=self.cuda_device) \n\n def initialize_learned_variables(self, learning_options):\n \n if 'mus' in learning_options['learned_vars']: # Make mu parameters learnable\n self.mu1= torch.nn.Parameter(torch.tensor(np.ones(self.iterations)*1e-4, dtype = torch.float32))\n self.mu2= torch.nn.Parameter(torch.tensor(np.ones(self.iterations)*1e-4, dtype = torch.float32))\n self.mu3= torch.nn.Parameter(torch.tensor(np.ones(self.iterations)*1e-4, dtype = torch.float32))\n else: # Not learnable\n self.mu1= torch.ones(self.iterations, dtype = torch.float32, device=self.cuda_device)*1e-4\n self.mu2= torch.ones(self.iterations, dtype = torch.float32, device=self.cuda_device)*1e-4\n self.mu3 = torch.ones(self.iterations, dtype = torch.float32, device=self.cuda_device)*1e-4\n\n\n if 'tau' in learning_options['learned_vars']: # Make tau parameter learnable\n self.tau= torch.nn.Parameter(torch.tensor(np.ones(self.iterations)*2e-4, dtype = torch.float32))\n \n else:\n self.tau= torch.ones(self.iterations, dtype = torch.float32, device=self.cuda_device)*2e-3 \n \n\n\n def forward(self, inputs): \n \n self.batch_size = inputs.shape[0]\n\n #self.HtH = 
complex_abs(complex_multiplication(self.H, self.Hconj))\n \n self.mu_vals = torch.stack([self.mu1, self.mu2, self.mu3, self.tau])\n \n self.admmstats = {'dual_res_s': [], 'dual_res_u': [], 'dual_res_w': [], \n 'primal_res_s': [], 'primal_res_u': [], 'primal_res_w': [],\n 'data_loss': [], 'total_loss': []}\n \n if self.autotune==True:\n self.mu_auto_list = {'mu1': [], 'mu2': [], 'mu3': []}\n\n # If using simulated data, input the raw image and run through forward model\n if self.realdata == False: \n y = crop(self, self.Hfor(pad_dim2(self, inputs)))\n if self.addnoise == True:\n y = self.gaussian_noise_layer(y, self.noise_std)\n \n \n # Otherwise, input is the normalized Diffuser Image \n else:\n y = inputs\n \n \n Cty = pad_zeros_torch(self, y) # Zero padded input\n CtC = pad_zeros_torch(self, torch.ones_like(y)) # Zero padded ones \n \n # Create list of inputs/outputs \n in_vars = []; in_vars1 = []\n in_vars2 = []; Hsk_list = []\n a2k_1_list=[]; a2k_2_list= []\n\n sk = torch.zeros_like(Cty, dtype = torch.float32)\n alpha1k = torch.zeros_like(Cty, dtype = torch.float32)\n alpha3k = torch.zeros_like(Cty, dtype = torch.float32)\n Hskp = torch.zeros_like(Cty, dtype = torch.float32)\n\n if self.l_admm_s == True:\n Lsk_init, mem_init = self.Denoiser.forward(sk)\n alpha2k = torch.zeros_like(Lsk_init, dtype = torch.float32, device=self.cuda_device) \n else:\n \n alpha2k_1 = torch.zeros_like(sk[:,:,:-1,:], dtype = torch.float32) \n alpha2k_2 = torch.zeros_like(sk[:,:,:,:-1], dtype = torch.float32)\n \n a2k_1_list.append(alpha2k_1)\n a2k_2_list.append(alpha2k_2)\n\n mu_auto = torch.stack([self.mu1[0], self.mu2[0], self.mu3[0], self.tau[0]])\n\n \n in_vars.append(torch.stack([sk, alpha1k, alpha3k, Hskp]))\n \n\n \n\n for i in range(0,self.iterations):\n \n if self.l_admm_s == True:\n \n out_vars, alpha2k, _ , symm, admmstats = admm_s.admm(self, in_vars[-1], alpha2k, CtC, Cty, [], i, y)\n in_vars.append(out_vars)\n \n else:\n \n if self.autotune==True:\n out_vars, a_out1, a_out2, mu_auto , symm, admmstats= admm(self, in_vars[-1], \n a2k_1_list[-1], a2k_2_list[-1], CtC, Cty, mu_auto, i, y)\n\n self.mu_auto_list['mu1'].append(mu_auto[0])\n self.mu_auto_list['mu2'].append(mu_auto[1])\n self.mu_auto_list['mu3'].append(mu_auto[2])\n\n else:\n out_vars, a_out1, a_out2, _ , symm, admmstats = admm(self, in_vars[-1], \n a2k_1_list[-1], a2k_2_list[-1], CtC, Cty, [], i, y)\n\n #if torch.any(out_vars != out_vars):\n # print('loop')\n\n in_vars.append(out_vars)\n a2k_1_list.append(a_out1)\n a2k_2_list.append(a_out2)\n\n\n if self.printstats == True: # Print ADMM Variables\n self.admmstats['dual_res_s'].append(admmstats['dual_res_s'])\n self.admmstats['primal_res_s'].append(admmstats['primal_res_s'])\n self.admmstats['dual_res_w'].append(admmstats['dual_res_w'])\n self.admmstats['primal_res_w'].append(admmstats['primal_res_w'])\n self.admmstats['dual_res_u'].append(admmstats['dual_res_u'])\n self.admmstats['primal_res_u'].append(admmstats['primal_res_u'])\n self.admmstats['data_loss'].append(admmstats['data_loss'])\n self.admmstats['total_loss'].append(admmstats['total_loss'])\n \n \n x_out = crop(self, in_vars[-1][0])\n x_outn = normalize_image(x_out)\n self.in_list = in_vars\n \n return x_outn#, symm\n\n \n"
]
| [
[
"torch.zeros",
"torch.device",
"torch.stack",
"torch.ones",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like"
]
]
|
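The extracted APIs for `admm_model.py` are all tensor constructors used to preallocate the unrolled ADMM state. A minimal sketch of that preallocation pattern, with placeholder shapes and values rather than the model's actual dimensions (note that the record's `torch.fft(...)` call is the pre-1.8 function API, replaced in newer PyTorch by the `torch.fft` module):

```python
import torch

device = torch.device("cpu")

# Placeholder "measurement" batch; the model derives its shapes from the PSF instead.
y = torch.ones(2, 1, 64, 64, device=device)

sk = torch.zeros_like(y)                    # primal variable
alpha1k = torch.zeros_like(y)               # dual variable
CtC = torch.ones_like(y)                    # zero-padded mask of ones
pad = torch.zeros(64, 64, device=device)    # explicit zero buffer
mu1 = torch.tensor(1e-4, dtype=torch.float32, device=device)

state = torch.stack([sk, alpha1k, CtC])     # stack iterates along a new leading dim
print(state.shape, mu1.item())
```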
andrybicio/ByteTrack_ReID | [
"ae43f9d7d3daee796f80f34dde76324f902999bf"
]
| [
"exps/example/mot/yolox_s_mix_det.py"
]
| [
"# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\n\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n def __init__(self):\n super(Exp, self).__init__()\n self.num_classes = 1\n self.depth = 0.33\n self.width = 0.50\n self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n self.train_ann = \"train.json\"\n self.val_ann = \"train.json\"\n self.input_size = (608, 1088)\n self.test_size = (608, 1088)\n self.random_size = (12, 26)\n self.max_epoch = 80\n self.print_interval = 20\n self.eval_interval = 5\n self.test_conf = 0.001\n self.nmsthre = 0.7\n self.no_aug_epochs = 10\n self.basic_lr_per_img = 0.001 / 64.0\n self.warmup_epochs = 1\n\n def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n from yolox.data import (\n MOTDataset,\n TrainTransform,\n YoloBatchSampler,\n DataLoader,\n InfiniteSampler,\n MosaicDetection,\n )\n\n dataset = MOTDataset(\n data_dir=os.path.join(get_yolox_datadir(), \"mix_det\"),\n json_file=self.train_ann,\n name='',\n img_size=self.input_size,\n preproc=TrainTransform(\n rgb_means=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n max_labels=500,\n ),\n ) # dataset.annotations[0][0].shape: [obj_num, 6], tlbr(absolute value) + class_id + track_id\n total_ids = dataset.nID # TODO: total ids for reid classifier\n\n dataset = MosaicDetection(\n dataset,\n mosaic=not no_aug,\n img_size=self.input_size,\n preproc=TrainTransform(\n rgb_means=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n max_labels=1000,\n ),\n degrees=self.degrees,\n translate=self.translate,\n scale=self.scale,\n shear=self.shear,\n perspective=self.perspective,\n enable_mixup=self.enable_mixup,\n )\n\n self.dataset = dataset\n\n if is_distributed:\n batch_size = batch_size // dist.get_world_size()\n\n sampler = InfiniteSampler(\n len(self.dataset), seed=self.seed if self.seed else 0\n )\n\n batch_sampler = YoloBatchSampler(\n sampler=sampler,\n batch_size=batch_size,\n drop_last=False,\n input_dimension=self.input_size,\n mosaic=not no_aug,\n )\n\n dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n settings = {'total_ids': total_ids}\n return train_loader, settings\n\n def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n from yolox.data import MOTDataset, ValTransform\n\n valdataset = MOTDataset(\n data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n json_file=self.val_ann,\n img_size=self.test_size,\n name='train',\n preproc=ValTransform(\n rgb_means=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n ),\n )\n\n if is_distributed:\n batch_size = batch_size // dist.get_world_size()\n sampler = torch.utils.data.distributed.DistributedSampler(\n valdataset, shuffle=False\n )\n else:\n sampler = torch.utils.data.SequentialSampler(valdataset)\n\n dataloader_kwargs = {\n \"num_workers\": self.data_num_workers,\n \"pin_memory\": True,\n \"sampler\": sampler,\n }\n dataloader_kwargs[\"batch_size\"] = batch_size\n val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n return val_loader\n\n def get_evaluator(self, batch_size, is_distributed, testdev=False):\n from yolox.evaluators import COCOEvaluator\n\n val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n evaluator = COCOEvaluator(\n dataloader=val_loader,\n 
img_size=self.test_size,\n confthre=self.test_conf,\n nmsthre=self.nmsthre,\n num_classes=self.num_classes,\n testdev=testdev,\n )\n return evaluator\n"
]
| [
[
"torch.distributed.get_world_size",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
]
]
|
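`get_eval_loader` in this exp file switches between a `DistributedSampler` and a `SequentialSampler` depending on whether training is distributed. Here is a runnable sketch of that branch with a stand-in dataset; `MOTDataset` and the batch-size division by `torch.distributed.get_world_size()` are only indicated in comments because they require an initialized process group:

```python
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset

# Stand-in dataset; the exp file wraps MOTDataset instead.
dataset = TensorDataset(torch.arange(100, dtype=torch.float32))

is_distributed = False  # with a process group: batch_size //= torch.distributed.get_world_size()
if is_distributed:
    sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)
else:
    sampler = SequentialSampler(dataset)

loader = DataLoader(dataset, batch_size=8, sampler=sampler,
                    num_workers=0, pin_memory=True)
print(len(loader))  # 13 batches of up to 8 samples
```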
IlyaKodua/AutorncoderSignal | [
"ba5e020c8230fe70858d2c22e7c4e1f3b0c584ce"
]
| [
"Model.py"
]
| [
"from torch import nn\nimport torch.nn.functional as F\nimport torch\nimport torchvision\nimport numpy as np\n\nclass DNCNN(nn.Module):\n\n def __init__(self, n_channels, n_filters, kernel_size):\n\n super(DNCNN, self).__init__()\n\n\n layers = [\n nn.Conv2d(in_channels=n_channels, out_channels=n_filters, kernel_size=kernel_size,\n padding=1, bias=False),\n nn.ReLU(inplace=True)\n ]\n\n depth = 20\n for _ in range(depth-2):\n layers.append(nn.Conv2d(in_channels=n_filters, out_channels=n_filters, kernel_size=kernel_size,\n padding=1, bias=False))\n layers.append(nn.BatchNorm2d(n_filters))\n layers.append(nn.ReLU(inplace=True))\n layers.append(nn.Conv2d(in_channels=n_filters, out_channels=n_channels, kernel_size=kernel_size,\n padding=1, bias=False))\n self.dncnn = nn.Sequential(*layers)\n\n\n\n def forward(self,x):\n out = self.dncnn(x)\n return out\n\n\n\n"
]
| [
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
]
|
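The four layer constructors listed above are exactly the building blocks of this record's DnCNN. A shortened sketch (depth 5 rather than the file's hard-coded 20) showing the same Conv-ReLU head, Conv-BN-ReLU body, and Conv tail; note that `padding=1` preserves spatial size only for the default `kernel_size=3`:

```python
import torch
from torch import nn

def make_dncnn(n_channels=1, n_filters=64, kernel_size=3, depth=5):
    # Head: Conv + ReLU, as in DNCNN.__init__.
    layers = [nn.Conv2d(n_channels, n_filters, kernel_size, padding=1, bias=False),
              nn.ReLU(inplace=True)]
    # Body: (depth - 2) Conv + BatchNorm + ReLU blocks.
    for _ in range(depth - 2):
        layers += [nn.Conv2d(n_filters, n_filters, kernel_size, padding=1, bias=False),
                   nn.BatchNorm2d(n_filters),
                   nn.ReLU(inplace=True)]
    # Tail: Conv back to the input channel count.
    layers.append(nn.Conv2d(n_filters, n_channels, kernel_size, padding=1, bias=False))
    return nn.Sequential(*layers)

net = make_dncnn()
print(net(torch.randn(1, 1, 32, 32)).shape)  # torch.Size([1, 1, 32, 32])
```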
tomaszps/pandas | [
"4628665e53c214ae2e2efde54c4d703fb1c198ab"
]
| [
"pandas/tests/indexes/multi/test_indexing.py"
]
| [
"from datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import InvalidIndexError\n\nimport pandas as pd\nfrom pandas import Categorical, Index, MultiIndex, date_range\nimport pandas._testing as tm\n\n\nclass TestSliceLocs:\n def test_slice_locs_partial(self, idx):\n sorted_idx, _ = idx.sortlevel(0)\n\n result = sorted_idx.slice_locs((\"foo\", \"two\"), (\"qux\", \"one\"))\n assert result == (1, 5)\n\n result = sorted_idx.slice_locs(None, (\"qux\", \"one\"))\n assert result == (0, 5)\n\n result = sorted_idx.slice_locs((\"foo\", \"two\"), None)\n assert result == (1, len(sorted_idx))\n\n result = sorted_idx.slice_locs(\"bar\", \"baz\")\n assert result == (2, 4)\n\n def test_slice_locs(self):\n df = tm.makeTimeDataFrame()\n stacked = df.stack()\n idx = stacked.index\n\n slob = slice(*idx.slice_locs(df.index[5], df.index[15]))\n sliced = stacked[slob]\n expected = df[5:16].stack()\n tm.assert_almost_equal(sliced.values, expected.values)\n\n slob = slice(\n *idx.slice_locs(\n df.index[5] + timedelta(seconds=30),\n df.index[15] - timedelta(seconds=30),\n )\n )\n sliced = stacked[slob]\n expected = df[6:15].stack()\n tm.assert_almost_equal(sliced.values, expected.values)\n\n def test_slice_locs_with_type_mismatch(self):\n df = tm.makeTimeDataFrame()\n stacked = df.stack()\n idx = stacked.index\n with pytest.raises(TypeError, match=\"^Level type mismatch\"):\n idx.slice_locs((1, 3))\n with pytest.raises(TypeError, match=\"^Level type mismatch\"):\n idx.slice_locs(df.index[5] + timedelta(seconds=30), (5, 2))\n df = tm.makeCustomDataframe(5, 5)\n stacked = df.stack()\n idx = stacked.index\n with pytest.raises(TypeError, match=\"^Level type mismatch\"):\n idx.slice_locs(timedelta(seconds=30))\n # TODO: Try creating a UnicodeDecodeError in exception message\n with pytest.raises(TypeError, match=\"^Level type mismatch\"):\n idx.slice_locs(df.index[1], (16, \"a\"))\n\n def test_slice_locs_not_sorted(self):\n index = MultiIndex(\n levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],\n codes=[\n np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0]),\n ],\n )\n msg = \"[Kk]ey length.*greater than MultiIndex lexsort depth\"\n with pytest.raises(KeyError, match=msg):\n index.slice_locs((1, 0, 1), (2, 1, 0))\n\n # works\n sorted_index, _ = index.sortlevel(0)\n # should there be a test case here???\n sorted_index.slice_locs((1, 0, 1), (2, 1, 0))\n\n def test_slice_locs_not_contained(self):\n # some searchsorted action\n\n index = MultiIndex(\n levels=[[0, 2, 4, 6], [0, 2, 4]],\n codes=[[0, 0, 0, 1, 1, 2, 3, 3, 3], [0, 1, 2, 1, 2, 2, 0, 1, 2]],\n )\n\n result = index.slice_locs((1, 0), (5, 2))\n assert result == (3, 6)\n\n result = index.slice_locs(1, 5)\n assert result == (3, 6)\n\n result = index.slice_locs((2, 2), (5, 2))\n assert result == (3, 6)\n\n result = index.slice_locs(2, 5)\n assert result == (3, 6)\n\n result = index.slice_locs((1, 0), (6, 3))\n assert result == (3, 8)\n\n result = index.slice_locs(-1, 10)\n assert result == (0, len(index))\n\n @pytest.mark.parametrize(\n \"index_arr,expected,start_idx,end_idx\",\n [\n ([[np.nan, \"a\", \"b\"], [\"c\", \"d\", \"e\"]], (0, 3), np.nan, None),\n ([[np.nan, \"a\", \"b\"], [\"c\", \"d\", \"e\"]], (0, 3), np.nan, \"b\"),\n ([[np.nan, \"a\", \"b\"], [\"c\", \"d\", \"e\"]], (0, 3), np.nan, (\"b\", \"e\")),\n ([[\"a\", \"b\", \"c\"], [\"d\", np.nan, \"e\"]], (1, 3), (\"b\", np.nan), None),\n ([[\"a\", \"b\", \"c\"], [\"d\", np.nan, \"e\"]], (1, 3), 
(\"b\", np.nan), \"c\"),\n ([[\"a\", \"b\", \"c\"], [\"d\", np.nan, \"e\"]], (1, 3), (\"b\", np.nan), (\"c\", \"e\")),\n ],\n )\n def test_slice_locs_with_missing_value(\n self, index_arr, expected, start_idx, end_idx\n ):\n # issue 19132\n idx = MultiIndex.from_arrays(index_arr)\n result = idx.slice_locs(start=start_idx, end=end_idx)\n assert result == expected\n\n\ndef test_putmask_with_wrong_mask(idx):\n # GH18368\n\n msg = \"putmask: mask and data must be the same size\"\n with pytest.raises(ValueError, match=msg):\n idx.putmask(np.ones(len(idx) + 1, np.bool), 1)\n\n with pytest.raises(ValueError, match=msg):\n idx.putmask(np.ones(len(idx) - 1, np.bool), 1)\n\n with pytest.raises(ValueError, match=msg):\n idx.putmask(\"foo\", 1)\n\n\nclass TestGetIndexer:\n def test_get_indexer(self):\n major_axis = Index(np.arange(4))\n minor_axis = Index(np.arange(2))\n\n major_codes = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)\n minor_codes = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)\n\n index = MultiIndex(\n levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]\n )\n idx1 = index[:5]\n idx2 = index[[1, 3, 5]]\n\n r1 = idx1.get_indexer(idx2)\n tm.assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))\n\n r1 = idx2.get_indexer(idx1, method=\"pad\")\n e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)\n tm.assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method=\"pad\")\n tm.assert_almost_equal(r2, e1[::-1])\n\n rffill1 = idx2.get_indexer(idx1, method=\"ffill\")\n tm.assert_almost_equal(r1, rffill1)\n\n r1 = idx2.get_indexer(idx1, method=\"backfill\")\n e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)\n tm.assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method=\"backfill\")\n tm.assert_almost_equal(r2, e1[::-1])\n\n rbfill1 = idx2.get_indexer(idx1, method=\"bfill\")\n tm.assert_almost_equal(r1, rbfill1)\n\n # pass non-MultiIndex\n r1 = idx1.get_indexer(idx2.values)\n rexp1 = idx1.get_indexer(idx2)\n tm.assert_almost_equal(r1, rexp1)\n\n r1 = idx1.get_indexer([1, 2, 3])\n assert (r1 == [-1, -1, -1]).all()\n\n # create index with duplicates\n idx1 = Index(list(range(10)) + list(range(10)))\n idx2 = Index(list(range(20)))\n\n msg = \"Reindexing only valid with uniquely valued Index objects\"\n with pytest.raises(InvalidIndexError, match=msg):\n idx1.get_indexer(idx2)\n\n def test_get_indexer_nearest(self):\n midx = MultiIndex.from_tuples([(\"a\", 1), (\"b\", 2)])\n msg = (\n \"method='nearest' not implemented yet for MultiIndex; \"\n \"see GitHub issue 9365\"\n )\n with pytest.raises(NotImplementedError, match=msg):\n midx.get_indexer([\"a\"], method=\"nearest\")\n msg = \"tolerance not implemented yet for MultiIndex\"\n with pytest.raises(NotImplementedError, match=msg):\n midx.get_indexer([\"a\"], method=\"pad\", tolerance=2)\n\n def test_get_indexer_categorical_time(self):\n # https://github.com/pandas-dev/pandas/issues/21390\n midx = MultiIndex.from_product(\n [\n Categorical([\"a\", \"b\", \"c\"]),\n Categorical(date_range(\"2012-01-01\", periods=3, freq=\"H\")),\n ]\n )\n result = midx.get_indexer(midx)\n tm.assert_numpy_array_equal(result, np.arange(9, dtype=np.intp))\n\n @pytest.mark.parametrize(\n \"index_arr,labels,expected\",\n [\n (\n [[1, np.nan, 2], [3, 4, 5]],\n [1, np.nan, 2],\n np.array([-1, -1, -1], dtype=np.intp),\n ),\n ([[1, np.nan, 2], [3, 4, 5]], [(np.nan, 4)], np.array([1], dtype=np.intp)),\n ([[1, 2, 3], [np.nan, 4, 5]], [(1, np.nan)], np.array([0], dtype=np.intp)),\n (\n [[1, 2, 3], [np.nan, 4, 5]],\n [np.nan, 4, 5],\n 
np.array([-1, -1, -1], dtype=np.intp),\n ),\n ],\n )\n def test_get_indexer_with_missing_value(self, index_arr, labels, expected):\n # issue 19132\n idx = MultiIndex.from_arrays(index_arr)\n result = idx.get_indexer(labels)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_indexer_methods(self):\n # https://github.com/pandas-dev/pandas/issues/29896\n # test getting an indexer for another index with different methods\n # confirms that getting an indexer without a filling method, getting an\n # indexer and backfilling, and getting an indexer and padding all behave\n # correctly in the case where all of the target values fall in between\n # several levels in the MultiIndex into which they are getting an indexer\n #\n # visually, the MultiIndexes used in this test are:\n # mult_idx_1:\n # 0: -1 0\n # 1: 2\n # 2: 3\n # 3: 4\n # 4: 0 0\n # 5: 2\n # 6: 3\n # 7: 4\n # 8: 1 0\n # 9: 2\n # 10: 3\n # 11: 4\n #\n # mult_idx_2:\n # 0: 0 1\n # 1: 3\n # 2: 4\n mult_idx_1 = MultiIndex.from_product([[-1, 0, 1], [0, 2, 3, 4]])\n mult_idx_2 = MultiIndex.from_product([[0], [1, 3, 4]])\n\n indexer = mult_idx_1.get_indexer(mult_idx_2)\n expected = np.array([-1, 6, 7], dtype=indexer.dtype)\n tm.assert_almost_equal(expected, indexer)\n\n backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method=\"backfill\")\n expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)\n tm.assert_almost_equal(expected, backfill_indexer)\n\n # ensure the legacy \"bfill\" option functions identically to \"backfill\"\n backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method=\"bfill\")\n expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)\n tm.assert_almost_equal(expected, backfill_indexer)\n\n pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method=\"pad\")\n expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)\n tm.assert_almost_equal(expected, pad_indexer)\n\n # ensure the legacy \"ffill\" option functions identically to \"pad\"\n pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method=\"ffill\")\n expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)\n tm.assert_almost_equal(expected, pad_indexer)\n\n def test_get_indexer_three_or_more_levels(self):\n # https://github.com/pandas-dev/pandas/issues/29896\n # tests get_indexer() on MultiIndexes with 3+ levels\n # visually, these are\n # mult_idx_1:\n # 0: 1 2 5\n # 1: 7\n # 2: 4 5\n # 3: 7\n # 4: 6 5\n # 5: 7\n # 6: 3 2 5\n # 7: 7\n # 8: 4 5\n # 9: 7\n # 10: 6 5\n # 11: 7\n #\n # mult_idx_2:\n # 0: 1 1 8\n # 1: 1 5 9\n # 2: 1 6 7\n # 3: 2 1 6\n # 4: 2 7 6\n # 5: 2 7 8\n # 6: 3 6 8\n mult_idx_1 = pd.MultiIndex.from_product([[1, 3], [2, 4, 6], [5, 7]])\n mult_idx_2 = pd.MultiIndex.from_tuples(\n [\n (1, 1, 8),\n (1, 5, 9),\n (1, 6, 7),\n (2, 1, 6),\n (2, 7, 7),\n (2, 7, 8),\n (3, 6, 8),\n ]\n )\n # sanity check\n assert mult_idx_1.is_monotonic\n assert mult_idx_1.is_unique\n assert mult_idx_2.is_monotonic\n assert mult_idx_2.is_unique\n\n # show the relationships between the two\n assert mult_idx_2[0] < mult_idx_1[0]\n assert mult_idx_1[3] < mult_idx_2[1] < mult_idx_1[4]\n assert mult_idx_1[5] == mult_idx_2[2]\n assert mult_idx_1[5] < mult_idx_2[3] < mult_idx_1[6]\n assert mult_idx_1[5] < mult_idx_2[4] < mult_idx_1[6]\n assert mult_idx_1[5] < mult_idx_2[5] < mult_idx_1[6]\n assert mult_idx_1[-1] < mult_idx_2[6]\n\n indexer_no_fill = mult_idx_1.get_indexer(mult_idx_2)\n expected = np.array([-1, -1, 5, -1, -1, -1, -1], dtype=indexer_no_fill.dtype)\n tm.assert_almost_equal(expected, indexer_no_fill)\n\n # test with backfilling\n indexer_backfilled = 
mult_idx_1.get_indexer(mult_idx_2, method=\"backfill\")\n expected = np.array([0, 4, 5, 6, 6, 6, -1], dtype=indexer_backfilled.dtype)\n tm.assert_almost_equal(expected, indexer_backfilled)\n\n # now, the same thing, but forward-filled (aka \"padded\")\n indexer_padded = mult_idx_1.get_indexer(mult_idx_2, method=\"pad\")\n expected = np.array([-1, 3, 5, 5, 5, 5, 11], dtype=indexer_padded.dtype)\n tm.assert_almost_equal(expected, indexer_padded)\n\n # now, do the indexing in the other direction\n assert mult_idx_2[0] < mult_idx_1[0] < mult_idx_2[1]\n assert mult_idx_2[0] < mult_idx_1[1] < mult_idx_2[1]\n assert mult_idx_2[0] < mult_idx_1[2] < mult_idx_2[1]\n assert mult_idx_2[0] < mult_idx_1[3] < mult_idx_2[1]\n assert mult_idx_2[1] < mult_idx_1[4] < mult_idx_2[2]\n assert mult_idx_2[2] == mult_idx_1[5]\n assert mult_idx_2[5] < mult_idx_1[6] < mult_idx_2[6]\n assert mult_idx_2[5] < mult_idx_1[7] < mult_idx_2[6]\n assert mult_idx_2[5] < mult_idx_1[8] < mult_idx_2[6]\n assert mult_idx_2[5] < mult_idx_1[9] < mult_idx_2[6]\n assert mult_idx_2[5] < mult_idx_1[10] < mult_idx_2[6]\n assert mult_idx_2[5] < mult_idx_1[11] < mult_idx_2[6]\n\n indexer = mult_idx_2.get_indexer(mult_idx_1)\n expected = np.array(\n [-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1], dtype=indexer.dtype\n )\n tm.assert_almost_equal(expected, indexer)\n\n backfill_indexer = mult_idx_2.get_indexer(mult_idx_1, method=\"bfill\")\n expected = np.array(\n [1, 1, 1, 1, 2, 2, 6, 6, 6, 6, 6, 6], dtype=backfill_indexer.dtype\n )\n tm.assert_almost_equal(expected, backfill_indexer)\n\n pad_indexer = mult_idx_2.get_indexer(mult_idx_1, method=\"pad\")\n expected = np.array(\n [0, 0, 0, 0, 1, 2, 5, 5, 5, 5, 5, 5], dtype=pad_indexer.dtype\n )\n tm.assert_almost_equal(expected, pad_indexer)\n\n def test_get_indexer_crossing_levels(self):\n # https://github.com/pandas-dev/pandas/issues/29896\n # tests a corner case with get_indexer() with MultiIndexes where, when we\n # need to \"carry\" across levels, proper tuple ordering is respected\n #\n # the MultiIndexes used in this test, visually, are:\n # mult_idx_1:\n # 0: 1 1 1 1\n # 1: 2\n # 2: 2 1\n # 3: 2\n # 4: 1 2 1 1\n # 5: 2\n # 6: 2 1\n # 7: 2\n # 8: 2 1 1 1\n # 9: 2\n # 10: 2 1\n # 11: 2\n # 12: 2 2 1 1\n # 13: 2\n # 14: 2 1\n # 15: 2\n #\n # mult_idx_2:\n # 0: 1 3 2 2\n # 1: 2 3 2 2\n mult_idx_1 = pd.MultiIndex.from_product([[1, 2]] * 4)\n mult_idx_2 = pd.MultiIndex.from_tuples([(1, 3, 2, 2), (2, 3, 2, 2)])\n\n # show the tuple orderings, which get_indexer() should respect\n assert mult_idx_1[7] < mult_idx_2[0] < mult_idx_1[8]\n assert mult_idx_1[-1] < mult_idx_2[1]\n\n indexer = mult_idx_1.get_indexer(mult_idx_2)\n expected = np.array([-1, -1], dtype=indexer.dtype)\n tm.assert_almost_equal(expected, indexer)\n\n backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method=\"bfill\")\n expected = np.array([8, -1], dtype=backfill_indexer.dtype)\n tm.assert_almost_equal(expected, backfill_indexer)\n\n pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method=\"ffill\")\n expected = np.array([7, 15], dtype=pad_indexer.dtype)\n tm.assert_almost_equal(expected, pad_indexer)\n\n\ndef test_getitem(idx):\n # scalar\n assert idx[2] == (\"bar\", \"one\")\n\n # slice\n result = idx[2:5]\n expected = idx[[2, 3, 4]]\n assert result.equals(expected)\n\n # boolean\n result = idx[[True, False, True, False, True, True]]\n result2 = idx[np.array([True, False, True, False, True, True])]\n expected = idx[[0, 2, 4, 5]]\n assert result.equals(expected)\n assert result2.equals(expected)\n\n\ndef 
test_getitem_group_select(idx):\n sorted_idx, _ = idx.sortlevel(0)\n assert sorted_idx.get_loc(\"baz\") == slice(3, 4)\n assert sorted_idx.get_loc(\"foo\") == slice(0, 2)\n\n\[email protected](\"ind1\", [[True] * 5, pd.Index([True] * 5)])\[email protected](\n \"ind2\",\n [[True, False, True, False, False], pd.Index([True, False, True, False, False])],\n)\ndef test_getitem_bool_index_all(ind1, ind2):\n # GH#22533\n idx = MultiIndex.from_tuples([(10, 1), (20, 2), (30, 3), (40, 4), (50, 5)])\n tm.assert_index_equal(idx[ind1], idx)\n\n expected = MultiIndex.from_tuples([(10, 1), (30, 3)])\n tm.assert_index_equal(idx[ind2], expected)\n\n\[email protected](\"ind1\", [[True], pd.Index([True])])\[email protected](\"ind2\", [[False], pd.Index([False])])\ndef test_getitem_bool_index_single(ind1, ind2):\n # GH#22533\n idx = MultiIndex.from_tuples([(10, 1)])\n tm.assert_index_equal(idx[ind1], idx)\n\n expected = pd.MultiIndex(\n levels=[np.array([], dtype=np.int64), np.array([], dtype=np.int64)],\n codes=[[], []],\n )\n tm.assert_index_equal(idx[ind2], expected)\n\n\nclass TestGetLoc:\n def test_get_loc(self, idx):\n assert idx.get_loc((\"foo\", \"two\")) == 1\n assert idx.get_loc((\"baz\", \"two\")) == 3\n with pytest.raises(KeyError, match=r\"^10$\"):\n idx.get_loc((\"bar\", \"two\"))\n with pytest.raises(KeyError, match=r\"^'quux'$\"):\n idx.get_loc(\"quux\")\n\n msg = \"only the default get_loc method is currently supported for MultiIndex\"\n with pytest.raises(NotImplementedError, match=msg):\n idx.get_loc(\"foo\", method=\"nearest\")\n\n # 3 levels\n index = MultiIndex(\n levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],\n codes=[\n np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0]),\n ],\n )\n with pytest.raises(KeyError, match=r\"^\\(1, 1\\)$\"):\n index.get_loc((1, 1))\n assert index.get_loc((2, 0)) == slice(3, 5)\n\n def test_get_loc_duplicates(self):\n index = Index([2, 2, 2, 2])\n result = index.get_loc(2)\n expected = slice(0, 4)\n assert result == expected\n\n index = Index([\"c\", \"a\", \"a\", \"b\", \"b\"])\n rs = index.get_loc(\"c\")\n xp = 0\n assert rs == xp\n\n with pytest.raises(KeyError):\n index.get_loc(2)\n\n def test_get_loc_level(self):\n index = MultiIndex(\n levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],\n codes=[\n np.array([0, 0, 1, 2, 2, 2, 3, 3]),\n np.array([0, 1, 0, 0, 0, 1, 0, 1]),\n np.array([1, 0, 1, 1, 0, 0, 1, 0]),\n ],\n )\n loc, new_index = index.get_loc_level((0, 1))\n expected = slice(1, 2)\n exp_index = index[expected].droplevel(0).droplevel(0)\n assert loc == expected\n assert new_index.equals(exp_index)\n\n loc, new_index = index.get_loc_level((0, 1, 0))\n expected = 1\n assert loc == expected\n assert new_index is None\n\n with pytest.raises(KeyError, match=r\"^\\(2, 2\\)$\"):\n index.get_loc_level((2, 2))\n # GH 22221: unused label\n with pytest.raises(KeyError, match=r\"^2$\"):\n index.drop(2).get_loc_level(2)\n # Unused label on unsorted level:\n with pytest.raises(KeyError, match=r\"^2$\"):\n index.drop(1, level=2).get_loc_level(2, level=2)\n\n index = MultiIndex(\n levels=[[2000], list(range(4))],\n codes=[np.array([0, 0, 0, 0]), np.array([0, 1, 2, 3])],\n )\n result, new_index = index.get_loc_level((2000, slice(None, None)))\n expected = slice(None, None)\n assert result == expected\n assert new_index.equals(index.droplevel(0))\n\n @pytest.mark.parametrize(\"dtype1\", [int, float, bool, str])\n @pytest.mark.parametrize(\"dtype2\", [int, float, 
bool, str])\n def test_get_loc_multiple_dtypes(self, dtype1, dtype2):\n # GH 18520\n levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)]\n idx = pd.MultiIndex.from_product(levels)\n assert idx.get_loc(idx[2]) == 2\n\n @pytest.mark.parametrize(\"level\", [0, 1])\n @pytest.mark.parametrize(\"dtypes\", [[int, float], [float, int]])\n def test_get_loc_implicit_cast(self, level, dtypes):\n # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa\n levels = [[\"a\", \"b\"], [\"c\", \"d\"]]\n key = [\"b\", \"d\"]\n lev_dtype, key_dtype = dtypes\n levels[level] = np.array([0, 1], dtype=lev_dtype)\n key[level] = key_dtype(1)\n idx = MultiIndex.from_product(levels)\n assert idx.get_loc(tuple(key)) == 3\n\n def test_get_loc_cast_bool(self):\n # GH 19086 : int is casted to bool, but not vice-versa\n levels = [[False, True], np.arange(2, dtype=\"int64\")]\n idx = MultiIndex.from_product(levels)\n\n assert idx.get_loc((0, 1)) == 1\n assert idx.get_loc((1, 0)) == 2\n\n with pytest.raises(KeyError, match=r\"^\\(False, True\\)$\"):\n idx.get_loc((False, True))\n with pytest.raises(KeyError, match=r\"^\\(True, False\\)$\"):\n idx.get_loc((True, False))\n\n @pytest.mark.parametrize(\"level\", [0, 1])\n def test_get_loc_nan(self, level, nulls_fixture):\n # GH 18485 : NaN in MultiIndex\n levels = [[\"a\", \"b\"], [\"c\", \"d\"]]\n key = [\"b\", \"d\"]\n levels[level] = np.array([0, nulls_fixture], dtype=type(nulls_fixture))\n key[level] = nulls_fixture\n idx = MultiIndex.from_product(levels)\n assert idx.get_loc(tuple(key)) == 3\n\n def test_get_loc_missing_nan(self):\n # GH 8569\n idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])\n assert isinstance(idx.get_loc(1), slice)\n with pytest.raises(KeyError, match=r\"^3$\"):\n idx.get_loc(3)\n with pytest.raises(KeyError, match=r\"^nan$\"):\n idx.get_loc(np.nan)\n with pytest.raises(TypeError, match=\"unhashable type: 'list'\"):\n # listlike/non-hashable raises TypeError\n idx.get_loc([np.nan])\n\n def test_get_loc_with_values_including_missing_values(self):\n # issue 19132\n idx = MultiIndex.from_product([[np.nan, 1]] * 2)\n expected = slice(0, 2, None)\n assert idx.get_loc(np.nan) == expected\n\n idx = MultiIndex.from_arrays([[np.nan, 1, 2, np.nan]])\n expected = np.array([True, False, False, True])\n tm.assert_numpy_array_equal(idx.get_loc(np.nan), expected)\n\n idx = MultiIndex.from_product([[np.nan, 1]] * 3)\n expected = slice(2, 4, None)\n assert idx.get_loc((np.nan, 1)) == expected\n\n def test_get_loc_duplicates2(self):\n # TODO: de-duplicate with test_get_loc_duplicates above?\n index = MultiIndex(\n levels=[[\"D\", \"B\", \"C\"], [0, 26, 27, 37, 57, 67, 75, 82]],\n codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],\n names=[\"tag\", \"day\"],\n )\n\n assert index.get_loc(\"D\") == slice(0, 3)\n\n\nclass TestWhere:\n def test_where(self):\n i = MultiIndex.from_tuples([(\"A\", 1), (\"A\", 2)])\n\n msg = r\"\\.where is not supported for MultiIndex operations\"\n with pytest.raises(NotImplementedError, match=msg):\n i.where(True)\n\n @pytest.mark.parametrize(\"klass\", [list, tuple, np.array, pd.Series])\n def test_where_array_like(self, klass):\n i = MultiIndex.from_tuples([(\"A\", 1), (\"A\", 2)])\n cond = [False, True]\n msg = r\"\\.where is not supported for MultiIndex operations\"\n with pytest.raises(NotImplementedError, match=msg):\n i.where(klass(cond))\n\n\nclass TestContains:\n def test_contains_top_level(self):\n midx = MultiIndex.from_product([[\"A\", \"B\"], [1, 2]])\n assert \"A\" 
in midx\n assert \"A\" not in midx._engine\n\n def test_contains_with_nat(self):\n # MI with a NaT\n mi = MultiIndex(\n levels=[[\"C\"], pd.date_range(\"2012-01-01\", periods=5)],\n codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],\n names=[None, \"B\"],\n )\n assert (\"C\", pd.Timestamp(\"2012-01-01\")) in mi\n for val in mi.values:\n assert val in mi\n\n def test_contains(self, idx):\n assert (\"foo\", \"two\") in idx\n assert (\"bar\", \"two\") not in idx\n assert None not in idx\n\n def test_contains_with_missing_value(self):\n # GH#19132\n idx = MultiIndex.from_arrays([[1, np.nan, 2]])\n assert np.nan in idx\n\n idx = MultiIndex.from_arrays([[1, 2], [np.nan, 3]])\n assert np.nan not in idx\n assert (1, np.nan) in idx\n\n def test_multiindex_contains_dropped(self):\n # GH#19027\n # test that dropped MultiIndex levels are not in the MultiIndex\n # despite continuing to be in the MultiIndex's levels\n idx = MultiIndex.from_product([[1, 2], [3, 4]])\n assert 2 in idx\n idx = idx.drop(2)\n\n # drop implementation keeps 2 in the levels\n assert 2 in idx.levels[0]\n # but it should no longer be in the index itself\n assert 2 not in idx\n\n # also applies to strings\n idx = MultiIndex.from_product([[\"a\", \"b\"], [\"c\", \"d\"]])\n assert \"a\" in idx\n idx = idx.drop(\"a\")\n assert \"a\" in idx.levels[0]\n assert \"a\" not in idx\n\n def test_contains_td64_level(self):\n # GH#24570\n tx = pd.timedelta_range(\"09:30:00\", \"16:00:00\", freq=\"30 min\")\n idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])\n assert tx[0] in idx\n assert \"element_not_exit\" not in idx\n assert \"0 day 09:30:00\" in idx\n\n @pytest.mark.slow\n def test_large_mi_contains(self):\n # GH#10645\n result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])\n assert not (10 ** 6, 0) in result\n\n\ndef test_timestamp_multiindex_indexer():\n # https://github.com/pandas-dev/pandas/issues/26944\n idx = pd.MultiIndex.from_product(\n [\n pd.date_range(\"2019-01-01T00:15:33\", periods=100, freq=\"H\", name=\"date\"),\n [\"x\"],\n [3],\n ]\n )\n df = pd.DataFrame({\"foo\": np.arange(len(idx))}, idx)\n result = df.loc[pd.IndexSlice[\"2019-1-2\":, \"x\", :], \"foo\"]\n qidx = pd.MultiIndex.from_product(\n [\n pd.date_range(\n start=\"2019-01-02T00:15:33\",\n end=\"2019-01-05T02:15:33\",\n freq=\"H\",\n name=\"date\",\n ),\n [\"x\"],\n [3],\n ]\n )\n should_be = pd.Series(data=np.arange(24, len(qidx) + 24), index=qidx, name=\"foo\")\n tm.assert_series_equal(result, should_be)\n\n\[email protected](\n \"index_arr,expected,target,algo\",\n [\n ([[np.nan, \"a\", \"b\"], [\"c\", \"d\", \"e\"]], 0, np.nan, \"left\"),\n ([[np.nan, \"a\", \"b\"], [\"c\", \"d\", \"e\"]], 1, (np.nan, \"c\"), \"right\"),\n ([[\"a\", \"b\", \"c\"], [\"d\", np.nan, \"d\"]], 1, (\"b\", np.nan), \"left\"),\n ],\n)\ndef test_get_slice_bound_with_missing_value(index_arr, expected, target, algo):\n # issue 19132\n idx = MultiIndex.from_arrays(index_arr)\n result = idx.get_slice_bound(target, side=algo, kind=\"loc\")\n assert result == expected\n\n\[email protected](\n \"index_arr,expected,start_idx,end_idx\",\n [\n ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 2, None), np.nan, 1),\n ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 3, None), np.nan, (2, 5)),\n ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), 3),\n ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), (3, 5)),\n ],\n)\ndef test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_idx):\n # issue 19132\n idx = MultiIndex.from_arrays(index_arr)\n result = 
idx.slice_indexer(start=start_idx, end=end_idx)\n assert result == expected\n\n\ndef test_pyint_engine():\n # GH#18519 : when combinations of codes cannot be represented in 64\n # bits, the index underlying the MultiIndex engine works with Python\n # integers, rather than uint64.\n N = 5\n keys = [\n tuple(l)\n for l in [\n [0] * 10 * N,\n [1] * 10 * N,\n [2] * 10 * N,\n [np.nan] * N + [2] * 9 * N,\n [0] * N + [2] * 9 * N,\n [np.nan] * N + [2] * 8 * N + [0] * N,\n ]\n ]\n # Each level contains 4 elements (including NaN), so it is represented\n # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. If we were using a\n # 64 bit engine and truncating the first levels, the fourth and fifth\n # keys would collide; if truncating the last levels, the fifth and\n # sixth; if rotating bits rather than shifting, the third and fifth.\n\n for idx in range(len(keys)):\n index = MultiIndex.from_tuples(keys)\n assert index.get_loc(keys[idx]) == idx\n\n expected = np.arange(idx + 1, dtype=np.intp)\n result = index.get_indexer([keys[i] for i in expected])\n tm.assert_numpy_array_equal(result, expected)\n\n # With missing key:\n idces = range(len(keys))\n expected = np.array([-1] + list(idces), dtype=np.intp)\n missing = tuple([0, 1] * 5 * N)\n result = index.get_indexer([missing] + [keys[i] for i in idces])\n tm.assert_numpy_array_equal(result, expected)\n"
]
| [
[
"numpy.array",
"pandas.Index",
"pandas.date_range",
"pandas.MultiIndex.from_tuples",
"pandas._testing.assert_almost_equal",
"pandas.timedelta_range",
"pandas._testing.makeCustomDataframe",
"pandas.MultiIndex.from_arrays",
"pandas._testing.makeTimeDataFrame",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.MultiIndex",
"pandas.MultiIndex.from_product",
"pandas.Categorical",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_series_equal",
"pandas.Timestamp"
]
]
|
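The pandas test file above asserts the semantics of `MultiIndex.get_indexer` under the `backfill`/`bfill` and `pad`/`ffill` fill methods. A minimal sketch of that behaviour on a toy index (values invented for illustration, not taken from the tests):

```python
import pandas as pd

idx = pd.MultiIndex.from_product([[0, 1], [10, 20]])  # (0,10) (0,20) (1,10) (1,20)
target = pd.MultiIndex.from_tuples([(0, 15), (1, 20)])

# Without a fill method, only exact matches resolve; everything else is -1.
print(idx.get_indexer(target))                     # [-1  3]
# "backfill"/"bfill" takes the next larger key, "pad"/"ffill" the previous one.
print(idx.get_indexer(target, method="backfill"))  # [1 3]
print(idx.get_indexer(target, method="pad"))       # [0 3]
```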
LilDataScientist/Kaggle | [
"03ebd6f73b530ef698ee978784374b2fabe6679d"
]
| [
"Multiclass Classification/example.py"
]
| [
"from sklearn import datasets\nfrom MulticlassClassification import MulticlassClassification\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nX, y = datasets.load_iris(return_X_y=True)\n\nlabel_encoding = LabelEncoder()\ny = label_encoding.fit_transform(y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n\nclf = MulticlassClassification()\n\nclf.fit(X_train, y_train)\n\nprint(accuracy_score(y_test, clf.predict(X_test)))\n"
]
| [
[
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.LabelEncoder",
"sklearn.datasets.load_iris"
]
]
|
slippy0/gpt-2 | [
"26d55a564c1da8c0a1697a9e2fe63dd8923dadd3"
]
| [
"train.py"
]
| [
"#!/usr/bin/env python3\n# Usage:\n# PYTHONPATH=src ./train --dataset <file|directory|glob>\n\nimport argparse\nimport json\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport tqdm\nfrom tensorflow.core.protobuf import rewriter_config_pb2\n\nimport model, sample, encoder\nfrom load_dataset import load_dataset, Sampler\nfrom accumulate import AccumulatingOptimizer\nimport memory_saving_gradients\n\n\nCHECKPOINT_DIR = 'checkpoint'\nSAMPLE_DIR = 'samples'\n\n\nparser = argparse.ArgumentParser(\n description='Fine-tune GPT-2 on your custom dataset.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--dataset', metavar='PATH', type=str, required=False, help='Input file, directory, or glob pattern (utf-8 text, or preencoded .npz files).')\nparser.add_argument('--perm_dataset', metavar='PATH', type=str, required=False, help='Non-cycled input data. Input file, directory, or glob pattern (utf-8 text, or preencoded .npz files).')\nparser.add_argument('--model_name', metavar='MODEL', type=str, default='117M', help='Pretrained model name')\nparser.add_argument('--combine', metavar='CHARS', type=int, default=50000, help='Concatenate input files with <|endoftext|> separator into chunks of this minimum size')\nparser.add_argument('--num_cycle_files', metavar='N', type=int, default=50, help='Number of files to cycle')\n\nparser.add_argument('--batch_size', metavar='SIZE', type=int, default=1, help='Batch size')\n\nparser.add_argument('--learning_rate', metavar='LR', type=float, default=0.00002, help='Learning rate for Adam')\nparser.add_argument('--grad_clip', metavar='N', type=float, default=1, help='Gradient clip')\nparser.add_argument('--accumulate_gradients', metavar='N', type=int, default=1, help='Accumulate gradients across N minibatches.')\nparser.add_argument('--memory_saving_gradients', default=False, action='store_true', help='Use gradient checkpointing to reduce vram usage.')\nparser.add_argument('--only_train_transformer_layers', default=False, action='store_true', help='Restrict training to the transformer blocks.')\nparser.add_argument('--optimizer', type=str, default='adam', help='Optimizer. <adam|sgd>.')\nparser.add_argument('--noise', type=float, default=0.0, help='Add noise to input training data to regularize against typos.')\n\nparser.add_argument('--top_k', type=int, default=40, help='K for top-k sampling.')\nparser.add_argument('--top_p', type=float, default=0.0, help='P for top-p sampling. Overrides top_k if set > 0.')\n\nparser.add_argument('--restore_from', type=str, default='latest', help='Either \"latest\", \"fresh\", or a path to a checkpoint file')\nparser.add_argument('--run_name', type=str, default='run1', help='Run id. 
Name of subdirectory in checkpoint/ and samples/')\nparser.add_argument('--sample_every', metavar='N', type=int, default=100, help='Generate samples every N steps')\nparser.add_argument('--sample_length', metavar='TOKENS', type=int, default=1023, help='Sample this many tokens')\nparser.add_argument('--sample_num', metavar='N', type=int, default=1, help='Generate this many samples')\nparser.add_argument('--save_every', metavar='N', type=int, default=1000, help='Write a checkpoint every N steps')\nparser.add_argument('--cycle_every', metavar='N', type=int, default=100, help='Cycle dataset')\nparser.add_argument('--train_for', metavar='N', type=int, default=0, help='max number of iterations to train for.')\n\n\nparser.add_argument('--val_dataset', metavar='PATH', type=str, default=None, help='Dataset for validation loss, defaults to --dataset.')\nparser.add_argument('--val_batch_size', metavar='SIZE', type=int, default=2, help='Batch size for validation.')\nparser.add_argument('--val_batch_count', metavar='N', type=int, default=40, help='Number of batches for validation.')\nparser.add_argument('--val_every', metavar='STEPS', type=int, default=0, help='Calculate validation loss every STEPS steps.')\n\n\ndef maketree(path):\n try:\n os.makedirs(path)\n except:\n pass\n\n\ndef randomize(context, hparams, p):\n if p > 0:\n mask = tf.random.uniform(shape=tf.shape(context)) < p\n noise = tf.random.uniform(shape=tf.shape(context), minval=0, maxval=hparams.n_vocab, dtype=tf.int32)\n return tf.where(mask, noise, context)\n else:\n return context\n\n\ndef main():\n args = parser.parse_args()\n enc = encoder.get_encoder(args.model_name)\n hparams = model.default_hparams()\n with open(os.path.join('models', args.model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if args.sample_length > hparams.n_ctx:\n raise ValueError(\n \"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n if args.model_name != '117M':\n args.memory_saving_gradients = True\n if args.optimizer == 'adam' and not args.only_train_transformer_layers:\n print('WARNING: cannot use adam and still train embeddings on 1080ti!')\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.graph_options.rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.OFF\n with tf.Session(config=config) as sess:\n context = tf.placeholder(tf.int32, [args.batch_size, None])\n context_in = randomize(context, hparams, args.noise)\n output = model.model(hparams=hparams, X=context_in)\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=context[:, 1:], logits=output['logits'][:, :-1]))\n\n if args.val_every > 0:\n val_context = tf.placeholder(tf.int32, [args.val_batch_size, None])\n val_output = model.model(hparams=hparams, X=val_context)\n val_loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=val_context[:, 1:], logits=val_output['logits'][:, :-1]))\n val_loss_summary = tf.summary.scalar('val_loss', val_loss)\n\n\n tf_sample = sample.sample_sequence(\n hparams=hparams,\n length=args.sample_length,\n context=context,\n batch_size=args.batch_size,\n temperature=1.0,\n top_k=args.top_k,\n top_p=args.top_p)\n\n all_vars = [v for v in tf.trainable_variables() if 'model' in v.name]\n train_vars = [v for v in all_vars if '/h' in v.name] if args.only_train_transformer_layers else all_vars\n\n if args.optimizer == 'adam':\n opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n elif args.optimizer == 'sgd':\n opt = 
tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)\n else:\n exit('Bad optimizer:', args.optimizer)\n\n if args.accumulate_gradients > 1:\n opt = AccumulatingOptimizer(\n opt=opt,\n var_list=train_vars, memsavinggrads=args.memory_saving_gradients, grad_clip=args.grad_clip)\n opt_reset = opt.reset()\n opt_compute = opt.compute_gradients(loss)\n opt_apply, opt_norm = opt.apply_gradients()\n summary_loss = tf.summary.scalar('loss', opt_apply)\n else:\n if args.memory_saving_gradients:\n opt_grads = memory_saving_gradients.gradients(loss, train_vars)\n else:\n opt_grads = tf.gradients(loss, train_vars)\n\n opt_grads, opt_norm = tf.clip_by_global_norm(opt_grads, args.grad_clip)\n opt_grads = list(zip(opt_grads, train_vars))\n\n opt_apply = opt.apply_gradients(opt_grads)\n summary_loss = tf.summary.scalar('loss', loss)\n\n summary_lr = tf.summary.scalar('learning_rate', args.learning_rate)\n summaries = tf.summary.merge([summary_lr, summary_loss])\n\n summary_log = tf.summary.FileWriter(\n os.path.join(CHECKPOINT_DIR, args.run_name))\n\n saver = tf.train.Saver(\n var_list=all_vars,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=2)\n sess.run(tf.global_variables_initializer())\n\n if args.restore_from == 'latest':\n ckpt = tf.train.latest_checkpoint(\n os.path.join(CHECKPOINT_DIR, args.run_name))\n if ckpt is None:\n # Get fresh GPT weights if new run.\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', args.model_name))\n elif args.restore_from == 'fresh':\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', args.model_name))\n else:\n ckpt = tf.train.latest_checkpoint(args.restore_from)\n print('Loading checkpoint', ckpt)\n saver.restore(sess, ckpt)\n\n print('Loading dataset...')\n data_sampler = Sampler(enc, args.combine, args.dataset, args.perm_dataset, args.num_cycle_files)\n if args.val_every > 0:\n val_chunks = load_dataset(enc, args.val_dataset, args.combine) if args.val_dataset else chunks\n print('dataset has', data_sampler.total_size, 'tokens')\n print('Training...')\n\n if args.val_every > 0:\n # Sample from validation set once with fixed seed to make\n # it deterministic during training as well as across runs.\n val_data_sampler = Sampler(val_chunks, seed=1)\n val_batches = [[val_data_sampler.sample(1024) for _ in range(args.val_batch_size)]\n for _ in range(args.val_batch_count)]\n\n counter = 1\n counter_path = os.path.join(CHECKPOINT_DIR, args.run_name, 'counter')\n if os.path.exists(counter_path):\n # Load the step number if we're resuming a run\n # Add 1 so we don't immediately try to save again\n with open(counter_path, 'r') as fp:\n counter = int(fp.read()) + 1\n start = counter - 1\n def save():\n maketree(os.path.join(CHECKPOINT_DIR, args.run_name))\n print(\n 'Saving',\n os.path.join(CHECKPOINT_DIR, args.run_name,\n 'model-{}').format(counter))\n saver.save(\n sess,\n os.path.join(CHECKPOINT_DIR, args.run_name, 'model'),\n global_step=counter)\n with open(counter_path, 'w') as fp:\n fp.write(str(counter) + '\\n')\n\n def generate_samples():\n print('Generating samples...')\n context_tokens = data_sampler.sample(1)\n all_text = []\n index = 0\n while index < args.sample_num:\n out = sess.run(\n tf_sample,\n feed_dict={context: args.batch_size * [context_tokens]})\n for i in range(min(args.sample_num - index, args.batch_size)):\n text = enc.decode(out[i])\n text = '======== SAMPLE {} ========\\n{}\\n'.format(\n index + 1, text)\n all_text.append(text)\n index += 1\n print(text)\n maketree(os.path.join(SAMPLE_DIR, args.run_name))\n with 
open(\n os.path.join(SAMPLE_DIR, args.run_name,\n 'samples-{}').format(counter), 'w') as fp:\n fp.write('\\n'.join(all_text))\n\n def validation():\n print('Calculating validation loss...')\n losses = []\n for batch in tqdm.tqdm(val_batches):\n losses.append(sess.run(val_loss, feed_dict={val_context: batch}))\n v_val_loss = np.mean(losses)\n v_summary = sess.run(val_loss_summary, feed_dict={val_loss: v_val_loss})\n summary_log.add_summary(v_summary, counter)\n summary_log.flush()\n print(\n '[{counter} | {time:2.2f}] validation loss = {loss:2.2f}'\n .format(\n counter=counter,\n time=time.time() - start_time,\n loss=v_val_loss))\n\n def sample_batch():\n return [data_sampler.sample(1024) for _ in range(args.batch_size)]\n\n\n avg_loss = (0.0, 0.0)\n start_time = time.time()\n\n try:\n while True:\n if counter % args.save_every == 0:\n save()\n if counter % args.sample_every == 0:\n generate_samples()\n if args.val_every > 0 and (counter % args.val_every == 0 or counter == 1):\n validation()\n if args.train_for and counter >= args.train_for:\n print(\"reached iteration limit! Shutting down...\")\n save()\n break\n if args.accumulate_gradients > 1:\n sess.run(opt_reset)\n for _ in range(args.accumulate_gradients):\n sess.run(\n opt_compute, feed_dict={context: sample_batch()})\n (v_loss, v_summary, v_norm) = sess.run((opt_apply, summary_loss, opt_norm))\n else:\n (_, v_loss, v_summary, v_norm) = sess.run(\n (opt_apply, loss, summary_loss, opt_norm),\n feed_dict={context: sample_batch()})\n\n summary_log.add_summary(v_summary, counter)\n\n avg_loss = (avg_loss[0] * 0.99 + v_loss,\n avg_loss[1] * 0.99 + 1.0)\n\n print(\n '[{counter} | {time:2.2f}] loss={loss:2.2f} avg={avg:2.2f}'\n .format(\n counter=counter,\n time=(time.time() - start_time) / (counter - start),\n loss=v_loss,\n avg=avg_loss[0] / avg_loss[1]))\n\n if counter % args.cycle_every == 0:\n data_sampler.cycle_files()\n counter += 1\n except KeyboardInterrupt:\n print('interrupted')\n save()\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"tensorflow.trainable_variables",
"tensorflow.summary.merge",
"tensorflow.train.AdamOptimizer",
"tensorflow.shape",
"tensorflow.train.latest_checkpoint",
"tensorflow.summary.scalar",
"tensorflow.where",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.mean",
"tensorflow.gradients",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.clip_by_global_norm",
"tensorflow.global_variables_initializer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
]
]
|
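train.py above guards training with `tf.clip_by_global_norm` before `apply_gradients`. A minimal sketch of what that call returns, using a toy variable and loss rather than the trainer's actual graph, and assuming the same TF 1.x graph/session API the script uses:

```python
import tensorflow as tf  # assumes a TF 1.x environment, as in the script above

w = tf.Variable([3.0, 4.0])
loss = tf.reduce_sum(tf.square(w))   # gradient is 2*w = [6, 8]
grads = tf.gradients(loss, [w])
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = tf.train.GradientDescentOptimizer(0.1).apply_gradients(list(zip(clipped, [w])))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(global_norm))  # 10.0 = sqrt(6^2 + 8^2)
    print(sess.run(clipped[0]))   # [0.6 0.8]: rescaled so the global norm equals 1
    sess.run(train_op)
```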
kevjp/openstreetmap-carto | [
"be30cfe8d73f78cb4b5ba9acaaf42a942c70270d"
]
| [
"geo_agent/visualiser_geo.py"
]
| [
"'''\ncontains all methods for visualisation tasks\n'''\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport geopandas as gpd\nimport geoplot.crs as gcrs\nimport geoplot as gplt\nimport numpy as np\nimport pandas as pd\nfrom shapely.geometry import Point, LineString, shape\nimport ruamel.yaml as yaml\nfrom parse_yaml import parse_config\nimport os\n\n\n\n\ndef set_style(Config):\n '''sets the plot style\n\n '''\n if Config.plot_style.lower() == 'dark':\n mpl.style.use('plot_styles/dark.mplstyle')\n\n\ndef build_fig_geo(Config, shapefile = './postcode_shape_files/osm_north_london.shp', figsize=(5,7)):\n # set style parameters for plot\n set_style(Config)\n # Set total size of figure to 5'' x 7''\n fig = plt.figure(figsize=(5,7))\n # specify space where plots will be placed. There will be 2 subplots\n # one on each row where each plot will stretch across the entire screen\n spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[10,5])\n\n # read in shapefile for map\n map_df = gpd.read_file(shapefile)\n\n\n #set projection for shapefile in first subplot\n ax1 = fig.add_subplot(spec[0,0], projection=gcrs.AlbersEqualArea())\n\n extent = [-0.18, 51.62, -0.10, 51.7]\n # plot map\n gplt.polyplot(map_df['geometry'], projection=gcrs.AlbersEqualArea(), ax=ax1, extent=extent)\n\n plt.title('Map showing location of infected individuals')\n # plt.show(ax1)\n\n # Plot to show SIR curve\n ax2 = fig.add_subplot(spec[1,0])\n ax2.set_title('number of infected')\n #ax2.set_xlim(0, simulation_steps)\n ax2.set_ylim(0, Config.pop_size + 100)\n return fig, spec, ax1, ax2, map_df\n\n\n\ndef draw_tstep(Config,\n fig, ax1, ax2, shapefile_df):\n #construct plot and visualise\n\n #get color palettes\n palette = Config.get_palette()\n\n # specify space where plots will be placed. 
There will be 2 subplots\n # one on each row where each plot will stretch across the entire screen\n spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[10,5])\n # clear previous subplots\n ax1.clear()\n ax2.clear()\n\n extent = [-0.18, 51.62, -0.10, 51.7]\n #set projection for shapefile in first subplot\n ax1 = gplt.polyplot(shapefile_df, projection=gcrs.AlbersEqualArea(), ax=ax1)\n\n\n # if Config.self_isolate and Config.isolation_bounds != None:\n # build_hospital(Config.isolation_bounds[0], Config.isolation_bounds[2],\n # Config.isolation_bounds[1], Config.isolation_bounds[3], ax1,\n # addcross = False)\n\n #plot population segments\n all_agents = Config.point_plots_matrix\n\n # # Create nd array containing healthy, infected, immune and fatalities in one column and a second column containing label\n # healthy = population[population[:,6] == 0][:,1:3]\n # healthy_label = ['healthy'] * healthy.shape[0]\n\n # infected = population[population[:,6] == 1][:,1:3]\n # infected_label = ['infected'] * infected.shape[0]\n\n # immune = population[population[:,6] == 2][:,1:3]\n # immune_label = ['immune'] * immune.shape[0]\n\n # fatalities = population[population[:,6] == 3][:,1:3]\n # fatalities_label = ['fatalities'] * fatalities.shape[0]\n\n # # merge all data together in one nd array for plotting\n # pop_coordinates = np.concatenate((healthy, infected, immune, fatalities))\n\n # pop_labels = np.concatenate((healthy_label, infected_label, immune_label, fatalities_label))\n\n # pop_matrix = np.vstack((pop_coordinates.T, pop_labels))\n # pop_df = pd.DataFrame(pop_matrix.T, columns = ['Longitude', 'Latitude', 'Label'])\n pop_df = pd.DataFrame(all_agents, columns = ['geometry', 'label', 'disease_progression'])\n # Manually added landmarks for now\n # pop_df = pop_df.append({'geometry' : Point(-0.1256032, 51.63368), 'label' : 'supermarket'}, ignore_index=True)\n # pop_df = pop_df.append({'geometry' : Point(-0.1713237, 51.6495658), 'label' : 'supermarket'}, ignore_index=True)\n # pop_df = pop_df.append({'geometry' : Point(-0.1137062, 51.6347074), 'label' : 'park'}, ignore_index=True)\n # pop_df = pop_df.append({'geometry' : Point(-0.1693677, 51.6615703), 'label' : 'home'}, ignore_index=True)\n # pop_df = pop_df.append({'geometry' : Point(-0.1705196, 51.6665827), 'label' : 'home'}, ignore_index=True)\n\n pop_gdf = gpd.GeoDataFrame(pop_df, geometry=pop_df.geometry)\n\n # pop_gdf = gpd.GeoDataFrame(all_agents, columns = ['geometry', 'label'])\n\n extent = [-0.18, 51.62, -0.10, 51.7]\n extent = [-0.2671, 51.6167, -0.1198, 51.6940]\n gplt.pointplot(pop_gdf, ax=ax1, extent=extent, hue='label', s= 2)\n\n\n # plt.show()\n plt.draw()\n plt.pause(0.001)\n # Convert to geodataframe and plot on axis\n # pop_gdf = gpd.GeoDa0taFrame(pop_df, geometry=gpd.points_from_xy(pop_df.Longitude.astype(float).values, pop_df.Latitude.astype(float).values))\n # gplt.pointplot(pop_gdf, ax=ax1, extent=shapefile_df.total_bounds, hue = 'Label', legend=True)\n # gplt.pointplot(pop_gdf, ax=ax1, hue = 'Label', legend=True)\n\n\ndef parse_agent_location(Config):\n\n #plot population segments\n all_agents = Config.point_plots_matrix\n\n pop_df = pd.DataFrame(all_agents, columns = ['geometry', 'label', 'disease_progression'])\n # pop_df = pd.DataFrame(all_agents, columns = ['geometry', 'label'])\n\n # pop_df = pop_df.append({'geometry' : Point(-0.1256032, 51.63368), 'label' : 'supermarket'}, ignore_index=True)\n # pop_df = pop_df.append({'geometry' : Point(-0.1713237, 51.6495658), 'label' : 'supermarket'}, ignore_index=True)\n # 
pop_df = pop_df.append({'geometry' : Point(-0.1137062, 51.6347074), 'label' : 'park'}, ignore_index=True)\n # pop_df = pop_df.append({'geometry' : Point(-0.1693677, 51.6615703), 'label' : 'home'}, ignore_index=True)\n # pop_df = pop_df.append({'geometry' : Point(-0.1705196, 51.6665827), 'label' : 'home'}, ignore_index=True)\n\n\n pop_gdf = gpd.GeoDataFrame(pop_df, geometry=pop_df.geometry)\n pop_gdf.index.name = 'id'\n # local machine file path\n # pop_gdf.to_file(\"/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/output.json\", index=True, driver=\"GeoJSON\")\n # docker file path\n pop_gdf.to_file(\"/openstreetmap-carto/output.json\", index=True, driver=\"GeoJSON\")\n\n # convert_df_2_string(pop_gdf)\n\n # parse_yaml_result()\n\n # str_out = convert_df_2_string(pop_gdf)\n # assign_agent_loc_2_mml_file(updated_locations = str_out)\n # return str_out\n\n\ndef convert_df_2_string(df):\n \"\"\"\n Convert data frame rows to string output where each new line is defined as \\n\n \"\"\"\n # ititialise string\n output = 'agent,wkt\\n'\n for i, row in df.iterrows():\n if i == len(df) - 1:\n output += str(row['label']) + ',' + str(row['geometry'])\n else:\n output += str(row['label']) + ',' + str(row['geometry']) + '\\n'\n # set environment variable ${AGENTS}\n # os.environ['AGENTS'] = output\n return output\n\n# local machine version\n# def assign_agent_loc_2_mml_file(file='/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml', updated_locations = None):\n# docker version\ndef assign_agent_loc_2_mml_file(file='/openstreetmap-carto/project.mml', updated_locations = None):\n\n yml = yaml.YAML()\n yml.preserve_quotes = True\n yml.width = 4096\n with open(file, 'r') as stream:\n file_string = yml.load(stream)\n # update project.mml file with current agent locations\n file_string['Layer'][-1]['Datasource']['inline'] = updated_locations\n # write to yaml file\n # local machine version\n # with open('/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml', 'w') as file:\n # docker version\n with open('/openstreetmap-carto/project.mml', 'w') as file:\n\n yml.indent(mapping=2, sequence=4, offset=2)\n documents = yml.dump(file_string, file)\n\n\ndef parse_yaml_result():\n # local machine version\n # conf = parse_config(path=\"/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml\")\n # with open('/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml', 'w') as file:\n # docker version\n conf = parse_config(path=\"/openstreetmap-carto/project.mml\")\n with open('/openstreetmap-carto/project.mml', 'w') as file:\n yaml.dump(conf, file)\n\n\n\n\n"
]
| [
[
"matplotlib.style.use",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause"
]
]
|
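visualiser_geo.py above uses ruamel.yaml to rewrite the `inline` datasource of `project.mml` while preserving quoting and layout. A round-trip sketch of that pattern, using an in-memory document instead of the real project.mml:

```python
import io
import ruamel.yaml as yaml

yml = yaml.YAML()
yml.preserve_quotes = True

doc = yml.load("Layer:\n  - Datasource:\n      inline: 'old'\n")
# Swap in new agent locations, mirroring assign_agent_loc_2_mml_file above.
doc["Layer"][-1]["Datasource"]["inline"] = "agent,wkt\nhome,POINT (-0.17 51.66)"

buf = io.StringIO()
yml.indent(mapping=2, sequence=4, offset=2)
yml.dump(doc, buf)
print(buf.getvalue())
```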
noskill/JRMOT_ROS | [
"ca1e87e51ecfeb14f2b652d613f3b02c592afb38"
]
| [
"paper_experiments/utils/detection.py"
]
| [
"# vim: expandtab:ts=4:sw=4\nimport numpy as np\n\n\nclass Detection(object):\n \"\"\"\n This class represents a bounding box detection in a single image.\n\n Parameters\n ----------\n tlwh : array_like\n Bounding box in format `(x, y, w, h)`.\n confidence : float\n Detector confidence score.\n feature : array_like\n A feature vector that describes the object contained in this image.\n\n Attributes\n ----------\n tlwh : ndarray\n Bounding box in format `(top left x, top left y, width, height)`.\n confidence : ndarray\n Detector confidence score.\n feature : ndarray | NoneType\n A feature vector that describes the object contained in this image.\n\n \"\"\"\n\n def __init__(self, tlwh, box_3d, confidence, appearance_feature, feature):\n self.tlwh = np.asarray(tlwh, dtype=np.float)\n # Note that detections format is centre of 3D box and dimensions (not bottom face)\n self.box_3d = box_3d\n if box_3d is not None:\n self.box_3d[1] -= box_3d[4]/2\n self.box_3d = np.asarray(box_3d, dtype=np.float32)\n self.confidence = float(confidence)\n self.appearance_feature = np.asarray(appearance_feature, dtype=np.float32)\n if feature is not None:\n self.feature = np.asarray(feature, dtype = np.float32)\n else:\n self.feature = None\n\n\n def to_tlbr(self):\n \"\"\"Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,\n `(top left, bottom right)`.\n \"\"\"\n ret = self.tlwh.copy()\n ret[2:] += ret[:2]\n return ret\n\n def to_xyah(self):\n \"\"\"Convert bounding box to format `(center x, center y, aspect ratio,\n height)`, where the aspect ratio is `width / height`.\n \"\"\"\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret\n def to_xywh(self):\n \"\"\"Convert bounding box to format `(center x, center y, aspect ratio,\n height)`, where the aspect ratio is `width / height`.\n \"\"\"\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n return ret\n def get_3d_distance(self):\n if self.box_3d is not None:\n return np.sqrt(self.box_3d[0]**2 + self.box_3d[2]**2)"
]
| [
[
"numpy.asarray",
"numpy.sqrt"
]
]
|
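The `Detection` class above keeps boxes in `(top-left x, top-left y, width, height)` form and converts lazily. A worked example of the `to_tlbr` and `to_xyah` arithmetic with made-up numbers:

```python
import numpy as np

tlwh = np.array([10.0, 20.0, 50.0, 100.0])  # top-left (10, 20), 50 wide, 100 tall

tlbr = tlwh.copy()
tlbr[2:] += tlbr[:2]      # -> [10. 20. 60. 120.]  (min x, min y, max x, max y)

xyah = tlwh.copy()
xyah[:2] += xyah[2:] / 2  # centre: (10+25, 20+50) = (35, 70)
xyah[2] /= xyah[3]        # aspect ratio: 50/100 = 0.5
print(tlbr, xyah)         # [10. 20. 60. 120.] [35. 70. 0.5 100.]
```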
tom-bird/binary-gen-models | [
"a9311d27d74e25eb55d1b06295ac8a121b5c1d7b"
]
| [
"models/flowpp_cifar.py"
]
| [
"import re\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom models.coupling import (\n Parallel, MixLogisticConvAttnCoupling, TupleFlip, Squeeze, StripeSplit, ChannelSplit, GatedConv\n)\nfrom models.flows import BaseFlow, Compose, Inverse, ImgProc, Normalize, Sigmoid, Pointwise\nfrom models.modules import WnConv2d, BinarisedWnLayer, Pass\nfrom utils import sumflat, standard_normal_logp, unpack_bits, pack_bits\n\n\nclass FlowModel(nn.Module):\n def __init__(self, main_flow, dequant_flow, x_shape, z_shape):\n super().__init__()\n self.main_flow = main_flow\n self.dequant_flow = dequant_flow\n assert isinstance(x_shape, tuple) and isinstance(z_shape, tuple)\n self.x_shape = x_shape\n self.z_shape = z_shape\n self.best_loss = np.inf\n\n def calc_dequant_noise(self, x):\n eps = torch.randn_like(x)\n u, dequant_logd = self.dequant_flow(eps=eps, aux=x, inverse=False)\n assert u.shape == x.shape and dequant_logd.shape == (x.shape[0],)\n return u, dequant_logd - sumflat(standard_normal_logp(eps))\n\n def forward(self, x, *, u=None, dequant_logd=None):\n assert (u is None) == (dequant_logd is None)\n if u is None:\n u, dequant_logd = self.calc_dequant_noise(x)\n assert u.shape == x.shape and dequant_logd.shape == (x.shape[0],)\n assert (u >= 0).all() and (u <= 1).all()\n\n z, main_logd = self.main_flow(x + u, aux=None, inverse=False)\n z_logp = sumflat(standard_normal_logp(z))\n total_logd = dequant_logd + main_logd + z_logp\n assert z.shape[0] == x.shape[0] and z.numel() == x.numel()\n assert main_logd.shape == dequant_logd.shape == total_logd.shape == z_logp.shape == (x.shape[0],)\n return {\n 'u': u,\n 'z': z,\n 'total_logd': total_logd,\n 'dequant_logd': dequant_logd,\n }\n\n def loss(self, x, *args, **kwargs):\n ret = self.forward(x, *args, **kwargs)\n loss = -np.log2(np.e) * torch.sum(ret['total_logd']) / np.prod(x.shape)\n breakdown = {'dequant_logd': torch.sum(ret['dequant_logd'])}\n return loss, breakdown\n\n def sample(self, n, device):\n with torch.no_grad():\n z = torch.randn(n, *self.z_shape).to(device)\n x = self.main_flow(z, aux=None, inverse=True)[0]\n return (x-127.5) / 127.5 # rescale to [-1, 1]\n\n def clamp_weights(self):\n for module in self.modules():\n if isinstance(module, BinarisedWnLayer):\n module.clamp_weights()\n\n def post_epoch(self):\n pass\n\n def binary_state_dict(self, **kwargs):\n state_dict = super().state_dict(**kwargs)\n for name, p in state_dict.items():\n if any(re.search(s, name) for s in self.binary_conv_patterns):\n b = torch.sign(p)\n b = ((b + 1) / 2).byte() # to {0, 1}\n state_dict[name] = pack_bits(b)\n return state_dict\n\n def load_binary_state_dict(self, state_dict, **kwargs):\n for name, p in state_dict.items():\n if any(re.search(s, name) for s in self.binary_conv_patterns):\n b = unpack_bits(p)\n b = 2*b - 1 # to {-1, 1}\n state_dict[name] = b\n super().load_state_dict(state_dict, **kwargs)\n\n\nclass CifarFlowPP(FlowModel):\n def __init__(self, *, hdim=96, blocks=10, dequant_blocks=2, mix_components=32, attn_heads=4, attn_version=True,\n pdrop=0.2, force_float32_cond, binarised=False, binarised_act=False, use_reslayers=True, **kwargs):\n fp_acts = not binarised_act\n def coupling(cf_shape_, for_dequant=False):\n return [\n Parallel([lambda: Normalize(cf_shape_)] * 2),\n Parallel([lambda: Pointwise(channels=cf_shape_[0])] * 2),\n MixLogisticConvAttnCoupling(\n cf_shape=cf_shape_,\n hidden_channels=hdim,\n aux_channels=32 if for_dequant else 0,\n blocks=dequant_blocks if for_dequant else blocks,\n mix_components=mix_components,\n 
attn_heads=attn_heads,\n attn_version=attn_version,\n pdrop=pdrop,\n force_float32_cond=force_float32_cond,\n binarised=binarised,\n use_reslayers=use_reslayers,\n fp_acts=fp_acts,\n ),\n TupleFlip(),\n ]\n\n class Dequant(BaseFlow):\n def __init__(self):\n super().__init__()\n self.context_proc = torch.nn.Sequential(\n WnConv2d(6, 32, kernel_size=3, padding=1),\n GatedConv(in_channels=32, aux_channels=0, pdrop=pdrop,\n binarised=binarised, fp_acts=fp_acts) if use_reslayers else Pass(),\n GatedConv(in_channels=32, aux_channels=0, pdrop=pdrop,\n binarised=binarised, fp_acts=fp_acts) if use_reslayers else Pass(),\n GatedConv(in_channels=32, aux_channels=0, pdrop=pdrop,\n binarised=binarised, fp_acts=fp_acts) if use_reslayers else Pass(),\n )\n self.noise_flow = Compose([\n # input: Gaussian noise\n StripeSplit(),\n *coupling((3, 32, 16), for_dequant=True),\n *coupling((3, 32, 16), for_dequant=True),\n *coupling((3, 32, 16), for_dequant=True),\n *coupling((3, 32, 16), for_dequant=True),\n Inverse(StripeSplit()),\n Sigmoid(),\n ])\n self.aux_split = StripeSplit()\n\n def _process_context(self, aux):\n a = aux / 256.0 - 0.5\n a = torch.cat(self.aux_split(a, inverse=False, aux=None)[0], dim=1)\n return self.context_proc(a)\n\n def forward(self, eps, *, aux, inverse: bool):\n # base distribution noise -> dequantization noise\n return self.noise_flow(eps, aux=self._process_context(aux), inverse=inverse)\n\n def code(self, input_sym, *, aux, inverse: bool, stream):\n return self.noise_flow.code(input_sym, aux=self._process_context(aux), inverse=inverse, stream=stream)\n\n super().__init__(\n main_flow=Compose([\n # input image 3, 32, 32\n ImgProc(),\n\n StripeSplit(),\n *coupling((3, 32, 16)),\n *coupling((3, 32, 16)),\n *coupling((3, 32, 16)),\n *coupling((3, 32, 16)),\n Inverse(StripeSplit()),\n\n Squeeze(), # 12, 16, 16\n\n ChannelSplit(),\n *coupling((6, 16, 16)),\n *coupling((6, 16, 16)),\n Inverse(ChannelSplit()),\n\n StripeSplit(),\n *coupling((12, 16, 8)),\n *coupling((12, 16, 8)),\n *coupling((12, 16, 8)),\n Inverse(StripeSplit()),\n ]),\n dequant_flow=Dequant(),\n x_shape=(3, 32, 32),\n z_shape=(12, 16, 16)\n )\n\n self.best_elbo = np.inf\n self.binary_conv_patterns = ['cond.blocks.[0-9]+.conv.conv.v',\n 'cond.blocks.[0-9]+.conv.gate.nin.dense.v',\n 'cond.blocks.[0-9]+.conv.aux_proj.dense.v']\n binary_params = [p for n, p in self.named_parameters()\n if any(re.search(s, n) for s in self.binary_conv_patterns)]\n all_parameters = [p for p in self.parameters() if p.requires_grad]\n num_params = sum([np.prod(p.size()) for p in all_parameters])\n num_binary_params = sum([np.prod(p.size()) for p in binary_params]) if binarised else 0\n num_fp_params = num_params - num_binary_params\n print('{} trainable params'.format(num_params))\n print('{} binary params ({:.2f}%)'.format(num_binary_params, 100 * num_binary_params / num_params))\n print('{} FP params ({:.2f}%)'.format(num_fp_params, 100 * num_fp_params / num_params))\n\n self.transfer_selection = [n for n, p in self.named_parameters() if n[-2:] == '.v']\n\n def load_from_tf(self, filename):\n tf_params = np.load(filename)\n torch_params = OrderedDict(sorted(list(self.named_parameters())))\n\n _unused_torch_names = set(torch_params.keys())\n _unused_tf_names = set(tf_params.keys())\n assert len(_unused_torch_names) == len(_unused_tf_names)\n from tqdm import tqdm\n bar = tqdm(list(range(len(_unused_torch_names))), desc='Loading parameters', leave=False)\n\n def load(torch_name, tf_name, transform):\n tensor = 
torch.from_numpy(tf_params[tf_name])\n if transform is not None:\n tensor = transform(tensor)\n torch_params[torch_name].data.copy_(tensor)\n _unused_torch_names.remove(torch_name)\n _unused_tf_names.remove(tf_name)\n bar.update()\n # print(torch_name, '<--', tf_name)\n\n def load_dense(torch_prefix, tf_prefix):\n load(f'{torch_prefix}.b', f'{tf_prefix}/b', None)\n load(f'{torch_prefix}.w', f'{tf_prefix}/W', lambda t: t.permute(1, 0))\n\n def load_conv(torch_prefix, tf_prefix):\n load(f'{torch_prefix}.b', f'{tf_prefix}/b', None)\n load(f'{torch_prefix}.w', f'{tf_prefix}/W', lambda t: t.permute(3, 2, 0, 1))\n\n def load_gated_conv(torch_prefix, tf_prefix):\n load_conv(f'{torch_prefix}.conv', f'{tf_prefix}/c1')\n load_conv(f'{torch_prefix}.gate.conv', f'{tf_prefix}/c2')\n\n def load_norm(torch_prefix, tf_prefix):\n load(f'{torch_prefix}.normalize.b', f'{tf_prefix}/b', lambda t: t.permute(2, 0, 1))\n load(f'{torch_prefix}.normalize.g', f'{tf_prefix}/g', lambda t: t.permute(2, 0, 1))\n\n def load_ln(torch_prefix, tf_prefix):\n load(f'{torch_prefix}.bias', f'{tf_prefix}/b', lambda t: t[0, 0, 0, :])\n load(f'{torch_prefix}.weight', f'{tf_prefix}/g', lambda t: t[0, 0, 0, :])\n\n def load_conv_attn_block(torch_prefix, tf_prefix, aux):\n load_dense(f'{torch_prefix}.attn.proj_in.dense', f'{tf_prefix}/attn/proj1')\n load_dense(f'{torch_prefix}.attn.gate.nin.dense', f'{tf_prefix}/attn/proj2')\n if aux:\n load_dense(f'{torch_prefix}.conv.aux_proj.dense', f'{tf_prefix}/conv/a_proj')\n load_conv(f'{torch_prefix}.conv.conv', f'{tf_prefix}/conv/c1')\n load_dense(f'{torch_prefix}.conv.gate.nin.dense', f'{tf_prefix}/conv/c2')\n load_ln(f'{torch_prefix}.ln1.layernorm', f'{tf_prefix}/ln1')\n load_ln(f'{torch_prefix}.ln2.layernorm', f'{tf_prefix}/ln2')\n\n tf_counters = defaultdict(lambda: 0)\n\n def get_tf_counter(prefix):\n return prefix if (tf_counters[prefix] == 0) else f'{prefix}_{tf_counters[prefix]}'\n\n def load_coupling(prefix, i, blocks, aux):\n load_norm(f'{prefix}.{i}.flows.0', f'{get_tf_counter(\"Norm\")}/norm0')\n load_norm(f'{prefix}.{i}.flows.1', f'{get_tf_counter(\"Norm\")}/norm1')\n tf_counters['Norm'] += 1\n\n load(f'{prefix}.{i + 1}.flows.0.w', f'{get_tf_counter(\"Pointwise\")}/W0', lambda t: t.permute(1, 0))\n load(f'{prefix}.{i + 1}.flows.1.w', f'{get_tf_counter(\"Pointwise\")}/W1', lambda t: t.permute(1, 0))\n tf_counters['Pointwise'] += 1\n\n load(f'{prefix}.{i + 2}.cond.pos_emb',\n f'{get_tf_counter(\"MixLogisticAttnCoupling\")}/pos_emb', lambda t: t.permute(2, 0, 1))\n load_conv(f'{prefix}.{i + 2}.cond.proj_in',\n f'{get_tf_counter(\"MixLogisticAttnCoupling\")}/proj_in')\n load_conv(f'{prefix}.{i + 2}.cond.proj_out',\n f'{get_tf_counter(\"MixLogisticAttnCoupling\")}/proj_out')\n\n for block in range(blocks):\n load_conv_attn_block(f'{prefix}.{i + 2}.cond.blocks.{block}',\n f'{get_tf_counter(\"MixLogisticAttnCoupling\")}/block{block}', aux=aux)\n tf_counters['MixLogisticAttnCoupling'] += 1\n\n # context proc\n load_conv('dequant_flow.context_proc.0', 'context_proc/proj')\n for i in [1, 2, 3]:\n load_gated_conv(f'dequant_flow.context_proc.{i}', f'context_proc/c{i - 1}')\n # dequant flow\n for i in range(1, 15 + 1, 4):\n load_coupling('dequant_flow.noise_flow.flows', i, blocks=2, aux=True)\n # main flow\n for i in (list(range(2, 16 + 1, 4)) + list(range(21, 27 + 1, 4)) + list(range(31, 41 + 1, 4))):\n load_coupling('main_flow.flows', i, blocks=10, aux=False)\n\n bar.close()\n assert len(_unused_tf_names) == len(_unused_torch_names) == 0\n return self\n\n\ndef load_cifar_model(filename, 
force_float32_cond, float32=False):\n model = CifarFlowPP(force_float32_cond=force_float32_cond).load_from_tf(filename).eval()\n if not float32:\n model = model.double()\n # freeze the model\n for p in model.parameters():\n p.requires_grad = False\n return model\n"
]
| [
[
"torch.no_grad",
"numpy.load",
"torch.sign",
"torch.from_numpy",
"torch.randn",
"torch.randn_like",
"numpy.prod",
"numpy.log2",
"torch.sum"
]
]
|
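flowpp_cifar.py above packs binarised sign weights into bytes via the repo's `pack_bits`/`unpack_bits` helpers, which are imported from `utils` but not shown in this row. As an assumption about what they do, NumPy's `packbits`/`unpackbits` reproduce the same 8x compression idea:

```python
import numpy as np
import torch

# Stand-ins for the repo's pack_bits/unpack_bits (assumed behaviour, not the repo code).
w = torch.randn(4, 8)
b = ((torch.sign(w) + 1) / 2).byte()                 # {-1, +1} signs -> {0, 1} bits
packed = np.packbits(b.numpy().reshape(-1))          # 32 bits -> 4 bytes
restored = np.unpackbits(packed)[: b.numel()].reshape(4, 8)
assert (restored == b.numpy()).all()
signs = 2 * torch.from_numpy(restored).float() - 1   # back to {-1, +1}
```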
anandsaha/ai.ml.lib | [
"1866bb0c163a171d7f2478bbee81f83b04d5c20b"
]
| [
"src/run.py"
]
| [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Mall_Customers.csv')\n\nX = dataset.iloc[:, [3, 4]].values\n\nprint(type(X))\n"
]
| [
[
"pandas.read_csv"
]
]
|
rebryk/kaggle | [
"0c656f64ce681dd313ca5145f0ff834a1a6d822e"
]
| [
"recursion-cellular/challenge/test.py"
]
| [
"import argparse\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import linear_sum_assignment\n\nimport utils\nfrom challenge.dataset import EXP_TRAIN\nfrom utils.neighbors import k_neighbors_classify, k_neighbors_classify_scores\n\n\ndef parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', nargs='+', type=str, required=True, help='path to configuration file')\n parser.add_argument('-n', '--n_neighbors', nargs='+', type=int, required=False, default=[20, 20, 20, 20],\n help='number of neighbors')\n parser.add_argument('--use_valid', action='store_true', help='whether to use valid for test predictions')\n return parser.parse_args()\n\n\ndef get_group_scores(embeddings_train: np.ndarray,\n labels_train: np.ndarray,\n groups_train: np.ndarray,\n embeddings_test: np.ndarray,\n n_neighbors: int) -> np.ndarray:\n scores = np.zeros(4, dtype=float)\n\n for group in range(4):\n mask = groups_train == group\n _, scores_ = k_neighbors_classify(\n X_train=embeddings_train[mask],\n y_train=labels_train[mask],\n X_test=embeddings_test,\n n_neighbors=n_neighbors\n )\n scores[group] = scores_.mean()\n\n return scores\n\n\ndef get_train_group_mapping(root: Path) -> dict:\n # Mapping from the first sirna in group to group number\n sirna_to_group = {0: 0, 1: 1, 2: 2, 4: 3}\n\n df_train = pd.read_csv(root / 'train.csv')\n groups = df_train.groupby([\"experiment\", \"plate\"]).apply(lambda it: sirna_to_group[it.sirna.min()])\n\n return dict(groups.items())\n\n\ndef get_group_redictions(embeddings_train: np.ndarray,\n labels_train: np.ndarray,\n embeddings_test: np.ndarray,\n n_neighbors: int) -> np.ndarray:\n scores, labels = k_neighbors_classify_scores(\n X_train=embeddings_train,\n y_train=labels_train,\n X_test=embeddings_test,\n n_neighbors=n_neighbors\n )\n\n _, col_ind = linear_sum_assignment(-scores)\n preds = labels[col_ind]\n\n return preds\n\n\ndef get_predictions(embeddings_train: np.ndarray,\n labels_train: np.ndarray,\n groups_train: np.ndarray,\n embeddings_test: np.ndarray,\n experiments_test: np.ndarray,\n plates_test: np.ndarray,\n n_neighbors: int) -> np.ndarray:\n preds = np.zeros(len(experiments_test), dtype=int)\n plates = np.array([1, 2, 3, 4])\n\n for experiment in np.unique(experiments_test):\n plate_group_score = np.zeros((4, 4), dtype=float)\n\n for i, plate in enumerate(plates):\n mask_test = (experiments_test == experiment) & (plates_test == plate)\n\n plate_group_score[i] = get_group_scores(\n embeddings_train=embeddings_train,\n labels_train=labels_train,\n groups_train=groups_train,\n embeddings_test=embeddings_test[mask_test],\n n_neighbors=n_neighbors\n )\n\n # Match groups with plates\n rows, groups = linear_sum_assignment(-plate_group_score)\n\n for plate, group in zip(plates, groups):\n mask_test = (experiments_test == experiment) & (plates_test == plate)\n mask_train = (groups_train == group)\n\n preds[mask_test] = get_group_redictions(\n embeddings_train=embeddings_train[mask_train],\n labels_train=labels_train[mask_train],\n embeddings_test=embeddings_test[mask_test],\n n_neighbors=n_neighbors\n )\n\n return preds\n\n\nif __name__ == '__main__':\n args = parse_args()\n config = utils.load_config(args.config[0])\n\n utils.fix_seed(0)\n logger = utils.get_logger(name='test', path=config.path)\n\n df_result = pd.DataFrame(columns=['id_code', 'sirna'])\n\n root = Path(config.root)\n train_group_mapping = get_train_group_mapping(root)\n\n acc_scores = []\n\n if 
len(args.n_neighbors) == 1:\n args.n_neighbors = args.n_neighbors * 4\n\n configs = [utils.load_config(it) for it in args.config]\n\n for exp_id in range(len(EXP_TRAIN)):\n data = dict()\n\n for stage in ['train', 'valid', 'test']:\n embeddings = []\n labels = None\n plates = None\n experiments = None\n\n for config_ in configs:\n data_path = f'{config_.path}/data/{exp_id}'\n labels_ = np.load(f'{data_path}/labels_{stage}.npy', allow_pickle=True)\n embeddings_ = np.load(f'{data_path}/embeddings_{stage}.npy', allow_pickle=True)\n plates_ = np.load(f'{data_path}/plates_{stage}.npy', allow_pickle=True)\n experiments_ = np.load(f'{data_path}/experiments_{stage}.npy', allow_pickle=True)\n\n # Average embeddings for sites\n n = len(labels_) // 2\n labels_ = labels_[:n]\n plates_ = plates_[:n]\n experiments_ = experiments_[:n]\n embeddings_ = (embeddings_[:n] + embeddings_[n:]) / 2\n\n # Collect embeddings\n embeddings.append(embeddings_)\n labels = labels_\n plates = plates_\n experiments = experiments_\n\n # Average embeddings for experiments\n embeddings = np.mean(embeddings, axis=0)\n\n data[stage] = {\n 'labels': labels,\n 'embeddings': embeddings,\n 'plates': plates,\n 'experiments': experiments,\n }\n\n if stage != 'test':\n data[stage]['groups'] = np.array([train_group_mapping[it] for it in zip(experiments, plates)])\n\n embeddings_train = data['train']['embeddings']\n labels_train = data['train']['labels']\n groups_train = data['train']['groups']\n\n logger.info(f'Making predictions for valid...')\n pred = get_predictions(\n embeddings_train=embeddings_train,\n labels_train=labels_train,\n groups_train=groups_train,\n embeddings_test=data['valid']['embeddings'],\n experiments_test=data['valid']['experiments'],\n plates_test=data['valid']['plates'],\n n_neighbors=args.n_neighbors[exp_id]\n )\n\n acc_score = np.mean(pred == data['valid']['labels'])\n acc_scores.append(acc_score)\n logger.info(f'Valid accuracy score for experiment {exp_id}: {acc_score:0.3f}')\n\n # Use validation data\n if args.use_valid:\n embeddings_train = np.concatenate([embeddings_train, data['valid']['embeddings']])\n labels_train = np.concatenate([labels_train, data['valid']['labels']])\n groups_train = np.concatenate([groups_train, data['valid']['groups']])\n\n logger.info(f'Making predictions for test...')\n df_test = pd.DataFrame({\n 'id_code': data['test']['labels'],\n 'sirna': get_predictions(\n embeddings_train=embeddings_train,\n labels_train=labels_train,\n groups_train=groups_train,\n embeddings_test=data['test']['embeddings'],\n experiments_test=data['test']['experiments'],\n plates_test=data['test']['plates'],\n n_neighbors=args.n_neighbors[exp_id]\n )\n })\n\n df_result = df_result.append(df_test)\n\n logger.info(f'Valid accuracy score: {np.mean(acc_scores):0.3f}')\n df_result.to_csv(f'{config.path}/pred.csv', index=False)\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"numpy.load",
"numpy.mean",
"scipy.optimize.linear_sum_assignment",
"pandas.read_csv",
"numpy.unique"
]
]
|
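test.py above matches plates to sirna groups by running the Hungarian algorithm on a negated score matrix. A self-contained sketch of that `linear_sum_assignment` trick with a made-up 4x4 score matrix:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy matrix: scores[i, j] = how well plate i matches group j (invented numbers).
scores = np.array([
    [0.9, 0.1, 0.2, 0.1],
    [0.2, 0.8, 0.1, 0.3],
    [0.1, 0.2, 0.7, 0.2],
    [0.3, 0.1, 0.2, 0.6],
])
# linear_sum_assignment minimizes total cost, so negate to maximize total score,
# exactly as the script above does with -plate_group_score.
rows, cols = linear_sum_assignment(-scores)
print(list(zip(rows, cols)))  # [(0, 0), (1, 1), (2, 2), (3, 3)]
```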
shan18/EVA4-Phase-2 | [
"12922fbeac63397944e9b464c2d5c60faf24e532"
]
| [
"08 - SRGAN and Neural Style Transfer/srgan/model.py"
]
| [
"import math\nimport torch\nfrom torch import nn\nimport torchvision\n\n\nclass Generator(nn.Module):\n def __init__(self, scale_factor):\n upsample_block_num = int(math.log(scale_factor, 2))\n\n super(Generator, self).__init__()\n self.block1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=9, padding=4),\n nn.PReLU()\n )\n self.block2 = ResidualBlock(64)\n self.block3 = ResidualBlock(64)\n self.block4 = ResidualBlock(64)\n self.block5 = ResidualBlock(64)\n self.block6 = ResidualBlock(64)\n self.block7 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64)\n )\n block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]\n block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))\n self.block8 = nn.Sequential(*block8)\n\n def forward(self, x):\n block1 = self.block1(x)\n block2 = self.block2(block1)\n block3 = self.block3(block2)\n block4 = self.block4(block3)\n block5 = self.block5(block4)\n block6 = self.block6(block5)\n block7 = self.block7(block6)\n block8 = self.block8(block1 + block7)\n\n return (torch.tanh(block8) + 1) / 2\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n vgg19 = torchvision.models.vgg19_bn(pretrained=True)\n vgg19.features[52] = nn.AdaptiveAvgPool2d(1)\n self.vgg19_bn = nn.Sequential(*list(vgg19.children())[:-2])\n self.conv1 = nn.Conv2d(512, 256, kernel_size=1)\n self.conv3 = nn.Conv2d(256, 1, kernel_size=1)\n\n \n def forward(self, x):\n batch_size = x.size(0)\n x = self.vgg19_bn(x)\n x = self.conv1(x)\n x = self.conv3(x)\n return torch.sigmoid(x.view(batch_size))\n\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn1 = nn.BatchNorm2d(channels)\n self.prelu = nn.PReLU()\n self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn2 = nn.BatchNorm2d(channels)\n\n def forward(self, x):\n residual = self.conv1(x)\n residual = self.bn1(residual)\n residual = self.prelu(residual)\n residual = self.conv2(residual)\n residual = self.bn2(residual)\n\n return x + residual\n\n\nclass UpsampleBLock(nn.Module):\n def __init__(self, in_channels, up_scale):\n super(UpsampleBLock, self).__init__()\n self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)\n self.pixel_shuffle = nn.PixelShuffle(up_scale)\n self.prelu = nn.PReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pixel_shuffle(x)\n x = self.prelu(x)\n return x\n"
]
| [
[
"torch.nn.Sequential",
"torch.tanh",
"torch.nn.BatchNorm2d",
"torch.nn.PixelShuffle",
"torch.nn.Conv2d",
"torch.nn.PReLU",
"torch.nn.AdaptiveAvgPool2d"
]
]
|
Office2012/DameDaneGenerator | [
"ff32a948eee18f18e97fd8991a3e75174097d85e"
]
| [
"firstordermodel/modules/util.py"
]
| [
"from torch import nn\r\n\r\nimport torch.nn.functional as F\r\nimport torch\r\n\r\nfrom firstordermodel.sync_batchnorm import SynchronizedBatchNorm2d as BatchNorm2d\r\n\r\n\r\ndef kp2gaussian(kp, spatial_size, kp_variance):\r\n \"\"\"\r\n Transform a keypoint into gaussian like representation\r\n \"\"\"\r\n mean = kp['value']\r\n\r\n coordinate_grid = make_coordinate_grid(spatial_size, mean.type())\r\n number_of_leading_dimensions = len(mean.shape) - 1\r\n shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape\r\n coordinate_grid = coordinate_grid.view(*shape)\r\n repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)\r\n coordinate_grid = coordinate_grid.repeat(*repeats)\r\n\r\n # Preprocess kp shape\r\n shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)\r\n mean = mean.view(*shape)\r\n\r\n mean_sub = (coordinate_grid - mean)\r\n\r\n out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)\r\n\r\n return out\r\n\r\n\r\ndef make_coordinate_grid(spatial_size, type):\r\n \"\"\"\r\n Create a meshgrid [-1,1] x [-1,1] of given spatial_size.\r\n \"\"\"\r\n h, w = spatial_size\r\n x = torch.arange(w).type(type)\r\n y = torch.arange(h).type(type)\r\n\r\n x = (2 * (x / (w - 1)) - 1)\r\n y = (2 * (y / (h - 1)) - 1)\r\n\r\n yy = y.view(-1, 1).repeat(1, w)\r\n xx = x.view(1, -1).repeat(h, 1)\r\n\r\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\r\n\r\n return meshed\r\n\r\n\r\nclass ResBlock2d(nn.Module):\r\n \"\"\"\r\n Res block, preserve spatial resolution.\r\n \"\"\"\r\n\r\n def __init__(self, in_features, kernel_size, padding):\r\n super(ResBlock2d, self).__init__()\r\n self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\r\n padding=padding)\r\n self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\r\n padding=padding)\r\n self.norm1 = BatchNorm2d(in_features, affine=True)\r\n self.norm2 = BatchNorm2d(in_features, affine=True)\r\n\r\n def forward(self, x):\r\n out = self.norm1(x)\r\n out = F.relu(out)\r\n out = self.conv1(out)\r\n out = self.norm2(out)\r\n out = F.relu(out)\r\n out = self.conv2(out)\r\n out += x\r\n return out\r\n\r\n\r\nclass UpBlock2d(nn.Module):\r\n \"\"\"\r\n Upsampling block for use in decoder.\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\r\n super(UpBlock2d, self).__init__()\r\n\r\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\r\n padding=padding, groups=groups)\r\n self.norm = BatchNorm2d(out_features, affine=True)\r\n\r\n def forward(self, x):\r\n out = F.interpolate(x, scale_factor=2)\r\n out = self.conv(out)\r\n out = self.norm(out)\r\n out = F.relu(out)\r\n return out\r\n\r\n\r\nclass DownBlock2d(nn.Module):\r\n \"\"\"\r\n Downsampling block for use in encoder.\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\r\n super(DownBlock2d, self).__init__()\r\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\r\n padding=padding, groups=groups)\r\n self.norm = BatchNorm2d(out_features, affine=True)\r\n self.pool = nn.AvgPool2d(kernel_size=(2, 2))\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n out = self.norm(out)\r\n out = F.relu(out)\r\n out = self.pool(out)\r\n return out\r\n\r\n\r\nclass SameBlock2d(nn.Module):\r\n \"\"\"\r\n Simple block, preserve spatial resolution.\r\n \"\"\"\r\n\r\n def __init__(self, 
in_features, out_features, groups=1, kernel_size=3, padding=1):\r\n super(SameBlock2d, self).__init__()\r\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,\r\n kernel_size=kernel_size, padding=padding, groups=groups)\r\n self.norm = BatchNorm2d(out_features, affine=True)\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n out = self.norm(out)\r\n out = F.relu(out)\r\n return out\r\n\r\n\r\nclass Encoder(nn.Module):\r\n \"\"\"\r\n Hourglass Encoder\r\n \"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Encoder, self).__init__()\r\n\r\n down_blocks = []\r\n for i in range(num_blocks):\r\n down_blocks.append(DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),\r\n min(max_features, block_expansion * (2 ** (i + 1))),\r\n kernel_size=3, padding=1))\r\n self.down_blocks = nn.ModuleList(down_blocks)\r\n\r\n def forward(self, x):\r\n outs = [x]\r\n for down_block in self.down_blocks:\r\n outs.append(down_block(outs[-1]))\r\n return outs\r\n\r\n\r\nclass Decoder(nn.Module):\r\n \"\"\"\r\n Hourglass Decoder\r\n \"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Decoder, self).__init__()\r\n\r\n up_blocks = []\r\n\r\n for i in range(num_blocks)[::-1]:\r\n in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))\r\n out_filters = min(max_features, block_expansion * (2 ** i))\r\n up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1))\r\n\r\n self.up_blocks = nn.ModuleList(up_blocks)\r\n self.out_filters = block_expansion + in_features\r\n\r\n def forward(self, x):\r\n out = x.pop()\r\n for up_block in self.up_blocks:\r\n out = up_block(out)\r\n skip = x.pop()\r\n out = torch.cat([out, skip], dim=1)\r\n return out\r\n\r\n\r\nclass Hourglass(nn.Module):\r\n \"\"\"\r\n Hourglass architecture.\r\n \"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Hourglass, self).__init__()\r\n self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)\r\n self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)\r\n self.out_filters = self.decoder.out_filters\r\n\r\n def forward(self, x):\r\n return self.decoder(self.encoder(x))\r\n\r\n\r\nclass AntiAliasInterpolation2d(nn.Module):\r\n \"\"\"\r\n Band-limited downsampling, for better preservation of the input signal.\r\n \"\"\"\r\n def __init__(self, channels, scale):\r\n super(AntiAliasInterpolation2d, self).__init__()\r\n sigma = (1 / scale - 1) / 2\r\n kernel_size = 2 * round(sigma * 4) + 1\r\n self.ka = kernel_size // 2\r\n self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka\r\n\r\n kernel_size = [kernel_size, kernel_size]\r\n sigma = [sigma, sigma]\r\n # The gaussian kernel is the product of the\r\n # gaussian function of each dimension.\r\n kernel = 1\r\n meshgrids = torch.meshgrid(\r\n [\r\n torch.arange(size, dtype=torch.float32)\r\n for size in kernel_size\r\n ]\r\n )\r\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\r\n mean = (size - 1) / 2\r\n kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))\r\n\r\n # Make sure sum of values in gaussian kernel equals 1.\r\n kernel = kernel / torch.sum(kernel)\r\n # Reshape to depthwise convolutional weight\r\n kernel = kernel.view(1, 1, *kernel.size())\r\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\r\n\r\n self.register_buffer('weight', 
kernel)\r\n self.groups = channels\r\n self.scale = scale\r\n\r\n def forward(self, input):\r\n if self.scale == 1.0:\r\n return input\r\n\r\n out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))\r\n out = F.conv2d(out, weight=self.weight, groups=self.groups)\r\n out = F.interpolate(out, scale_factor=(self.scale, self.scale))\r\n\r\n return out\r\n"
]
| [
[
"torch.cat",
"torch.nn.ModuleList",
"torch.arange",
"torch.nn.AvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.nn.functional.pad",
"torch.nn.functional.conv2d",
"torch.exp",
"torch.sum"
]
]
|
antuniooh/probability-and-statistics-database-analysis | [
"a23d2e3fc4f51b0a64732d67cfc7ea7661ff8112"
]
| [
"src/main.py"
]
| [
"#Passo 1 - Definir Database\n\nfrom scipy.stats import shapiro\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\n\ndf = pd.read_csv('data/concrete_data.csv')\n\n#Passo 2 - Limpar Database\n\n#exibir valores ausentes ou null\ndf.isnull().sum().sort_values(ascending=False)[:10]\nprint(\"Número de linhas e colunas no conjunto de treinamento:\", df.shape)\nattributes = list(df.columns)\n#removendo valores nulos\ndf.dropna()\n\n#preencher os nulos\ndf.fillna(df.mean(0))\n\n#remover duplicados\ndf.drop_duplicates()\n\n#Passo 3 - Definir x e y\nx = df.iloc[:, :-1].values\ny = df.iloc[:, -1].values\n\nprint(x)\nprint(y)\n\n#Passo 4 - Média\naverageX = np.mean(x)\naverageY = np.mean(y)\n\nprint(\"Média de x: \" + str(averageX)) #calcula a média de x\nprint(\"Média de y: \" + str(averageY)) #calcula a média de y\n\n#Passo 5 - Variância\nvarianceX = np.var(x)\nvarianceY = np.var(y)\n\nprint(\"Variância de x: \" + str(varianceX))\nprint(\"Variância de y: \" + str(varianceY))\n\n#Passo 6 - Desvio Padrão\ndeviationX = np.std(x)\ndeviationY = np.std(y)\n\nprint(\"Desvio Padrão de x: \" + str(deviationX)) #calcula o desvio padrão de x\nprint(\"Desvio Padrão de y: \" + str(deviationY)) #calcula o desvio padrão de y\n\n#Passo 7 - Mediana\nmedianX = np.median(x)\nmedianY = np.median(y)\n\nprint(\"Mediana de x: \" + str(medianX)) #calcula o desvio padrão de x\nprint(\"Mediana de y: \" + str(medianY)) #calcula o desvio padrão de y\n\n#Passo 8 - Histograma\n\n# Histograma de x\nh = np.histogram(x, bins='auto') #calcula o histograma\nprint(x)\nplt.hist(y, bins='auto')\nplt.title('Dados')\nplt.ylabel('Frequência')\nplt.xlabel('Valores')\nplt.show()\n\n# Histograma de y\nh = np.histogram(y, bins='auto') #calcula o histograma\nprint(h)\nplt.hist(y, bins='auto')\nplt.title('Dados')\nplt.ylabel('Frequência')\nplt.xlabel('Valores')\nplt.show()\n\n#Passo 9 - Coeficiente de Correlação\nprint('\\n\\n\\n\\nPearson')\nprint(df.corr(method='pearson'))\n\n#Passo 10 - Teste de Normalidade\nfrom scipy.stats import shapiro\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\ndata = pd.read_csv('data/concrete_data.csv')\nprint(\"Número de linhas e colunas:\", data.shape)\ndata.head(25)\n\n# Analisar se a coluna sepal.length tem distribuição normal\n\ndata = data.to_numpy()\nx = data[:, 0]\n# normalidade test\nstat, p = shapiro(x)\nprint('Statistics=%.3f, p=%.3f' % (stat, p))\n# interpretação\nalpha = 0.05\nif p > alpha:\n print('Amostra Gaussiana (aceita H0)')\nelse:\n print('Amostra não Gausssiana (rejeita H0)')\n# Verificação atrav´s do histograma\nplt.hist(x, bins='auto')\nplt.title('Dados')\nplt.ylabel('Frequência')\nplt.xlabel('Valores')\nplt.show()\n\nfrom scipy.stats import shapiro\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\ndata = pd.read_csv('data/concrete_data.csv')\nprint(\"Número de linhas e colunas:\", data.shape)\ndata.head(25)\n\n# Analisar se a coluna sepal.length tem distribuição normal\n\ndata = data.to_numpy()\ny = data[0, :]\n# normalidade test\nstat, p = shapiro(y)\nprint('Statistics=%.3f, p=%.3f' % (stat, p))\n# interpretação\nalpha = 0.05\nif p > alpha:\n print('Amostra Gaussiana (aceita H0)')\nelse:\n print('Amostra não Gausssiana (rejeita H0)')\n# Verificação atrav´s do 
histograma\nplt.hist(y, bins='auto')\nplt.title('Dados')\nplt.ylabel('Frequência')\nplt.xlabel('Valores')\nplt.show()\n"
]
| [
[
"numpy.histogram",
"scipy.stats.shapiro",
"numpy.median",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"numpy.mean",
"numpy.std",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.var"
]
]
|
carmelrabinov/contrastive-domain-randomization | [
"f6fc6173a072da821d1b2ab6bd9558bf7e609bd4"
]
| [
"franka_panda/pybullet_simulation/environment.py"
]
| [
"import os\nimport numpy as np\n\nfrom franka_panda.environment import PushPandaEnv\nfrom franka_panda.pybullet_simulation.push_simulation import Sim2RealPushPandaSimulation\n\n\nclass PybulletPushPandaEnv(PushPandaEnv):\n def __init__(self, config_path: str = \"configs/default_push.yml\",\n load_frame: bool = False,\n random_texture: bool = True,\n random_light: bool = True,\n random_size: bool = False,\n random_camera: bool = False,\n use_ui: bool = False,\n textures_path: str = '../textures',\n alternative_textures_path: str = '../textures'):\n super().__init__()\n # transform to sim_coordinates\n self.convert_real_2_sim_spaces()\n\n self._simulation = Sim2RealPushPandaSimulation(use_ui=use_ui, config_path=config_path)\n self._default_orientation = self._simulation.default_orientation\n\n # objects parameters\n self._object_id = None\n self._random_sized_object = random_size\n\n # env parameters\n self._random_light = random_light\n self._random_camera = random_camera\n if load_frame:\n self._simulation.load_state_space_frame()\n self._random_texture = random_texture\n self._simulation.texture_dir = textures_path\n self._simulation.alternative_texture_dir = alternative_textures_path\n if not os.path.exists(textures_path):\n print(f\"Warning, texture directory: {textures_path} does not exists, will not use textures\")\n self._simulation.texture_dir = None\n self._random_texture = False\n if not os.path.exists(alternative_textures_path):\n print(f\"Warning, alternative texture directory: {alternative_textures_path} does not exists, will not use textures\")\n self._simulation.alternative_texture_dir = None\n self._random_texture = False\n\n self.reset()\n self.reset_environment_appearance()\n\n @property\n def state(self) -> np.ndarray:\n raise NotImplementedError\n\n @state.setter\n def state(self, state: np.ndarray):\n raise NotImplementedError\n\n def render(self, mode: str = \"regular\") -> np.ndarray:\n if mode not in self.metadata[\"render.modes\"]:\n print(f\"Warning! no render mode: {mode}\")\n if mode == \"alternative\":\n return self._simulation.render(alternative=True)\n\n image, segmantaion_mask = self._simulation.render(return_seg_mask=True)\n if mode == \"segmentation\":\n return segmantaion_mask\n\n return image\n\n def close(self):\n self._simulation.disconnect()\n\n def step(self, action: np.ndarray) -> [np. 
ndarray, float, bool, dict]:\n \"\"\"\n :param action:\n :return: observation, reward, done, info\n \"\"\"\n assert len(action) == 4, \"Action space must be a 4 length vector [x_init, y_init, x_goal, y_goal]\"\n\n # validate action space\n cliped_action = self._clip_to_action_space(action)\n if np.linalg.norm(action - cliped_action) > 1.e-4:\n print(f\"Warning, action provided was out of action space and was cliped from {action} to {cliped_action}\")\n\n # move to source position\n source = [cliped_action[0], self._ee_push_hight, cliped_action[1]] + self._default_orientation\n self._simulation.step_to_state(source, eps=0.005)\n\n # record the actual source coordinates\n actual_source = np.array(self._simulation.ee_position[[0, 2]])\n\n # move to target position\n target = [cliped_action[2], self._ee_push_hight, cliped_action[3]] + self._default_orientation\n self._simulation.step_to_state(target, eps=0.005)\n\n # record the actual target coordinates\n actual_target = np.array(self._simulation.ee_position[[0, 2]])\n\n # lift panda arm and render\n self._simulation.panda_robot.reset()\n observation = self.render()\n\n done = self.out_of_state_space(self.state)\n actual_action = np.concatenate((actual_source, actual_target))\n info = {\"actual_action\": actual_action}\n\n return observation, 0., done, info\n\n def reset_environment_appearance(self):\n # random light\n if self._random_light:\n self._simulation.init_random_light_directions()\n\n # random texture\n if self._random_texture:\n self._simulation.init_random_textures()\n\n # random camera\n if self._random_camera:\n self._simulation.init_random_cameras()\n\n def reset(self) -> None:\n raise NotImplementedError\n\n def image2world(self, image_coordinates: np.ndarray):\n \"\"\" move from image pixels to world coordinates \"\"\"\n return self._simulation.image2world(image_coordinates)\n\n def world2image(self, world_coordinates: np.ndarray):\n \"\"\" move from world coordinates to image pixels \"\"\"\n return self._simulation.world2image(world_coordinates)\n\n\nclass PybulletPushCubePandaEnv(PybulletPushPandaEnv):\n\n @property\n def state(self) -> np.ndarray:\n id = list(self._simulation.objects)[-1]\n return self._simulation.get_object_xz_position(id)\n\n @state.setter\n def state(self, state: np.ndarray):\n if self._object_id is not None:\n self._simulation.remove_object(self._object_id)\n cube_size = np.random.uniform(low=0.018, high=0.022) if self._random_sized_object else 0.018\n position = [state[0], 0.1, state[1]]\n self._object_id = self._simulation.load_cube(size=cube_size, position=position, mass=0.1, friction=0.3)\n self._simulation.let_objects_settle()\n self._simulation.panda_robot.reset()\n\n def reset(self) -> None:\n\n self._simulation.panda_robot.reset()\n\n # remove cube if exists, and load new one\n cube_potision = np.array([np.random.uniform(-0.2, 0.2), np.random.uniform(-0.6, -0.4)])\n self.state = cube_potision\n\n\nclass PybulletPushRopePandaEnv(PybulletPushPandaEnv):\n\n @property\n def state(self) -> np.ndarray:\n dummy_state = np.array([0., -0.5])\n return dummy_state\n\n @state.setter\n def state(self, state: np.ndarray):\n self._simulation.panda_robot.reset()\n\n def reset(self) -> None:\n\n self._simulation.panda_robot.reset()\n self._simulation.y = 0.015\n # remove rope if exists, and load new one\n if self._object_id is not None:\n self._simulation.remove_object(self._object_id)\n self._object_id = self._simulation.load_rope(random_positions=True,\n random_rope=self._random_sized_object,\n 
random_texture=self._random_texture)\n self._simulation.let_objects_settle()\n\n def sample_point_on_rope(self):\n return self._simulation.sample_point_on_object(object_id=self._object_id)\n\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.uniform"
]
]
|
crazystone1314/poker-hands-classification | [
"131ad27d7344e480dc54e5b3efc520f3cf39adb6"
]
| [
"predict_model.py"
]
| [
"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport pandas as pd\r\nimport catboost as cb\r\n\r\n\r\n# 原始数据路径\r\ntraining_path = 'D://train_data.csv'\r\nsource_data_path = 'D://preliminary-testing.csv'\r\n\r\n# --------读取原始数据-------\r\n# training数据\r\ntrain_data = pd.read_csv(training_path, names=['S1', 'C1', 'S2', 'C2', 'S3', 'C3', 'S4', 'C4', 'S5', 'C5', 'hand'])\r\n# preliminary-testing数据\r\ntest_data = pd.read_csv(source_data_path, names=['S1', 'C1', 'S2', 'C2', 'S3', 'C3', 'S4', 'C4', 'S5', 'C5'])\r\n\r\n\r\n\r\n# --------将J、Q、K映射成11、12、13------\r\ndef transform_jkq(x):\r\n if x == 'J':\r\n return 11\r\n elif x == 'Q':\r\n return 12\r\n elif x == 'K':\r\n return 13\r\n else:\r\n return x\r\n\r\n# train_data数据处理\r\ntrain_data['C1'] = train_data['C1'].apply(transform_jkq)\r\ntrain_data['C2'] = train_data['C2'].apply(transform_jkq)\r\ntrain_data['C3'] = train_data['C3'].apply(transform_jkq)\r\ntrain_data['C4'] = train_data['C4'].apply(transform_jkq)\r\ntrain_data['C5'] = train_data['C5'].apply(transform_jkq)\r\n\r\n# preliminary-testing数据处理\r\ntest_data['C1'] = test_data['C1'].apply(transform_jkq)\r\ntest_data['C2'] = test_data['C2'].apply(transform_jkq)\r\ntest_data['C3'] = test_data['C3'].apply(transform_jkq)\r\ntest_data['C4'] = test_data['C4'].apply(transform_jkq)\r\ntest_data['C5'] = test_data['C5'].apply(transform_jkq)\r\n\r\n\r\n# -------将C、D、H、S 映射为1、2、3、4--------\r\nencode_map = {'C':1, 'D':2, 'H':3,'S':4}\r\n# training数据处理\r\ntrain_data['S1'] = train_data['S1'].map(encode_map)\r\ntrain_data['S2'] = train_data['S2'].map(encode_map)\r\ntrain_data['S3'] = train_data['S3'].map(encode_map)\r\ntrain_data['S4'] = train_data['S4'].map(encode_map)\r\ntrain_data['S5'] = train_data['S5'].map(encode_map)\r\n\r\n# preliminary-testing数据处理\r\ntest_data['S1'] = test_data['S1'].map(encode_map)\r\ntest_data['S2'] = test_data['S2'].map(encode_map)\r\ntest_data['S3'] = test_data['S3'].map(encode_map)\r\ntest_data['S4'] = test_data['S4'].map(encode_map)\r\ntest_data['S5'] = test_data['S5'].map(encode_map)\r\n\r\n\r\n# --------计算四种花色的数量和13种排名的有无---------\r\ndef bincount2D_vectorized(a):\r\n N = a.max()+1\r\n a_offs = a + np.arange(a.shape[0])[:,None]*N\r\n return np.bincount(a_offs.ravel(), minlength=a.shape[0]*N).reshape(-1,N)\r\n\r\n# training数据处理\r\n# 计算四种花色的数量\r\nS_training = train_data.iloc[:, [0, 2, 4, 6, 8]].astype(int)\r\nS_training = pd.DataFrame(bincount2D_vectorized(S_training.values),columns=['suitCount0','suitCount1','suitCount2','suitCount3','suitCount4'])\r\ntrain_data = pd.merge(train_data, S_training, how='left', left_index=True, right_index=True).drop(['suitCount0'], axis=1)\r\n#计算13种排名的有无\r\nR_training = train_data.iloc[:, np.arange(1, 10, 2)].astype(int)\r\ncols = ['rank{}'.format(x) for x in range(0,14,1)]\r\nR_training = pd.DataFrame(bincount2D_vectorized(R_training.values),columns=cols)\r\ntrain_data = pd.merge(train_data, R_training, how='left', left_index=True, right_index=True).drop(['rank0'], axis=1)\r\n\r\n# preliminary-testing数据处理\r\n#计算13种排名的有无\r\nS_source_data = test_data.iloc[:, [0, 2, 4, 6, 8]].astype(int)\r\nS_source_data = pd.DataFrame(bincount2D_vectorized(S_source_data.values),columns=['suitCount0','suitCount1','suitCount2','suitCount3','suitCount4'])\r\ntest_data = pd.merge(test_data, S_source_data, how='left', left_index=True, right_index=True).drop(['suitCount0'], axis=1)\r\n#计算13种排名的有无\r\nR_source_data = test_data.iloc[:, np.arange(1, 10, 2)].astype(int)\r\ncols = ['rank{}'.format(x) for x in range(0,14,1)]\r\nR_source_data = 
pd.DataFrame(bincount2D_vectorized(R_source_data.values),columns=cols)\r\ntest_data = pd.merge(test_data, R_source_data, how='left', left_index=True, right_index=True).drop(['rank0'], axis=1)\r\n\r\n\r\n# ------各种排名的种类数------\r\n\r\n# training数据处理\r\nR_training = train_data.loc[:, ['rank{}'.format(n) for n in range(1, 14, 1)]].astype(int)\r\nR_training = pd.DataFrame(bincount2D_vectorized(R_training.values),columns=['rankCount{}'.format(n) for n in range(0,5,1)])\r\ntrain_data = pd.merge(train_data, R_training, how='left', left_index=True, right_index=True).drop(['rankCount0'], axis=1)\r\n\r\n# preliminary-testing数据处理\r\nR_source_data = test_data.loc[:, ['rank{}'.format(n) for n in range(1, 14, 1)]].astype(int)\r\nR_source_data = pd.DataFrame(bincount2D_vectorized(R_source_data.values),columns=['rankCount{}'.format(n) for n in range(0,5,1)])\r\ntest_data = pd.merge(test_data, R_source_data, how='left', left_index=True, right_index=True).drop(['rankCount0'], axis=1)\r\n\r\n\r\n# ------13种排名各排名之间的差值的绝对值-----\r\n\r\n# training数据处理\r\ntrain_data['diff1_13'] = np.abs(train_data['rank1'] - train_data['rank13'])\r\nfor i in range(2,14,1):\r\n train_data['diff{}_{}'.format(i, i - 1)] = np.abs(train_data['rank{}'.format(i)] - train_data['rank{}'.format(i - 1)])\r\n# train_data['diff13_1'] = np.abs(train_data['rank13'] - train_data['rank1'])\r\n\r\n# preliminary-testing数据处理\r\ntest_data['diff1_13'] = np.abs(test_data['rank1'] - test_data['rank13'])\r\nfor i in range(2,14,1):\r\n test_data['diff{}_{}'.format(i, i - 1)] = np.abs(test_data['rank{}'.format(i)] - test_data['rank{}'.format(i - 1)])\r\n# train_data['diff13_1'] = np.abs(train_data['rank13'] - train_data['rank1'])\r\n\r\n\r\n# ------删除原始特征和13种花色的有无-----\r\n\r\n# training数据处理\r\ntrain_data = train_data.drop(['S1', 'C1', 'S2', 'C2', 'S3', 'C3', 'S4', 'C4', 'S5', 'C5'], axis=1)\r\ntrain_data = train_data.drop(['rank{}'.format(n) for n in range(1, 14, 1)], axis=1)\r\n\r\n# preliminary-testing数据处理\r\ntest_data = test_data.drop(['S1', 'C1', 'S2', 'C2', 'S3', 'C3', 'S4', 'C4', 'S5', 'C5'], axis=1)\r\ntest_data = test_data.drop(['rank{}'.format(n) for n in range(1, 14, 1)], axis=1)\r\n\r\n\r\n# --------训练模型并用模型预测数据--------\r\nX = train_data.drop(['hand'], axis=1)\r\ny = train_data.hand\r\n\r\nparams = {\r\n 'l2_leaf_reg':0.8,\r\n 'learning_rate':0.09,\r\n 'depth':11,\r\n 'iterations':250\r\n }\r\ncat = cb.CatBoostClassifier(loss_function='MultiClassOneVsAll', random_seed=1234)\r\n# 设置模型参数\r\ncat.set_params(**params)\r\n# 训练模型\r\ncat_model = cat.fit(X, y, verbose=False)\r\n#用模型进行预测\r\npreds_class = cat_model.predict(test_data, prediction_type='Class')\r\nresult = pd.DataFrame(preds_class)\r\n# 将结果转化为整型\r\nresult_1 = result[0].apply(int)\r\nresult_2 = pd.DataFrame(result_1)\r\n# 将数据保存到文件dsjyycxds_preliminary.txt中\r\nresult_2.to_csv('D://dsjyycxds_preliminary.txt', index=False, header=False)"
]
| [
[
"pandas.merge",
"pandas.DataFrame",
"numpy.arange",
"numpy.abs",
"pandas.read_csv"
]
]
|
DeepDarkOdyssey/exalt | [
"82f06b47d735c56d277ac2df37f0459e6fba6fc9"
]
| [
"exalt/text_encoder/utils.py"
]
| [
"from copy import copy\nfrom typing import List, Tuple, Callable, Set\nimport io\nimport os\nimport numpy as np\nimport spacy\n\n\ndef gram_schmidt_process(A: np.array) -> Tuple[np.ndarray]:\n d, n = A.shape\n Q = np.zeros((d, n))\n R = np.zeros((n, n))\n for i in range(n):\n v_i = A[:, i]\n qs = Q[:, :i]\n rs = v_i @ qs\n R[:i, i] = rs\n q_i = v_i - np.sum((v_i @ qs) / np.sum((qs ** 2), axis=0) * qs, axis=1)\n norm = np.linalg.norm(q_i, ord=2)\n Q[:, i] = q_i / norm\n R[i, i] = norm\n return Q, R\n\n\ndef ngrams(\n text: str, max_n: int, tokenizer: Callable[[str], List[str]] = lambda x: list(x)\n) -> Set:\n tokens = tokenizer(text)\n grams = set()\n for n in range(1, max_n + 1):\n grams = grams.union(\n set([\"\".join(tokens[i : i + n]) for i in range(len(tokens) - n + 1)])\n )\n return grams\n\n\ndef get_related_cache_path(file_path: str) -> str:\n dir_path = os.path.dirname(file_path)\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n return os.path.join(dir_path, file_name + '.cache')"
]
| [
[
"numpy.sum",
"numpy.linalg.norm",
"numpy.zeros"
]
]
|
kgb0255/deepCR | [
"bb449d77fb99abc052bf76d01e85dfdce326b779"
]
| [
"deepCR/model.py"
]
| [
"\"\"\"main module to instantiate deepCR models and use them\n\"\"\"\nfrom os import path, mkdir\nimport math\nimport shutil\nimport secrets\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import from_numpy\nfrom joblib import Parallel, delayed\nfrom joblib import dump, load\nfrom joblib import wrap_non_picklable_objects\n\nfrom deepCR.unet import WrappedModel, UNet2Sigmoid\nfrom deepCR.util import medmask\nfrom learned_models import mask_dict, inpaint_dict, default_model_path\n\n__all__ = ['deepCR']\n\n\nclass deepCR():\n\n def __init__(self, mask='ACS-WFC-F606W-2-32', inpaint=None, device='CPU', hidden=32):\n\n \"\"\"\n Instantiation of deepCR with specified model configurations\n\n Parameters\n ----------\n mask : str\n Either name of existing deepCR-mask model, or file path of your own model (incl. '.pth')\n inpaint : (optional) str\n Name of existing inpainting model to use. If left as None then by default use a simple 5x5 median mask\n sampling for inpainting\n device : str\n One of 'CPU' or 'GPU'\n hidden : int\n Number of hidden channel for first deepCR-mask layer. Specify only if using custom deepCR-mask model.\n Returns\n -------\n None\n \"\"\"\n if device == 'GPU':\n if not torch.cuda.is_available():\n raise AssertionError('No CUDA device detected!')\n self.dtype = torch.cuda.FloatTensor\n self.dint = torch.cuda.ByteTensor\n wrapper = nn.DataParallel\n else:\n self.dtype = torch.FloatTensor\n self.dint = torch.ByteTensor\n wrapper = WrappedModel\n if mask in mask_dict.keys():\n self.scale = mask_dict[mask][2]\n mask_path = default_model_path + '/mask/' + mask + '.pth'\n self.maskNet = wrapper(mask_dict[mask][0](*mask_dict[mask][1]))\n else:\n self.scale = 1\n mask_path = mask\n self.maskNet = wrapper(UNet2Sigmoid(1, 1, hidden))\n self.maskNet.type(self.dtype)\n if device != 'GPU':\n self.maskNet.load_state_dict(torch.load(mask_path, map_location='cpu'))\n else:\n self.maskNet.load_state_dict(torch.load(mask_path))\n self.maskNet.eval()\n for p in self.maskNet.parameters():\n p.required_grad = False\n\n if inpaint is not None:\n inpaint_path = default_model_path + '/inpaint/' + inpaint + '.pth'\n self.inpaintNet = wrapper(inpaint_dict[inpaint][0](*inpaint_dict[inpaint][1])).type(self.dtype)\n if device != 'GPU':\n self.inpaintNet.load_state_dict(torch.load(inpaint_path, map_location='cpu'))\n else:\n self.inpaintNet.load_state_dict(torch.load(inpaint_path))\n self.inpaintNet.eval()\n for p in self.inpaintNet.parameters():\n p.required_grad = False\n else:\n self.inpaintNet = None\n\n # Unused features to be implemented in a future version\n self.norm = False\n self.percentile = None\n self.median = None\n self.std = None\n\n def clean(self, img0, threshold=0.5, inpaint=True, binary=True, segment=False,\n patch=256, n_jobs=1):\n \"\"\"\n Identify cosmic rays in an input image, and (optionally) inpaint with the predicted cosmic ray mask\n :param img0: (np.ndarray) 2D input image conforming to model requirements. For HST ACS/WFC, must be from\n _flc.fits and in units of electrons in native resolution.\n :param threshold: (float; [0, 1]) applied to probabilistic mask to generate binary mask\n :param inpaint: (bool) return clean, inpainted image only if True\n :param binary: return binary CR mask if True. probabilistic mask if False\n :param segment: (bool) if True, segment input image into chunks of patch * patch before performing CR rejection.\n Used for memory control.\n :param patch: (int) Use 256 unless otherwise required. 
if segment==True, segment image into chunks of\n patch * patch.\n :param n_jobs: (int) number of jobs to run in parallel, passed to `joblib.` default: 1.\n :return: CR mask and (optionally) clean inpainted image\n \"\"\"\n\n # data pre-processing\n\n inpaint = inpaint and binary\n\n img0 = img0.astype(np.float32) / self.scale\n img0 = img0.copy()\n if self.norm:\n limit = np.percentile(img0, self.percentile)\n clip = img0[img0 < limit]\n self.median = np.percentile(clip, 50)\n self.std = clip.std()\n img0 -= self.median\n img0 /= self.std\n\n if not segment and n_jobs == 1:\n return self.clean_(img0, threshold=threshold,\n inpaint=inpaint, binary=binary)\n else:\n if n_jobs == 1:\n return self.clean_large(img0, threshold=threshold,\n inpaint=inpaint, binary=binary, patch=patch)\n else:\n return self.clean_large_parallel(img0, threshold=threshold,\n inpaint=inpaint, binary=binary, patch=patch,\n n_jobs=n_jobs)\n\n def clean_(self, img0, threshold=0.5, inpaint=True, binary=True):\n\n \"\"\"\n given input image\n return cosmic ray mask and (optionally) clean image\n mask could be binary or probabilistic\n :param img0: (np.ndarray) 2D input image\n :param threshold: for creating binary mask from probabilistic mask\n :param inpaint: return clean image only if True\n :param binary: return binary mask if True. probabilistic mask otherwise.\n :return: CR mask and (optionally) clean inpainted image\n \"\"\"\n\n shape = img0.shape\n pad_x = 4 - shape[0] % 4\n pad_y = 4 - shape[1] % 4\n if pad_x == 4:\n pad_x = 0\n if pad_y == 4:\n pad_y = 0\n img0 = np.pad(img0, ((pad_x, 0), (pad_y, 0)), mode='constant')\n\n shape = img0.shape[-2:]\n img0 = from_numpy(img0).type(self.dtype).view(1, -1, shape[0], shape[1])\n mask = self.maskNet(img0)\n\n if not binary:\n return mask.detach().cpu().view(shape[0], shape[1]).numpy()[pad_x:, pad_y:]\n\n binary_mask = (mask > threshold).type(self.dtype)\n\n if inpaint:\n if self.inpaintNet is not None:\n cat = torch.cat((img0 * (1 - binary_mask), binary_mask), dim=1)\n img1 = self.inpaintNet(cat)\n img1 = img1.detach()\n inpainted = img1 * binary_mask + img0 * (1 - binary_mask)\n binary_mask = binary_mask.detach().cpu().view(shape[0], shape[1]).numpy()\n inpainted = inpainted.detach().cpu().view(shape[0], shape[1]).numpy()\n else:\n binary_mask = binary_mask.detach().cpu().view(shape[0], shape[1]).numpy()\n img0 = img0.detach().cpu().view(shape[0], shape[1]).numpy()\n img1 = medmask(img0, binary_mask)\n inpainted = img1 * binary_mask + img0 * (1 - binary_mask)\n if binary:\n inpainted = inpainted[pad_x:, pad_y:]\n if self.norm:\n inpainted *= self.std\n inpainted += self.median\n return binary_mask[pad_x:, pad_y:], inpainted * self.scale\n else:\n mask = mask.detach().cpu().view(shape[0], shape[1]).numpy()\n inpainted = inpainted[pad_x:, pad_y:]\n if self.norm:\n inpainted *= self.std\n inpainted += self.median\n return mask[pad_x:, pad_y:], inpainted * self.scale\n\n else:\n if binary:\n binary_mask = binary_mask.detach().cpu().view(shape[0], shape[1]).numpy()\n return binary_mask[pad_x:, pad_y:]\n else:\n mask = mask.detach().cpu().view(shape[0], shape[1]).numpy()\n return mask[pad_x:, pad_y:]\n\n def clean_large_parallel(self, img0, threshold=0.5, inpaint=True, binary=True,\n patch=256, n_jobs=-1):\n \"\"\"\n given input image\n return cosmic ray mask and (optionally) clean image\n mask could be binary or probabilistic\n :param img0: (np.ndarray) 2D input image\n :param threshold: for creating binary mask from probabilistic mask\n :param inpaint: return clean image 
only if True\n :param binary: return binary mask if True. probabilistic mask otherwise.\n :param patch: (int) Use 256 unless otherwise required. patch size to run deepCR on.\n :param n_jobs: (int) number of jobs to run in parallel, passed to `joblib.` Beware of memory overflow for\n larger n_jobs.\n :return: CR mask and (optionally) clean inpainted image\n \"\"\"\n folder = './joblib_memmap_' + secrets.token_hex(3)\n try:\n mkdir(folder)\n except FileExistsError:\n folder = './joblib_memmap_' + secrets.token_hex(3)\n mkdir(folder)\n\n im_shape = img0.shape\n img0_dtype = img0.dtype\n hh = int(math.ceil(im_shape[0]/patch))\n ww = int(math.ceil(im_shape[1]/patch))\n\n img0_filename_memmap = path.join(folder, 'img0_memmap')\n dump(img0, img0_filename_memmap)\n img0 = load(img0_filename_memmap, mmap_mode='r')\n\n if inpaint:\n img1_filename_memmap = path.join(folder, 'img1_memmap')\n img1 = np.memmap(img1_filename_memmap, dtype=img0.dtype,\n shape=im_shape, mode='w+')\n else:\n img1 = None\n\n mask_filename_memmap = path.join(folder, 'mask_memmap')\n mask = np.memmap(mask_filename_memmap, dtype=np.int8 if binary else img0_dtype,\n shape=im_shape, mode='w+')\n\n @wrap_non_picklable_objects\n def fill_values(i, j, img0, img1, mask, patch, inpaint, threshold, binary):\n img = img0[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])]\n if inpaint:\n mask_, clean_ = self.clean_(img, threshold=threshold, inpaint=True, binary=binary)\n mask[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])] = mask_\n img1[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])] = clean_\n else:\n mask_ = self.clean_(img, threshold=threshold, inpaint=False, binary=binary)\n mask[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])] = mask_\n\n results = Parallel(n_jobs=n_jobs, verbose=0)\\\n (delayed(fill_values)(i, j, img0, img1, mask, patch, inpaint, threshold, binary)\n for i in range(hh) for j in range(ww))\n\n mask = np.array(mask)\n if inpaint:\n img1 = np.array(img1)\n try:\n shutil.rmtree(folder)\n except:\n print('Could not clean-up automatically.')\n\n if inpaint:\n return mask, img1\n else:\n return mask\n\n def clean_large(self, img0, threshold=0.5, inpaint=True, binary=True,\n patch=256):\n \"\"\"\n given input image\n return cosmic ray mask and (optionally) clean image\n mask could be binary or probabilistic\n :param img0: (np.ndarray) 2D input image\n :param threshold: for creating binary mask from probabilistic mask\n :param inpaint: return clean image only if True\n :param binary: return binary mask if True. 
probabilistic mask otherwise.\n :return: mask or binary mask; or None if internal call\n \"\"\"\n im_shape = img0.shape\n hh = int(math.ceil(im_shape[0]/patch))\n ww = int(math.ceil(im_shape[1]/patch))\n\n img1 = np.zeros((im_shape[0], im_shape[1]))\n mask = np.zeros((im_shape[0], im_shape[1]))\n\n if inpaint:\n for i in range(hh):\n for j in range(ww):\n img = img0[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])]\n mask_, clean_ = self.clean_(img, threshold=threshold, inpaint=True, binary=binary)\n mask[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])] = mask_\n img1[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])] = clean_\n return mask, img1\n\n else:\n for i in range(hh):\n for j in range(ww):\n img = img0[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])]\n mask_ = self.clean_(img, threshold=threshold, inpaint=False, binary=binary)\n mask[i * patch: min((i + 1) * patch, im_shape[0]), j * patch: min((j + 1) * patch, im_shape[1])] = mask_\n return mask\n\n def inpaint(self, img0, mask):\n\n \"\"\"\n inpaint img0 under mask\n :param img0: (np.ndarray) input image\n :param mask: (np.ndarray) inpainting mask\n :return: inpainted clean image\n \"\"\"\n img0 = img0.astype(np.float32) / self.scale\n mask = mask.astype(np.float32)\n shape = img0.shape[-2:]\n if self.inpaintNet is not None:\n img0 = from_numpy(img0).type(self.dtype). \\\n view(1, -1, shape[0], shape[1])\n mask = from_numpy(mask).type(self.dtype). \\\n view(1, -1, shape[0], shape[1])\n cat = torch.cat((img0 * (1 - mask), mask), dim=1)\n img1 = self.inpaintNet(cat)\n img1 = img1.detach()\n inpainted = img1 * mask + img0 * (1 - mask)\n inpainted = inpainted.detach().cpu(). \\\n view(shape[0], shape[1]).numpy()\n else:\n img1 = medmask(img0, mask)\n inpainted = img1 * mask + img0 * (1 - mask)\n return inpainted * self.scale\n\n"
]
| [
[
"numpy.array",
"numpy.pad",
"torch.cat",
"numpy.zeros",
"numpy.percentile",
"torch.from_numpy",
"numpy.memmap",
"torch.cuda.is_available",
"torch.load"
]
]
|
hj424/heterocl | [
"e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b"
]
| [
"tvm/tests/verilog/unittest/test_vpi_mmap.py"
]
| [
"import tvm\nimport numpy as np\nfrom tvm.contrib import verilog\n\ndef test_mmap():\n n = 10\n # context for VPI RAM\n ctx = tvm.vpi(0)\n a_np = np.arange(n).astype('int8')\n a = tvm.nd.array(a_np, ctx)\n\n # head ptr of a\n a_ptr = int(a.handle[0].data)\n sess = verilog.session([\n verilog.find_file(\"test_vpi_mmap.v\"),\n verilog.find_file(\"tvm_vpi_mmap.v\")\n ])\n rst = sess.main.rst\n read_addr = sess.main.read_addr\n read_data = sess.main.read_data\n write_addr = sess.main.write_addr\n write_data = sess.main.write_data\n write_en = sess.main.write_en\n mmap_addr = sess.main.mmap_addr\n\n # setup memory map.\n rst.put_int(1)\n sess.yield_until_next_cycle()\n rst.put_int(0)\n write_en.put_int(0)\n mmap_addr.put_int(a_ptr)\n sess.yield_until_next_cycle()\n\n # read test\n for i in range(n):\n read_addr.put_int(i)\n sess.yield_until_next_cycle()\n # read addr get set this cycle\n sess.yield_until_next_cycle()\n # get the data out\n assert(read_data.get_int() == i)\n\n # write test\n for i in reversed(range(n)):\n write_addr.put_int(i)\n write_en.put_int(1)\n write_data.put_int(i + 1)\n sess.yield_until_next_cycle()\n write_en.put_int(0)\n sess.yield_until_next_cycle()\n\n np.testing.assert_equal(a.asnumpy(), a_np + 1)\n\n\nif __name__ == \"__main__\":\n test_mmap()\n"
]
| [
[
"numpy.arange"
]
]
|
adwasser/modds | [
"9720e6550c0a5c23a9e868794eabcd9acd0ae2fa"
]
| [
"modds/measurement.py"
]
| [
"\"\"\"\nmeasurement.py\n\nMeasurementModel wraps up a halo.HaloDensityProfile instance with both a set of \nobservables (e.g., {r, DeltaSigma, DeltaSigma_err}) and a prior on the model \nparameters of interest.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom scipy import optimize\nfrom colossus.halo.profile_base import HaloDensityProfile\n\nfrom modds.parameter import Parameter\n\n__all__ = ['MeasurementModel']\n\n\ndef lnlike_gauss(q_model, q, q_err):\n \"\"\"\n Log of Gaussian likelihood.\n\n Parameters\n ----------\n q_model : array_like\n predicted value\n q : array_like\n observed value\n q_err : array_like\n observational uncertainty\n\n Returns\n -------\n lnl : array_like\n log likehood array, same size as input observations\n \"\"\"\n var = q_err**2\n return -0.5 * (np.log(2 * np.pi * var) + (q - q_model)**2 / var)\n\n\nclass MeasurementModel():\n \"\"\"\n A wrapped up halo density profile with data constraints and priors.\n\n Parameters\n ----------\n profile : halo.profile_bass.HaloDensityProfile\n observables : dict or iterable\n Should have keys of 'r', 'q', 'q_err', else it is assumed to be an\n iterable containing three arrays in the order r, q, q_err.\n quantity : str\n One of {'rho', 'M', 'Sigma', 'DeltaSigma'}, with standard colossus \n units. Denotes volume mass density, enclosed mass, surface density,\n and surface density deviation (i.e., the weak lensing observable)\n respectively.\n parameters : list of colossus.modeling.Parameter instances\n constants : dict\n Map from constant name (str) to fixed physical value (float).\n Should contain redshift (named \"z\") if not defined as a parameter\n lnlike : callable, optional\n Map from (q_model, q, q_err) to log likelihood at each point. Defaults\n to a Gaussian likelihood, i.e., q ~ N(q_model, q_err**2)\n priors : iterable, optional\n List of functions, f(profile, **kwargs) -> log prior.\n Additional priors to consider (e.g., halo mass concentration relation,\n See examples in joint_priors.py.\n Keywords should be either constants or parameters of the model.\n \"\"\"\n\n # mapping of inputtable quantities to colossus halo profile function name\n _quantities = dict(rho='density', density='density', m='enclosedMass',\n mass='enclosedMass', enclosedmass='enclosedMass',\n sigma='surfaceDensity', surfacedensity='surfaceDensity',\n deltasigma='deltaSigma')\n _obskeys = ['r', 'q', 'q_err']\n\n def __init__(self, profile, observables, quantity, parameters,\n constants=None, lnlike=lnlike_gauss, priors=None):\n # check this is an actual halo density profile\n assert isinstance(profile, HaloDensityProfile)\n self.profile = profile\n # check this is an allowed quantity\n quantity = quantity.replace(' ', '').lower()\n assert quantity in self._quantities\n self.quantity = self._quantities[quantity]\n # construct ordered dict of observables\n if isinstance(observables, OrderedDict):\n assert all([key in observables for key in self._obskeys])\n self.observables = observables\n elif isinstance(observables, dict):\n assert all([key in observables for key in self._obskeys])\n self.observables = OrderedDict([(key, observables[key])\n for key in self._obskeys])\n else:\n self.observables = OrderedDict(zip(self._obskeys, observables))\n # check that everything is a proper parameter\n assert all([isinstance(p, Parameter) for p in parameters])\n self.parameters = OrderedDict([(p.name, p) for p in parameters])\n self.ndim = len(parameters)\n # set default constants to empty dictionary\n if constants is None:\n 
constants = {}\n self.constants = constants\n self.lnlike = lnlike\n self.priors = priors\n assert (\"z\" in self.constants) or (\"z\" in self.parameters)\n\n \n def _get_required_parameters(self, sample):\n \"\"\"The parameters the sampler sees are different than what colossus \n needs. This glues the two together. `sample` is what the sampler \n sees.\n \"\"\"\n # construct array for profile prediction\n new_pars = np.zeros(len(self.profile.par))\n for i, required_param in enumerate(self.profile.par_names):\n try:\n new_pars[i] = self.constants[required_param]\n except KeyError:\n # must be a free parameter, transform if need be\n p = self.parameters[required_param]\n # index into values for free parameters\n idx = list(self.parameters.keys()).index(required_param)\n if p.transform is None:\n new_pars[i] = sample[idx]\n else:\n # need to do the inverse transform to physical values\n new_pars[i] = p.inverse_transform(sample[idx])\n return new_pars\n\n \n def _get_kwargs(self, sample):\n \"\"\"Construct a dictionary of keyword arguments from a point in sample\n space. Includes constant values.\n \"\"\"\n kwargs = {}\n for i, (name, p) in enumerate(self.parameters.items()):\n if p.transform is not None:\n kwargs[name] = p.inverse_transform(sample[i])\n else:\n kwargs[name] = sample[i]\n kwargs = {**kwargs, **self.constants}\n return kwargs\n\n \n def update(self, sample):\n \"\"\"Update the profile with the passed values.\n\n Parameters\n ----------\n sample : array_like\n Size of ndim, values as the sampler would see them (i.e., \n transformed) \n\n Returns\n -------\n bool\n True if successful\n \"\"\"\n # set new profile parameters and update\n \n new_pars = self._get_required_parameters(sample)\n if 'z' in self.parameters:\n # update redshift with new value\n p = self.parameters['z']\n idx = list(self.parameters.keys()).index('z')\n if p.transform is not None:\n z = p.inverse_transform(sample[idx])\n else:\n z = sample[idx]\n self.profile.opt['z'] = z\n else:\n assert self.profile.opt['z'] == self.constants['z']\n \n self.profile.setParameterArray(new_pars)\n try:\n self.profile.update()\n return True\n except:\n # TODO: catch the specific exception\n # handle case where the halo density is too small\n return False\n\n def __call__(self, sample,\n return_lnlike=False,\n return_profile=False,\n return_vir=False,\n return_sp=False,\n r_grid=None,\n mdef=\"vir\",\n log_rsp_search_min=2.5,\n log_rsp_search_max=3.5):\n \"\"\"\n Calculate the log posterior probability for the model.\n\n Parameters\n ----------\n sample : array_like\n length ndim array of transformed parameters\n return_lnlike : bool, optional\n if True, also return the log likelihood\n return_profile : bool, optional\n if True, also return the model interpolated on the specified grid\n return_vir : bool, optional\n if True, also return the halo virial mass and concentration\n return_sp : bool, optional\n if True, also return the halo splashback radius and steepest slope\n r_grid : array_like, optional\n Radial interpolation grid for when caching the posterior prediction\n mdef : str, optional\n halo virial mass definition\n log_rsp_search_min : float, optional\n log of kpc, minimum radius to search for rsp\n log_rsp_search_max : float, optional\n log of kpc, maximum radius to search for rsp\n\n Returns\n -------\n lnpost : float\n log of posterior probability\n blobs : tuple\n tuple of optional returns, ordered as (lnlike, profile, Mvir, cvir, \n rsp, gamma_min)\n \"\"\"\n return_blobs = np.any([return_lnlike, return_profile, 
return_vir,\n return_sp])\n \n # update profile with new values\n successful_update = self.update(sample)\n \n # calculate log prior, returning early on bad values\n lnp = 0\n for i, p in enumerate(self.parameters.values()):\n lnp += p(sample[i])\n if self.priors is not None:\n for prior in self.priors:\n lnp += prior(self.profile, **self._get_kwargs(sample))\n if not np.isfinite(lnp) or not successful_update:\n if return_blobs:\n # construct rejected blobs\n blobs = []\n if return_lnlike:\n blobs.append(np.nan)\n if return_profile:\n blobs.append(np.nan * np.ones(r_grid.shape))\n if return_vir:\n blobs.append(np.nan)\n blobs.append(np.nan)\n if return_sp:\n blobs.append(np.nan)\n blobs.append(np.nan)\n return -np.inf, tuple(blobs)\n\n # calculate log likelihood\n r = self.observables['r']\n\n q = self.observables['q']\n q_err = self.observables['q_err']\n try:\n q_model = getattr(self.profile, self.quantity)(r)\n except:\n # TODO: selectively catch interpolation failure\n # try with interpolation off\n try:\n kwargs = dict(interpolate=False,\n interpolate_surface_density=False)\n q_model = getattr(self.profile, self.quantity)(r, **kwargs)\n except:\n # and fail somewhat gracefully if that doesn't work\n q_model = np.nan * r\n lnl = np.sum(self.lnlike(q_model, q, q_err))\n\n if return_blobs:\n kwargs = self._get_kwargs(sample)\n blobs = []\n if return_lnlike:\n blobs.append(lnl)\n if return_profile:\n q_grid = np.interp(r_grid, r, q_model) \n blobs.append(q_grid)\n if return_vir:\n z = kwargs['z']\n rvir, Mvir = self.profile.RMDelta(z=z, mdef=mdef)\n rs = kwargs['rs']\n cvir = rvir / rs\n blobs.append(Mvir)\n blobs.append(cvir)\n if return_sp:\n rsp = optimize.fminbound(self.profile.densityDerivativeLog,\n 10**log_rsp_search_min,\n 10**log_rsp_search_max)\n gamma_min = self.profile.densityDerivativeLog(rsp)\n blobs.append(rsp)\n blobs.append(gamma_min)\n return lnp + lnl, tuple(blobs)\n else:\n return lnp + lnl\n"
]
| [
[
"numpy.log",
"numpy.ones",
"numpy.interp",
"numpy.any",
"numpy.isfinite",
"scipy.optimize.fminbound"
]
]
|
jerjohste/exopy_hqc_legacy | [
"c746beea6b175697ae3bfdab94309dc872d3d908"
]
| [
"exopy_hqc_legacy/pulses/contexts/awg_context.py"
]
| [
"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright 2015-2018 by ExopyPulses Authors, see AUTHORS for more details.\n#\n# Distributed under the terms of the BSD license.\n#\n# The full license is in the file LICENCE, distributed with this software.\n# -----------------------------------------------------------------------------\n\"\"\"Context compiling sequences for the Tektronix AWG5014.\n\n\"\"\"\nimport numpy as np\nfrom atom.api import Unicode, Float, Bool, set_default\n\nfrom exopy_pulses.pulses.api import BaseContext, TIME_CONVERSION\n\nto_bytes = np.ndarray.tobytes\n\n\nclass AWG5014Context(BaseContext):\n \"\"\"Context compiling sequences for the Tektronix AWG5014.\n\n \"\"\"\n #: Generic name used when storing the sequence on the instrument.\n #: The channel name (Ch1, Ch2, ...) will be appended to it when\n #: transferring.\n sequence_name = Unicode().tag(pref=True, fmt=False)\n\n #: Sampling frequency in Hz\n sampling_frequency = Float(1e9).tag(pref=True)\n\n #: Should the transferred sequences be selected on the matching channels.\n select_after_transfer = Bool(True).tag(pref=True)\n\n #: Should the unused channels be cleared (to avoid attempting to play an\n #: old sequence).\n clear_unused_channels = Bool(True).tag(pref=True)\n\n #: Should the instrument be made to run the sequences after a successful\n #: transfer.\n run_after_transfer = Bool(True).tag(pref=True)\n\n time_unit = set_default('mus')\n\n analogical_channels = set_default(('Ch1_A', 'Ch2_A', 'Ch3_A', 'Ch4_A'))\n\n logical_channels = set_default(('Ch1_M1', 'Ch2_M1', 'Ch3_M1', 'Ch4_M1',\n 'Ch1_M2', 'Ch2_M2', 'Ch3_M2', 'Ch4_M2'))\n\n def compile_and_transfer_sequence(self, sequence, driver=None):\n \"\"\"Compile the pulse sequence and send it to the instruments.\n\n As this context does not support any special sequence it will always\n get a flat list of pulses.\n\n Parameters\n ----------\n sequence : RootSequence\n Sequence to compile and transfer.\n\n driver : object, optional\n Instrument driver to use to transfer the sequence once compiled.\n If absent the context should do its best to assert that the\n compilation can succeed.\n\n Returns\n -------\n result : bool\n Whether the compilation succeeded.\n\n infos : dict\n Infos about the transferred and compiled sequence. The keys\n should match the ones listed in sequence_infos_keys.\n\n errors : dict\n Errors that occured during compilation.\n\n \"\"\"\n items, errors = self.preprocess_sequence(sequence)\n\n if errors:\n return False, {}, errors\n\n duration = max([pulse.stop for pulse in items])\n if sequence.time_constrained:\n # Total length of the sequence to send to the AWG\n duration = sequence.duration\n\n # Collect the channels used in the pulses' sequence\n used_channels = set([pulse.channel[:3] for pulse in items])\n\n # Coefficient to convert the start and stop of pulses in second and\n # then in index integer for array\n time_to_index = TIME_CONVERSION[self.time_unit]['s'] * \\\n self.sampling_frequency\n\n # Length of the sequence\n sequence_length = int(round(duration * time_to_index))\n\n # create 3 array for each used_channels\n array_analog = {}\n array_M1 = {}\n array_M2 = {}\n for channel in used_channels:\n # numpy array for analog channels int16 init 2**13\n array_analog[channel] = np.ones(sequence_length,\n dtype=np.uint16)*(2**13)\n # numpy array for marker1 init False. 
For AWG M1 = 0 = off\n array_M1[channel] = np.zeros(sequence_length, dtype=np.int8)\n # numpy array for marker2 init False. For AWG M2 = 0 = off\n array_M2[channel] = np.zeros(sequence_length, dtype=np.int8)\n\n for pulse in [i for i in items if i.duration != 0.0]:\n\n waveform = pulse.waveform\n channel = pulse.channel[:3]\n channeltype = pulse.channel[4:]\n\n start_index = int(round(pulse.start*time_to_index))\n stop_index = start_index + len(waveform)\n\n if channeltype == 'A' and pulse.kind == 'Analogical':\n array_analog[channel][start_index:stop_index] +=\\\n np.require(np.rint(8191*waveform), np.uint16)\n elif channeltype == 'M1' and pulse.kind == 'Logical':\n array_M1[channel][start_index:stop_index] += waveform\n elif channeltype == 'M2' and pulse.kind == 'Logical':\n array_M2[channel][start_index:stop_index] += waveform\n else:\n msg = 'Selected channel does not match kind for pulse {} ({}).'\n return (False, dict(),\n {'Kind issue': msg.format(pulse.index,\n (pulse.kind, pulse.channel))}\n )\n\n # Check the overflows\n traceback = {}\n for channel in used_channels:\n analog = array_analog[channel]\n if analog.max() > 16383 or analog.min() < 0:\n mes = 'Analogical values out of range.'\n traceback['{}_A'.format(channel)] = mes\n\n elif array_M1[channel].max() > 1 or array_M1[channel].min() < 0:\n mes = 'Overflow in marker 1.'\n traceback['{}_M1'.format(channel)] = mes\n\n elif array_M2[channel].max() > 1 or array_M2[channel].min() < 0:\n mes = 'Overflow in marker 2.'\n traceback['{}_M2'.format(channel)] = mes\n\n if traceback:\n return False, dict(), traceback\n\n # Invert marked logical channels.\n for i_ch in self.inverted_log_channels:\n ch, m = i_ch.split('_')\n if m == 'M1':\n np.logical_not(array_M1[ch], array_M1[ch])\n else:\n np.logical_not(array_M2[ch], array_M2[ch])\n\n # Byte arrays to send to the AWG\n to_send = {}\n for channel in used_channels:\n # Convert to sixteen bits integers\n array = array_analog[channel] +\\\n array_M1[channel]*(2**14) + array_M2[channel]*(2**15)\n # Creating and filling a byte array for each channel.\n aux = np.empty(2*sequence_length, dtype=np.uint8)\n aux[::2] = array % 2**8\n aux[1::2] = array // 2**8\n to_send[int(channel[-1])] = to_bytes(aux)\n\n # Build sequence infos\n name = self._cache['sequence_name']\n infos = dict(sampling_frequency=self.sampling_frequency,\n sequence_ch1='',\n sequence_ch2='',\n sequence_ch3='',\n sequence_ch4='')\n for c in used_channels:\n infos['sequence_ch%s' % c[2]] = name + '_' + c\n\n # In the absence of a driver we stop here\n if not driver:\n return True, infos, traceback\n\n # If we do have a driver proceed to the transfer.\n\n return self._transfer_sequences(driver, to_send, infos)\n\n def list_sequence_infos(self):\n \"\"\"List the sequence infos returned after a successful completion.\n\n Returns\n -------\n infos : dict\n Dict mimicking the one returned on successful completion of\n a compilation and transfer. 
The value types should match\n the ones found in the real infos.\n\n \"\"\"\n return dict(sampling_frequency=1e9,\n sequence_ch1='Seq_Ch1',\n sequence_ch2='Seq_Ch2',\n sequence_ch3='Seq_Ch3',\n sequence_ch4='Seq_Ch4')\n\n # =========================================================================\n # --- Private API ---------------------------------------------------------\n # =========================================================================\n\n def _transfer_sequences(self, driver, sequences, infos):\n \"\"\"Transfer a previously compiled sequence.\n\n \"\"\"\n for ch_id in driver.defined_channels:\n if ch_id in sequences:\n driver.to_send(infos['sequence_ch%s' % ch_id],\n sequences[ch_id])\n\n if self.select_after_transfer:\n driver.sampling_frequency = self.sampling_frequency\n for ch_id in driver.defined_channels:\n ch = driver.get_channel(ch_id)\n if ch_id in sequences:\n ch.select_sequence(infos['sequence_ch%s' % ch_id])\n elif self.clear_unused_channels:\n ch.clear_sequence()\n\n if self.run_after_transfer:\n for ch_id in sequences:\n ch = driver.get_channel(ch_id)\n ch.output_state = 'ON'\n driver.running = True\n\n return True, infos, {}\n\n def _get_sampling_time(self):\n \"\"\"Getter for the sampling time prop of BaseContext.\n\n \"\"\"\n return 1/self.sampling_frequency*TIME_CONVERSION['s'][self.time_unit]\n\n def _post_setattr_time_unit(self, old, new):\n \"\"\"Reset sampling time as the conversion changed.\n\n \"\"\"\n self._reset_sampling_time()\n\n def _post_setattr_sampling_frequency(self, old, new):\n \"\"\"Reset sampling time when the frequency changes.\n\n \"\"\"\n self._reset_sampling_time()\n\n def _reset_sampling_time(self):\n \"\"\"Reset the sampling_time property.\n\n \"\"\"\n member = self.get_member(str('sampling_time')) # HINT C API\n member.reset(self)\n"
]
| [
[
"numpy.logical_not",
"numpy.empty",
"numpy.zeros",
"numpy.rint",
"numpy.ones"
]
]
|
khirotaka/enchanter | [
"71faa51f998da5c8d9185a979a4f5849c9b5f9e6"
]
| [
"enchanter/engine/saving.py"
]
| [
"from typing import Union, Optional, Dict\nfrom collections import OrderedDict\nfrom time import ctime\nfrom pathlib import Path\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\n\n\n__all__ = [\"RunnerIO\"]\n\n\nclass RunnerIO:\n \"\"\"\n A class responsible for loading and saving parameters such as PyTorch model weights and Optimizer state.\n\n \"\"\"\n\n def __init__(self):\n self.model = NotImplemented\n self.optimizer = NotImplemented\n self.experiment = NotImplemented\n self.save_dir: Optional[str] = None\n\n def model_name(self) -> str:\n \"\"\"\n fetch model name\n\n Returns: model name\n\n \"\"\"\n if isinstance(self.model, nn.DataParallel) or isinstance(self.model, nn.parallel.DistributedDataParallel):\n model_name = self.model.module.__class__.__name__\n else:\n model_name = self.model.__class__.__name__\n\n return model_name\n\n def save_checkpoint(self) -> Dict[str, Union[Dict[str, torch.Tensor], dict]]:\n \"\"\"\n A method to output model weights and Optimizer state as a dictionary.\n\n Returns:\n Returns a dictionary with the following keys and values.\n - ``model_state_dict``: model weights\n - ``optimizer_state_dict``: Optimizer state\n\n \"\"\"\n if isinstance(self.model, nn.DataParallel) or isinstance(self.model, nn.parallel.DistributedDataParallel):\n model = self.model.module.state_dict()\n else:\n model = self.model.state_dict()\n\n checkpoint = {\n \"model_state_dict\": deepcopy(model),\n \"optimizer_state_dict\": deepcopy(self.optimizer.state_dict()),\n }\n return checkpoint\n\n def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n \"\"\"\n Takes a dictionary with keys ``model_state_dict`` and ``optimizer_state_dict``\n and uses them to restore the state of the model and the Optimizer.\n\n Args:\n checkpoint:\n Takes a dictionary with the following keys and values.\n - ``model_state_dict``: model weights\n - ``optimizer_state_dict``: Optimizer state\n \"\"\"\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self\n\n def save(self, directory: Optional[str] = None, epoch: Optional[int] = None, filename: Optional[str] = None):\n\n \"\"\"\n Save the model and the Optimizer state file in the specified directory.\n\n Notes:\n ``enchanter_checkpoints_epoch_{}.pth`` file contains ``model_state_dict`` & ``optimizer_state_dict``.\n\n Args:\n directory (Optional[str]):\n epoch (Optional[int]):\n filename (Optional[str]):\n\n \"\"\"\n if directory is None and self.save_dir:\n directory = self.save_dir\n\n if directory is None:\n if filename is None:\n raise ValueError(\"The argument `directory` or `filename` must be specified.\")\n else:\n path = filename\n else:\n directory_path = Path(directory)\n if not directory_path.exists():\n directory_path.mkdir(parents=True)\n\n if epoch is None:\n epoch_str = ctime().replace(\" \", \"_\")\n else:\n epoch_str = str(epoch)\n\n if not filename:\n filename = \"enchanter_checkpoints_epoch_{}.pth\".format(epoch_str)\n\n path = str(directory_path / filename)\n\n checkpoint = self.save_checkpoint()\n torch.save(checkpoint, path)\n\n model_name = self.model_name()\n self.experiment.log_model(model_name, str(path))\n\n def load(self, filename: str, map_location: str = \"cpu\"):\n \"\"\"\n Restores the model and Optimizer state based on the specified file.\n\n Args:\n filename (str):\n map_location (str): default: 'cpu'\n\n \"\"\"\n checkpoint = torch.load(filename, map_location=map_location)\n 
self.load_checkpoint(checkpoint)\n\n return self\n"
]
| [
[
"torch.save",
"torch.load"
]
]
|
octree-nn/ocnn-pytorch | [
"cdff2f5e589fd8a0b79c4f7b90042a8cefabf2d0"
]
| [
"test/test_utils.py"
]
| [
"import os\nimport torch\nimport unittest\n\nimport ocnn\n\n\nclass TestScatter(unittest.TestCase):\n\n def test_scatter_add(self):\n devices = ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']\n for device in devices:\n src = torch.arange(1, 11, device=device).view(2, 5)\n idx = torch.tensor([0, 1, 3, 2, 0], device=device)\n gt = torch.tensor([[6, 2, 4, 3, 0], [16, 7, 9, 8, 0]], device=device)\n\n output = ocnn.utils.scatter_add(src, idx, dim=1, dim_size=5)\n self.assertTrue(torch.equal(output, gt))\n\n def test_cumsum(self):\n data = torch.tensor([[1, 2, 3], [4, 5, 6]])\n gt1 = torch.tensor([[1, 3, 6], [4, 9, 15]])\n gt2 = torch.tensor([[0, 1, 3, 6], [0, 4, 9, 15]])\n gt3 = torch.tensor([[0, 0, 0], [1, 2, 3], [5, 7, 9]])\n\n out1 = ocnn.utils.cumsum(data, dim=1, exclusive=False)\n out2 = ocnn.utils.cumsum(data, dim=1, exclusive=True)\n out3 = ocnn.utils.cumsum(data, dim=0, exclusive=True)\n self.assertTrue(torch.equal(gt1, out1))\n self.assertTrue(torch.equal(gt2, out2))\n self.assertTrue(torch.equal(gt3, out3))\n\n\nif __name__ == \"__main__\":\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n unittest.main()\n"
]
| [
[
"torch.cuda.is_available",
"torch.tensor",
"torch.equal",
"torch.arange"
]
]
|
conansherry/Ultra-Light-Fast-Generic-Face-Detector-1MB | [
"5a6ad4cacb0a8545a5b9c126bd3f344eec56fb8d"
]
| [
"tf/det_image.py"
]
| [
"import argparse\nimport sys\n\nimport cv2\nimport tensorflow as tf\nimport numpy as np\n\nparser = argparse.ArgumentParser(\n description='convert model')\n\nparser.add_argument('--net_type', default=\"RFB\", type=str,\n help='The network architecture ,optional: RFB (higher precision) or slim (faster)')\nparser.add_argument('--img_path', default='imgs/test_input.jpg', type=str,\n help='Image path for inference')\nargs = parser.parse_args()\n\n\ndef main():\n if args.net_type == 'slim':\n model_path = \"export_models/slim/\"\n elif args.net_type == 'RFB':\n model_path = \"export_models/RFB/\"\n else:\n print(\"The net type is wrong!\")\n sys.exit(1)\n\n model = tf.keras.models.load_model(model_path)\n\n img = cv2.imread(args.img_path)\n h, w, _ = img.shape\n img_resize = cv2.resize(img, (320, 240))\n img_resize = cv2.cvtColor(img_resize, cv2.COLOR_BGR2RGB)\n img_resize = img_resize - 127.0\n img_resize = img_resize / 128.0\n\n results = model.predict(np.expand_dims(img_resize, axis=0)) # result=[background,face,x1,y1,x2,y2]\n\n for result in results:\n start_x = int(result[2] * w)\n start_y = int(result[3] * h)\n end_x = int(result[4] * w)\n end_y = int(result[5] * h)\n\n cv2.rectangle(img, (start_x, start_y), (end_x, end_y), (0, 255, 0), 2)\n\n cv2.imwrite(f'imgs/test_output_{args.net_type}.jpg', img)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"tensorflow.keras.models.load_model",
"numpy.expand_dims"
]
]
|
NegatioN/pytorch-tutorial | [
"1ec90fad7260e7871a4912fce552cad90f6c2f4a"
]
| [
"tutorials/03-advanced/image_captioning/model.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.autograd import Variable\n\n\nclass EncoderCNN(nn.Module):\n def __init__(self, embed_size):\n \"\"\"Load the pretrained ResNet-152 and replace top fc layer.\"\"\"\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()\n \n def init_weights(self):\n \"\"\"Initialize the weights.\"\"\"\n self.linear.weight.data.normal_(0.0, 0.02)\n self.linear.bias.data.fill_(0)\n \n def forward(self, images):\n \"\"\"Extract the image feature vectors.\"\"\"\n features = self.resnet(images)\n features = Variable(features.data)\n features = features.view(features.size(0), -1)\n features = self.bn(self.linear(features))\n return features\n \n \nclass DecoderRNN(nn.Module):\n def __init__(self, embed_size, hidden_size, vocab_size, num_layers):\n \"\"\"Set the hyper-parameters and build the layers.\"\"\"\n super(DecoderRNN, self).__init__()\n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.init_weights()\n \n def init_weights(self):\n \"\"\"Initialize weights.\"\"\"\n self.embed.weight.data.uniform_(-0.1, 0.1)\n self.linear.weight.data.uniform_(-0.1, 0.1)\n self.linear.bias.data.fill_(0)\n \n def forward(self, features, captions, lengths):\n \"\"\"Decode image feature vectors and generates captions.\"\"\"\n embeddings = self.embed(captions)\n embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)\n packed = pack_padded_sequence(embeddings, lengths, batch_first=True) \n hiddens, _ = self.lstm(packed)\n outputs = self.linear(hiddens[0])\n return outputs\n \n def sample(self, features, states=None):\n \"\"\"Samples captions for given image features (Greedy search).\"\"\"\n sampled_ids = []\n inputs = features.unsqueeze(1)\n for i in range(20): # maximum sampling length\n hiddens, states = self.lstm(inputs, states) # (batch_size, 1, hidden_size), \n outputs = self.linear(hiddens.squeeze(1)) # (batch_size, vocab_size)\n predicted = outputs.max(1)[1]\n sampled_ids.append(predicted)\n inputs = self.embed(predicted)\n inputs = inputs.unsqueeze(1) # (batch_size, 1, embed_size)\n sampled_ids = torch.cat(sampled_ids, 1) # (batch_size, 20)\n return sampled_ids.squeeze()\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.Sequential",
"torch.autograd.Variable",
"torch.nn.BatchNorm1d",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Embedding"
]
]
|
thcasey3/lsframe | [
"f1d667a305ecd860417b7d1cbbfa1bbfcc40107e"
]
| [
"unittests/test_lsframe.py"
]
| [
"import unittest\nimport numpy as np\nimport os\nimport pandas as pd\nfrom datetime import date\nfrom lsframe import start, intake, engine, tools\nfrom numpy.testing import assert_array_equal\n\n\nclass startTester(unittest.TestCase):\n def setUp(self):\n self.path = os.path.normpath(\"./data/\")\n self.subd = os.listdir(self.path)\n self.meas = [\"meas1\", \"meas2\"]\n self.patt = [\"patt1\", \"patt2\"]\n self.skip = [\"skip1\", \"skip2\"]\n self.func = tools.utest.utest\n self.farg = {\"key\": \"value\"}\n self.date_format = \"YYYYMMDD\"\n\n self.start_result = start(\n self.path,\n patterns=self.patt,\n skip=self.skip,\n classifiers=self.meas,\n function=self.func,\n function_args=self.farg,\n date_format=self.date_format,\n )\n\n def test_a_start_object(self):\n\n self.assertEqual(self.path, self.start_result.directory)\n self.assertEqual(self.subd, os.listdir(self.start_result.directory))\n self.assertEqual(self.meas, self.start_result.classifiers)\n self.assertEqual(self.patt, self.start_result.patterns)\n self.assertEqual(self.skip, self.start_result.skip)\n self.assertEqual(self.func, self.start_result.function)\n self.assertEqual(self.farg, self.start_result.function_args)\n self.assertEqual(self.date_format, self.start_result.date_format)\n\n with self.assertRaises(TypeError):\n result = start(\n self.path,\n classifiers=(\"one\", \"two\"),\n function=self.func,\n function_args=self.farg,\n )\n\n with self.assertRaises(TypeError):\n result = start(\n self.path,\n patterns=(\"one\", \"two\"),\n function=self.func,\n function_args=self.farg,\n )\n\n with self.assertRaises(TypeError):\n result = start(\n self.path,\n skip=(\"one\", \"two\"),\n function=self.func,\n function_args=self.farg,\n )\n\n with self.assertRaises(TypeError):\n result = start(directory=[\"one\", \"two\"])\n\n with self.assertRaises(ValueError):\n result = start(self.path, date_format=\"YearMonthDay\")\n\n def test_b_drive_method(self):\n\n self.start_result.drive()\n self.assertEqual(len(self.start_result.frame.columns), 7)\n self.assertEqual(self.start_result.frame.columns[0], \"name\")\n self.assertEqual(self.start_result.frame.columns[3], \"patt1\")\n self.assertEqual(self.start_result.frame.columns[4], \"patt2\")\n self.assertEqual(self.start_result.frame.columns[1], \"date\")\n self.assertEqual(self.start_result.frame.columns[2], \"date_delta\")\n find_loc = self.start_result.frame[\n self.start_result.frame[\"name\"] == \"example.csv\"\n ].index[0]\n self.assertEqual(self.start_result.frame.loc[find_loc, \"date_delta\"], 0)\n find_loc = self.start_result.frame[\n self.start_result.frame[\"name\"] == \"19850802_example_short.csv\"\n ].index[0]\n self.assertEqual(\n self.start_result.frame.loc[find_loc, \"date\"], date(1985, 8, 2)\n )\n self.assertNotEqual(self.start_result.frame.loc[find_loc, \"date_delta\"], 0)\n self.assertEqual(self.start_result.frame.columns[5], \"meas1\")\n self.assertEqual(self.start_result.frame.columns[6], \"meas2\")\n\n result = start(self.path)\n result.drive(\n classifiers=[\"new1\", \"new2\"], function=self.func, function_args=self.farg\n )\n self.assertEqual(result.frame.columns[1], \"new1\")\n self.assertEqual(result.frame.columns[2], \"new2\")\n\n self.start_result.function = tools.utest2.proc_epr\n self.start_result.drive()\n self.assertTrue([x == \"null\" for x in self.start_result.frame[\"meas1\"]])\n self.assertTrue([x == \"null\" for x in self.start_result.frame[\"meas2\"]])\n\n result = start(\n self.path,\n classifiers=[\"justone\"],\n function=self.func,\n 
function_args=self.farg,\n )\n with self.assertRaises(ValueError):\n result.drive()\n\n result = start(\n self.path,\n classifiers=\"justone\",\n function=self.func,\n function_args=self.farg,\n )\n with self.assertRaises(ValueError):\n result.drive()\n\n result = start(self.path, function=self.func, function_args=[\"arg1\", \"arg2\"])\n with self.assertRaises(TypeError):\n result.drive()\n\n def test_c_patterns_methods(self):\n\n result = start(self.path, patterns={\"example\": \"_test\", \"short\": bool})\n find_loc = result.frame[result.frame[\"example\"] == \"_test\"].index[0]\n self.assertFalse(result.frame.loc[find_loc, \"short\"])\n result.reduce_names(remove=[\"short\"])\n find_loc = result.frame[result.frame[\"name\"] == \"example.csv\"].index[0]\n self.assertFalse(result.frame.loc[find_loc, \"example\"])\n find_loc = result.frame[\n result.frame[\"name\"] == \"850802_example_test.csv\"\n ].index[0]\n self.assertEqual(result.frame.loc[find_loc, \"example\"], \"_test\")\n\n def test_d_skip_methods(self):\n\n result = start(self.path, skip=\"short\")\n self.assertEqual(len(os.listdir(self.path)) - 2, len(result.frame))\n\n with self.assertRaises(TypeError):\n result = start(self.path, skip=1)\n\n def test_e_dates_methods(self):\n\n self.start_result.date_format = \"any\"\n self.start_result.find_dates()\n find_loc = self.start_result.frame[\n self.start_result.frame[\"name\"] == \"19850802_example_short.csv\"\n ].index[0]\n self.assertTrue(isinstance(self.start_result.frame.loc[find_loc, \"date\"], list))\n self.start_result.reduce_dates(keep=\"YYYYMMDD\")\n self.assertTrue(isinstance(self.start_result.frame.loc[find_loc, \"date\"], date))\n\n self.start_result.find_dates()\n self.start_result.reduce_dates(remove=[\"YYYYMMDD\"])\n self.assertTrue(isinstance(self.start_result.frame.loc[find_loc, \"date\"], list))\n self.assertTrue(\n [\n x\n for x in self.start_result.frame.loc[find_loc, \"date\"]\n if isinstance(x, date)\n ]\n )\n\n find_loc = self.start_result.frame[\n self.start_result.frame[\"name\"] == \"850802_example_test.csv\"\n ].index[0]\n self.start_result.find_dates()\n self.assertTrue(\n date(1985, 8, 2) in self.start_result.frame.loc[find_loc, \"date\"]\n )\n self.start_result.reduce_dates(remove=[\"YYMMDD\", \"YYDDMM\"])\n self.assertTrue(isinstance(self.start_result.frame.loc[find_loc, \"date\"], list))\n self.assertTrue(\n self.start_result.frame.loc[find_loc, \"date_format\"]\n == [\"YYMDD\", \"MDDYY\", \"YYDMM\", \"DMMYY\", \"MDYY\", \"DMYY\"]\n )\n self.assertFalse(\n date(1985, 8, 2) in self.start_result.frame.loc[find_loc, \"date\"]\n )\n\n self.start_result.frame = pd.DataFrame(\n [\n \"19950509\",\n \"19950905\",\n \"05091995\",\n \"09051995\",\n \"950509\",\n \"950905\",\n \"050995\",\n \"090595\",\n \"1995509\",\n \"1995095\",\n \"5091995\",\n \"0951995\",\n \"95509\",\n \"95095\",\n \"50995\",\n \"09595\",\n \"1995059\",\n \"1995905\",\n \"0591995\",\n \"9051995\",\n \"95059\",\n \"95905\",\n \"05995\",\n \"90595\",\n \"199559\",\n \"199595\",\n \"591995\",\n \"951995\",\n \"9559\",\n \"9595\",\n \"5995\",\n \"9595\",\n \"1995-05-09\",\n \"1995-09-05\",\n \"05-09-1995\",\n \"09-05-1995\",\n \"95-05-09\",\n \"95-09-05\",\n \"05;09:95\",\n \"09-05-95\",\n \"1995-5-09\",\n \"1995-09-5\",\n \"5_09-1995\",\n \"09-5-1995\",\n \"95-5-09\",\n \"95-09-5\",\n \"5-09-95\",\n \"09-5-95\",\n \"1995-05-9\",\n \"1995-9-05\",\n \"05-9-1995\",\n \"9-05-1995\",\n \"95-05-9\",\n \"95-9-05\",\n \"05-9-95\",\n \"9-05-95\",\n \"1995;5;9\",\n \"1995:9:5\",\n \"5/9/1995\",\n 
\"9_5_1995\",\n \"95-5-9\",\n \"95-9-5\",\n \"5-9-95\",\n \"9-5-95\",\n ],\n columns=[\"name\"],\n )\n\n date_strngs = [\n \"YYYYMMDD\",\n \"YYYYDDMM\",\n \"MMDDYYYY\",\n \"DDMMYYYY\",\n \"YYMMDD\",\n \"YYDDMM\",\n \"MMDDYY\",\n \"DDMMYY\",\n \"YYYYMDD\",\n \"YYYYDDM\",\n \"MDDYYYY\",\n \"DDMYYYY\",\n \"YYMDD\",\n \"YYDDM\",\n \"MDDYY\",\n \"DDMYY\",\n \"YYYYMMD\",\n \"YYYYDMM\",\n \"MMDYYYY\",\n \"DMMYYYY\",\n \"YYMMD\",\n \"YYDMM\",\n \"MMDYY\",\n \"DMMYY\",\n \"YYYYMD\",\n \"YYYYDM\",\n \"MDYYYY\",\n \"DMYYYY\",\n \"YYMD\",\n \"YYDM\",\n \"MDYY\",\n \"DMYY\",\n \"YYYY-MM-DD\",\n \"YYYY-DD-MM\",\n \"MM-DD-YYYY\",\n \"DD-MM-YYYY\",\n \"YY;MM;DD\",\n \"YY-DD-MM\",\n \"MM-DD-YY\",\n \"DD-MM-YY\",\n \"YYYY-M-DD\",\n \"YYYY/DD/M\",\n \"M-DD-YYYY\",\n \"DD-M-YYYY\",\n \"YY-M-DD\",\n \"YY-DD-M\",\n \"M-DD-YY\",\n \"DD-M-YY\",\n \"YYYY:MM:D\",\n \"YYYY-D-MM\",\n \"MM-D-YYYY\",\n \"D-MM-YYYY\",\n \"YY-MM-D\",\n \"YY-D-MM\",\n \"MM-D-YY\",\n \"D-MM-YY\",\n \"YYYY-M-D\",\n \"YYYY-D-M\",\n \"M-D-YYYY\",\n \"D-M-YYYY\",\n \"YY-M-D\",\n \"YY_D_M\",\n \"M-D-YY\",\n \"D-M-YY\",\n ]\n\n for indx, dat in enumerate(self.start_result.frame[\"name\"]):\n self.start_result.date_format = date_strngs[indx]\n self.start_result.find_dates()\n self.assertTrue(isinstance(self.start_result.frame.loc[indx, \"date\"], date))\n self.assertEqual(\n self.start_result.frame.loc[indx, \"date\"], date(1995, 5, 9)\n )\n\n self.start_result.date_format = \"any\"\n self.start_result.find_dates()\n self.assertEqual(len(self.start_result.frame.loc[0, \"date_format\"]), 9)\n self.start_result.reduce_dates()\n self.assertEqual(len(self.start_result.frame.loc[0, \"date_format\"]), 6)\n\n self.start_result.frame = pd.DataFrame(\n [\"19900509\", \"19950509\", \"20000509\", \"20050509\", \"20100509\"],\n columns=[\"name\"],\n )\n self.start_result.date_format = \"YYYYMMDD\"\n self.start_result.find_dates()\n self.assertEqual(len(self.start_result.frame), 5)\n self.start_result.in_range(\n keep=[[\"1989-05-09\", \"1999-05-09\"], [\"2001-05-09\", \"2011-05-09\"]]\n )\n self.assertEqual(len(self.start_result.frame), 4)\n self.start_result.on_date(remove=\"1990-05-09\")\n self.assertEqual(len(self.start_result.frame), 3)\n\n self.start_result.frame = pd.DataFrame(\n [\n \"19900509\",\n \"19950509\",\n \"20000509\",\n \"20050509\",\n \"20100509\",\n \"test1\",\n \"test2\",\n \"test3\",\n ],\n columns=[\"name\"],\n )\n self.start_result.date_format = \"YYYYMMDD\"\n self.start_result.find_dates()\n self.assertEqual(len(self.start_result.frame), 8)\n self.start_result.in_range(\n remove=[[\"1989-05-09\", \"1994-05-09\"], [\"2001-05-09\", \"2006-05-09\"]],\n strip_zeros=False,\n )\n self.assertEqual(len(self.start_result.frame), 6)\n self.start_result.on_date(keep=\"2010-05-09\", strip_zeros=False)\n self.assertEqual(len(self.start_result.frame), 4)\n self.start_result.reduce_dates(strip_zeros=True)\n self.assertEqual(len(self.start_result.frame), 1)\n\n def test_f_map_directory_methods(self):\n\n self.start_result.map_directory()\n self.assertTrue(\n self.start_result.directory_map[self.start_result.directory].__contains__,\n \"example.csv\",\n )\n\n lsobject = start()\n lsobject.directory_map = {\n \"test_red\": [\"r\", \"re\", \"rd\", \"ed\"],\n \"test_blue\": [\"b\", \"bl\", \"blu\"],\n \"test_green\": [\"g\", \"gr\"],\n os.path.join(\"test_red\", \"maroon\"): [\"r\", \"re\", \"rd\", \"ed\"],\n os.path.join(\"test_blue\", \"navy\", \"sky\"): [\"b\", \"bl\", \"blu\"],\n os.path.join(\"test_green\", \"winter\", \"neon\", \"forest\"): [\"g\", 
\"gr\"],\n }\n self.assertEqual(len(lsobject.frame), 0)\n lsobject.map_to_frame(depth=2, kind=\"folders\", to_frame=True)\n self.assertEqual(len(lsobject.frame), 1)\n new = lsobject.map_to_frame(depth=1, kind=\"files\", to_frame=False)\n self.assertEqual(len(new), 9)\n new = lsobject.map_to_frame(depth=3, kind=\"files\", to_frame=False)\n self.assertEqual(len(new), 3)\n lsobject.map_to_frame(depth=4, kind=\"any\", to_frame=True)\n self.assertEqual(len(lsobject.frame), 2)\n\n lsobject.map_directory(self.path, skip=[\"short\", \".DS_Store\"])\n self.assertEqual(len(lsobject.directory_map.keys()), 1)\n self.assertEqual(\n len(lsobject.directory_map[list(lsobject.directory_map.keys())[0]]), 2\n )\n\n with self.assertRaises(ValueError):\n self.start_result.map_directory(only_hidden=True)\n\n def test_g_sea_method(self):\n\n with self.assertRaises(KeyError):\n self.start_result.sea(seaborn_args={})\n\n with self.assertRaises(ValueError):\n self.start_result.sea(\n kind=\"scatterplot\", seaborn_args={\"x\": 0, \"y\": 0, \"hue\": 1}\n )\n\n def test_h_save_method(self):\n\n with self.assertRaises(TypeError):\n self.start_result.save(filename=100)\n\n if \"utest.csv\" in os.listdir(self.path):\n os.remove(os.path.join(self.path, \"utest.csv\"))\n\n self.start_result.save(filename=os.path.join(self.path, \"utest\"))\n self.assertTrue(\"utest.csv\" in os.listdir(self.path))\n\n os.remove(os.path.join(self.path, \"utest.csv\"))\n\n self.start_result.save(filename=os.path.join(self.path, \"utest.csv\"))\n self.assertTrue(\"utest.csv\" in os.listdir(self.path))\n self.assertFalse(\"utest.csv.csv\" in os.listdir(self.path))\n os.remove(os.path.join(self.path, \"utest.csv\"))\n\n if \"utest.csv.csv\" in os.listdir(self.path):\n os.remove(os.path.join(self.path, \"utest.csv.csv\"))\n\n if str(date.today()) + \"_DataFrame.csv\" in os.listdir(self.path):\n os.remove(os.path.join(self.path, str(date.today()) + \"_DataFrame.csv\"))\n\n self.start_result.save()\n self.assertTrue(str(date.today()) + \"_DataFrame.csv\" in os.listdir(self.path))\n os.remove(os.path.join(self.path, str(date.today()) + \"_DataFrame.csv\"))\n\n\nif __name__ == \"__main__\":\n pass\n"
]
| [
[
"pandas.DataFrame"
]
]
|
Jd8111997/Speech-Enhancer | [
"2f9a5c16c171d447328f3dc80ac7d3a5c53bf7ad"
]
| [
"VirtualBatchNorm.py"
]
| [
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules import Module\n\nclass VirtualBatchNorm1d(Module):\n\n\t\"\"\"\n\n\tModule for Virtual Batch Normalizaton\n\n\tImplementation borrowed and modified from Rafael_Valle's code + help of SimonW from this discussion thread:\n\thttps://discuss.pytorch.org/t/parameter-grad-of-conv-weight-is-none-after-virtual-batch-normalization/9036\n\t\"\"\"\n\n\tdef __init__(self, num_features, eps=1e-5):\n\t\tsuper().__init__()\n\t\tself.num_features = num_features\n\t\tself.eps = eps\n\t\tself.gamma = Parameter(torch.normal(mean=1.0, std=0.02, size=(1, num_features, 1)))\n\t\tself.beta = Parameter(torch.zeros(1, num_features, 1))\n\n\tdef get_stats(self, x):\n\n\t\tmean = x.mean(2, keepdim=True).mean(0, keepdim=True)\n\t\tmean_sq = (x ** 2).mean(2, keepdim=True).mean(0, keepdim=True)\n\t\treturn mean, mean_sq\n\n\tdef forward(self, x, ref_mean, ref_mean_sq):\n\n\t\tmean, mean_sq = self.get_stats(x)\n\t\tif ref_mean is None or ref_mean_sq is None:\n\t\t\tmean = mean.clone().detach()\n\t\t\tmean_sq = mean_sq.clone().detach()\n\t\t\tout = self.normalize(x, mean, mean_sq)\n\t\telse:\n\t\t\tbatch_size = x.size(0)\n\t\t\tnew_coeff = 1. / (batch_size + 1.)\n\t\t\told_coeff = 1. - new_coeff\n\t\t\tmean = new_coeff * mean + old_coeff * ref_mean\n\t\t\tmean_sq = new_coeff * mean_sq + old_coeff * ref_mean_sq\n\t\t\tout = self.normalize(x, mean, mean_sq)\n\t\treturn out, mean, mean_sq\n\n\tdef normalize(self, x, mean, mean_sq):\n\n\t\tassert mean_sq is not None\n\t\tassert mean is not None\n\t\tassert len(x.size()) == 3\n\t\tif mean.size(1) != self.num_features:\n\t\t\traise Exception('Mean tensor size not equal to number of features : given {}, expected {}'\n .format(mean.size(1), self.num_features))\n\t\tif mean_sq.size(1) != self.num_features:\n\t\t\traise Exception('Squared mean tensor size not equal to number of features : given {}, expected {}'\n .format(mean_sq.size(1), self.num_features))\n\n\t\tstd = torch.sqrt(self.eps + mean_sq - mean ** 2)\n\t\tx = x - mean\n\t\tx = x / std\n\t\tx = x * self.gamma\n\t\tx = x + self.beta\n\t\treturn x\n\n\tdef __repr__(self):\n\t\treturn ('{name}(num_features={num_features}, eps={eps}'\n .format(name=self.__class__.__name__, **self.__dict__))\n\t\t\n"
]
| [
[
"torch.zeros",
"torch.sqrt",
"torch.normal"
]
]
|
JibranKalia/tfx | [
"05ce31aa71ed38f7978f6cb7c7571202f8283e93"
]
| [
"tfx/examples/chicago_taxi/chicago_taxi_client.py"
]
| [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A client for the chicago_taxi demo.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport base64\nimport json\nimport os\nimport subprocess\nimport tempfile\n\nimport requests\nimport tensorflow as tf\n\nfrom tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import\nfrom tfx.examples.chicago_taxi.trainer import taxi\n\n_LOCAL_INFERENCE_TIMEOUT_SECONDS = 5.0\n\n\ndef _do_local_inference(host, port, serialized_examples):\n \"\"\"Performs inference on a model hosted by the host:port server.\"\"\"\n\n json_examples = []\n for serialized_example in serialized_examples:\n # The encoding follows the guidelines in:\n # https://www.tensorflow.org/tfx/serving/api_rest\n example_bytes = base64.b64encode(serialized_example).decode('utf-8')\n predict_request = '{ \"b64\": \"%s\" }' % example_bytes\n json_examples.append(predict_request)\n\n json_request = '{ \"instances\": [' + ','.join(map(str, json_examples)) + ']}'\n\n server_url = 'http://' + host + ':' + port + '/v1/models/chicago_taxi:predict'\n response = requests.post(\n server_url, data=json_request, timeout=_LOCAL_INFERENCE_TIMEOUT_SECONDS)\n response.raise_for_status()\n prediction = response.json()\n print(json.dumps(prediction, indent=4))\n\n\ndef _do_aiplatform_inference(model, version, serialized_examples):\n \"\"\"Performs inference on the model:version in AI Platform.\"\"\"\n working_dir = tempfile.mkdtemp()\n instances_file = os.path.join(working_dir, 'test.json')\n json_examples = []\n for serialized_example in serialized_examples:\n # The encoding follows the example in:\n # https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py\n json_examples.append(\n '{ \"inputs\": { \"b64\": \"%s\" } }' % base64.b64encode(serialized_example))\n file_io.write_string_to_file(instances_file, '\\n'.join(json_examples))\n gcloud_command = [\n 'gcloud', 'ai-platform', 'predict', '--model', model, '--version',\n version, '--json-instances', instances_file\n ]\n print(subprocess.check_output(gcloud_command))\n\n\ndef _do_inference(model_handle, examples_file, num_examples, schema):\n \"\"\"Sends requests to the model and prints the results.\n\n Args:\n model_handle: handle to the model. 
This can be either\n \"aiplatform:model:version\" or \"host:port\"\n examples_file: path to csv file containing examples, with the first line\n assumed to have the column headers\n num_examples: number of requests to send to the server\n schema: a Schema describing the input data\n\n Returns:\n Response from model server\n \"\"\"\n filtered_features = [\n feature for feature in schema.feature if feature.name != taxi.LABEL_KEY\n ]\n del schema.feature[:]\n schema.feature.extend(filtered_features)\n\n csv_coder = taxi.make_csv_coder(schema)\n proto_coder = taxi.make_proto_coder(schema)\n\n input_file = open(examples_file, 'r')\n input_file.readline() # skip header line\n\n serialized_examples = []\n for _ in range(num_examples):\n one_line = input_file.readline()\n if not one_line:\n print('End of example file reached')\n break\n one_example = csv_coder.decode(one_line)\n\n serialized_example = proto_coder.encode(one_example)\n serialized_examples.append(serialized_example)\n\n parsed_model_handle = model_handle.split(':')\n if parsed_model_handle[0] == 'aiplatform':\n _do_aiplatform_inference(\n model=parsed_model_handle[1],\n version=parsed_model_handle[2],\n serialized_examples=serialized_examples)\n else:\n _do_local_inference(\n host=parsed_model_handle[0],\n port=parsed_model_handle[1],\n serialized_examples=serialized_examples)\n\n\ndef main(_):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--num_examples',\n help=('Number of examples to send to the server.'),\n default=1,\n type=int)\n\n parser.add_argument(\n '--server',\n help=('Prediction service host:port or aiplatform:model:version'),\n required=True)\n\n parser.add_argument(\n '--examples_file',\n help=('Path to csv file containing examples.'),\n required=True)\n\n parser.add_argument(\n '--schema_file', help='File holding the schema for the input data')\n known_args, _ = parser.parse_known_args()\n _do_inference(known_args.server,\n known_args.examples_file, known_args.num_examples,\n taxi.read_schema(known_args.schema_file))\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
]
| [
[
"tensorflow.app.run"
]
]
|
moosichu/introduction-to-neural-networks | [
"8cf481d97d4feb98e9df532e174cc9376434c82a"
]
| [
"presentation_scripts/single_neuron_sample_data_generator.py"
]
| [
"import random\nimport matplotlib.pyplot as plt\nimport numpy\n\n\ndef dcdw_dcdb(xs, ys, w, b):\n total_w = 0\n total_b = 0\n for x, y in zip(xs, ys):\n total_w += 2 * x * ((b + (w * x)) - y)\n total_b += 2 * ((b + (w * x)) - y)\n return total_w, total_b\n\n# TODO: look into https://stackoverflow.com/questions/7130474/3d-vector-field-in-matplotlib\n\ndef main():\n m = -0.7\n c = 3\n xs = []\n ys = []\n for _ in range(10):\n x = random.randrange(100)\n x = x/10. - 5.\n y = m * x + c\n y = numpy.random.normal(y, 0.35)\n xs.append(x)\n ys.append(y)\n \n # xs = [-2.4, 4.300000000000001, -2.0, -3.7, -0.7999999999999998, 3.5, 4.0, -4.6, 0.9000000000000004, -0.20000000000000018, 1.5999999999999996, 0.0, 1.7000000000000002, 1.0999999999999996, -3.4, -4.9, -4.6, 1.0, -0.40000000000000036, -2.7]\n # ys = [4.759293605346915, 0.13560844662018068, 4.409004322428809, 5.951626163611069, 3.535332546158398, 0.4345618659832912, 0.19630767796951268, 6.180286480925455, 2.3955829525915457, 3.2350001397287773, 1.982982490017593, 3.160544994893823, 2.1304015064786053, 2.2043924820289433, 5.3960684168484265, 6.283015091058907, 6.204641342258453, 2.342950919346456, 3.3687053847893824, 4.926563213159792]\n # xs = [2.5999999999999996, 1.0999999999999996, -1.2999999999999998, 3.5, 1.9000000000000004, 1.7000000000000002, 1.9000000000000004, 2.5, 0.5, 2.0999999999999996]\n # ys = [1.1652351304587412, 2.705812861324486, 3.6677296482192934, 0.5936572691701745, 1.6920558774240564, 1.811585400937681, 2.153338300485198, 0.8427302922065774, 2.5413553300250222, 1.977319644626561]\n \n xs = [-2.0, 3.6999999999999993, 4.699999999999999, 1.0, 3.4000000000000004, 1.9000000000000004, 0.5, 3.6999999999999993, -3.7, -1.4]\n ys = [4.416856407182695, 0.47057967159614744, -0.29887819819469424, 1.4013832483099857, 0.4316035832132115, 1.4101057523285265, 2.0118609471559328, 0.5242922714864773, 5.620627159141899, 3.494602189431038]\n \n fig, ax = plt.subplots()\n ax.scatter(xs, ys)\n \n ms, cs = numpy.polyfit(xs, ys, 1)\n\n plt.xlim((-5, 5))\n plt.ylim((ms * 5 + cs, ms * -5 + cs))\n\n plt.savefig(\"presentation/img/single-neuron-example-data.svg\")\n\n\n regression_line = ax.plot([-5, 5], [ms * -5 + cs, ms * 5 + cs], color='red', label='y = {:.2f}x + {:.2f}'.format(ms, cs))\n ax.legend(handles=regression_line)\n \n plt.savefig(\"presentation/img/single-neuron-example-data-with-regression-line.svg\")\n\n w = 1\n b = 1\n mu = 0.1\n\n\n weights = [(w, b)]\n print(\"({}, {})\".format(w, b))\n for i in range(100):\n dw, db = dcdw_dcdb(xs, ys, w, b)\n w = w - mu * dw\n b = b - mu * db\n weights.append((w, b))\n\n\n print(xs)\n print(ys)\n for x, y in zip(xs, ys):\n print(\"<tr><th>{:.1f}</th><th>{:.1f}</th></tr>\".format(x, y))\n\n\n def print_weight(index):\n w, b = weights[index]\n print(\"<li class=\\\"fragment\\\" value=\\\"{}\\\">$w$ = {:.4g}, $b$ = {:.4g}</li>\".format(index + 1, w, b))\n\n # mu is 0.01/0.1\n print_weight(0)\n print_weight(1)\n print_weight(2)\n print_weight(3)\n print_weight(4)\n print_weight(7)\n print_weight(17)\n print_weight(25)\n print_weight(35)\n\n # mu is 0.001\n # print_weight(0)\n # print_weight(1)\n # print_weight(2)\n # print_weight(4)\n # print_weight(7)\n # print_weight(17)\n # print_weight(56)\n # print_weight(125)\n # print_weight(356)\n # print_weight(357)\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"numpy.random.normal",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"numpy.polyfit"
]
]
|
zdx3578/gym-zdx | [
"b72e638095e23b256fe72fc38ef45d2ca1652b6c"
]
| [
"gym/spaces/box.py"
]
| [
"import numpy as np\n\nimport gym\nfrom gym.spaces import prng\n\nclass Box(gym.Space):\n \"\"\"\n A box in R^n.\n I.e., each coordinate is bounded.\n \"\"\"\n def __init__(self, low, high, shape=None):\n \"\"\"\n Two kinds of valid input:\n Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is provided\n Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are arrays of the same shape\n \"\"\"\n if shape is None:\n assert low.shape == high.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high)\n self.low = low + np.zeros(shape)\n self.high = high + np.zeros(shape)\n def sample(self):\n return prng.np_random.uniform(low=self.low, high=self.high, size=self.low.shape)\n def contains(self, x):\n return x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n @property\n def shape(self):\n return self.low.shape\n def __repr__(self):\n return \"Box\" + str(self.shape)\n def __eq__(self, other):\n return np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n"
]
| [
[
"numpy.array",
"numpy.asarray",
"numpy.zeros",
"numpy.allclose",
"numpy.isscalar"
]
]
|
danforthcenter/plantcv-dev-scripts | [
"57ebc9a031b7141b8965c927c3b7b01ba6504dc1"
]
| [
"dev/analyze_vis_results.py"
]
| [
"#!/usr/bin/env python\n\nimport sys, traceback\nimport os\nimport re\nimport sqlite3 as sq\nimport distutils.core\nimport cv2\nimport numpy as np\nimport argparse\nimport string\nimport plantcv as pcv\nfrom datetime import datetime\nimport Image\nimport matplotlib\nif not os.getenv('DISPLAY'):\n matplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\n\ndef handle_vis_output(directory,imgtype,outdir,action):\n # directory = path to directory you want to grab images from\n # imgtype = type of output image you want to move or copy (options are rgb_slice, pseudo_on_img, pseudo_on_white, shapes, or, histogram)\n # outdir = where you want the images to go\n # action = either 'copy' or 'move'\n \n if imgtype=='rgb_slice':\n path=str(directory)\n opendir=os.listdir(path)\n for filename in opendir:\n if re.search(\"rgb_norm_slice\\.png$\",filename):\n fromDirectory = str(directory)+str(filename)\n toDirectory = str(outdir)+str(filename)\n if action=='copy':\n distutils.file_util.copy_file(fromDirectory, toDirectory)\n elif action=='move':\n distutils.file_util.move_file(fromDirectory, toDirectory)\n else:\n pcv.fatal_error('action' + (str(action) + ' is not move or copy'))\n else:\n pcv.fatal_error(\"Sorry no \"+str(imgtype)+ \" images found\")\n elif imgtype=='pseudo_on_img':\n path=str(directory)\n opendir=os.listdir(path)\n for filename in opendir:\n if re.search(\"pseudo_on_img\\.png$\",filename):\n fromDirectory = str(directory)+str(filename)\n toDirectory = str(outdir)+str(filename)\n if action=='copy':\n distutils.file_util.copy_file(fromDirectory, toDirectory)\n elif action=='move':\n distutils.file_util.move_file(fromDirectory, toDirectory)\n else:\n pcv.fatal_error('action' + (str(action) + ' is not move or copy'))\n else:\n pcv.fatal_error(\"Sorry no \"+str(imgtype)+ \" images found\")\n elif imgtype=='pseudo_on_white':\n path=str(directory)\n opendir=os.listdir(path)\n for filename in opendir:\n if re.search(\"pseudo_on_white\\.png$\",filename):\n fromDirectory = str(directory)+str(filename)\n toDirectory = str(outdir)+str(filename)\n if action=='copy':\n distutils.file_util.copy_file(fromDirectory, toDirectory)\n elif action=='move':\n distutils.file_util.move_file(fromDirectory, toDirectory)\n else:\n pcv.fatal_error('action' + (str(action) + ' is not move or copy'))\n else:\n pcv.fatal_error(\"Sorry no \"+str(imgtype)+ \" images found\")\n elif imgtype=='shapes':\n path=str(directory)\n opendir=os.listdir(path)\n for filename in opendir:\n if re.search(\"shapes\\.png$\",filename):\n fromDirectory = str(directory)+str(filename)\n toDirectory = str(outdir)+str(filename)\n if action=='copy':\n distutils.file_util.copy_file(fromDirectory, toDirectory)\n elif action=='move':\n distutils.file_util.move_file(fromDirectory, toDirectory)\n else:\n pcv.fatal_error('action' + (str(action) + ' is not move or copy'))\n else:\n pcv.fatal_error(\"Sorry no \"+str(imgtype)+ \" images found\")\n elif imgtype=='histogram':\n path=str(directory)\n opendir=os.listdir(path)\n for filename in opendir:\n if re.search(\"hist\\.png$\",filename):\n fromDirectory = str(directory)+str(filename)\n toDirectory = str(outdir)+str(filename)\n if action=='copy':\n distutils.file_util.copy_file(fromDirectory, toDirectory)\n elif action=='move':\n distutils.file_util.move_file(fromDirectory, toDirectory)\n else:\n pcv.fatal_error('action' + (str(action) + ' is not move or copy'))\n else:\n pcv.fatal_error(\"Sorry no \"+str(imgtype)+ \" images found\")\n else:\n pcv.fatal_error('imgtype' + (str(imgtype) + ' is 
not rgb_slice, pseudo_on_img, pseudo_on_white, shapes, or, histogram!'))\n \n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\ndef slice_stitch(sqlitedb, outdir, camera_label='vis_sv', spacer='on',makefig='yes'):\n #sqlitedb = sqlite database to query (path to db)\n #outdir = path to outdirectory\n #camera_label = either 'vis_tv','vis_sv',or 'fluor_tv'\n #spacer = either 'on' or 'off', adds a white line between day breaks\n #makefig = either 'yes' or 'no', adds labels to days and a title\n\n i=datetime.now()\n timenow=i.strftime('%m-%d-%Y_%H:%M:%S')\n newfolder=\"slice_analysis_\"+(str(timenow))\n \n os.mkdir((str(outdir)+newfolder))\n \n connect=sq.connect(sqlitedb)\n connect.row_factory = dict_factory\n connect.text_factory=str\n c = connect.cursor()\n h = connect.cursor()\n m = connect.cursor()\n k = connect.cursor()\n\n id_array=[]\n path_array=[]\n unique_array=[]\n \n for date in c.execute('select min(datetime) as first from snapshots'):\n firstday=date['first']\n \n for i, group in enumerate(m.execute('select * from snapshots inner join analysis_images on snapshots.image_id = analysis_images.image_id where type = \"slice\" order by plant_id asc')):\n plant_id=group['plant_id']\n plantgeno=re.match('^([A-Z][a-zA-Z]\\d*[A-Z]{2})',plant_id)\n if plantgeno==None:\n plantgeno_id=group['plant_id']\n id_array.append(plantgeno_id,)\n else:\n span1,span2=plantgeno.span()\n plantgeno_id=group['plant_id'][span1:span2]\n id_array.append(plantgeno_id,)\n id_unique=np.unique(id_array)\n \n if spacer=='on':\n for group_label in id_unique:\n ch1=[]\n ch2=[]\n ch3=[]\n time_array=[]\n for i, data in enumerate(h.execute('select * from snapshots inner join analysis_images on snapshots.image_id = analysis_images.image_id where plant_id like ? and type = \"slice\" and camera=? order by datetime asc', (\"%\"+group_label+\"%\",camera_label,))):\n date_int=((data['datetime']-firstday)/86400) \n time_array.append(date_int)\n for i, data in enumerate(k.execute('select * from snapshots inner join analysis_images on snapshots.image_id = analysis_images.image_id where plant_id like ? and type = \"slice\" and camera=? 
order by datetime asc', (\"%\"+group_label+\"%\",camera_label,))): \n if i==0:\n line1=cv2.imread(data['image_path'])\n split1, split2, split3=np.dsplit(line1,3)\n split1_f=split1.flatten()\n split2_f=split2.flatten()\n split3_f=split3.flatten()\n \n stacked_1=np.column_stack((split1_f,split1_f, split1_f, split1_f, split1_f))\n stacked_2=np.column_stack((split2_f,split2_f, split2_f, split2_f, split2_f))\n stacked_3=np.column_stack((split3_f,split3_f, split3_f, split3_f, split3_f)) \n stacked_1t=np.transpose(stacked_1)\n stacked_2t=np.transpose(stacked_2)\n stacked_3t=np.transpose(stacked_3)\n \n ch1.extend(stacked_1t)\n ch2.extend(stacked_2t)\n ch3.extend(stacked_3t)\n elif time_array[i-1]==time_array[i]:\n line1=cv2.imread(data['image_path'])\n split1, split2, split3=np.dsplit(line1,3)\n \n split1_f=split1.flatten()\n split2_f=split2.flatten()\n split3_f=split3.flatten()\n \n stacked_1=np.column_stack((split1_f,split1_f, split1_f, split1_f, split1_f))\n stacked_2=np.column_stack((split2_f,split2_f, split2_f, split2_f, split2_f))\n stacked_3=np.column_stack((split3_f,split3_f, split3_f, split3_f, split3_f)) \n stacked_1t=np.transpose(stacked_1)\n stacked_2t=np.transpose(stacked_2)\n stacked_3t=np.transpose(stacked_3)\n \n ch1.extend(stacked_1t)\n ch2.extend(stacked_2t)\n ch3.extend(stacked_3t)\n else:\n line1=cv2.imread(data['image_path'])\n split1, split2, split3=np.dsplit(line1,3)\n \n split1_f=split1.flatten()\n split2_f=split2.flatten()\n split3_f=split3.flatten()\n \n spacer_size=np.shape(split1_f)\n spacer1=np.zeros(spacer_size)\n spacer_f=spacer1+255\n \n stacked_1=np.column_stack((spacer_f, spacer_f, spacer_f, spacer_f, spacer_f,spacer_f, spacer_f, spacer_f, spacer_f, spacer_f))\n stacked_2=np.column_stack((spacer_f, spacer_f, spacer_f, spacer_f, spacer_f,spacer_f, spacer_f, spacer_f, spacer_f, spacer_f))\n stacked_3=np.column_stack((spacer_f, spacer_f, spacer_f, spacer_f, spacer_f,spacer_f, spacer_f, spacer_f, spacer_f, spacer_f)) \n stacked_1t=np.transpose(stacked_1)\n stacked_2t=np.transpose(stacked_2)\n stacked_3t=np.transpose(stacked_3)\n \n ch1.extend(stacked_1t)\n ch2.extend(stacked_2t)\n ch3.extend(stacked_3t)\n \n stacked_4=np.column_stack((split1_f,split1_f, split1_f, split1_f, split1_f))\n stacked_5=np.column_stack((split2_f,split2_f, split2_f, split2_f, split2_f))\n stacked_6=np.column_stack((split3_f,split3_f, split3_f, split3_f, split3_f)) \n stacked_4t=np.transpose(stacked_4)\n stacked_5t=np.transpose(stacked_5)\n stacked_6t=np.transpose(stacked_6)\n \n ch1.extend(stacked_4t)\n ch2.extend(stacked_5t)\n ch3.extend(stacked_6t)\n \n color_cat=np.dstack((ch1,ch2,ch3))\n pcv.print_image(color_cat,(str(outdir)+str(newfolder)+\"/\"+str(group_label)+\"_\"+str(camera_label)+\"_spacer_\"+str(spacer)+\"_slice_joined_img.png\"))\n \n if spacer=='off':\n for group_label in id_unique:\n ch1=[]\n ch2=[]\n ch3=[]\n for i, data in enumerate(h.execute('select * from snapshots inner join analysis_images on snapshots.image_id = analysis_images.image_id where plant_id like ? and type = \"slice\" and camera=? 
order by datetime asc', (\"%\"+group_label+\"%\",camera_label,))):\n line1=cv2.imread(data['image_path'])\n \n split1, split2, split3=np.dsplit(line1,3)\n \n split1_f=split1.flatten()\n split2_f=split2.flatten()\n split3_f=split3.flatten()\n \n stacked_1=np.column_stack((split1_f,split1_f, split1_f, split1_f, split1_f))\n stacked_2=np.column_stack((split2_f,split2_f, split2_f, split2_f, split2_f))\n stacked_3=np.column_stack((split3_f,split3_f, split3_f, split3_f, split3_f)) \n stacked_1t=np.transpose(stacked_1)\n stacked_2t=np.transpose(stacked_2)\n stacked_3t=np.transpose(stacked_3)\n \n ch1.extend(stacked_1t)\n ch2.extend(stacked_2t)\n ch3.extend(stacked_3t)\n \n color_cat=np.dstack((ch1,ch2,ch3))\n pcv.print_image(color_cat,(str(outdir)+newfolder+\"/\"+str(group_label)+\"_\"+str(camera_label)+\"_spacer_\"+str(spacer)+\"_slice_joined_img.png\"))\n \n folder_path=(str(outdir)+newfolder)\n \n if makefig=='yes':\n list_files=os.listdir(folder_path)\n sorted_list=sorted(list_files)\n \n for group_label in id_unique:\n time_array=[]\n length_time=[]\n ypos=[]\n ypos1=[]\n ypos2=[]\n unique_time1=[]\n for i, data in enumerate(h.execute('select * from snapshots inner join analysis_images on snapshots.image_id = analysis_images.image_id where plant_id like ? and type = \"slice\" and camera=? order by datetime asc', (\"%\"+group_label+\"%\",camera_label,))):\n date_int=((data['datetime']-firstday)/86400) \n time_array.append(date_int)\n \n unique_time=np.unique(time_array)\n \n for times in unique_time:\n length=[]\n for i,time in enumerate(time_array):\n if time_array[i]==times:\n tm=1\n length.append(tm)\n else:\n tm=0\n length.append(tm)\n sum_time=np.sum(length)\n length_time.append(sum_time) \n \n if spacer=='off': \n for i,length in enumerate(length_time):\n if i==0:\n yadd=length*5\n ypos.append(yadd)\n else:\n yadd=(length*5)\n ypos.append(yadd)\n elif spacer=='on':\n for i,length in enumerate(length_time):\n if i==0:\n yadd=length*5\n ypos.append(yadd)\n else:\n yadd=(length*5)+10\n ypos.append(yadd)\n \n for i,y in enumerate(ypos):\n if i==0:\n y1=y\n ypos1.append(y1)\n else:\n y1=y+ypos1[i-1]\n ypos1.append(y1)\n \n for time in unique_time:\n time1=time+1\n unique_time1.append(time1)\n \n file_name=str(group_label)+\"_\"+str(camera_label)+\"_spacer_\"+str(spacer)+\"_slice_joined_img.png\"\n img1=cv2.imread((str(folder_path)+\"/\"+str(file_name)), -1)\n if len(np.shape(img1))==3: \n ch1,ch2,ch3=np.dsplit(img1,3)\n img=np.dstack((ch3,ch2,ch1))\n \n plt.imshow(img)\n ax = plt.subplot(111)\n ax.set_ylabel('Days on Bellwether Phenotyper', size=10)\n ax.set_yticks(ypos1)\n ax.set_yticklabels(unique_time1,size=5)\n ax.yaxis.tick_left()\n ax.set_xticks([0,255])\n ax.set_xticklabels([0,255],size=5)\n for t in ax.yaxis.get_ticklines(): t.set_color('white')\n for t in ax.xaxis.get_ticklines(): t.set_color('white')\n for line in ax.get_xticklines() + ax.get_yticklines(): line.set_alpha(0)\n ax.spines['bottom'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.spines['left'].set_color('none')\n ax.spines['right'].set_color('none')\n #ax.tick_params(axis='y',direction='out')\n\n plt.title(str(group_label))\n fig_name=(str(folder_path)+\"/\"+str(group_label)+\"_spacer_\"+str(spacer)+\"_slice_join_figure_img.png\")\n plt.savefig(fig_name, dpi=300)\n plt.clf()\n \n return folder_path\n\n\n\n\n\n "
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.savefig",
"numpy.shape",
"numpy.transpose",
"numpy.dstack",
"matplotlib.pyplot.clf",
"numpy.column_stack",
"numpy.dsplit",
"numpy.unique",
"matplotlib.pyplot.imshow"
]
]
|
boti996/onlab-public | [
"3ee399b9f40979a54236cd646cc7566a3639a03f"
]
| [
"codes/train_roma_segmentation.py"
]
| [
"import os\nimport pickle\n\nimport numpy as np\nfrom keras.losses import categorical_crossentropy\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\n\nimport codes.my_helper as helper\nimport codes.my_models as my_models\nimport codes.my_losses as my_losses\n\n\ndef main():\n\n # DATA\n\n # Train images: read from images folder, resize, normalize to 0..1 range\n data_path = '../datas/'\n images = helper.read_images(data_path + 'images_roma/image/')\n\n size = (320, 256)\n images = helper.resize_images(images, size)\n\n images = np.array(images) / 255\n\n # Train labels: read transformed labels from file if exists\n # else read images from folder,\n # resize, transform, and save transformed labels to file\n labels_path = data_path + 'labels_transformed_roma.p'\n\n if os.path.exists(labels_path):\n labels = pickle.load(open(labels_path, \"rb\"))\n else:\n labels = helper.read_images(data_path + 'images_roma/label/')\n\n labels = helper.resize_images(labels, size)\n\n labels = np.array(labels)\n classes = [[255, 255, 255]]\n labels = helper.rgb_to_classes(labels, classes) # TODO: rgb_to_binary!\n\n pickle.dump(labels, open(labels_path, \"wb\"))\n\n # Shuffle dateset, then create training- and validation arrays\n img_train, img_val, label_train, label_val = train_test_split(images, labels, test_size=0.15,\n shuffle=True, random_state=helper.random_state)\n # helper.blend_images(images=img_val, labels=label_val, folder_url=data_path + 'images_roma/output/')\n\n # MODEL\n\n # Main model parameters\n batch_size = 20\n epochs = 500\n input_shape = img_train.shape[1:]\n dropout_rate = 0.2\n pool_size = (2, 2)\n learning_rate = 0.001\n\n # Load model structure\n model = my_models.get_model_segm(input_shape, pool_size, dropout_rate, decoder_prefix='roma_', train_enc=True)\n\n # TODO: custom loss!\n # Initialize model, load pretrained encoder part weights + print model\n model.compile(optimizer=Adam(lr=learning_rate), loss=my_losses.weighted_binary_crossentropy) # categorical_crossentropy\n\n model_path = '../models/camvid_weights.500.h5'\n model.load_weights(model_path, by_name=True)\n\n my_models.train_model(model, img_train, img_val, label_train, label_val, batch_size, epochs,\n log_path='../logs/roma', save_path='../models/roma', out_path=data_path + 'images_roma/output/', datagen=True)\n\n\nmain()\n"
]
| [
[
"sklearn.model_selection.train_test_split",
"numpy.array"
]
]
|
YSL-1997/DBx1000 | [
"1e2ecfd21316a09967a5420e31bd9d2a5f98fe2b"
]
| [
"plot4_7.py"
]
| [
"import itertools\nimport operator\n\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\n\n\ndef parse(field, t=float):\n _, field = field[:-1].split(\"=\")\n return t(field)\n\n\ndef read_result(results_dir):\n for result_path in sorted(results_dir.iterdir()):\n job_name = result_path.name\n result_txt = result_path / \"result.txt\"\n\n if not result_txt.exists():\n print(f\"{job_name} does not exist\")\n continue\n\n with open(result_txt) as f:\n summary = f.readline()\n (\n _,\n txn_cnt,\n abort_cnt,\n run_time,\n time_wait,\n time_ts_alloc,\n time_man,\n time_index,\n time_abort,\n time_cleanup,\n latency,\n deadlock_cnt,\n cycle_detect,\n dl_detect_time,\n dl_wait_time,\n time_query,\n *_,\n ) = summary.split()\n\n workload, alg, index_type, num_threads, hotset_perc = job_name.split(\",\")\n\n yield workload, alg, index_type, int(num_threads), parse(txn_cnt) / parse(time_index), hotset_perc\n\n\ndef main(results_dir):\n res = sorted(read_result(results_dir))\n\n grouped_res = {\n key: list(items)\n for key, items in itertools.groupby(res, lambda item: item[:3])\n }\n\n plt.figure(figsize=(16, 10))\n\n for i, (key, items) in enumerate(grouped_res.items()):\n workload, alg, index_type = key\n\n hotset_perc_lst = [e[5] for e in items]\n run_time_lst = [e[4] for e in items]\n print(hotset_perc_lst)\n print(run_time_lst)\n\n d = {hotset_perc_lst[i]:run_time_lst[i] for i in range(len(hotset_perc_lst))}\n\n sorted_hotset_perc_lst = []\n sorted_run_time_lst = []\n\n for key in sorted(d.keys()):\n print(\"append\", key)\n sorted_hotset_perc_lst.append(key)\n sorted_run_time_lst.append(d[key])\n\n print(sorted_hotset_perc_lst)\n print(sorted_run_time_lst)\n \n label = \" \".join(key)\n\n index = {\n (\"IDX_HASH\", \"TPCC\"): 1,\n (\"IDX_BTREE\", \"TPCC\"): 2,\n (\"IDX_HASH\", \"YCSB\"): 3,\n (\"IDX_BTREE\", \"YCSB\"): 4,\n }\n\n plt.subplot(2, 2, index[(index_type, workload)])\n\n plt.plot(sorted_hotset_perc_lst, sorted_run_time_lst, label=alg, marker='o')\n # plt.xscale(\"log\", basex=2)\n plt.xlabel(\"Hotset Percentage\")\n plt.ylabel(\"Throughput (txn/sec)\")\n plt.legend()\n plt.title(f\"{workload} {index_type}\")\n\n plt.savefig(results_dir / \"4_7_hotset_perc_plot.png\")\n\n\nif __name__ == \"__main__\":\n results_dir = Path(\"4_7_results\")\n main(results_dir)"
]
| [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot"
]
]
|
PolKul/CASA-Dialogue-Act-Classifier | [
"32214d64d556505424b1efe54905371e7f417dcb"
]
| [
"models/UtteranceRNN.py"
]
| [
"\nimport torch\nimport torch.nn as nn\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer\n\n\nclass UtteranceRNN(nn.Module):\n \n def __init__(self, model_name=\"roberta-base\", hidden_size=768, bidirectional=True, num_layers=1, device=torch.device(\"cpu\")):\n super(UtteranceRNN, self).__init__()\n self.device=device\n \n # embedding layer is replaced by pretrained roberta's embedding\n self.base = AutoModel.from_pretrained(pretrained_model_name_or_path=model_name, return_dict=False)\n self.base.to(device)\n # freeze the model parameters\n for param in self.base.parameters():\n param.requires_grad = False\n \n #self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_size)\n self.rnn = nn.RNN(\n input_size=hidden_size, \n hidden_size=hidden_size, \n num_layers=num_layers, \n bidirectional=bidirectional,\n batch_first=True,\n )\n \n def forward(self, input_ids, attention_mask, seq_len):\n \"\"\"\n x.shape = [batch_size, seq_len]\n \"\"\"\n \n \n hidden_states,_ = self.base(input_ids, attention_mask) # hidden_states.shape = [batch, max_len, hidden_size]\n \n # padding and packing \n #packed_hidden_states = nn.utils.rnn.pack_padded_sequence(hidden_states, seq_len, batch_first=True, enforce_sorted=False) \n \n #packed_outputs, _ = self.rnn(packed_hidden_states)\n \n #packed_outputs is a packed sequence containing all hidden states\n #hidden is now from the final non-padded element in the batch\n \n #outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs, batch_first=True)\n \n outputs,_ = self.rnn(hidden_states)\n \n return outputs"
]
| [
[
"torch.nn.RNN",
"torch.device"
]
]
|
ravescovi/ffn | [
"82329f728757f732c381fed9d769fd70ad79db2f"
]
| [
"ffn/inference/inference.py"
]
| [
"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for running FFN inference.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport functools\nimport json\nimport logging\nimport os\nimport threading\nimport time\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\n\nfrom scipy.special import expit\nfrom scipy.special import logit\nfrom skimage import transform\n\nimport tensorflow as tf\n\nfrom tensorflow import gfile\nfrom . import align\nfrom . import executor\nfrom . import inference_pb2\nfrom . import inference_utils\nfrom . import movement\nfrom . import seed\nfrom . import storage\nfrom .inference_utils import Counters\nfrom .inference_utils import TimedIter\nfrom .inference_utils import timer_counter\nfrom . import segmentation\nfrom ..training.import_util import import_symbol\nfrom ..utils import ortho_plane_visualization\nfrom ..utils import bounding_box\n\nMSEC_IN_SEC = 1000\nMAX_SELF_CONSISTENT_ITERS = 32\n\n\n# Visualization.\n# ---------------------------------------------------------------------------\nclass DynamicImage(object):\n def UpdateFromPIL(self, new_img):\n from io import BytesIO\n from IPython import display\n display.clear_output(wait=True)\n image = BytesIO()\n new_img.save(image, format='png')\n display.display(display.Image(image.getvalue()))\n\n\ndef _cmap_rgb1(drw):\n \"\"\"Default color palette from gnuplot.\"\"\"\n r = np.sqrt(drw)\n g = np.power(drw, 3)\n b = np.sin(drw * np.pi)\n\n return (np.dstack([r, g, b]) * 250.0).astype(np.uint8)\n\n\ndef visualize_state(seed_logits, pos, movement_policy, dynimage):\n \"\"\"Visualizes the inference state.\n\n Args:\n seed_logits: ndarray (z, y, x) with the current predicted mask\n pos: current FoV position within 'seed' as z, y, x\n movement_policy: movement policy object\n dynimage: DynamicImage object which is to be updated with the\n state visualization\n \"\"\"\n from PIL import Image\n\n planes = ortho_plane_visualization.cut_ortho_planes(\n seed_logits, center=pos, cross_hair=True)\n to_vis = ortho_plane_visualization.concat_ortho_planes(planes)\n\n if isinstance(movement_policy.scored_coords, np.ndarray):\n scores = movement_policy.scored_coords\n # Upsample the grid.\n zf, yf, xf = movement_policy.deltas\n zz, yy, xx = scores.shape\n zs, ys, xs = scores.strides\n new_sh = (zz, zf, yy, yf, xx, xf)\n new_st = (zs, 0, ys, 0, xs, 0)\n scores_up = as_strided(scores, new_sh, new_st)\n scores_up = scores_up.reshape((zz * zf, yy * yf, xx * xf))\n # TODO(mkillinger) might need padding in some cases, if crashes: fix.\n # The grid might be too large, cut it to be symmetrical\n cut = (np.array(scores_up.shape) - np.array(seed_logits.shape)) // 2\n sh = seed_logits.shape\n scores_up = scores_up[cut[0]:cut[0] + sh[0],\n cut[1]:cut[1] + sh[1],\n cut[2]:cut[2] + sh[2]]\n 
grid_planes = ortho_plane_visualization.cut_ortho_planes(\n        scores_up, center=pos, cross_hair=True)\n    grid_view = ortho_plane_visualization.concat_ortho_planes(grid_planes)\n    grid_view *= 4 # Looks better this way\n    to_vis = np.concatenate((to_vis, grid_view), axis=1)\n\n  val = _cmap_rgb1(expit(to_vis))\n  y, x = pos[1:]\n\n  # Mark seed in the xy plane.\n  val[(y - 1):(y + 2), (x - 1):(x + 2), 0] = 255\n  val[(y - 1):(y + 2), (x - 1):(x + 2), 1:] = 0\n\n  vis = Image.fromarray(val)\n  dynimage.UpdateFromPIL(vis)\n\n\n# Self-prediction halting\n# ---------------------------------------------------------------------------\nHALT_SILENT = 0\nPRINT_HALTS = 1\nHALT_VERBOSE = 2\n\nHaltInfo = namedtuple('HaltInfo', ['is_halt', 'extra_fetches'])\n\n\ndef no_halt(verbosity=HALT_SILENT, log_function=logging.info):\n  \"\"\"Dummy HaltInfo.\"\"\"\n  def _halt_signaler(*unused_args, **unused_kwargs):\n    return False\n\n  def _halt_signaler_verbose(fetches, pos, **unused_kwargs):\n    log_function('%s, %s' % (pos, fetches))\n    return False\n\n  if verbosity == HALT_VERBOSE:\n    return HaltInfo(_halt_signaler_verbose, [])\n  else:\n    return HaltInfo(_halt_signaler, [])\n\n\ndef self_prediction_halt(\n    threshold, orig_threshold=None, verbosity=HALT_SILENT,\n    log_function=logging.info):\n  \"\"\"HaltInfo based on FFN self-predictions.\"\"\"\n\n  def _halt_signaler(fetches, pos, orig_pos, counters, **unused_kwargs):\n    \"\"\"Returns true if FFN prediction should be halted.\"\"\"\n    if pos == orig_pos and orig_threshold is not None:\n      t = orig_threshold\n    else:\n      t = threshold\n\n    # [0] is by convention the total incorrect proportion prediction.\n    halt = fetches['self_prediction'][0] > t\n\n    if halt:\n      counters['halts'].Increment()\n\n    if verbosity == HALT_VERBOSE or (\n        halt and verbosity == PRINT_HALTS):\n      log_function('%s, %s' % (pos, fetches))\n\n    return halt\n\n  # Add self_prediction to the extra_fetches.\n  return HaltInfo(_halt_signaler, ['self_prediction'])\n\n# ---------------------------------------------------------------------------\n\n\n# TODO(mjanusz): Add support for sparse inference.\nclass Canvas(object):\n  \"\"\"Tracks state of the inference progress and results within a subvolume.\"\"\"\n\n  def __init__(self,\n               model,\n               tf_executor,\n               image,\n               options,\n               counters=None,\n               restrictor=None,\n               movement_policy_fn=None,\n               halt_signaler=no_halt(),\n               keep_history=False,\n               checkpoint_path=None,\n               checkpoint_interval_sec=0,\n               corner_zyx=None):\n    \"\"\"Initializes the canvas.\n\n    Args:\n      model: FFNModel object\n      tf_executor: Executor object to use for inference\n      image: 3d ndarray-like of shape (z, y, x)\n      options: InferenceOptions proto\n      counters: (optional) counter container, where __getitem__ returns a\n          counter compatible with the MR Counter API\n      restrictor: (optional) a MovementRestrictor object which can exclude\n          some areas of the data from the segmentation process\n      movement_policy_fn: callable taking the Canvas object as its\n          only argument and returning a movement policy object\n          (see movement.BaseMovementPolicy)\n      halt_signaler: HaltInfo object determining early stopping policy\n      keep_history: whether to maintain a record of locations visited by the\n          FFN, together with any associated metadata; note that this data is\n          kept only for the object currently being segmented\n      checkpoint_path: (optional) path at which to save a checkpoint file\n      checkpoint_interval_sec: how often to save a checkpoint file (in\n          seconds); if <= 0, no checkpoints are going to be saved\n      corner_zyx: 3 element array-like indicating the 
spatial corner of the\n image in (z, y, x)\n \"\"\"\n self.model = model\n self.image = image\n self.executor = tf_executor\n self._exec_client_id = None\n\n self.options = inference_pb2.InferenceOptions()\n self.options.CopyFrom(options)\n # Convert thresholds, etc. to logit values for efficient inference.\n for attr in ('init_activation', 'pad_value', 'move_threshold',\n 'segment_threshold'):\n setattr(self.options, attr, logit(getattr(self.options, attr)))\n\n self.halt_signaler = halt_signaler\n\n self.counters = counters if counters is not None else Counters()\n self.checkpoint_interval_sec = checkpoint_interval_sec\n self.checkpoint_path = checkpoint_path\n self.checkpoint_last = time.time()\n\n self._keep_history = keep_history\n self.corner_zyx = corner_zyx\n self.shape = image.shape\n\n if restrictor is None:\n self.restrictor = movement.MovementRestrictor()\n else:\n self.restrictor = restrictor\n\n # Cast to array to ensure we can do elementwise expressions later.\n # All of these are in zyx order.\n self._pred_size = np.array(model.pred_mask_size[::-1])\n self._input_seed_size = np.array(model.input_seed_size[::-1])\n self._input_image_size = np.array(model.input_image_size[::-1])\n self.margin = self._input_image_size // 2\n\n self._pred_delta = (self._input_seed_size - self._pred_size) // 2\n assert np.all(self._pred_delta >= 0)\n\n # Current working area. This represents an object probability map\n # in logit form, and is fed directly as the mask input to the FFN\n # model.\n self.seed = np.zeros(self.shape, dtype=np.float32)\n self.segmentation = np.zeros(self.shape, dtype=np.int32)\n self.seg_prob = np.zeros(self.shape, dtype=np.uint8)\n\n # When an initial segmentation is provided, maps the global ID space\n # to locally used IDs.\n self.global_to_local_ids = {}\n\n self.seed_policy = None\n self._seed_policy_state = None\n\n # Maximum segment ID already assigned.\n self._max_id = 0\n\n # Maps of segment id -> ..\n self.origins = {} # seed location\n self.overlaps = {} # (ids, number overlapping voxels)\n\n # Whether to always create a new seed in segment_at.\n self.reset_seed_per_segment = True\n\n if movement_policy_fn is None:\n # The model.deltas are (for now) in xyz order and must be swapped to zyx.\n self.movement_policy = movement.FaceMaxMovementPolicy(\n self, deltas=model.deltas[::-1],\n score_threshold=self.options.move_threshold)\n else:\n self.movement_policy = movement_policy_fn(self)\n\n self.reset_state((0, 0, 0))\n self.t_last_predict = None\n\n def _register_client(self):\n if self._exec_client_id is None:\n self._exec_client_id = self.executor.start_client()\n logging.info('Registered as client %d.', self._exec_client_id)\n\n def _deregister_client(self):\n if self._exec_client_id is not None:\n logging.info('Deregistering client %d', self._exec_client_id)\n self.executor.finish_client(self._exec_client_id)\n self._exec_client_id = None\n\n def __del__(self):\n # Note that the presence of this method will cause a memory leak in\n # case the Canvas object is part of a reference cycle. 
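(CPython's cycle\n    # collector historically does not reclaim cycles that contain objects\n    # defining __del__.) 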
Use weakref.proxy\n # where such cycles are really needed.\n self._deregister_client()\n\n def local_id(self, segment_id):\n return self.global_to_local_ids.get(segment_id, segment_id)\n\n def reset_state(self, start_pos):\n # Resetting the movement_policy is currently necessary to update the\n # policy's bitmask for whether a position is already segmented (the\n # canvas updates the segmented mask only between calls to segment_at\n # and therefore the policy does not update this mask for every call.).\n self.movement_policy.reset_state(start_pos)\n self.history = []\n self.history_deleted = []\n\n self._min_pos = np.array(start_pos)\n self._max_pos = np.array(start_pos)\n self._register_client()\n\n def is_valid_pos(self, pos, ignore_move_threshold=False):\n \"\"\"Returns True if segmentation should be attempted at the given position.\n\n Args:\n pos: position to check as (z, y, x)\n ignore_move_threshold: (boolean) when starting a new segment at pos the\n move threshold can and must be ignored.\n\n Returns:\n Boolean indicating whether to run FFN inference at the given position.\n \"\"\"\n\n if not ignore_move_threshold:\n if self.seed[pos] < self.options.move_threshold:\n self.counters['skip_threshold'].Increment()\n logging.debug('.. seed value below threshold.')\n return False\n\n # Not enough image context?\n np_pos = np.array(pos)\n low = np_pos - self.margin\n high = np_pos + self.margin\n\n if np.any(low < 0) or np.any(high >= self.shape):\n self.counters['skip_invalid_pos'].Increment()\n logging.debug('.. too close to border: %r', pos)\n return False\n\n # Location already segmented?\n if self.segmentation[pos] > 0:\n self.counters['skip_invalid_pos'].Increment()\n logging.debug('.. segmentation already active: %r', pos)\n return False\n\n return True\n\n def predict(self, pos, logit_seed, extra_fetches):\n \"\"\"Runs a single step of FFN prediction.\n\n Args:\n pos: (z, y, x) position of the center of the FoV\n logit_seed: current seed to feed to the model as input, z, y, x ndarray\n extra_fetches: dict of additional fetches to retrieve, can be empty\n\n Returns:\n tuple of:\n (logistic prediction, logits)\n dict of additional fetches corresponding to extra_fetches\n \"\"\"\n with timer_counter(self.counters, 'predict'):\n # Top-left corner of the FoV.\n start = np.array(pos) - self.margin\n end = start + self._input_image_size\n img = self.image[tuple([slice(s, e) for s, e in zip(start, end)])]\n\n # Record the amount of time spent on non-prediction tasks.\n if self.t_last_predict is not None:\n delta_t = time.time() - self.t_last_predict\n self.counters['inference-not-predict-ms'].IncrementBy(\n delta_t * MSEC_IN_SEC)\n\n extra_fetches['logits'] = self.model.logits\n with timer_counter(self.counters, 'inference'):\n fetches = self.executor.predict(self._exec_client_id,\n logit_seed, img, extra_fetches)\n\n self.t_last_predict = time.time()\n\n logits = fetches.pop('logits')\n prob = expit(logits)\n return (prob[..., 0], logits[..., 0]), fetches\n\n def update_at(self, pos, start_pos):\n \"\"\"Updates object mask prediction at a specific position.\n\n Note that depending on the settings of the canvas, the update might involve\n more than 1 inference run of the FFN.\n\n Args:\n pos: (z, y, x) position of the center of the FoV\n start_pos: (z, y, x) position from which the segmentation of the current\n object has started\n\n Returns:\n ndarray of the predicted mask in logit space\n \"\"\"\n with timer_counter(self.counters, 'update_at'):\n off = self._input_seed_size // 2 # 
zyx\n\n      start = np.array(pos) - off\n      end = start + self._input_seed_size\n      logit_seed = np.array(\n          self.seed[tuple([slice(s, e) for s, e in zip(start, end)])])\n      init_prediction = np.isnan(logit_seed)\n      logit_seed[init_prediction] = np.float32(self.options.pad_value)\n\n      extra_fetches = {f: getattr(self.model, f) for f\n                       in self.halt_signaler.extra_fetches}\n\n      prob_seed = expit(logit_seed)\n      for _ in range(MAX_SELF_CONSISTENT_ITERS):\n        (prob, logits), fetches = self.predict(pos, logit_seed,\n                                               extra_fetches=extra_fetches)\n        if self.options.consistency_threshold <= 0:\n          break\n\n        diff = np.average(np.abs(prob_seed - prob))\n        if diff < self.options.consistency_threshold:\n          break\n\n        prob_seed, logit_seed = prob, logits\n\n      if self.halt_signaler.is_halt(fetches=fetches, pos=pos,\n                                    orig_pos=start_pos,\n                                    counters=self.counters):\n        logits[:] = np.float32(self.options.pad_value)\n\n      start += self._pred_delta\n      end = start + self._pred_size\n      sel = [slice(s, e) for s, e in zip(start, end)]\n\n      # Bias towards oversegmentation by making it impossible to reverse\n      # disconnectedness predictions in the course of inference.\n      if self.options.disco_seed_threshold >= 0:\n        th_max = logit(0.5)\n        old_seed = self.seed[tuple(sel)]\n\n        if self._keep_history:\n          self.history_deleted.append(\n              np.sum((old_seed >= logit(0.8)) & (logits < th_max)))\n\n        if (np.mean(logits >= self.options.move_threshold) >\n            self.options.disco_seed_threshold):\n          # Because (x > NaN) is always False, this mask excludes positions that\n          # were previously uninitialized (i.e. set to NaN in old_seed).\n          try:\n            old_err = np.seterr(invalid='ignore')\n            mask = ((old_seed < th_max) & (logits > old_seed))\n          finally:\n            np.seterr(**old_err)\n          logits[mask] = old_seed[mask]\n\n      # Update working space.\n      self.seed[tuple(sel)] = logits\n\n    return logits\n\n  def init_seed(self, pos):\n    \"\"\"Reinitializes the object mask with a seed.\n\n    Args:\n      pos: position at which to place the seed (z, y, x)\n    \"\"\"\n    self.seed[...] = np.nan\n    self.seed[pos] = self.options.init_activation\n\n  def segment_at(self, start_pos, dynamic_image=None,\n                 vis_update_every=10,\n                 vis_fixed_z=False):\n    \"\"\"Runs FFN segmentation starting from a specific point.\n\n    Args:\n      start_pos: location at which to run segmentation as (z, y, x)\n      dynamic_image: optional DynamicImage object to update with\n          a visualization of the segmentation state\n      vis_update_every: number of FFN iterations between subsequent\n          updates of the dynamic image\n      vis_fixed_z: if True, the z position used for visualization is\n          fixed at the starting value specified in `start_pos`. 
Otherwise,\n          the current FoV of the FFN is used to determine what to\n          visualize.\n\n    Returns:\n      number of iterations performed\n    \"\"\"\n    if self.reset_seed_per_segment:\n      self.init_seed(start_pos)\n    # This includes a reset of the movement policy, see comment in method body.\n    self.reset_state(start_pos)\n\n    num_iters = 0\n\n    if not self.movement_policy:\n      # Add first element with arbitrary priority 1 (it will be consumed\n      # right away anyway).\n      item = (self.movement_policy.score_threshold * 2, start_pos)\n      self.movement_policy.append(item)\n\n    with timer_counter(self.counters, 'segment_at-loop'):\n      for pos in self.movement_policy:\n        # Terminate early if the seed got too weak.\n        if self.seed[start_pos] < self.options.move_threshold:\n          self.counters['seed_got_too_weak'].Increment()\n          break\n\n        if not self.restrictor.is_valid_pos(pos):\n          self.counters['skip_restriced_pos'].Increment()\n          continue\n\n        pred = self.update_at(pos, start_pos)\n        self._min_pos = np.minimum(self._min_pos, pos)\n        self._max_pos = np.maximum(self._max_pos, pos)\n        num_iters += 1\n\n        with timer_counter(self.counters, 'movement_policy'):\n          self.movement_policy.update(pred, pos)\n\n        with timer_counter(self.counters, 'segment_at-overhead'):\n          if self._keep_history:\n            self.history.append(pos)\n\n          if dynamic_image is not None and num_iters % vis_update_every == 0:\n            vis_pos = pos if not vis_fixed_z else (start_pos[0], pos[1],\n                                                   pos[2])\n            visualize_state(self.seed, vis_pos, self.movement_policy,\n                            dynamic_image)\n\n          assert np.all(pred.shape == self._pred_size)\n\n          self._maybe_save_checkpoint()\n\n    return num_iters\n\n  def log_info(self, string, *args, **kwargs):\n    logging.info('[cl %d] ' + string, self._exec_client_id,\n                 *args, **kwargs)\n\n  def segment_all(self, seed_policy=seed.PolicyPeaks):\n    \"\"\"Segments the input image.\n\n    Segmentation is attempted from all valid starting points provided by\n    the seed policy.\n\n    Args:\n      seed_policy: callable taking the image and the canvas object as arguments\n          and returning an iterator over proposed seed points.\n    \"\"\"\n    self.seed_policy = seed_policy(self)\n    if self._seed_policy_state is not None:\n      self.seed_policy.set_state(self._seed_policy_state)\n      self._seed_policy_state = None\n\n    with timer_counter(self.counters, 'segment_all'):\n      mbd = self.options.min_boundary_dist\n      mbd = np.array([mbd.z, mbd.y, mbd.x])\n\n      for pos in TimedIter(self.seed_policy, self.counters, 'seed-policy'):\n        # When starting a new segment the move_threshold on the probability\n        # should be ignored when determining if the position is valid.\n        if not (self.is_valid_pos(pos, ignore_move_threshold=True)\n                and self.restrictor.is_valid_pos(pos)\n                and self.restrictor.is_valid_seed(pos)):\n          continue\n\n        self._maybe_save_checkpoint()\n\n        # Too close to an existing segment?\n        low = np.array(pos) - mbd\n        high = np.array(pos) + mbd + 1\n        sel = tuple(slice(s, e) for s, e in zip(low, high))\n        if np.any(self.segmentation[sel] > 0):\n          logging.debug('Too close to existing segment.')\n          self.segmentation[pos] = -1\n          continue\n\n        self.log_info('Starting segmentation at %r (zyx)', pos)\n\n        # Try segmentation.\n        seg_start = time.time()\n        num_iters = self.segment_at(pos)\n        t_seg = time.time() - seg_start\n\n        # Check if segmentation was successful.\n        if num_iters <= 0:\n          self.counters['invalid-other-time-ms'].IncrementBy(t_seg *\n                                                             MSEC_IN_SEC)\n          self.log_info('Failed: num iters was %d', num_iters)\n          continue\n\n        # Original seed too weak?\n        if self.seed[pos] < self.options.move_threshold:\n          # Mark this location as excluded.\n          if 
self.segmentation[pos] == 0:\n            self.segmentation[pos] = -1\n          self.log_info('Failed: weak seed')\n          self.counters['invalid-weak-time-ms'].IncrementBy(t_seg * MSEC_IN_SEC)\n          continue\n\n        # Restrict probability map -> segment processing to a bounding box\n        # covering the area that was actually changed by the FFN. In case the\n        # segment is going to be rejected due to small size, this can\n        # significantly reduce processing time.\n        sel = tuple(slice(max(s, 0), e + 1) for s, e in zip(\n            self._min_pos - self._pred_size // 2,\n            self._max_pos + self._pred_size // 2))\n\n        # We only allow creation of new segments in areas that are currently\n        # empty.\n        mask = self.seed[sel] >= self.options.segment_threshold\n        raw_segmented_voxels = np.sum(mask)\n\n        # Record existing segment IDs overlapped by the newly added object.\n        overlapped_ids, counts = np.unique(self.segmentation[sel][mask],\n                                           return_counts=True)\n        valid = overlapped_ids > 0\n        overlapped_ids = overlapped_ids[valid]\n        counts = counts[valid]\n\n        mask &= self.segmentation[sel] <= 0\n        actual_segmented_voxels = np.sum(mask)\n\n        # Segment too small?\n        if actual_segmented_voxels < self.options.min_segment_size:\n          if self.segmentation[pos] == 0:\n            self.segmentation[pos] = -1\n          self.log_info('Failed: too small: %d', actual_segmented_voxels)\n          self.counters['invalid-small-time-ms'].IncrementBy(t_seg *\n                                                             MSEC_IN_SEC)\n          continue\n\n        self.counters['voxels-segmented'].IncrementBy(actual_segmented_voxels)\n        self.counters['voxels-overlapping'].IncrementBy(\n            raw_segmented_voxels - actual_segmented_voxels)\n\n        # Find the next free ID to assign.\n        self._max_id += 1\n        while self._max_id in self.origins:\n          self._max_id += 1\n\n        self.segmentation[sel][mask] = self._max_id\n        self.seg_prob[sel][mask] = storage.quantize_probability(\n            expit(self.seed[sel][mask]))\n\n        self.log_info('Created supervoxel:%d seed(zyx):%s size:%d iters:%d',\n                      self._max_id, pos,\n                      actual_segmented_voxels, num_iters)\n\n        self.overlaps[self._max_id] = np.array([overlapped_ids, counts])\n\n        # Record information about how a given supervoxel was created.\n        self.origins[self._max_id] = storage.OriginInfo(pos, num_iters, t_seg)\n        self.counters['valid-time-ms'].IncrementBy(t_seg * MSEC_IN_SEC)\n\n    self.log_info('Segmentation done.')\n\n    # It is important to deregister ourselves when the segmentation is complete.\n    # This matters particularly if less than a full batch of subvolumes remains\n    # to be segmented. 
Without the deregistration, the executor will wait to\n # fill the full batch (no longer possible) instead of proceeding with\n # inference.\n self._deregister_client()\n\n # def init_segmentation_from_volume(self, volume, corner, end,\n # align_and_crop=None, axes='zyx', \n # contiguous=True):\n # \"\"\"Initializes segmentation from an existing VolumeStore.\n\n # This is useful to start inference with an existing segmentation.\n # The segmentation does not need to be generated with an FFN.\n\n # Args:\n # volume: volume object, as returned by storage.decorated_volume.\n # corner: location at which to read data as (z, y, x)\n # end: location at which to stop reading data as (z, y, x)\n # align_and_crop: callable to align & crop a 3d segmentation subvolume\n # \"\"\"\n \n # self.log_info('Loading initial segmentation from (zyx) %r:%r',\n # corner, end)\n\n # if axes == 'zyx':\n # init_seg = volume[:, #\n # corner[0]:end[0], #\n # corner[1]:end[1], #\n # corner[2]:end[2]]\n\n # if contiguous:\n # init_seg, global_to_local = segmentation.make_labels_contiguous(init_seg)\n # init_seg = init_seg[0, ...]\n # elif axes == 'xyz':\n # # For precomputed volume, default axes are [x, y, z, c]\n # init_seg = volume[corner[2]:end[2], #\n # corner[1]:end[1], #\n # corner[0]:end[0],\n # :]\n # if contiguous:\n # init_seg, global_to_local = segmentation.make_labels_contiguous(init_seg)\n # init_seg = init_seg[..., 0].transpose([2,1,0]) # unify to z, y, x order\n\n\n # self.global_to_local_ids = dict(global_to_local)\n\n # self.log_info('Segmentation loaded, shape: %r. Canvas segmentation is %r',\n # init_seg.shape, self.segmentation.shape)\n # if align_and_crop is not None:\n # init_seg = align_and_crop(init_seg)\n # self.log_info('Segmentation cropped to: %r', init_seg.shape)\n\n # self.segmentation[:] = init_seg\n # self.seg_prob[self.segmentation > 0] = storage.quantize_probability(\n # np.array([1.0]))\n # self._max_id = np.max(self.segmentation)\n # self.log_info('Max restored ID is: %d.', self._max_id)\n\n def init_segmentation_from_volume(self, volume, corner, end,\n align_and_crop=None):\n \"\"\"Initializes segmentation from an existing VolumeStore.\n\n This is useful to start inference with an existing segmentation.\n The segmentation does not need to be generated with an FFN.\n\n Args:\n volume: volume object, as returned by storage.decorated_volume.\n corner: location at which to read data as (z, y, x)\n end: location at which to stop reading data as (z, y, x)\n align_and_crop: callable to align & crop a 3d segmentation subvolume\n \"\"\"\n self.log_info('Loading initial segmentation from (zyx) %r:%r',\n corner, end)\n\n init_seg = volume[:, #\n corner[0]:end[0], #\n corner[1]:end[1], #\n corner[2]:end[2]]\n\n init_seg, global_to_local = segmentation.make_labels_contiguous(init_seg)\n init_seg = init_seg[0, ...]\n\n self.global_to_local_ids = dict(global_to_local)\n\n self.log_info('Segmentation loaded, shape: %r. 
Canvas segmentation is %r',\n                  init_seg.shape, self.segmentation.shape)\n    if align_and_crop is not None:\n      init_seg = align_and_crop(init_seg)\n      self.log_info('Segmentation cropped to: %r', init_seg.shape)\n\n    self.segmentation[:] = init_seg\n    self.seg_prob[self.segmentation > 0] = storage.quantize_probability(\n        np.array([1.0]))\n    self._max_id = np.max(self.segmentation)\n    self.log_info('Max restored ID is: %d.', self._max_id)\n\n  def restore_checkpoint(self, path):\n    \"\"\"Restores state from the checkpoint at `path`.\"\"\"\n    self.log_info('Restoring inference checkpoint: %s', path)\n    with open(path, 'rb') as f:\n      data = np.load(f, allow_pickle=True)\n\n      self.segmentation[:] = data['segmentation']\n      self.seed[:] = data['seed']\n      self.seg_prob[:] = data['seg_qprob']\n      self.history_deleted = list(data['history_deleted'])\n      self.history = list(data['history'])\n      self.origins = data['origins'].item()\n      if 'overlaps' in data:\n        self.overlaps = data['overlaps'].item()\n\n      segmented_voxels = np.sum(self.segmentation != 0)\n      self.counters['voxels-segmented'].Set(segmented_voxels)\n      self._max_id = np.max(self.segmentation)\n\n      self.movement_policy.restore_state(data['movement_policy'])\n\n      seed_policy_state = data['seed_policy_state']\n      # When restoring the state of a previously unused Canvas, the seed\n      # policy will not be defined. We just save the seed policy state here\n      # for future use in .segment_all().\n      self._seed_policy_state = seed_policy_state\n\n      self.counters.loads(data['counters'].item())\n\n    self.log_info('Inference checkpoint restored.')\n\n  def save_checkpoint(self, path):\n    \"\"\"Saves an inference checkpoint to `path`.\"\"\"\n    self.log_info('Saving inference checkpoint to %s.', path)\n    with timer_counter(self.counters, 'save_checkpoint'):\n      gfile.MakeDirs(os.path.dirname(path))\n      with storage.atomic_file(path) as fd:\n        seed_policy_state = None\n        if self.seed_policy is not None:\n          seed_policy_state = self.seed_policy.get_state()\n\n        np.savez_compressed(fd,\n                            movement_policy=self.movement_policy.get_state(),\n                            segmentation=self.segmentation,\n                            seg_qprob=self.seg_prob,\n                            seed=self.seed,\n                            origins=self.origins,\n                            overlaps=self.overlaps,\n                            history=np.array(self.history),\n                            history_deleted=np.array(self.history_deleted),\n                            seed_policy_state=seed_policy_state,\n                            counters=self.counters.dumps())\n    self.log_info('Inference checkpoint saved.')\n\n  def _maybe_save_checkpoint(self):\n    \"\"\"Attempts to save a checkpoint.\n\n    A checkpoint is only saved if the canvas is configured to keep checkpoints\n    and if sufficient time has passed since the last one was saved.\n    \"\"\"\n    if self.checkpoint_path is None or self.checkpoint_interval_sec <= 0:\n      return\n\n    if time.time() - self.checkpoint_last < self.checkpoint_interval_sec:\n      return\n\n    self.save_checkpoint(self.checkpoint_path)\n    self.checkpoint_last = time.time()\n\n\nclass Runner(object):\n  \"\"\"Helper for managing FFN inference runs.\n\n  Takes care of initializing the FFN model and any related functionality\n  (e.g. 
movement policies), as well as input/output of the FFN inference\n  data (loading inputs, saving segmentations).\n  \"\"\"\n\n  ALL_MASKED = 1\n\n  def __init__(self, use_cpu=False, use_gpu=None):\n    self.counters = inference_utils.Counters()\n    self.executor = None\n    self.use_cpu = use_cpu\n    self.use_gpu = use_gpu\n\n  def __del__(self):\n    self.stop_executor()\n\n  def stop_executor(self):\n    \"\"\"Shuts down the executor.\n\n    No-op when no executor is active.\n    \"\"\"\n    if self.executor is not None:\n      self.executor.stop_server()\n      self.executor = None\n\n  def _load_model_checkpoint(self, checkpoint_path):\n    \"\"\"Restores the inference model from a training checkpoint.\n\n    Args:\n      checkpoint_path: the string path to the checkpoint file to load\n    \"\"\"\n    with timer_counter(self.counters, 'restore-tf-checkpoint'):\n      logging.info('Loading checkpoint.')\n      self.model.saver.restore(self.session, checkpoint_path)\n      logging.info('Checkpoint loaded.')\n\n  def start(self, request, batch_size=1, exec_cls=None, session=None):\n    \"\"\"Opens input volumes and initializes the FFN.\"\"\"\n    self.request = request\n    assert self.request.segmentation_output_dir\n\n    logging.debug('Received request:\\n%s', request)\n\n    if not gfile.Exists(request.segmentation_output_dir):\n      gfile.MakeDirs(request.segmentation_output_dir)\n\n    with timer_counter(self.counters, 'volstore-open'):\n      # Disabling cache compression can improve access times by 20-30%\n      # as of Aug 2016.\n      self._image_volume = storage.decorated_volume(\n          request.image, cache_max_bytes=int(1e8),\n          cache_compression=False)\n      assert self._image_volume is not None\n\n      if request.HasField('init_segmentation'):\n        self.init_seg_volume = storage.decorated_volume(\n            request.init_segmentation, cache_max_bytes=int(1e8))\n      else:\n        self.init_seg_volume = None\n\n    def _open_or_none(settings):\n      if settings.WhichOneof('volume_path') is None:\n        return None\n      return storage.decorated_volume(\n          settings, cache_max_bytes=int(1e7), cache_compression=False)\n    self._mask_volumes = {}\n    self._shift_mask_volume = _open_or_none(request.shift_mask)\n\n    alignment_options = request.alignment_options\n    null_alignment = inference_pb2.AlignmentOptions.NO_ALIGNMENT\n    if not alignment_options or alignment_options.type == null_alignment:\n      self._aligner = align.Aligner()\n    else:\n      type_name = inference_pb2.AlignmentOptions.AlignType.Name(\n          alignment_options.type)\n      error_string = 'Alignment for type %s is not implemented' % type_name\n      logging.error(error_string)\n      raise NotImplementedError(error_string)\n\n    if request.reference_histogram:\n      with gfile.Open(request.reference_histogram, 'r') as f:\n        data = np.load(f)\n        self._reference_lut = data['lut']\n    else:\n      self._reference_lut = None\n\n    self.stop_executor()\n\n    if session is None:\n      if self.use_cpu:\n        config = tf.ConfigProto(\n            device_count={'GPU': 0})\n      elif self.use_gpu is not None:\n        # visible_device_list expects a comma-separated string of GPU\n        # indices, e.g. '0' or '0,1'.\n        gpu_options = tf.GPUOptions(visible_device_list=self.use_gpu,\n                                    allow_growth=True)\n        config = tf.ConfigProto(gpu_options=gpu_options)\n      else:\n        config = tf.ConfigProto()\n        config.gpu_options.allow_growth = True\n      tf.reset_default_graph()\n      session = tf.Session(config=config)\n    self.session = session\n    logging.info('Available TF devices: %r', self.session.list_devices())\n\n    # 
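(note: request.model_name is resolved dynamically via import_symbol, and\n    # request.model_args, when present, must be a JSON object of keyword\n    # arguments for the model class.)\n    # 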
Initialize the FFN model.\n model_class = import_symbol(request.model_name)\n if request.model_args:\n args = json.loads(request.model_args)\n else:\n args = {}\n\n args['batch_size'] = batch_size\n self.model = model_class(**args)\n\n if exec_cls is None:\n exec_cls = executor.ThreadingBatchExecutor\n\n self.executor = exec_cls(\n self.model, self.session, self.counters, batch_size)\n self.movement_policy_fn = movement.get_policy_fn(request, self.model)\n\n self.saver = tf.train.Saver()\n self._load_model_checkpoint(request.model_checkpoint_path)\n\n self.executor.start_server()\n\n def make_restrictor(self, corner, subvol_size, image, alignment):\n \"\"\"Builds a MovementRestrictor object.\"\"\"\n kwargs = {}\n\n if self.request.masks:\n with timer_counter(self.counters, 'load-mask'):\n final_mask = storage.build_mask(self.request.masks,\n corner, subvol_size,\n self._mask_volumes,\n image, alignment)\n\n if np.all(final_mask):\n logging.info('Everything masked.')\n return self.ALL_MASKED\n\n kwargs['mask'] = final_mask\n\n if self.request.seed_masks:\n with timer_counter(self.counters, 'load-seed-mask'):\n seed_mask = storage.build_mask(self.request.seed_masks,\n corner, subvol_size,\n self._mask_volumes,\n image, alignment)\n\n if np.all(seed_mask):\n logging.info('All seeds masked.')\n return self.ALL_MASKED\n\n kwargs['seed_mask'] = seed_mask\n\n if self._shift_mask_volume:\n with timer_counter(self.counters, 'load-shift-mask'):\n s = self.request.shift_mask_scale\n shift_corner = np.array(corner) // (1, s, s)\n shift_size = -(-np.array(subvol_size) // (1, s, s))\n\n shift_alignment = alignment.rescaled(\n np.array((1.0, 1.0, 1.0)) / (1, s, s))\n src_corner, src_size = shift_alignment.expand_bounds(\n shift_corner, shift_size, forward=False)\n src_corner, src_size = storage.clip_subvolume_to_bounds(\n src_corner, src_size, self._shift_mask_volume)\n src_end = src_corner + src_size\n\n expanded_shift_mask = self._shift_mask_volume[\n 0:2, #\n src_corner[0]:src_end[0], #\n src_corner[1]:src_end[1], #\n src_corner[2]:src_end[2]]\n shift_mask = np.array([\n shift_alignment.align_and_crop(\n src_corner, expanded_shift_mask[i], shift_corner, shift_size)\n for i in range(2)])\n shift_mask = alignment.transform_shift_mask(corner, s, shift_mask)\n\n if self.request.HasField('shift_mask_fov'):\n shift_mask_fov = bounding_box.BoundingBox(\n start=self.request.shift_mask_fov.start,\n size=self.request.shift_mask_fov.size)\n else:\n shift_mask_diameter = np.array(self.model.input_image_size)\n shift_mask_fov = bounding_box.BoundingBox(\n start=-(shift_mask_diameter // 2), size=shift_mask_diameter)\n\n kwargs.update({\n 'shift_mask': shift_mask,\n 'shift_mask_fov': shift_mask_fov,\n 'shift_mask_scale': self.request.shift_mask_scale,\n 'shift_mask_threshold': self.request.shift_mask_threshold})\n\n if kwargs:\n return movement.MovementRestrictor(**kwargs)\n else:\n return None\n\n # def make_canvas(self, corner, subvol_size, axes='zyx', contiguous=True, **canvas_kwargs):\n # \"\"\"Builds the Canvas object for inference on a subvolume.\n\n # Args:\n # corner: start of the subvolume (z, y, x)\n # subvol_size: size of the subvolume (z, y, x)\n # **canvas_kwargs: passed to Canvas\n\n # Returns:\n # A tuple of:\n # Canvas object\n # Alignment object\n # \"\"\"\n # subvol_counters = self.counters.get_sub_counters()\n # with timer_counter(subvol_counters, 'load-image'):\n # logging.info('Process subvolume: %r', corner)\n\n # # A Subvolume with bounds defined by (src_size, src_corner) is guaranteed\n # # 
to result in no missing data when aligned to (dst_size, dst_corner).\n # # Likewise, one defined by (dst_size, dst_corner) is guaranteed to result\n # # in no missing data when reverse-aligned to (corner, subvol_size).\n # alignment = self._aligner.generate_alignment(corner, subvol_size)\n\n # # Bounding box for the aligned destination subvolume.\n # dst_corner, dst_size = alignment.expand_bounds(\n # corner, subvol_size, forward=True)\n # # Bounding box for the pre-aligned imageset to be fetched from the volume.\n # src_corner, src_size = alignment.expand_bounds(\n # dst_corner, dst_size, forward=False)\n # # Ensure that the request bounds don't extend beyond volume bounds.\n # src_corner, src_size = storage.clip_subvolume_to_bounds(\n # src_corner, src_size, self._image_volume)\n\n # logging.info('Requested bounds are %r + %r', corner, subvol_size)\n # logging.info('Destination bounds are %r + %r', dst_corner, dst_size)\n # logging.info('Fetch bounds are %r + %r', src_corner, src_size)\n\n # # Fetch the image from the volume using the src bounding box.\n # def get_data_3d(volume, bbox):\n # slc = bbox.to_slice()\n # if volume.ndim == 4:\n # slc = np.index_exp[0:1] + slc\n # data = volume[slc]\n # if data.ndim == 4:\n # data = data.squeeze(axis=0)\n # return data\n # src_bbox = bounding_box.BoundingBox(\n # start=src_corner[::-1], size=src_size[::-1])\n # src_image = get_data_3d(self._image_volume, src_bbox)\n # logging.info('Fetched image of size %r prior to transform',\n # src_image.shape)\n\n # def align_and_crop(image):\n # return alignment.align_and_crop(src_corner, image, dst_corner, dst_size,\n # forward=True)\n\n # # Align and crop to the dst bounding box.\n # image = align_and_crop(src_image)\n # # image now has corner dst_corner and size dst_size.\n\n # logging.info('Image data loaded, shape: %r.', image.shape)\n\n # restrictor = self.make_restrictor(dst_corner, dst_size, image, alignment)\n\n # try:\n # if self._reference_lut is not None:\n # if self.request.histogram_masks:\n # histogram_mask = storage.build_mask(self.request.histogram_masks,\n # dst_corner, dst_size,\n # self._mask_volumes,\n # image, alignment)\n # else:\n # histogram_mask = None\n\n # inference_utils.match_histogram(image, self._reference_lut,\n # mask=histogram_mask)\n # except ValueError as e:\n # # This can happen if the subvolume is relatively small because of tiling\n # # done by CLAHE. 
For now we just ignore these subvolumes.\n # # TODO(mjanusz): Handle these cases by reducing the number of tiles.\n # logging.info('Could not match histogram: %r', e)\n # return None, None\n\n # image = (image.astype(np.float32) -\n # self.request.image_mean) / self.request.image_stddev\n # if restrictor == self.ALL_MASKED:\n # return None, None\n\n # if self.request.HasField('self_prediction'):\n # halt_signaler = self_prediction_halt(\n # self.request.self_prediction.threshold,\n # orig_threshold=self.request.self_prediction.orig_threshold,\n # verbosity=PRINT_HALTS)\n # else:\n # halt_signaler = no_halt()\n\n # canvas = Canvas(\n # self.model,\n # self.executor,\n # image,\n # self.request.inference_options,\n # counters=subvol_counters,\n # restrictor=restrictor,\n # movement_policy_fn=self.movement_policy_fn,\n # halt_signaler=halt_signaler,\n # checkpoint_path=storage.checkpoint_path(\n # self.request.segmentation_output_dir, corner),\n # checkpoint_interval_sec=self.request.checkpoint_interval,\n # corner_zyx=dst_corner,\n # **canvas_kwargs)\n\n # if self.request.HasField('init_segmentation'):\n # canvas.init_segmentation_from_volume(self.init_seg_volume, src_corner,\n # src_bbox.end[::-1], align_and_crop,\n # axes, contiguous\n # )\n # return canvas, alignment\n def make_canvas(self, corner, subvol_size, **canvas_kwargs):\n \"\"\"Builds the Canvas object for inference on a subvolume.\n\n Args:\n corner: start of the subvolume (z, y, x)\n subvol_size: size of the subvolume (z, y, x)\n **canvas_kwargs: passed to Canvas\n\n Returns:\n A tuple of:\n Canvas object\n Alignment object\n \"\"\"\n subvol_counters = self.counters.get_sub_counters()\n with timer_counter(subvol_counters, 'load-image'):\n logging.info('Process subvolume: %r', corner)\n\n # A Subvolume with bounds defined by (src_size, src_corner) is guaranteed\n # to result in no missing data when aligned to (dst_size, dst_corner).\n # Likewise, one defined by (dst_size, dst_corner) is guaranteed to result\n # in no missing data when reverse-aligned to (corner, subvol_size).\n alignment = self._aligner.generate_alignment(corner, subvol_size)\n\n # Bounding box for the aligned destination subvolume.\n dst_corner, dst_size = alignment.expand_bounds(\n corner, subvol_size, forward=True)\n # Bounding box for the pre-aligned imageset to be fetched from the volume.\n src_corner, src_size = alignment.expand_bounds(\n dst_corner, dst_size, forward=False)\n # Ensure that the request bounds don't extend beyond volume bounds.\n src_corner, src_size = storage.clip_subvolume_to_bounds(\n src_corner, src_size, self._image_volume)\n\n logging.info('Requested bounds are %r + %r', corner, subvol_size)\n logging.info('Destination bounds are %r + %r', dst_corner, dst_size)\n logging.info('Fetch bounds are %r + %r', src_corner, src_size)\n\n # Fetch the image from the volume using the src bounding box.\n def get_data_3d(volume, bbox):\n slc = bbox.to_slice()\n if volume.ndim == 4:\n slc = np.index_exp[0:1] + slc\n data = volume[slc]\n if data.ndim == 4:\n data = data.squeeze(axis=0)\n return data\n src_bbox = bounding_box.BoundingBox(\n start=src_corner[::-1], size=src_size[::-1])\n src_image = get_data_3d(self._image_volume, src_bbox)\n logging.info('Fetched image of size %r prior to transform',\n src_image.shape)\n\n def align_and_crop(image):\n return alignment.align_and_crop(src_corner, image, dst_corner, dst_size,\n forward=True)\n\n # Align and crop to the dst bounding box.\n image = align_and_crop(src_image)\n # image now has corner 
dst_corner and size dst_size.\n\n logging.info('Image data loaded, shape: %r.', image.shape)\n\n restrictor = self.make_restrictor(dst_corner, dst_size, image, alignment)\n\n try:\n if self._reference_lut is not None:\n if self.request.histogram_masks:\n histogram_mask = storage.build_mask(self.request.histogram_masks,\n dst_corner, dst_size,\n self._mask_volumes,\n image, alignment)\n else:\n histogram_mask = None\n\n inference_utils.match_histogram(image, self._reference_lut,\n mask=histogram_mask)\n except ValueError as e:\n # This can happen if the subvolume is relatively small because of tiling\n # done by CLAHE. For now we just ignore these subvolumes.\n # TODO(mjanusz): Handle these cases by reducing the number of tiles.\n logging.info('Could not match histogram: %r', e)\n return None, None\n\n image = (image.astype(np.float32) -\n self.request.image_mean) / self.request.image_stddev\n if restrictor == self.ALL_MASKED:\n return None, None\n\n if self.request.HasField('self_prediction'):\n halt_signaler = self_prediction_halt(\n self.request.self_prediction.threshold,\n orig_threshold=self.request.self_prediction.orig_threshold,\n verbosity=PRINT_HALTS)\n else:\n halt_signaler = no_halt()\n\n canvas = Canvas(\n self.model,\n self.executor,\n image,\n self.request.inference_options,\n counters=subvol_counters,\n restrictor=restrictor,\n movement_policy_fn=self.movement_policy_fn,\n halt_signaler=halt_signaler,\n checkpoint_path=storage.checkpoint_path(\n self.request.segmentation_output_dir, corner),\n checkpoint_interval_sec=self.request.checkpoint_interval,\n corner_zyx=dst_corner,\n **canvas_kwargs)\n\n if self.request.HasField('init_segmentation'):\n canvas.init_segmentation_from_volume(self.init_seg_volume, src_corner,\n src_bbox.end[::-1], align_and_crop)\n return canvas, alignment\n \n def get_seed_policy(self, corner, subvol_size):\n \"\"\"Get seed policy generating callable.\n\n Args:\n corner: the original corner of the requested subvolume, before any\n modification e.g. dynamic alignment.\n subvol_size: the original requested size.\n\n Returns:\n A callable for generating seed policies.\n \"\"\"\n policy_cls = getattr(seed, self.request.seed_policy)\n kwargs = {'corner': corner, 'subvol_size': subvol_size}\n if self.request.seed_policy_args:\n kwargs.update(json.loads(self.request.seed_policy_args))\n return functools.partial(policy_cls, **kwargs)\n\n def save_segmentation(self, canvas, alignment, target_path, prob_path):\n \"\"\"Saves segmentation to a file.\n\n Args:\n canvas: Canvas object containing the segmentation\n alignment: the local Alignment used with the canvas, or None\n target_path: path to the file where the segmentation should\n be saved\n prob_path: path to the file where the segmentation probability\n map should be saved\n \"\"\"\n def unalign_image(im3d):\n if alignment is None:\n return im3d\n return alignment.align_and_crop(\n canvas.corner_zyx,\n im3d,\n alignment.corner,\n alignment.size,\n forward=False)\n\n def unalign_origins(origins, canvas_corner):\n out_origins = dict()\n for key, value in origins.items():\n zyx = np.array(value.start_zyx) + canvas_corner\n zyx = alignment.transform(zyx[:, np.newaxis], forward=False).squeeze()\n zyx -= canvas_corner\n out_origins[key] = value._replace(start_zyx=tuple(zyx))\n return out_origins\n\n # Remove markers.\n canvas.segmentation[canvas.segmentation < 0] = 0\n\n # Save segmentation results. 
Reduce # of bits per item if possible.\n storage.save_subvolume(\n unalign_image(canvas.segmentation),\n unalign_origins(canvas.origins, np.array(canvas.corner_zyx)),\n target_path,\n request=self.request.SerializeToString(),\n counters=canvas.counters.dumps(),\n overlaps=canvas.overlaps)\n\n # Save probability map separately. This has to happen after the\n # segmentation is saved, as `save_subvolume` will create any necessary\n # directories.\n prob = unalign_image(canvas.seg_prob)\n with storage.atomic_file(prob_path) as fd:\n np.savez_compressed(fd, qprob=prob)\n\n def run(self, corner, subvol_size, reset_counters=True):\n \"\"\"Runs FFN inference over a subvolume.\n\n Args:\n corner: start of the subvolume (z, y, x)\n subvol_size: size of the subvolume (z, y, x)\n reset_counters: whether to reset the counters\n\n Returns:\n Canvas object with the segmentation or None if the canvas could not\n be created or the segmentation subvolume already exists.\n \"\"\"\n if reset_counters:\n self.counters.reset()\n\n seg_path = storage.segmentation_path(\n self.request.segmentation_output_dir, corner)\n prob_path = storage.object_prob_path(\n self.request.segmentation_output_dir, corner)\n cpoint_path = storage.checkpoint_path(\n self.request.segmentation_output_dir, corner)\n\n if gfile.Exists(seg_path):\n return None\n\n canvas, alignment = self.make_canvas(corner, subvol_size)\n if canvas is None:\n return None\n\n if gfile.Exists(cpoint_path):\n canvas.restore_checkpoint(cpoint_path)\n\n if self.request.alignment_options.save_raw:\n image_path = storage.subvolume_path(self.request.segmentation_output_dir,\n corner, 'align')\n with storage.atomic_file(image_path) as fd:\n np.savez_compressed(fd, im=canvas.image)\n\n canvas.segment_all(seed_policy=self.get_seed_policy(corner, subvol_size))\n self.save_segmentation(canvas, alignment, seg_path, prob_path)\n\n # Attempt to remove the checkpoint file now that we no longer need it.\n try:\n gfile.Remove(cpoint_path)\n except: # pylint: disable=bare-except\n pass\n\n return canvas\n"
]
| [
[
"numpy.lib.stride_tricks.as_strided",
"numpy.minimum",
"numpy.load",
"numpy.mean",
"scipy.special.logit",
"numpy.concatenate",
"numpy.max",
"numpy.sin",
"tensorflow.train.Saver",
"numpy.seterr",
"tensorflow.ConfigProto",
"tensorflow.gfile.MakeDirs",
"numpy.savez_compressed",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"tensorflow.gfile.Exists",
"scipy.special.expit",
"tensorflow.Session",
"numpy.float32",
"numpy.power",
"numpy.dstack",
"numpy.isnan",
"tensorflow.gfile.Remove",
"numpy.sum",
"tensorflow.gfile.Open",
"tensorflow.reset_default_graph",
"numpy.any",
"numpy.abs",
"numpy.all",
"tensorflow.GPUOptions",
"numpy.unique",
"numpy.maximum"
]
]
|
Gorilla-Lab-SCUT/SSTNet | [
"c50ae25faceb223457e6e906663d3400bfcba559"
]
| [
"sstnet/data/scannetv2.py"
]
| [
"# Copyright (c) Gorilla-Lab. All rights reserved.\nimport os\nimport time\nimport math\nimport glob\nimport multiprocessing as mp\nfrom typing import Dict, List, Sequence, Tuple, Union\n\nimport gorilla\nimport open3d as o3d\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\nimport segmentator\nimport pointgroup_ops\nfrom .utils import elastic\n\n\nclass GetSuperpoint(mp.Process):\n def __init__(self, path: str, scene: str, mdict: Dict):\n # must call this before anything else\n mp.Process.__init__(self)\n self.path = path\n self.scene = scene\n self.mdict = mdict\n\n def run(self):\n mesh_file = os.path.join(os.path.join(self.path, self.scene, self.scene+\"_vh_clean_2.ply\"))\n mesh = o3d.io.read_triangle_mesh(mesh_file)\n vertices = torch.from_numpy(np.array(mesh.vertices).astype(np.float32))\n faces = torch.from_numpy(np.array(mesh.triangles).astype(np.int64))\n superpoint = segmentator.segment_mesh(vertices, faces).numpy()\n self.mdict.update({self.scene: superpoint}) \n\n\[email protected]_module(force=True)\nclass ScanNetV2Inst(Dataset):\n def __init__(self,\n data_root: str,\n full_scale: List[int]=[128, 512],\n scale: int=50,\n max_npoint: int=250000,\n task: str=\"train\",\n with_elastic: bool=False,\n test_mode: bool=False,\n prefetch_superpoints: bool=True,\n **kwargs):\n # initialize dataset parameters\n self.logger = gorilla.derive_logger(__name__)\n self.data_root = data_root\n self.full_scale = full_scale\n self.scale = scale\n self.max_npoint = max_npoint\n self.test_mode = test_mode\n self.with_elastic = with_elastic\n self.prefetch_superpoints = prefetch_superpoints\n self.task = task\n self.aug_flag = \"train\" in self.task\n \n # load files\n self.load_files()\n \n def load_files(self):\n file_names = sorted(glob.glob(os.path.join(self.data_root, self.task, \"*.pth\")))\n self.files = [torch.load(i) for i in gorilla.track(file_names)]\n self.logger.info(f\"{self.task} samples: {len(self.files)}\")\n self.superpoints = {}\n \n if self.prefetch_superpoints:\n self.logger.info(\"begin prefetch superpoints...\")\n sub_dir = \"scans_test\" if \"test\" in self.task else \"scans\"\n path = os.path.join(self.data_root, sub_dir)\n with gorilla.Timer(\"prefetch superpoints:\"):\n workers = []\n mdict = mp.Manager().dict()\n # multi-processing generate superpoints\n for f in self.files:\n workers.append(GetSuperpoint(path, f[-1], mdict))\n for worker in workers:\n worker.start()\n # wait for multi-processing\n while len(mdict) != len(self.files):\n time.sleep(0.1)\n self.superpoints.update(mdict)\n\n # # single processing (comparison)\n # if self.prefetch_superpoints:\n # self.logger.info(\"prefetch superpoints:\")\n # for f in gorilla.utils.track(self.files):\n # self.get_superpoint(f[-1])\n # import ipdb; ipdb.set_trace()\n\n\n def get_superpoint(self, scene: str):\n if scene in self.superpoints:\n return\n sub_dir = \"scans_test\" if \"test\" in self.task else \"scans\"\n mesh_file = os.path.join(self.data_root, sub_dir, scene, scene+\"_vh_clean_2.ply\")\n mesh = o3d.io.read_triangle_mesh(mesh_file)\n vertices = torch.from_numpy(np.array(mesh.vertices).astype(np.float32))\n faces = torch.from_numpy(np.array(mesh.triangles).astype(np.int64))\n superpoint = segmentator.segment_mesh(vertices, faces).numpy()\n self.superpoints[scene] = superpoint\n\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index: int) -> Tuple:\n if \"test\" in self.task:\n xyz_origin, rgb, faces, scene = self.files[index]\n # construct fake label for 
label-lack testset\n            semantic_label = np.zeros(xyz_origin.shape[0], dtype=np.int32)\n            instance_label = np.zeros(xyz_origin.shape[0], dtype=np.int32)\n        else:\n            xyz_origin, rgb, faces, semantic_label, instance_label, coords_shift, scene = self.files[index]\n\n        if not self.prefetch_superpoints:\n            self.get_superpoint(scene)\n        superpoint = self.superpoints[scene]\n\n        ### jitter / flip x / rotation\n        if self.aug_flag:\n            xyz_middle = self.data_aug(xyz_origin, True, True, True)\n        else:\n            xyz_middle = self.data_aug(xyz_origin, False, False, False)\n\n        ### scale\n        xyz = xyz_middle * self.scale\n\n        ### elastic\n        if self.with_elastic:\n            xyz = elastic(xyz, 6 * self.scale // 50, 40 * self.scale / 50)\n            xyz = elastic(xyz, 20 * self.scale // 50, 160 * self.scale / 50)\n\n        ### offset\n        xyz_offset = xyz.min(0)\n        xyz -= xyz_offset\n\n        ### crop\n        valid_idxs = np.ones(len(xyz_middle), dtype=bool)\n        if not self.test_mode:\n            xyz, valid_idxs = self.crop(xyz)\n\n        xyz_middle = xyz_middle[valid_idxs]\n        xyz = xyz[valid_idxs]\n        rgb = rgb[valid_idxs]\n        semantic_label = semantic_label[valid_idxs]\n        superpoint = np.unique(superpoint[valid_idxs], return_inverse=True)[1]\n        instance_label = self.get_cropped_inst_label(instance_label, valid_idxs)\n\n        ### get instance information\n        inst_num, inst_infos = self.get_instance_info(xyz_middle, instance_label.astype(np.int32))\n        inst_info = inst_infos[\"instance_info\"] # [n, 9], (cx, cy, cz, minx, miny, minz, maxx, maxy, maxz)\n        inst_pointnum = inst_infos[\"instance_pointnum\"] # [num_inst], list\n\n        loc = torch.from_numpy(xyz).long()\n        loc_offset = torch.from_numpy(xyz_offset).long()\n        loc_float = torch.from_numpy(xyz_middle)\n        feat = torch.from_numpy(rgb)\n        if self.aug_flag:\n            feat += torch.randn(3) * 0.1\n        semantic_label = torch.from_numpy(semantic_label)\n        instance_label = torch.from_numpy(instance_label)\n        superpoint = torch.from_numpy(superpoint)\n\n        inst_info = torch.from_numpy(inst_info)\n\n        return scene, loc, loc_offset, loc_float, feat, semantic_label, instance_label, superpoint, inst_num, inst_info, inst_pointnum\n\n    def data_aug(self, xyz, jitter=False, flip=False, rot=False):\n        m = np.eye(3)\n        if jitter:\n            m += np.random.randn(3, 3) * 0.1\n        if flip:\n            m[0][0] *= np.random.randint(0, 2) * 2 - 1 # flip x randomly\n        if rot:\n            theta = np.random.rand() * 2 * math.pi\n            m = np.matmul(m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]]) # rotation\n        return np.matmul(xyz, m)\n\n    def crop(self, xyz: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n        r\"\"\"\n        crop the point cloud to reduce training complexity\n\n        Args:\n            xyz (np.ndarray, [N, 3]): input point cloud to be cropped\n\n        Returns:\n            Tuple[np.ndarray, np.ndarray]: processed point cloud and boolean valid indices\n        \"\"\"\n        xyz_offset = xyz.copy()\n        valid_idxs = (xyz_offset.min(1) >= 0)\n        assert valid_idxs.sum() == xyz.shape[0]\n\n        full_scale = np.array([self.full_scale[1]] * 3)\n        room_range = xyz.max(0) - xyz.min(0)\n        while (valid_idxs.sum() > self.max_npoint):\n            offset = np.clip(full_scale - room_range + 0.001, None, 0) * np.random.rand(3)\n            xyz_offset = xyz + offset\n            valid_idxs = (xyz_offset.min(1) >= 0) * ((xyz_offset < full_scale).sum(1) == 3)\n            full_scale[:2] -= 32\n\n        return xyz_offset, valid_idxs\n\n    def get_instance_info(self,\n                          xyz: np.ndarray,\n                          instance_label: np.ndarray) -> Tuple[int, Dict]:\n        r\"\"\"\n        get the information of instances (amount and coordinates)\n\n        Args:\n            xyz (np.ndarray, [N, 3]): input point cloud data\n            instance_label (np.ndarray, [N]): instance 
ids of point cloud\n\n        Returns:\n            Tuple[int, Dict]: the number of instances and information\n            (coordinates and the number of points) of instances\n        \"\"\"\n        instance_info = np.ones((xyz.shape[0], 9), dtype=np.float32) * -100.0 # [n, 9], float, (cx, cy, cz, minx, miny, minz, maxx, maxy, maxz)\n        instance_pointnum = [] # [num_inst], int\n        instance_num = int(instance_label.max()) + 1\n        for i_ in range(instance_num):\n            inst_idx_i = np.where(instance_label == i_)\n\n            ### instance_info\n            xyz_i = xyz[inst_idx_i]\n            min_xyz_i = xyz_i.min(0)\n            max_xyz_i = xyz_i.max(0)\n            mean_xyz_i = xyz_i.mean(0)\n            instance_info_i = instance_info[inst_idx_i]\n            instance_info_i[:, 0:3] = mean_xyz_i\n            instance_info_i[:, 3:6] = min_xyz_i\n            instance_info_i[:, 6:9] = max_xyz_i\n            instance_info[inst_idx_i] = instance_info_i\n\n            ### instance_pointnum\n            instance_pointnum.append(inst_idx_i[0].size)\n\n        return instance_num, {\"instance_info\": instance_info, \"instance_pointnum\": instance_pointnum}\n\n    def get_cropped_inst_label(self,\n                               instance_label: np.ndarray,\n                               valid_idxs: np.ndarray) -> np.ndarray:\n        r\"\"\"\n        get the instance labels after the crop operation and recompact them\n\n        Args:\n            instance_label (np.ndarray, [N]): instance label ids of point cloud\n            valid_idxs (np.ndarray, [N]): boolean valid indices\n\n        Returns:\n            np.ndarray: processed instance labels\n        \"\"\"\n        instance_label = instance_label[valid_idxs]\n        j = 0\n        while (j < instance_label.max()):\n            if (len(np.where(instance_label == j)[0]) == 0):\n                instance_label[instance_label == instance_label.max()] = j\n            j += 1\n        return instance_label\n\n    def collate_fn(self, batch: Sequence[Sequence]) -> Dict:\n        locs = []\n        loc_offset_list = []\n        locs_float = []\n        feats = []\n        semantic_labels = []\n        instance_labels = []\n\n        instance_infos = [] # [N, 9]\n        instance_pointnum = [] # [total_num_inst], int\n\n        batch_offsets = [0]\n        scene_list = []\n        superpoint_list = []\n        superpoint_bias = 0\n\n        total_inst_num = 0\n        for i, data in enumerate(batch):\n            scene, loc, loc_offset, loc_float, feat, semantic_label, instance_label, superpoint, inst_num, inst_info, inst_pointnum = data\n\n            scene_list.append(scene)\n            superpoint += superpoint_bias\n            superpoint_bias += (superpoint.max() + 1)\n\n            valid_ids = np.where(instance_label != -100) # points assigned to an instance\n            instance_label[valid_ids] += total_inst_num\n            total_inst_num += inst_num\n\n            ### merge the scene to the batch\n            batch_offsets.append(batch_offsets[-1] + loc.shape[0])\n\n            locs.append(torch.cat([torch.LongTensor(loc.shape[0], 1).fill_(i), loc], 1))\n            loc_offset_list.append(loc_offset)\n            locs_float.append(loc_float)\n            feats.append(feat)\n            semantic_labels.append(semantic_label)\n            instance_labels.append(instance_label)\n            superpoint_list.append(superpoint)\n\n            instance_infos.append(inst_info)\n            instance_pointnum.extend(inst_pointnum)\n\n        ### merge all the scenes in the batch\n        batch_offsets = torch.tensor(batch_offsets, dtype=torch.int) # int [B+1]\n\n        locs = torch.cat(locs, 0) # long [N, 1 + 3], the batch item idx is put in locs[:, 0]\n        locs_float = torch.cat(locs_float, 0).to(torch.float32) # float [N, 3]\n        superpoint = torch.cat(superpoint_list, 0).long() # long[N]\n        feats = torch.cat(feats, 0) # float [N, C]\n        semantic_labels = torch.cat(semantic_labels, 0).long() # long [N]\n        instance_labels = torch.cat(instance_labels, 0).long() # long [N]\n        locs_offset = torch.stack(loc_offset_list) # long [B, 3]\n\n        instance_infos = torch.cat(instance_infos, 0).to(torch.float32) # float [N, 9] (meanxyz, minxyz, maxxyz)\n        instance_pointnum = torch.tensor(instance_pointnum, 
dtype=torch.int) # int [total_num_inst]\n\n spatial_shape = np.clip((locs.max(0)[0][1:] + 1).numpy(), self.full_scale[0], None) # long [3]\n\n ### voxelize\n batch_size = len(batch)\n voxel_locs, p2v_map, v2p_map = pointgroup_ops.voxelization_idx(locs, batch_size, 4)\n\n return {\"locs\": locs, \"locs_offset\": locs_offset, \"voxel_locs\": voxel_locs,\n \"scene_list\": scene_list, \"p2v_map\": p2v_map, \"v2p_map\": v2p_map,\n \"locs_float\": locs_float, \"feats\": feats,\n \"semantic_labels\": semantic_labels, \"instance_labels\": instance_labels,\n \"instance_info\": instance_infos, \"instance_pointnum\": instance_pointnum,\n \"offsets\": batch_offsets, \"spatial_shape\": spatial_shape, \"superpoint\": superpoint}\n\n"
]
| [
[
"numpy.array",
"torch.cat",
"torch.stack",
"numpy.matmul",
"numpy.zeros",
"numpy.random.rand",
"numpy.ones",
"numpy.random.randn",
"numpy.eye",
"torch.from_numpy",
"numpy.where",
"torch.tensor",
"numpy.random.randint",
"torch.load",
"numpy.clip",
"torch.LongTensor",
"torch.randn",
"numpy.unique"
]
]
|
sgtc-stanford/scCRISPR | [
"46edd390b80576aeb4555a9c727544588149b59f"
]
| [
"select_best_barcode.py"
]
| [
"#!/usr/bin/env python\n\"\"\"\n\n:Author: Ji Research Group/Stanford Genome Technology Center\n:Contact: [email protected]\n:Creation date: 04/19/2021\n:Description: \n\nThis script selects the best barcode per read, using edit distance as primary criteria,\n with ties broken using cosine-similarity score.\n \nRevisions: \n\n- mm/dd/yyyy\tDescription\n\n\"\"\"\nimport argparse, sys, os, re, csv, gzip, string\nimport numpy as np, pandas as pd\n\nimport sc_barcodes as scb\n\nscript_name = os.path.basename(__file__)\nprint(\"Running \", script_name)\n\n#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#\n# Read program arguments and file(s) #\n#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+# \ndef parse_commandline():\n parser=argparse.ArgumentParser()\n parser.add_argument('--input', '-i', help='barcodes file (from soft-clips)', type=str, required=True)\n \n args=parser.parse_args()\n print(args, file=sys.stderr)\n return args\n\nargs = parse_commandline()\nout_fn = args.input.split('.')[0] + '.barcode_match.tsv'\ndf_softclip = pd.read_csv(args.input, sep='\\t', header=0)\n\n#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#\n# Functions for determining best barcode match #\n#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#\ndef find_best_barcode(sc_rows):\n best_rows = sc_rows.loc[sc_rows['dist'] == min(sc_rows['dist'])]\n if len(best_rows) > 1:\n best_cosine_sim = best_rows.loc[best_rows['score'] == max(best_rows['score'])]\n best_row = best_cosine_sim.iloc[0,]\n else:\n best_row = best_rows.iloc[0,]\n return best_row\n \n#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#\n# Main program logic #\n#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#+#\ndf_best_barcodes = df_softclip.groupby('rd_name').apply(find_best_barcode)\ndf_best_barcodes.loc[df_best_barcodes['dist'] < 5].to_csv(out_fn, sep='\\t', index=False)\n"
]
| [
[
"pandas.read_csv"
]
]
|
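The tie-breaking logic in find_best_barcode is plain pandas groupby/apply: minimum edit distance first, maximum cosine similarity to break ties. A minimal sketch with a toy frame; the rd_name, dist, and score column names mirror the script, while the barcode column is hypothetical and added only for readability.

    import pandas as pd

    df = pd.DataFrame({
        'rd_name': ['r1', 'r1', 'r2'],
        'barcode': ['AAAC', 'AAAT', 'CCGG'],   # hypothetical column
        'dist':    [1, 1, 3],                  # edit distance (primary criterion)
        'score':   [0.90, 0.95, 0.80],         # cosine similarity (tie-breaker)
    })

    def find_best_barcode(sc_rows):
        best = sc_rows.loc[sc_rows['dist'] == sc_rows['dist'].min()]
        if len(best) > 1:
            best = best.loc[best['score'] == best['score'].max()]
        return best.iloc[0]

    best = df.groupby('rd_name').apply(find_best_barcode)
    print(best)   # r1 -> AAAT (dist tie broken by score), r2 -> CCGG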
Cray-HPE/canu | [
"3a92ce1e9b63f35aa30b9135afaa734e61909407"
]
| [
"network_modeling/NetworkDrawing.py"
]
| [
"# MIT License\n#\n# (C) Copyright [2022] Hewlett Packard Enterprise Development LP\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"NetworkDrawing creates network diagram image files.\"\"\"\n\n# Skip Testing this page, not currently used in CANU\n# This creates network diagram image files.\n# TODO: requires much work!\n# import matplotlib.image as pltimg # noqa\nimport matplotlib.pyplot as plt # pragma: no cover\nimport networkx as nx # pragma: no cover\n\n\n# Por reference: https://plotly.com/python/network-graphs/\nclass NetworkDrawing: # pragma: no cover\n \"\"\"A class to create network diagram image files.\n\n Attributes\n ----------\n nodes :\n\n prune_nodes :\n\n image_type :\n\n image_size :\n\n\n\n Methods\n -------\n draw():\n Create a network diagram image.\n create_topology(nodes):\n Not implemented\n assign_ports():\n Not implemented\n \"\"\"\n\n def __init__(self, nodes, prune_nodes=False, image_type=\"svg\", image_size=500):\n \"\"\"Construct the necessary attributes for the network diagram.\n\n Args:\n nodes: nodes\n prune_nodes: prune_nodes\n image_type: image_type\n image_size: image_size\n \"\"\"\n self.__image_type = image_type\n self.__image_size = image_size\n self.__nodes = nodes\n self.__prune = prune_nodes # Don't show server/cabinet devices\n\n def draw(self):\n \"\"\"Create a network diagram image.\"\"\"\n #\n # Convert the generated to a graph\n # MultiGraph allows multiple edges (cables) between nodes.\n #\n G = nx.MultiGraph() # noqa\n\n # Edges first - will implicitly create graph nodes\n # Quick hack is that the leafs will show both self, spine and node connections\n # for leaf in leafs:\n node_list = self.__nodes\n node_color = [\"red\"] * len(self.__nodes)\n # TODO fix\n # if self.__prune:\n # for node in self.__nodes:\n\n for node in node_list:\n print(node.arch_type())\n for edge in node.edges():\n print(\"Adding edge: {}\".format((node.id(), edge)))\n G.add_edge(node.id(), edge)\n print()\n\n # node_list is = spines + leafs + nodes\n # Quick hack - this should be autodiscovered\n for node in node_list:\n if node.arch_type().find(\"spine\") != -1:\n G.nodes[node.id()][\"tier\"] = 3\n node_color[node.id()] = \"red\"\n elif node.arch_type().find(\"edge\") != -1:\n G.nodes[node.id()][\"tier\"] = 4\n node_color[node.id()] = \"purple\"\n elif node.arch_type().find(\"leaf\") != -1:\n G.nodes[node.id()][\"tier\"] = 2\n node_color[node.id()] = \"orange\"\n elif node.arch_type().find(\"bmc\") != -1:\n G.nodes[node.id()][\"tier\"] = 1\n 
node_color[node.id()] = \"yellow\"\n else:\n G.nodes[node.id()][\"tier\"] = 0\n node_color[node.id()] = \"blue\"\n\n G.nodes[node.id()][\"name\"] = node.common_name()\n print()\n\n #\n # Graph layout - generate coordinates for the graph\n #\n pos = nx.multipartite_layout(\n G,\n subset_key=\"tier\",\n align=\"horizontal\",\n scale=3,\n ) # scale positions\n\n #\n # xxx\n #\n\n #\n # Connections (with calcs for labels) (edges)\n #\n nx.draw(G, pos=pos)\n # nx.draw_networkx(G, pos=pos, with_labels=False, width=0.5) # noqa\n # nx.draw_networkx(G, pos=pos, node_color=node_color, with_labels=True) # noqa\n plt.savefig(\"fig2.png\")\n"
]
| [
[
"matplotlib.pyplot.savefig"
]
]
|
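The draw() method boils down to tagging every node with a tier attribute and handing the MultiGraph to nx.multipartite_layout. A self-contained sketch under assumed data: the three node names and their tier numbers are made up, but the tier values follow the arch_type() branches in the source (bmc=1, leaf=2, spine=3).

    import matplotlib
    matplotlib.use('Agg')            # headless backend, since we only write a file
    import matplotlib.pyplot as plt
    import networkx as nx

    G = nx.MultiGraph()              # MultiGraph keeps parallel cables
    G.add_edge('bmc0', 'leaf0')
    G.add_edge('leaf0', 'spine0')
    G.add_edge('leaf0', 'spine0')    # second cable between the same pair

    tiers = {'bmc0': 1, 'leaf0': 2, 'spine0': 3}   # hypothetical tier map
    nx.set_node_attributes(G, tiers, 'tier')

    pos = nx.multipartite_layout(G, subset_key='tier', align='horizontal', scale=3)
    nx.draw(G, pos=pos, with_labels=True)
    plt.savefig('fig2.png')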
robingather/com-thesis | [
"30a6e815c4f71edc332a4e74a25faf0dd21c1244"
]
| [
"renderer.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pygame\nimport seaborn as sns\nfrom helper import Point\nimport constants as C\n\n# rgb colors\nWHITE = (250,250,250)\nLIGHT_BLUE = (167, 148, 246)\nLIGHT_RED = (242, 121, 125)\nLIGHTER_RED = (252, 198, 164)\nDARK_RED = (100, 0, 0)\nLIGHT_GREEN = (146,247,173)\nLIGHTER_PURPLE = (247, 152, 250)\nDARK_GREEN = (28,169,66)\nBLACK = (0,0,0)\nBOTTOM_BG = (0, 36, 81)\nFADED_RED = (200,220,168)\nDEEP_RED = (51, 102, 242)\n\npygame.init()\nfont = pygame.font.Font('ROBOTOSLAB-REGULAR.TTF', 14)\nfont_s = pygame.font.Font('ROBOTOSLAB-REGULAR.TTF', 10)\n\nclass Renderer:\n\n BOTTOM_SLOTS = 6\n BOTTOM_MARGIN = BOTTOM_SLOTS*25\n\n def __init__(self,w,h):\n self.w = w*C.BLOCK_SIZE\n self.h = h*C.BLOCK_SIZE\n self.h_full = self.h + self.BOTTOM_MARGIN\n self.display = pygame.display.set_mode((self.w,self.h_full))\n self.clock = pygame.time.Clock()\n pygame.display.set_caption('PredAItor')\n\n def tick(self):\n self.clock.tick(C.SPEED)\n\n def render(self, env):\n self.render_world(env)\n self.render_bottom(env)\n pygame.display.flip()\n\n def render_world(self, env):\n # render the simulation world\n self.display.fill(LIGHT_GREEN, pygame.Rect(0,0,self.w,self.h))\n BS = C.BLOCK_SIZE\n mBS = BS/4\n\n for pred in env.preds.agents:\n if pred.com != 0 and C.RENDER_COMMUNICATION:\n self.render_circle(env, pred.pos, C.RAD_COM)\n\n for pred in env.preds.agents:\n agent_color = LIGHT_RED\n pygame.draw.rect(self.display, agent_color, pygame.Rect(pred.pos.x*BS, pred.pos.y*BS, BS, BS))\n\n if env.preys.get_amount() == 0:\n break\n\n state = pred.get_state(env)\n if state[2] == 1:\n pygame.draw.rect(self.display, LIGHTER_RED, pygame.Rect(pred.pos.x*BS,pred.pos.y*BS, mBS, BS))\n if state[3] == 1:\n pygame.draw.rect(self.display, LIGHTER_RED, pygame.Rect(pred.pos.x*BS+BS-mBS,pred.pos.y*BS, mBS, BS))\n if state[4] == 1:\n pygame.draw.rect(self.display, LIGHTER_RED, pygame.Rect(pred.pos.x*BS,pred.pos.y*BS, BS, mBS))\n if state[5] == 1:\n pygame.draw.rect(self.display, LIGHTER_RED, pygame.Rect(pred.pos.x*BS,pred.pos.y*BS+BS-mBS, BS, mBS))\n target = env.preys.get_agent_by_id(pred.target_id)\n if target != None:\n pygame.draw.line(self.display, LIGHTER_RED, (pred.pos.x*BS+BS/2,pred.pos.y*BS+BS/2), (pred.pos.x*BS+BS/2, target.pos.y*BS+BS/2))\n pygame.draw.line(self.display, LIGHTER_RED, (pred.pos.x*BS+BS/2,target.pos.y*BS+BS/2), (target.pos.x*BS+BS/2, target.pos.y*BS+BS/2))\n\n for obst in env.obstacles:\n pygame.draw.rect(self.display, DARK_GREEN, pygame.Rect(obst.x*BS, obst.y*BS, BS, BS))\n\n for prey in env.preys.agents:\n agent_color = self.mix_colors(LIGHT_BLUE, LIGHT_GREEN, prey.health/C.MAX_HEALTH.prey)\n pygame.draw.rect(self.display, agent_color, pygame.Rect(prey.pos.x*BS, prey.pos.y*BS, BS, BS))\n\n def render_circle(self, env, pos, r):\n BS = C.BLOCK_SIZE\n pygame.draw.circle(self.display, FADED_RED, Point(pos.x*BS,pos.y*BS), r*BS)\n\n def render_bottom(self, env):\n # render stats on the bottom\n self.display.fill(BOTTOM_BG, pygame.Rect(0,self.h,self.w,self.BOTTOM_MARGIN))\n\n pred_score = 0\n for pred in env.preds.agents:\n if pred.scores['killed'] > pred_score:\n pred_score = pred.scores['killed']\n\n prey_score = 0\n for prey in env.preys.agents:\n if prey.scores['survived'] > prey_score:\n prey_score = prey.scores['survived']\n\n speed_string = 'Slow' if C.SPEED < 10 else ('Med' if C.SPEED < 100 else 'Max')\n dist_pred = env.preds.stats.c_avg_distances['pred']\n dist_pred = np.round(dist_pred[-1],1) if len(dist_pred) > 0 else -1\n dist_prey = 
env.preys.stats.c_avg_distances['prey']\n dist_prey = np.round(dist_prey[-1],1) if len(dist_prey) > 0 else -1\n\n values = [\n 'PREDATORS STATS',\n 'Gen: '+str(env.preds.stats.get('it_gen')),\n 'Batch: '+str(env.preds.agent_index-C.N_PRED)+'-'+str(env.preds.agent_index)+' of '+str(C.POP_AMOUNT.pred),\n 'Learning: '+str(C.LEARN.pred)+' (L)',\n 'Comms: '+str(C.COMMUNICATE_WITHIN_POP.pred)+' (T)',\n 'Hear Preys: '+str(C.HEAR_BETWEEN_POP.pred)+' (Y)',\n\n '',\n 'Score: '+str(pred_score),\n 'Record: '+str(np.round(env.preds.stats.stats['records']['killed'],2)),\n 'N Alive: '+str(env.preds.get_amount()),\n 'Frame: '+str(self.human_format(env.preds.stats.get('it_frames'))),\n 'Avg Dist: '+str(dist_pred),\n\n 'PREYS STATS',\n 'Gen: '+str(env.preys.stats.get('it_gen')),\n 'Batch: '+str(env.preys.agent_index-C.N_PRED)+'-'+str(env.preys.agent_index)+' of '+str(C.POP_AMOUNT.prey),\n 'Learning: '+str(C.LEARN.prey)+' (O)',\n 'Comms: '+str(C.COMMUNICATE_WITHIN_POP.prey)+' (U)',\n 'Hear Preds: '+str(C.HEAR_BETWEEN_POP.prey)+' (I)',\n '',\n 'Score: '+str(prey_score),\n 'Record: '+str(np.round(env.preys.stats.stats['records']['survived'],2)),\n 'N Alive: '+str(env.preys.get_amount()),\n 'Frame: '+str(self.human_format(env.preys.stats.get('it_frames'))),\n 'Avg Dist: '+str(dist_prey),\n\n 'MISC STATS',\n 'Render: '+str(True)+' (Tab)',\n 'Render Com: '+str(C.RENDER_COMMUNICATION)+' (C)',\n 'Speed: '+speed_string+' (< >)',\n 'Save: '+str(C.SAVE_MODEL)+' (X/S)',\n\n ]\n\n val_idx = 0\n for i in range(5):\n for j in range(self.BOTTOM_SLOTS):\n if val_idx < len(values):\n value = values[val_idx]\n txt_y = self.h+8+22*j\n txt_x = 8+180*i\n self.display.blit(font.render(value, True, WHITE), [txt_x, txt_y])\n val_idx += 1\n\n def render_text(self, text, i):\n text = font.render(text, True, BLACK)\n self.display.blit(text, [5, 5+i*20])\n \n def human_format(self, num):\n magnitude = 0\n while abs(num) >= 1000:\n magnitude += 1\n num /= 1000.0\n return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])\n\n def plot_com(self, env):\n # plot communication statistics\n plt.ion()\n plt.figure(8,figsize=(18,6))\n plt.clf()\n plt.tight_layout()\n plt.suptitle('Communication Stats')\n plt.subplots_adjust(wspace=0.55, hspace=0.55)\n fig, axs = plt.subplots(2,4,num=8)\n fig.canvas.set_window_title(C.MODEL_NAME+\", Communication Stats\")\n\n cases=list(env.preds.stats.gen_stats['com_investigations'].keys())\n vals=list(env.preds.stats.gen_stats['com_investigations'].values())\n summed_usage = 0\n for v in vals:\n summed_usage += v[1]\n for i in range(len(vals)):\n vals[i] = (vals[i][1] / summed_usage) if summed_usage > 0 else 0\n\n ax = axs[0,0]\n ax.set_xlabel('Com Usage')\n ax.set_ylabel('Case')\n cases=list(env.preds.stats.gen_stats['com_investigations'].keys())\n vals=list(env.preds.stats.gen_stats['com_investigations'].values())\n summed_usage = 0\n for v in vals:\n summed_usage += v[1]\n ax.set_title('Com Usage For Cases (N='+str(self.human_format(int(summed_usage)))+')')\n for i in range(len(vals)):\n vals[i] = (vals[i][1] / summed_usage) if summed_usage > 0 else 0\n plt.sca(ax)\n sns.barplot(x=vals[:-1],y=cases[:-1])\n\n ax = axs[1,0]\n ax.set_title('Avg Com Value For Cases')\n ax.set_xlabel('Avg Com Value')\n ax.set_ylabel('Case')\n cases=list(env.preds.stats.gen_stats['com_investigations'].keys())\n vals=list(env.preds.stats.gen_stats['com_investigations'].values())\n for i in range(len(vals)):\n vals[i] = (vals[i][0]/vals[i][1]) if vals[i][1] > 0 else 0\n plt.sca(ax)\n 
sns.barplot(x=vals[:-1],y=cases[:-1])\n\n # Correlation\n vals = env.preds.stats.c_com_actions.copy()\n for key in vals.keys():\n vals[key] = vals[key][:4]\n\n any_tot_amount = sum(vals['C>0'])\n if any_tot_amount != 0:\n vals['C>0'] = [x/any_tot_amount for x in vals['C>0']]\n\n ax = axs[0,3]\n plt.sca(ax)\n sns.barplot(x=vals['C>0'],y=C.ACTION_LABELS[:4])\n\n axes = [[0,1],[1,1],[0,2],[1,2],[0,3],[1,3]]\n for i,key in enumerate(vals.keys()):\n if key not in ['C_L>0','C_R>0','C_U>0','C_D>0','C==0']:\n continue\n ax = axs[axes[i][0],axes[i][1]]\n \n tot_amount = sum(vals[key])\n if tot_amount != 0:\n vals[key] = [x/tot_amount for x in vals[key]]\n \n vals[key] = [x-vals['C>0'][i] for i,x in enumerate(vals[key])]\n print(key+' '+str(vals[key]))\n\n plt.sca(ax)\n ax.set_title('Actions correlation with '+key+' (N='+str(self.human_format(int(tot_amount)))+\")\")\n ax.set_xlabel('Difference from Avg')\n ax.set_ylabel('Action')\n sns.barplot(x=vals[key],y=C.ACTION_LABELS[:4])\n\n plt.ioff()\n plt.show(block=False)\n plt.pause(.1)\n\n def plot_preds(self, env):\n # plot predator distance statistics (unused)\n plt.ion()\n plt.figure(12,figsize=(8,8))\n plt.clf()\n plt.tight_layout()\n plt.suptitle('Pred Distances and Coms')\n plt.subplots_adjust(wspace=0.35, hspace=0.35)\n\n fig, axs = plt.subplots(4,4,num=12)\n fig.canvas.set_window_title(C.MODEL_NAME+\", Distance / Coms\")\n\n pop_dists = env.preds.stats.c_indiv_distances\n pop_coms = env.preds.stats.c_indiv_coms\n\n for i in range(4):\n for j in range(4):\n k = i*4+j\n ax = axs[i,j]\n coms = []\n dists = []\n for l in range(len(pop_dists)):\n if len(pop_dists) == 0 or l >= len(pop_dists) or k >= len(pop_dists[l]):\n return\n dists.append(pop_dists[l][k]['pred'])\n tot_com = pop_coms[l][k][0] + pop_coms[l][k][1] + pop_coms[l][k][2] + pop_coms[l][k][3]\n coms.append(tot_com > 0)\n for l, val in enumerate(coms):\n if val:\n ax.axvline(l, color='lightgray')\n sns.lineplot(x=range(1,len(dists)+1),y=dists,ax=ax)\n ax.set_title(f'pred {k} pred-pred dist')\n\n plt.ioff()\n plt.show(block=False)\n plt.pause(.1)\n\n def plot_gen(self, env):\n # plot one generation statistics\n self.plot_com(env)\n #self.plot_preds(env)\n\n def plot(self, env):\n # plot main generational statistics\n if not env.preds.stats.is_plottable('killed'):\n return\n\n plt.ion()\n plt.figure(1,figsize=(5,7))\n plt.clf()\n plt.tight_layout()\n plt.suptitle('Generation Stats')\n plt.subplots_adjust(wspace=0.35, hspace=0.45)\n fig, axs = plt.subplots(3,1,num=1)\n fig.canvas.set_window_title(C.MODEL_NAME+\", Generation Stats\")\n\n # pred score\n ax = axs[0]\n ax.set_xlabel('Generation')\n ax.set_ylabel('Prey eaten',color='orange')\n scores = env.preds.stats.stats['scores']['eaten']\n print(\"SCORES\")\n print(scores)\n sns.lineplot(x=range(1,len(scores)+1),y=scores,ax=ax,color='orange')\n ax.set_title('Predator Score')\n \n # com use\n ax = axs[2]\n ax.set_xlabel('Generation')\n ax.set_ylabel('% of pred actions',color='purple')\n\n com_usage = env.preds.stats.stats['com_usage']\n print(\"USAGE\")\n print(com_usage)\n #x_usage = env.preds.stats.stats['x_usage']\n #sns.lineplot(x=range(1,len(com_usage)+1),y=np.array(com_usage)/np.array(x_usage),ax=ax,color='purple')\n sns.lineplot(x=range(1,len(com_usage)+1),y=com_usage,ax=ax,color='pink')\n\n plt.ioff()\n plt.show(block=False)\n plt.pause(.1)\n \n def mix_colors(self, c1, c2, perc):\n # mix one color with a percentage of another\n c3 = [0,0,0]\n for i in range(3):\n c3[i] = int(c1[i] + (c2[i] - c1[i]) * 
(1-perc))\n return tuple(c3)"
]
| [
[
"matplotlib.pyplot.ion",
"numpy.round",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.subplots_adjust"
]
]
|
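The mix_colors helper at the end of the file is a plain linear blend between two RGB tuples; the renderer drives it with an agent's health/C.MAX_HEALTH fraction, so perc=1.0 yields the first color and perc=0.0 the second. A minimal standalone sketch of the same arithmetic:

    def mix_colors(c1, c2, perc):
        # Linear blend: perc=1.0 returns c1, perc=0.0 returns c2.
        return tuple(int(a + (b - a) * (1 - perc)) for a, b in zip(c1, c2))

    LIGHT_BLUE = (167, 148, 246)
    LIGHT_GREEN = (146, 247, 173)
    print(mix_colors(LIGHT_BLUE, LIGHT_GREEN, 1.0))  # full health -> (167, 148, 246)
    print(mix_colors(LIGHT_BLUE, LIGHT_GREEN, 0.0))  # zero health -> (146, 247, 173)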
zhupengjia/nlptools | [
"a0afc4873ee1b3adb383d38075ad5ae5e0293055"
]
| [
"nlptools/utils/utils.py"
]
| [
"#!/usr/bin/env python\nimport os, zlib, numpy, re, pickle\nfrom collections import Counter\nfrom sklearn.utils import murmurhash3_32\n\n'''\n Author: Pengjia Zhu ([email protected])\n Some tool functions\n'''\n\n\ndef zdump(value,filename):\n ''' \n serialize compress variable to file using zlib and pickle\n \n input: \n - value: python variable\n - filename: saved file path\n '''\n with open(filename,\"wb\",-1) as fpz:\n fpz.write(zlib.compress(pickle.dumps(value,-1),9))\n\n\n#load compressed pkl file from zdump\ndef zload(filename):\n ''' \n load compressed pkl file from zdump\n \n input: \n - filename: saved file path\n \n output:\n - python variable\n '''\n with open(filename,\"rb\") as fpz:\n value=fpz.read()\n try:return pickle.loads(zlib.decompress(value))\n except:return pickle.loads(value)\n\n\ndef zdumps(value):\n '''\n serialize and compress variable to string using zlib and pickle\n \n input:\n - value: python variable\n \n output:\n - serialized string\n '''\n return zlib.compress(pickle.dumps(value,-1),9)\n\n\ndef zloads(value):\n ''' \n load serialized string from zdumps\n \n input: \n - value: serialized string\n \n output:\n - python variable\n '''\n try:return pickle.loads(zlib.decompress(value))\n except:return pickle.loads(value)\n\n\ndef ldumps(value):\n '''\n serialize and compress variable to string using lzo and pickle\n \n input:\n - value: python variable\n \n output:\n - serialized string\n '''\n import lzo\n return lzo.compress(pickle.dumps(value,-1),9)\n\n\ndef lloads(value):\n ''' \n load serialized string from ldumps\n \n input: \n - value: serialized string\n \n output:\n - python variable\n '''\n import lzo\n try:return pickle.loads(lzo.decompress(value))\n except:return pickle.loads(value)\n\n\ndef status_save(filename, status):\n '''\n save a status string to file\n \n input:\n - filename: file path\n - status: status string\n '''\n with open(filename, 'w') as f:\n f.write(str(status))\n\n\ndef status_check(filename):\n '''\n load a status string from file\n \n input:\n - filename: file path\n \n output:\n - status string\n - if filename not existed, return 0\n '''\n if not os.path.exists(filename):\n return 0\n with open(filename, 'r') as f:\n return f.readlines()[0].strip()\n\n\ndef flat_list(l):\n '''\n flatten a 2-d list to 1-d list\n \n input:\n - l: 2-d list\n \n output:\n - 1-d list\n '''\n return [item for sublist in l for item in sublist]\n\n\ndef hashword(word, hashsize=16777216):\n '''\n hash the word using murmurhash3_32 to a positive int value\n \n input:\n - word: string format word\n - hashsize: maximum number, default is 16777216\n \n output:\n - int\n '''\n return murmurhash3_32(word, positive=True) % (hashsize)\n\n\ndef normalize(text):\n '''\n resolve different type of unicode encodings using unicodedata.normalize\n \n input:\n - text: string\n \n output:\n - string\n '''\n import unicodedata\n try:\n return unicodedata.normalize('NFD', text)\n except Exception as err:\n print(err)\n raise(err)\n\n\n#rest client post\ndef restpost(url, data):\n '''\n rest client post using requests\n \n input:\n - url: restapi's url\n - data: python dictionary\n \n output:\n - json format post return, if failed will return None\n '''\n import requests, json\n data = requests.post(url=url, data=json.dumps(data))\n try: return data.json()\n except: return None\n\n\ndef envread(keys):\n '''\n use environment variables to cover original environment\n \n input:\n - keys: a key list need to read from environment\n \n output:\n - python dictionary {key:value, 
...}\n '''\n cfg = {}\n for k in keys:\n if k in os.environ:\n cfg[k] = os.environ[k]\n return cfg\n\n\ndef decode_child_id(ids):\n '''\n convert child id to list\n '''\n if isinstance(ids, str):\n ids2 = []\n for i in re.split('[,,]', ids):\n if i.isdigit():\n ids2.append(int(i))\n else:\n itmp = [int(x) for x in re.split('[~-]', i) if x.strip()]\n if len(itmp) > 1:\n ids2 += range(itmp[0], itmp[1]+1)\n else:\n ids2.append(int(itmp[0]))\n return ids2\n if isinstance(ids, int):\n return [ids]\n if isinstance(ids, list):\n if isinstance(ids[0], int):\n return ids\n if isinstance(ids[0], str):\n return [int(x) for x in ids]\n return None\n\n\ndef distance2similarity(distance):\n '''\n Convert distance to similarity\n '''\n return 1./(1+distance)\n\n\ndef eval_str_list(x, type=float):\n '''\n apply type to list x, if x is a string, then eval first\n '''\n if x is None:\n return None\n if isinstance(x, str):\n x = eval(x)\n try:\n return list(map(type, x))\n except TypeError:\n return [type(x)]\n\n\ndef pad_sequence(M, padding_value=0, return_length=False):\n '''\n pad array of numpy array to matrix\n\n Input:\n - M: list of sequence\n - padding_value: default is 0\n - return_length: if True then return padded matrix and raw lengths, otherwise return padded matrix only. default is False\n '''\n if not isinstance(M[0], numpy.ndarray):\n return M\n length = [len(x) for x in M]\n maxlen = max(len(x) for x in M)\n seq = numpy.zeros((len(M), maxlen), dtype=M[0].dtype) + padding_value\n for i, m in enumerate(M):\n seq[i][:len(m)] = m\n if return_length:\n return seq, length\n else:\n return seq\n\n\n"
]
| [
[
"sklearn.utils.murmurhash3_32"
]
]
|
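The zdump/zload family is the core of this module: pickle at the highest protocol, zlib at compression level 9, and a fallback to plain pickle for uncompressed payloads. A minimal round-trip sketch; it narrows the module's bare except to zlib.error, which is the specific failure the fallback is meant to catch.

    import pickle
    import zlib

    def zdumps(value):
        # Pickle at the highest protocol, then compress at zlib level 9.
        return zlib.compress(pickle.dumps(value, -1), 9)

    def zloads(blob):
        try:
            return pickle.loads(zlib.decompress(blob))
        except zlib.error:               # payload was never compressed
            return pickle.loads(blob)

    data = {'tokens': list(range(1000)), 'name': 'example'}
    blob = zdumps(data)
    assert zloads(blob) == data
    assert len(blob) < len(pickle.dumps(data, -1))   # compression pays off here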