repo_name (string, lengths 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
jorag/pgnlm | [
"798a1c08505713ad6176be3568df2fe05308843f"
]
| [
"pgnlm/datasets.py"
]
| [
"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: jorag\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport os \r\nfrom osgeo import gdal\r\nfrom helperfuncs import iq2complex, norm01\r\n\r\n\r\ndef _test_dataset(load_options):\r\n \"\"\" Load and crop the simulated test dataset.\r\n\r\n Input:\r\n load_options - not used\r\n Output:\r\n polsar - simulated PolSAR image with complex target vectors, \r\n complex numpy tensor with shape (H, W, 3)\r\n optical - coregistered optical Sentinel-2 image, \r\n numpy tensor with shape (x, y, 4)\r\n geotransform - geotransform read from input is GeoTIFF,\r\n tuple=(lon_min, xres, rot1, lat_min, rot2, yres)\r\n \"\"\"\r\n \r\n # Path to directories and input file\r\n parent_dir = os.path.realpath('..') \r\n data_dir = os.path.join(parent_dir, 'data')\r\n sat_file = os.path.join(data_dir, 'simulated_polsar.tif')\r\n\r\n # Data is a single GeoTIFF file, specify bands for the optical guide\r\n optical_bands = [6,7,8,9,10,11]\r\n\r\n # Data is a single GeoTIFF file, specify bands for guide SAR I and Q bands\r\n polsar_bands = [0, 1, 2, 3, 4, 5]\r\n\r\n # Load data \r\n dataset = gdal.Open(sat_file)\r\n # Read all bands \r\n raster_data = dataset.ReadAsArray()\r\n # Get geotransform\r\n lon_min, xres, rot1, lat_min, rot2, yres = dataset.GetGeoTransform()\r\n\r\n # Get geo bands to update geotransform\r\n lat = raster_data[12, :,:]\r\n lon = raster_data[13, :,:]\r\n\r\n # Indices for cropping data\r\n x_min = 50\r\n x_max = 250\r\n y_min = 200\r\n y_max = 400\r\n \r\n # Read input (SAR) and guide (Optical), lat and lon \r\n polsar = raster_data[polsar_bands, x_min:x_max, y_min:y_max]\r\n optical = raster_data[optical_bands, x_min:x_max, y_min:y_max]\r\n lat = lat[x_min:x_max, y_min:y_max]\r\n lon = lon[x_min:x_max, y_min:y_max]\r\n\r\n # Change order to H, W, channels\r\n polsar = np.transpose(polsar, (1, 2, 0))\r\n optical = np.transpose(optical, (1, 2, 0))\r\n\r\n # Normalise guide so that values are between 0 to 1\r\n optical = norm01(optical, norm_type='global')\r\n\r\n # Convert input to complex data type\r\n polsar = iq2complex(polsar, reciprocity=True)\r\n\r\n # Calculate min and max of lat/lon\r\n xmin, ymin, xmax, ymax = np.min(lon), np.min(lat), np.max(lon), np.max(lat)\r\n # Set geotransform, use pixel spacing from original\r\n geotransform = (xmin, xres, rot1, ymax, rot2, yres)\r\n\r\n return polsar, optical, geotransform\r\n\r\n\r\ndef _full_test_dataset(load_options):\r\n \"\"\" Load the simulated test dataset.\r\n\r\n Input:\r\n load_options - not used\r\n Output:\r\n polsar - simulated PolSAR image with complex target vectors, \r\n complex numpy tensor with shape (H, W, 3)\r\n optical - coregistered optical Sentinel-2 image, \r\n numpy tensor with shape (x, y, 4)\r\n geotransform - geotransform read from input is GeoTIFF,\r\n tuple=(lon_min, xres, rot1, lat_min, rot2, yres)\r\n \"\"\"\r\n \r\n # Path to directories and input file\r\n parent_dir = os.path.realpath('..') \r\n data_dir = os.path.join(parent_dir, 'data')\r\n sat_file = os.path.join(data_dir, 'simulated_polsar.tif')\r\n\r\n # Data is a single GeoTIFF file, specify bands for guide\r\n optical_bands = [6,7,8,9,10,11]\r\n\r\n # Data is a single GeoTIFF file, specify bands for SAR I and Q bands\r\n polsar_bands = [0, 1, 2, 3, 4, 5]\r\n\r\n # Load data \r\n dataset = gdal.Open(sat_file)\r\n # Read all bands \r\n raster_data = dataset.ReadAsArray()\r\n # Get geotransform\r\n geotransform = dataset.GetGeoTransform()\r\n\r\n # Get bands by indice, do not crop\r\n polsar = 
raster_data[polsar_bands, :,:]\r\n optical = raster_data[optical_bands, :,:]\r\n\r\n # Change order to H, W, channels\r\n polsar = np.transpose(polsar, (1, 2, 0))\r\n optical = np.transpose(optical, (1, 2, 0))\r\n\r\n # Normalise guide so that values are between 0 to 1\r\n optical = norm01(optical, norm_type='global')\r\n\r\n # Convert input to complex data type\r\n polsar = iq2complex(polsar, reciprocity=True)\r\n\r\n return polsar, optical, geotransform\r\n\r\n\r\n# Dict linking each specific dataset with a load function\r\ndatasets = {\r\n 'test': _test_dataset,\r\n 'full_test': _full_test_dataset,\r\n}\r\n\r\n\r\ndef fetch_dataset(name, load_options=None):\r\n \"\"\" Fetch the specified dataset.\r\n\r\n Input:\r\n name - dataset name, should be in datasets dict\r\n load_options - passed directly to the specific load methods,\r\n can be none if not needed\r\n Output:\r\n polsar - PolSAR image with complex target vectors, \r\n complex numpy tensor with shape (H, W, n_chans),\r\n n_chans = 3 (S_hh, S_hv/vh, S_vv)\r\n optical - coregistered optical image, \r\n numpy tensor with shape (x, y, bands)\r\n geotransform - geotransform for writing GeoTIFF output, \r\n can be read and reused if input is GeoTIFF,\r\n tuple=(lon_min, xres, rot1, lat_min, rot2, yres)\r\n \"\"\"\r\n polsar, optical, geotransform = datasets[name](load_options)\r\n\r\n return polsar, optical, geotransform\r\n\r\n"
]
| [
[
"numpy.max",
"numpy.transpose",
"numpy.min"
]
]
|
harrys17451/CryptocurrencyPrediction | [
"7ec542bcd6bf960b115638484737f097120badcd"
]
| [
"DataProcessor.py"
]
| [
"\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport h5py\n\n\n# In[24]:\n\n\ninput_step_size = 50\noutput_size = 30\nsliding_window = False\nfile_name= 'bitcoin2012_2017_50_30_prediction.h5' \n\n\n# In[19]:\n\n\ndf = pd.read_csv('data/bitstampUSD_1-min_data_2012-01-01_to_2017-05-31.csv').dropna().tail(1000000)\ndf['Datetime'] = pd.to_datetime(df['Timestamp'],unit='s')\ndf.head()\n\n\n# In[30]:\n\n\nprices= df.loc[:,'Close'].values\ntimes = df.loc[:,'Close'].values\nprices.shape\n\n\n# In[31]:\n\n\noutputs = []\ninputs = []\noutput_times = []\ninput_times = []\nif sliding_window:\n for i in range(len(prices)-input_step_size-output_size):\n inputs.append(prices[i:i + input_step_size])\n input_times.append(times[i:i + input_step_size])\n outputs.append(prices[i + input_step_size: i + input_step_size+ output_size])\n output_times.append(times[i + input_step_size: i + input_step_size+ output_size])\nelse:\n for i in range(0,len(prices)-input_step_size-output_size, input_step_size):\n inputs.append(prices[i:i + input_step_size])\n input_times.append(times[i:i + input_step_size])\n outputs.append(prices[i + input_step_size: i + input_step_size+ output_size])\n output_times.append(times[i + input_step_size: i + input_step_size+ output_size])\ninputs= np.array(inputs)\noutputs= np.array(outputs)\noutput_times = np.array(output_times)\ninput_times = np.array(input_times)\n\n\n# In[34]:\n\n\nwith h5py.File(file_name, 'w') as f:\n f.create_dataset(\"inputs\", data = inputs)\n f.create_dataset('outputs', data = outputs)\n f.create_dataset(\"input_times\", data = input_times)\n f.create_dataset('output_times', data = output_times)\n\n"
]
| [
[
"pandas.to_datetime",
"numpy.array",
"pandas.read_csv"
]
]
|
DCGM/pero_ocr_web | [
"e901027712827278f9ace914f6ccba16d3ac280f"
]
| [
"app/ocr/general.py"
]
| [
"import os\nimport numpy as np\nimport shutil\n\nfrom app.db.model import RequestState, RequestType, Request, DocumentState, TextLine, Annotation, TextRegion, Document\nfrom app.db.user import User\nfrom app.db.general import get_text_region_by_id, get_text_line_by_id\nfrom app import db_session\nfrom flask import jsonify\nimport uuid\n\nfrom pero_ocr.document_ocr.layout import PageLayout\nfrom pero_ocr.force_alignment import force_align\nfrom pero_ocr.confidence_estimation import get_letter_confidence\nfrom pero_ocr.confidence_estimation import get_letter_confidence, get_line_confidence\n\n\ndef insert_lines_to_db(ocr_results_folder, file_names):\n\n base_file_names = [os.path.splitext(file_name)[0] for file_name in file_names]\n base_file_names = list(set(base_file_names))\n\n for base_file_name in base_file_names:\n print(base_file_name)\n xml_path = os.path.join(ocr_results_folder, \"{}.{}\".format(base_file_name, \"xml\"))\n logits_path = os.path.join(ocr_results_folder, \"{}.{}\".format(base_file_name, \"logits\"))\n page_layout = PageLayout()\n page_layout.from_pagexml(xml_path)\n page_layout.load_logits(logits_path)\n for region in page_layout.regions:\n db_region = get_text_region_by_id(region.id)\n db_line_map = dict([(str(line.id), line) for line in db_region.textlines])\n if db_region is not None:\n for order, line in enumerate(region.lines):\n if line.id in db_line_map:\n db_line = db_line_map[line.id]\n if len(db_line.annotations) == 0:\n db_line.text = line.transcription\n db_line.np_confidences = get_confidences(line)\n else:\n line_id = uuid.uuid4()\n line.id = str(line_id)\n text_line = TextLine(id=line_id,\n order=order,\n np_points=line.polygon,\n np_baseline=line.baseline,\n np_heights=line.heights,\n np_confidences=get_confidences(line),\n text=line.transcription,\n deleted=False)\n db_region.textlines.append(text_line)\n db_session.commit()\n page_layout.to_pagexml(xml_path)\n page_layout.save_logits(logits_path)\n\n\ndef get_confidences(line):\n if line.transcription is not None and line.transcription != \"\":\n char_map = dict([(c, i) for i, c in enumerate(line.characters)])\n c_idx = np.asarray([char_map[c] for c in line.transcription])\n try:\n confidences = get_line_confidence(line, c_idx)\n except ValueError:\n print('ERROR: Known error in get_line_confidence() - Please, fix it. 
Logit slice has zero length.')\n confidences = np.ones(len(line.transcription)) * 0.5\n return confidences\n return np.asarray([])\n\n\ndef insert_annotations_to_db(user, annotations):\n for annotation in annotations:\n text_line = get_text_line_by_id(annotation['id'])\n annotation_db = Annotation(text_original=annotation['text_original'], text_edited=annotation['text_edited'], deleted=False, user_id=user.id)\n text_line.annotations.append(annotation_db)\n db_session.commit()\n\n\ndef update_text_lines(annotations):\n for annotation in annotations:\n text_line = get_text_line_by_id(annotation['id'])\n text_line.text = annotation['text_edited']\n text_line.confidences = ' '.join([str(1) for _ in annotation['text_edited']])\n db_session.commit()\n\n\ndef set_delete_flag(text_line, delete_flag):\n text_line.deleted = delete_flag\n db_session.commit()\n\n\ndef set_training_flag(text_line, training_flag):\n text_line.for_training = training_flag\n db_session.commit()\n\n\ndef check_document_processed(document):\n for image in document.images:\n for textregion in image.textregions:\n if (len(list(textregion.textlines))):\n return True\n return False\n\n\ndef create_json_from_request(request):\n val = {'id': request.id, 'baseline_id': request.baseline_id, 'ocr_id': request.ocr_id,\n 'language_model_id': request.language_model_id, 'document': {'id': request.document.id, 'images': []}}\n for image in request.document.images:\n if not image.deleted:\n val['document']['images'].append(image.id)\n return jsonify(val)\n\n\ndef post_files_to_folder(request, folder):\n files = request.files\n file_names = []\n for file_id in files:\n file = files[file_id]\n path = os.path.join(folder, file.filename)\n file.save(path)\n file_names.append(file.filename)\n return file_names\n\n\ndef change_ocr_request_and_document_state(request, request_state, document_state):\n request.state = request_state\n request.document.state = document_state\n db_session.commit()\n\n\ndef change_ocr_request_and_document_state_on_success_handler(request):\n change_ocr_request_and_document_state(request, RequestState.SUCCESS, DocumentState.COMPLETED_OCR)\n return\n\n\ndef change_ocr_request_and_document_state_in_progress_handler(request):\n change_ocr_request_and_document_state(request, RequestState.IN_PROGRESS, DocumentState.RUNNING_OCR)\n return\n\n\ndef change_ocr_request_to_fail_and_document_state_to_completed_layout_analysis_handler(request):\n change_ocr_request_and_document_state(request, RequestState.FAILURE, DocumentState.COMPLETED_LAYOUT_ANALYSIS)\n return\n\n\ndef change_ocr_request_to_fail_and_document_state_to_success_handler(request):\n change_ocr_request_and_document_state(request, RequestState.FAILURE, DocumentState.COMPLETED_OCR)\n return\n\n\ndef get_page_annotated_lines(image_id):\n lines = db_session.query(TextLine.id).join(TextRegion).join(Annotation).filter(TextRegion.image_id == image_id)\\\n .distinct().all()\n return [x[0] for x in lines]\n\n\ndef create_ocr_request(document, baseline_id, ocr_id, language_model_id):\n return Request(document=document,\n request_type=RequestType.OCR, state=RequestState.PENDING, baseline_id=baseline_id, ocr_id=ocr_id,\n language_model_id=language_model_id)\n\n\ndef can_start_ocr(document):\n if not Request.query.filter_by(document_id=document.id, request_type=RequestType.OCR,\n state=RequestState.PENDING).first() and (document.state == DocumentState.COMPLETED_LAYOUT_ANALYSIS or document.state == DocumentState.COMPLETED_OCR):\n return True\n return False\n\n\ndef 
add_ocr_request_and_change_document_state(request):\n request.document.state = DocumentState.WAITING_OCR\n db_session.add(request)\n db_session.commit()\n\n\ndef get_first_ocr_request():\n requests = Request.query.filter_by(state=RequestState.PENDING, request_type=RequestType.OCR) \\\n .order_by(Request.created_date)\n if False:\n requests = requests.join(Document).join(User).filter(User.trusted > 0)\n return requests.first()\n\n"
]
| [
[
"numpy.asarray"
]
]
|
lammySup/ncscli | [
"1758b7c894f2b890c7462d63a9c46ce47bf1262b"
]
| [
"examples/batchMode/plotGatlingOutput.py"
]
| [
"#!/usr/bin/env python3\n\"\"\"\nplots loadtest results produced by runBatchJMeter\n\"\"\"\n# standard library modules\nimport argparse\nimport csv\nimport glob\nimport json\nimport logging\nimport math\nimport os\nimport re\nimport sys\nimport warnings\n# third-party modules\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef demuxResults( inFilePath ):\n instanceList = []\n with open( inFilePath, 'rb' ) as inFile:\n for line in inFile:\n decoded = json.loads( line )\n # print( 'decoded', decoded ) # just for debugging, would be verbose\n # iid = decoded.get( 'instanceId', '<unknown>')\n if 'args' in decoded:\n # print( decoded['args'] )\n if 'state' in decoded['args']:\n if decoded['args']['state'] == 'retrieved':\n # print(\"%s %s\" % (decoded['args']['frameNum'],decoded['instanceId']))\n instanceList.append([decoded['args']['frameNum'],decoded['instanceId']])\n return instanceList\n\ndef ingestGatlingLog( inFilePath ):\n '''read the tab-delimited file; return contents as a list of dicts'''\n fieldNames = ['scope', 'class', 'which', 'startTime', 'endTime', 'status' ]\n rows = []\n with open( inFilePath, newline='') as csvfile:\n reader = csv.DictReader(csvfile, dialect='excel-tab', fieldnames=fieldNames)\n for row in reader:\n rows.append( row )\n return rows\n\ndef getColumn(inputList,column):\n return [inputList[i][column] for i in range(0,len(inputList))]\n\ndef flattenList(inputList):\n return [num for elem in inputList for num in elem]\n\ndef makeTimelyXTicks():\n # x-axis tick marks at multiples of 60 and 10\n ax = plt.gca()\n ax.xaxis.set_major_locator( mpl.ticker.MultipleLocator(60) )\n ax.xaxis.set_minor_locator( mpl.ticker.MultipleLocator(10) )\n\n\nif __name__ == \"__main__\":\n # configure logger formatting\n logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'\n logDateFmt = '%Y/%m/%d %H:%M:%S'\n formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )\n logging.basicConfig(format=logFmt, datefmt=logDateFmt)\n\n # treat numpy deprecations as errors\n warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)\n\n ap = argparse.ArgumentParser( description=__doc__, fromfile_prefix_chars='@', formatter_class=argparse.ArgumentDefaultsHelpFormatter )\n ap.add_argument( '--dataDirPath', required=True, help='the path to to directory for input and output data' )\n args = ap.parse_args()\n\n logger.info( 'plotting data in directory %s', os.path.realpath(args.dataDirPath) )\n\n \n outputDir = args.dataDirPath\n launchedJsonFilePath = outputDir + \"/recruitLaunched.json\"\n print(\"launchedJsonFilePath = %s\" % launchedJsonFilePath)\n jlogFilePath = outputDir + \"/batchRunner_results.jlog\"\n print(\"jlogFilePath = %s\\n\" % jlogFilePath)\n\n if not os.path.isfile( launchedJsonFilePath ):\n logger.error( 'file not found: %s', launchedJsonFilePath )\n sys.exit( 1 )\n\n launchedInstances = []\n with open( launchedJsonFilePath, 'r') as jsonInFile:\n try:\n launchedInstances = json.load(jsonInFile) # an array\n except Exception as exc:\n sys.exit( 'could not load json (%s) %s' % (type(exc), exc) )\n if False:\n print(len(launchedInstances))\n print(launchedInstances[0])\n print(launchedInstances[0][\"instanceId\"])\n # print(launchedInstances[0][\"device-location\"])\n print(launchedInstances[0][\"device-location\"][\"latitude\"])\n print(launchedInstances[0][\"device-location\"][\"longitude\"])\n 
print(launchedInstances[0][\"device-location\"][\"display-name\"])\n print(launchedInstances[0][\"device-location\"][\"country\"])\n\n completedJobs = demuxResults(jlogFilePath)\n\n mappedFrameNumLocation = []\n mappedFrameNumLocationUnitedStates = []\n mappedFrameNumLocationRussia = []\n mappedFrameNumLocationOther = []\n \n for i in range(0,len(completedJobs)):\n for j in range(0,len(launchedInstances)):\n if launchedInstances[j][\"instanceId\"] == completedJobs[i][1]:\n mappedFrameNumLocation.append([completedJobs[i][0],\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n if launchedInstances[j][\"device-location\"][\"country\"] == \"United States\":\n mappedFrameNumLocationUnitedStates.append([completedJobs[i][0],\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n elif launchedInstances[j][\"device-location\"][\"country\"] == \"Russia\":\n mappedFrameNumLocationRussia.append([completedJobs[i][0],\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n else:\n mappedFrameNumLocationOther.append([completedJobs[i][0],\n launchedInstances[j][\"device-location\"][\"latitude\"],\n launchedInstances[j][\"device-location\"][\"longitude\"],\n launchedInstances[j][\"device-location\"][\"display-name\"],\n launchedInstances[j][\"device-location\"][\"country\"]\n ])\n \n\n print(\"\\nLocations:\")\n for i in range(0,len(mappedFrameNumLocation)):\n print(\"%s\" % mappedFrameNumLocation[i][3])\n \n \n\n print(\"\\nReading Response Time data\")\n resultFilePaths = []\n\n workerDirs = glob.glob( os.path.join( outputDir, 'gatlingResults_*' ) )\n print( 'workerDirs', workerDirs )\n for workerDir in workerDirs:\n dirContents = os.listdir( workerDir )\n if dirContents:\n innerPath = os.path.join( workerDir, dirContents[0] )\n if os.path.isdir( innerPath ):\n #print( 'found dir', innerPath )\n filePath = os.path.join( innerPath, 'simulation.log' )\n if os.path.isfile( filePath ):\n #print( 'found file', filePath )\n resultFilePaths.append( filePath )\n\n numResultFiles = len(resultFilePaths) \n\n # read the simulation.log files (which contain timings for every request)\n pat = r'gatlingResults_([^/]*)'\n responseData = []\n for i in range(0,numResultFiles):\n inFilePath = resultFilePaths[i]\n logger.debug( 'reading %s', inFilePath )\n rows = ingestGatlingLog( inFilePath )\n if not rows:\n logger.info( 'no rows in %s', inFilePath )\n continue\n match = re.search( pat, inFilePath ).group(1)\n print( 'found frameNum', match, 'in', inFilePath )\n frameNum = int( match )\n startTimes = []\n elapsedTimes = []\n for row in rows:\n #print( 'considering row', row )\n if row['scope'] == 'REQUEST': # and row['which'] == 'request_1'\n if not row.get( 'startTime'):\n logger.warning( 'no startTime in row of %s', inFilePath)\n continue\n startTime = float(row['startTime'])/1000\n endTime = float(row['endTime'])/1000\n startTimes.append( startTime )\n elapsedTimes.append( endTime-startTime )\n if startTimes:\n minStartTimeForDevice = min(startTimes)\n 
jIndex = -1\n for j in range (0,len(mappedFrameNumLocation)):\n if frameNum == mappedFrameNumLocation[j][0]:\n jIndex = j\n responseData.append([frameNum,minStartTimeForDevice,startTimes,elapsedTimes,mappedFrameNumLocation[jIndex]])\n if not responseData:\n sys.exit( 'no plottable data was found' )\n\n # first, time-shift all startTimes by subtracting the minStartTime for each device\n # and compute the maxStartTime (i.e. test duration) for each device\n relativeResponseData = []\n for i in range(0,len(responseData)):\n relativeStartTimes = []\n for ii in range(0,len(responseData[i][2])):\n # difference = responseData[i][2][ii]-globalMinStartTime\n # if i==2 and ii<3700 and difference > 500:\n # print(\"i = %d ii = %d difference = %f data = %f\" % (i,ii,difference,responseData[i][2][ii] ))\n # relativeStartTimes.append(responseData[i][2][ii]-globalMinStartTime)\n relativeStartTimes.append(responseData[i][2][ii]-responseData[i][1])\n maxStartTime = max(relativeStartTimes)\n relativeResponseData.append([responseData[i][0],relativeStartTimes,responseData[i][3],responseData[i][4],maxStartTime])\n\n # compute median maxStartTime\n medianMaxStartTime = np.median(getColumn(relativeResponseData,4))\n print(\"medianMaxStartTime = %f\" % medianMaxStartTime)\n\n # remove device records which ran too long\n # print(relativeResponseData[0])\n culledRelativeResponseData = []\n cullResponseData = True\n excessDurationThreshold = 30 # in seconds\n for i in range(0,len(relativeResponseData)):\n if cullResponseData:\n # print(\"i = %d min, max = %f %f\" % (i,min(relativeResponseData[i][1]),max(relativeResponseData[i][1])))\n if relativeResponseData[i][4]<(medianMaxStartTime+excessDurationThreshold):\n # print(\"min, max = %f %f\" % (min(relativeResponseData2[i][1]),max(relativeResponseData2[i][1])))\n culledRelativeResponseData.append(relativeResponseData[i])\n else:\n culledRelativeResponseData.append(relativeResponseData[i])\n\n print(\"Number of devices = %d\" % len(relativeResponseData))\n print(\"Culled Number of devices = %d\" %len(culledRelativeResponseData))\n culledLocations = getColumn(getColumn(culledRelativeResponseData,3),3)\n\n print(\"\\nCulled Locations:\")\n for i in range(0,len(culledLocations)):\n print(\"%s\" % culledLocations[i])\n \n print(\"\\nAnalyzing Location data\")\n startRelTimesAndMSPRsUnitedStatesMuxed = []\n startRelTimesAndMSPRsRussiaMuxed = []\n startRelTimesAndMSPRsOtherMuxed = []\n clipTimeInSeconds = 3.0 # normally 3.00\n\n for i in range(0,len(culledRelativeResponseData)):\n # print(culledRelativeResponseData[i][3][4])\n if culledRelativeResponseData[i][3][4]==\"United States\" :\n startRelTimesAndMSPRsUnitedStatesMuxed.append([culledRelativeResponseData[i][1],culledRelativeResponseData[i][2] ])\n elif culledRelativeResponseData[i][3][4]==\"Russia\" : \n startRelTimesAndMSPRsRussiaMuxed.append([culledRelativeResponseData[i][1],culledRelativeResponseData[i][2] ])\n else:\n startRelTimesAndMSPRsOtherMuxed.append([culledRelativeResponseData[i][1],culledRelativeResponseData[i][2] ])\n\n startRelTimesAndMSPRsUnitedStates = [flattenList(getColumn(startRelTimesAndMSPRsUnitedStatesMuxed,0)),flattenList(getColumn(startRelTimesAndMSPRsUnitedStatesMuxed,1))]\n startRelTimesAndMSPRsRussia = [flattenList(getColumn(startRelTimesAndMSPRsRussiaMuxed,0)),flattenList(getColumn(startRelTimesAndMSPRsRussiaMuxed,1))]\n startRelTimesAndMSPRsOther = [flattenList(getColumn(startRelTimesAndMSPRsOtherMuxed,0)),flattenList(getColumn(startRelTimesAndMSPRsOtherMuxed,1))]\n\n # 
print(len(startRelTimesAndMSPRsUnitedStates[0]))\n # print(len(startRelTimesAndMSPRsRussia[0]))\n # print(len(startRelTimesAndMSPRsOther[0]))\n\n print(\"Determining Delivered Load\")\n timeBinSeconds = 5\n culledRequestTimes = []\n for i in range(0,len(culledRelativeResponseData)):\n # print(\"min, max = %f %f\" % (min(culledRelativeResponseData[i][1]),max(culledRelativeResponseData[i][1])))\n culledRequestTimes.append(culledRelativeResponseData[i][1])\n\n flattenedCulledRequestTimes = flattenList(culledRequestTimes)\n maxCulledRequestTimes = max(flattenedCulledRequestTimes)\n print(\"Number of Responses = %d\" %len(flattenedCulledRequestTimes))\n print(\"Max Culled Request Time = %.2f\" % maxCulledRequestTimes)\n numBins = int(np.floor(maxCulledRequestTimes / timeBinSeconds + 3))\n # print(numBins)\n deliveredLoad = np.zeros(numBins)\n deliveredLoadTimes = np.zeros(numBins)\n for i in range(0,len(flattenedCulledRequestTimes)):\n bin = int(np.floor(flattenedCulledRequestTimes[i]/timeBinSeconds))+1\n deliveredLoad[bin] += 1/timeBinSeconds\n\n for i in range(0,len(deliveredLoadTimes)):\n deliveredLoadTimes[i] = i*timeBinSeconds\n # print(deliveredLoad)\n # print(deliveredLoadTimes)\n\n\n\n print(\"\\nReading World Map data\")\n mapFileName = \"./WorldCountryBoundaries.csv\"\n mapFile = open(mapFileName, \"r\")\n mapLines = mapFile.readlines()\n mapFile.close()\n mapNumLines = len(mapLines) \n\n CountryData = []\n CountrySphericalData = []\n\n # for i in range(1,8) :\n for i in range(1,mapNumLines) :\n firstSplitString = mapLines[i].split(\"\\\"\")\n nonCoordinateString = firstSplitString[2] \n noncoordinates = nonCoordinateString.split(\",\")\n countryString = noncoordinates[6]\n\n if firstSplitString[1].startswith('<Polygon><outerBoundaryIs><LinearRing><coordinates>') and firstSplitString[1].endswith('</coordinates></LinearRing></outerBoundaryIs></Polygon>'):\n coordinateString = firstSplitString[1].replace('<Polygon><outerBoundaryIs><LinearRing><coordinates>','').replace('</coordinates></LinearRing></outerBoundaryIs></Polygon>','').replace(',0 ',',0,')\n # print(\"coordinateString = %s\" % coordinateString)\n # print(\"nonCoordinateString = %s\" % nonCoordinateString)\n coordinates = [float(j) for j in coordinateString.split(\",\")] \n coordinateList = np.zeros([int(len(coordinates)/3),2])\n for j in range(0,len(coordinateList)) :\n coordinateList[j,:] = coordinates[j*3:j*3+2]\n coordinateSphericalList = np.zeros([int(len(coordinates)/3),3])\n for j in range(0,len(coordinateSphericalList)) :\n r = 1\n phi = 2*math.pi*coordinates[j*3]/360\n theta = 2*math.pi*(90-coordinates[j*3+1])/360\n coordinateSphericalList[j,0] = r * np.sin(theta) * np.cos(phi)\n coordinateSphericalList[j,1] = r * np.sin(theta) * np.sin(phi)\n coordinateSphericalList[j,2] = r * np.cos(theta)\n\n # print(\"noncoordinates = %s\" % str(noncoordinates))\n # print(\"countryString = %s\" % countryString)\n # print(\"coordinateList = %s\" % str(coordinateList))\n CountryData.append([countryString,coordinateList])\n CountrySphericalData.append([countryString,coordinateSphericalList])\n else :\n # print(\"Exception Line %i %s\" % (i,countryString))\n # if firstSplitString[1].startswith(\"<MultiGeometry>\") :\n # print(\"MultiGeometry Line %i %s\" % (i,countryString))\n # else :\n # print(\"Inner Boundary Line %i %s\" % (i,countryString))\n reducedCoordinateString = 
firstSplitString[1].replace('<MultiGeometry>','').replace('</MultiGeometry>','').replace('<Polygon>','').replace('</Polygon>','').replace('<outerBoundaryIs>','').replace('</outerBoundaryIs>','').replace('<innerBoundaryIs>','').replace('</innerBoundaryIs>','').replace('<LinearRing>','').replace('</LinearRing>','').replace('</coordinates>','').replace(',0 ',',0,')\n # print(\"reducedCoordinateString = %s\" % reducedCoordinateString)\n coordinateStringSets = reducedCoordinateString.split(\"<coordinates>\")\n # print(\"coordinateStringSets = %s\" % str(coordinateStringSets))\n coordinateSets= []\n for j in range(1,len(coordinateStringSets)) :\n coordinateSets.append([float(k) for k in coordinateStringSets[j].split(\",\")])\n # print(\"coordinateSets = %s\" % str(coordinateSets))\n coordinateList = []\n coordinateSphericalList = []\n for j in range(0,len(coordinateSets)) :\n # print(\"\\ncoordinateSets[%i] = %s\" % (j,str(coordinateSets[j])))\n coordinateList.append(np.zeros([int(len(coordinateSets[j])/3),2]))\n for k in range(0,len(coordinateList[j])) :\n coordinateList[j][k,:] = coordinateSets[j][k*3:k*3+2]\n # print(\"\\ncoordinateList[%i] = %s\" % (j,str(coordinateList[j])))\n coordinateSphericalList.append(np.zeros([int(len(coordinateSets[j])/3),3]))\n for k in range(0,len(coordinateSphericalList[j])) :\n r = 1\n phi = 2*math.pi*coordinateSets[j][k*3]/360\n theta = 2*math.pi*(90-coordinateSets[j][k*3+1])/360\n coordinateSphericalList[j][k,0] = r * np.sin(theta) * np.cos(phi)\n coordinateSphericalList[j][k,1] = r * np.sin(theta) * np.sin(phi)\n coordinateSphericalList[j][k,2] = r * np.cos(theta)\n\n CountryData.append([countryString,coordinateList])\n CountrySphericalData.append([countryString,coordinateSphericalList])\n\n figSize1 = (19.2, 10.8)\n fontFactor = 0.75\n mpl.rcParams.update({'font.size': 22})\n mpl.rcParams['axes.linewidth'] = 2 #set the value globally\n markerSizeValue = 10\n\n # plot world map\n fig = plt.figure(3, figsize=figSize1)\n ax = fig.gca()\n # Turn off tick labels\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n # ax.set_aspect('equal')\n # for i in range(0,20) :\n colorValue = 0.85\n edgeColor = (colorValue*.85, colorValue*.85, colorValue*.85)\n\n for i in range(0,len(CountryData)) :\n if isinstance( CountryData[i][1], np.ndarray ):\n ax.add_artist(plt.Polygon(CountryData[i][1],edgecolor=edgeColor,\n facecolor=(colorValue,colorValue,colorValue),aa=True))\n else :\n for j in range(0,len(CountryData[i][1])) :\n ax.add_artist(plt.Polygon(CountryData[i][1][j],edgecolor=edgeColor,\n facecolor=(colorValue,colorValue,colorValue),aa=True))\n\n plt.plot(getColumn(mappedFrameNumLocationUnitedStates,2),getColumn(mappedFrameNumLocationUnitedStates,1),linestyle='', color=(0.0, 0.5, 1.0),marker='o',markersize=markerSizeValue)\n plt.plot(getColumn(mappedFrameNumLocationRussia,2),getColumn(mappedFrameNumLocationRussia,1),linestyle='', color=(1.0, 0.0, 0.0),marker='o',markersize=markerSizeValue)\n plt.plot(getColumn(mappedFrameNumLocationOther,2),getColumn(mappedFrameNumLocationOther,1),linestyle='', color=(0.0, 0.9, 0.0),marker='o',markersize=markerSizeValue)\n plt.xlim([-180,180])\n plt.ylim([-60,90])\n #plt.show()\n plt.savefig( outputDir+'/worldMap.png', bbox_inches='tight')\n\n plotMarkerSize = 3\n plt.figure(10, figsize=figSize1)\n plt.plot(startRelTimesAndMSPRsUnitedStates[0],startRelTimesAndMSPRsUnitedStates[1], linestyle='', color=(0.0, 0.6, 1.0),marker='o',markersize=plotMarkerSize)\n plt.plot(startRelTimesAndMSPRsRussia[0],startRelTimesAndMSPRsRussia[1], 
linestyle='', color=(1.0, 0.0, 0.0),marker='o',markersize=plotMarkerSize)\n plt.plot(startRelTimesAndMSPRsOther[0],startRelTimesAndMSPRsOther[1], linestyle='', color=(0.0, 1.0, 0.0),marker='o',markersize=plotMarkerSize)\n plt.ylim([0,clipTimeInSeconds])\n #makeTimelyXTicks()\n plt.title(\"Response Times (s)\\n\", fontsize=42*fontFactor)\n plt.xlabel(\"Time during Test (s)\", fontsize=32*fontFactor) \n plt.ylabel(\"Response Times (s)\", fontsize=32*fontFactor) \n plt.savefig( outputDir+'/responseTimesByRegion.png', bbox_inches='tight' )\n #plt.show() \n # plt.clf()\n # plt.close() \n\n plt.figure(2, figsize=figSize1)\n plt.plot( deliveredLoadTimes, deliveredLoad, linewidth=5, color=(0.0, 0.6, 1.0) )\n #makeTimelyXTicks()\n # plt.xlim([0,270])\n plt.title(\"Delivered Load During Test\\n\", fontsize=42*fontFactor)\n plt.xlabel(\"Time during Test (s)\", fontsize=32*fontFactor) \n plt.ylabel(\"Requests per second\", fontsize=32*fontFactor) \n plt.savefig( outputDir+'/deliveredLoad.png', bbox_inches='tight' )\n #plt.show()\n"
]
| [
[
"numpy.sin",
"matplotlib.pyplot.xlim",
"matplotlib.ticker.MultipleLocator",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.Polygon",
"matplotlib.pyplot.figure",
"matplotlib.rcParams.update",
"matplotlib.pyplot.ylabel",
"numpy.cos",
"matplotlib.pyplot.gca",
"numpy.floor"
]
]
|
pagiux/passflow | [
"602d86896c0ed69fa828b51cafb1584286e2782c"
]
| [
"models/real_nvp/coupling_layer.py"
]
| [
"import torch\nimport torch.nn as nn\n\nfrom enum import IntEnum\nimport numpy as np\n\nfrom models.nn import MLP, ResNet\n\n\nclass MaskType(IntEnum):\n CHECKERBOARD = 0\n HORIZONTAL = 1\n CHAR_RUN = 2\n\n\nclass AffineTransform(nn.Module):\n def __init__(self, dim, device, mask_type, mask_pattern, net_type, n_hidden=2, hidden_size=256):\n \"\"\"\n :param dim: dimension of x\n :param device: gpu or cpu\n :param mask_type: normal(left) or inverted (right) mask\n :param mask_pattern: tuple of (pattern type, param). Defines the pattern to use for splitting: horizontal splitting\n (split), checkerboard and checkerboard with runs of n chars (char_run). param defines the parameters of the split\n :param net_type: mlp or resnet\n :param n_hidden: number of hidden layers in s and t\n :param hidden_size: size of hidden layers in s and t\n \"\"\"\n super().__init__()\n assert mask_type in {'left', 'right'}\n assert net_type in {'mlp', 'resnet'}\n\n self.dim = dim\n self.device = device\n self.mask = self.build_mask(mask_type=mask_type, mask_pattern=mask_pattern)\n self.scale = nn.Parameter(torch.zeros(1), requires_grad=True)\n self.scale_shift = nn.Parameter(torch.zeros(1), requires_grad=True)\n if net_type == 'mlp':\n self.net = MLP(input_size=self.dim,\n n_hidden=n_hidden,\n hidden_size=hidden_size,\n output_size=2 * self.dim)\n elif net_type == 'resnet':\n self.net = ResNet(in_features=self.dim,\n hidden_features=hidden_size,\n out_features=2 * self.dim,\n num_blocks=n_hidden)\n else:\n raise NotImplementedError\n\n def build_mask(self, mask_type, mask_pattern):\n assert mask_type in {'left', 'right'}\n if mask_type == 'left':\n mask = self.get_mask_pattern(mask_pattern)\n elif mask_type == 'right':\n mask = 1 - self.get_mask_pattern(mask_pattern)\n else:\n raise NotImplementedError\n return mask\n\n def get_mask_pattern(self, mask_pattern, param=2):\n half_dim = int(self.dim / 2)\n if mask_pattern == MaskType.HORIZONTAL:\n return torch.FloatTensor(np.concatenate([np.ones(half_dim), np.zeros(half_dim)], axis=0)).to(self.device)\n elif mask_pattern == MaskType.CHECKERBOARD:\n return torch.FloatTensor(np.tile([0, 1], half_dim)).to(self.device)\n elif mask_pattern == MaskType.CHAR_RUN:\n if (self.dim / param) % 2:\n raise Exception(f'Cannot use char run mask of run {param} with feature vector of length {self.dim}.'\n f'len(feature vector) / char_run must be even.')\n # TODO find a cleaner way to use param\n return torch.FloatTensor(np.tile([1] * param + [0] * param, int(self.dim / (2 * param)))).to(self.device)\n else:\n raise NotImplementedError\n\n def forward(self, x, reverse=False):\n # returns transform(x), log_det\n batch_size = x.shape[0]\n mask = self.mask.repeat(batch_size, 1)\n x_ = x * mask\n\n log_s, t = self.net(x_).chunk(2, dim=1)\n log_s = self.scale * torch.tanh(log_s) + self.scale_shift\n t = t * (1.0 - mask)\n log_s = log_s * (1.0 - mask)\n\n if reverse: # inverting the transformation\n x = (x - t) * torch.exp(-log_s)\n else:\n x = x * torch.exp(log_s) + t\n return x, log_s\n"
]
| [
[
"torch.zeros",
"numpy.zeros",
"numpy.ones",
"numpy.tile",
"torch.tanh",
"torch.exp"
]
]
|
KedoKudo/tomoproc | [
"b20270e87af4ce7459004a6ed928037ae8573b1e"
]
| [
"tomoproc/prep/correction.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nContain functions operating on tomography sinograsm\n\nNOTE:\n Different experiment requires different type of correction, the choice of\n which should be established via trial and error.\n\"\"\"\nimport tomopy\nimport numpy as np\nimport concurrent.futures as cf\n\nfrom typing import Optional\nfrom typing import Tuple\nfrom scipy.signal import medfilt2d\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.ndimage import shift\nfrom scipy.ndimage import affine_transform\nfrom tomoproc.prep.detection import detect_sample_in_sinogram\nfrom tomoproc.prep.detection import detect_corrupted_proj\nfrom tomoproc.prep.detection import detect_slit_corners\nfrom tomoproc.prep.detection import detect_rotation_center\nfrom tomoproc.util.npmath import calc_affine_transform\nfrom tomoproc.util.npmath import rescale_image\nfrom tomoproc.util.npmath import binded_minus_log\n\ndef denoise(\n sino: np.ndarray,\n method: str='smd',\n config: dict={\n 'kernel_size': 3,\n 'threshold': 0.1,\n },\n ) -> np.ndarray:\n \"\"\"\n Description\n -----------\n Use selected method \n [median, selective median, SVD] \n to reduce the impulse noise in the sinogram. All methods used here are\n lossy, especially the SVD method.\n \n Parameters\n ----------\n sino: np.ndarray\n single sinograms for denoising\n method: str, [median, smd, SVD]\n method for denoising includes\n **median**\n simple 2D median filtering using scipy.signal.medfilt2d\n config = {'kernel_size': 3}\n **selective median filtering**\n only replace the noisy pixels with the corresponding median value\n config = {'kernel_size': 3, 'threshold': 0.05}\n **singular value decomposition**\n retain upto given threshold level of details in the corresponding\n eigen space\n config = {'threshold': 0.95}\n config: dict\n parameters for different methods\n \n Returns\n -------\n np.ndarray\n denoised sinogram\n \"\"\"\n if method.lower() in ['median', 'simple']:\n return medfilt2d(sino, kernel_size=config['kernel_size'])\n elif method.lower() in ['selective median', 'smd']:\n sino_md = medfilt2d(sino, kernel_size=config['kernel_size'])\n diff = np.absolute(sino_md - sino)/(sino+1)\n return np.where(diff>config['threshold'], sino_md, sino)\n elif method.lower() in ['svd']:\n U, S, V = np.linalg.svd(sino, full_matrices=True)\n eigen_cut = int(min(U.shape[1], V.shape[0])*config['threshold'])\n return (U[:,:eigen_cut]*S[:eigen_cut])@V[:eigen_cut,:]\n else:\n raise NotImplementedError\n\n\ndef beam_intensity_fluctuation_correction(\n sino: np.ndarray,\n detect_bg: bool=True,\n left_bg: int=5, # in pixels\n right_bg: int=5, # in pixels\n interpolate: bool=True,\n ) -> np.ndarray:\n \"\"\"\n Description\n -----------\n The beam intensity always varies during an experiment, leading to varying\n background (non-sample) region. 
This artifacts will leads to strong linear\n artifacts in the final reconstruction, therefore need to be corrected by\n forcing all non-sample region (background) to be one (zero attenuation).\n By default, the sample bound (left and right edge) will be automatically \n detected with \n tomoproc.prep.detection.detect_sample_in_sinogram\n which can be bypassed (for speed) if the sample limit is known.\n\n Parameters\n ----------\n sino: np.ndarray\n sinogram as attenuation map (befor the minus-log step)\n detect_bg: bool\n whether to use automated background pixel detection\n left_bg, right_bg: int\n designated background pixels, superceeded by detect_bg\n interpolate: bool\n whether to interpolate background or not\n ========\n | NOTE |\n ========\n linear interpolation is recommended as the beam are not alawys stable,\n which could lead to intensity shift that cannot be correct through\n background removal.\n\n Returns\n -------\n np.ndarray\n sinogram with non-sample region (background) normalized to one\n one -> zero attenuation\n\n NOTE\n ----\n * This method will not work if the sample occupy the entire filed of view.\n * Sometimes two or four iterations are necessary for unstable beam.\n \"\"\"\n sino = np.sqrt(sino) # for better auto background detection\n \n # get sample location\n if detect_bg:\n ledge, redge = detect_sample_in_sinogram(sino)\n else:\n ledge, redge = left_bg, sino.shape[1]-right_bg\n\n # locate the left and right background\n # NOTE:\n # Due to hardware issue, the first and last pixels are not always\n # reliable, therefore throw them out...\n lbg = np.average(sino[:, 1:ledge], axis=1)\n rbg = np.average(sino[:, redge:-1], axis=1)\n\n # calculate the correction matrix alpha\n alpha = np.ones(sino.shape)\n if interpolate:\n for n in range(sino.shape[0]):\n alpha[n,:] = np.linspace(lbg[n], rbg[n], sino.shape[1])\n else:\n alpha *= ((lbg+rbg)/2)[:,None]\n \n # apply the correction\n return (sino/alpha)**2\n\n\ndef remove_corrupted_projs(\n projs: np.ndarray,\n omegas: np.ndarray,\n idx_good: Optional[np.ndarray]=None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Description\n -----------\n Remove corrupted prjections/frames in a given tomo image stack\n\n Parameters\n ----------\n projs: np.ndarray\n Tomo image stack [axis_omega, axis_imgrow, axis_imgcol]\n idx_good: np.ndarray|None\n index (along omega axis) for good frames \n\n Returns\n -------\n Tuple[np.ndarray, np.ndarray]\n Pruned tomo image stack and corresponding angular position (omegas)\n \"\"\"\n # get the index of good frames if not provided\n idx_good = detect_corrupted_proj(projs, omegas)[1] if idx_good is None else idx_good\n\n return projs[idx_good,:,:], omegas[idx_good]\n\n\ndef correct_horizontal_jittering(\n projs: np.ndarray,\n omegas: np.ndarray,\n remove_bad_frames: bool=True,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Description\n -----------\n Correct the horizontal jittering of sample by shifting all projections\n such that the rotation center is alawys at the center of each projection.\n\n Parameters\n ----------\n projs: np.ndarray\n tomogaphy image stacks [axis_omega, axis_imgrow, axis_imgcol]\n omegas: np.ndarray\n rotary position array\n remove_bad_frames: bool\n remove corrupted frames from the projs\n\n Returns\n -------\n tuple(np.ndarray, np.ndarray)\n corrected (and pruned) projs ready for tomography reconstruction and\n corresponding omegas\n \"\"\"\n # assume equal step, find the index range equals to 180 degree\n dn = int(np.pi/(omegas[1] - omegas[0]))\n\n # identify 
bad frames if necesary\n if remove_bad_frames:\n _, idx_good = detect_corrupted_proj(projs, omegas)\n\n # get the cnts from each 180 pairs\n with cf.ProcessPoolExecutor() as e:\n _jobs = [\n e.submit(\n tomopy.find_center_pc,\n rescale_image(binded_minus_log(projs[nimg,:,:])), \n rescale_image(binded_minus_log(projs[nimg+dn,:,:])), \n )\n for nimg in range(dn)\n ]\n \n cnts = [me.results() for me in _jobs]\n\n # 180 -> 360\n cnts = cnts + cnts\n shift_vals = [\n np.array([0, projs.shape[2]/2 - val])\n for val in cnts\n ]\n\n # shift each proj so that the rotation center is the central col\n for n in range(len(shift_vals)):\n projs[n,:,:] = shift(\n projs[n,:,:],\n shift_vals[n],\n mode='constant', cval=0, order=1,\n )\n\n # remove the corrupted frames if requested\n if remove_bad_frames:\n projs = projs[idx_good,:,:]\n omegas = omegas[idx_good]\n \n return projs, omegas\n\n\ndef correct_detector_drifting(\n projs: np.ndarray,\n ) -> Tuple[np.ndarray, list]:\n \"\"\"\n Description\n -----------\n Systematic shifting and rotation could happen to a tomo imagestack\n due to detetor stage settling.\n\n Parameters\n ----------\n projs: np.ndarray\n Tomo imagestack with [axis_omega, axis_row, axis_col]\n\n Returns\n -------\n (np.ndarray, list)\n Tomo imagestack with detector drifting corrected and the list of the\n affine transformation matrix used for correction.\n\n NOTE\n ----\n Use ALL available resource possible by default (aggresive approach)\n \"\"\"\n # -- detect the four corners\n with cf.ProcessPoolExecutor() as e:\n _jobs = [\n e.submit(\n detect_slit_corners, \n projs[n_omega,:,:],\n )\n for n_omega in range(projs.shape[0])\n ]\n \n cnrs_all = [me.result() for me in _jobs]\n\n # -- calculate the transformation matrix using the first frames as the\n # -- reference frame\n with cf.ProcessPoolExecutor() as e:\n _jobs = [\n e.submit(\n calc_affine_transform, \n np.array(me), # source\n np.array(cnrs_all[0]), # reference/target\n )\n for me in cnrs_all\n ]\n \n correction_matrix = [me.result() for me in _jobs]\n\n # -- apply the affine transformation to each frame\n with cf.ProcessPoolExecutor() as e:\n _jobs = [\n e.submit(\n affine_transform,\n projs[n_omega,:,:],\n correction_matrix[n_omega][0:2,0:2], # rotation\n offset=correction_matrix[n_omega][0:2,2] # translation\n )\n for n_omega in range(projs.shape[0])\n ]\n projs = np.stack([me.result() for me in _jobs], axis=0)\n\n return projs, correction_matrix\n\n\ndef correct_detector_tilt(\n projs: np.ndarray, \n omegas: np.ndarray,\n tor: int=1, \n nchunk: int=4,\n ) -> np.ndarray:\n \"\"\"\n Description\n -----------\n Due to detector mounting process, the vertical axis of the detector (hence\n the image) might not be parallel to the actual rotation axis. 
Therefore,\n the projections need to be slighly rotated until the rotation axis is\n parallel to the vertial axis of the image.\n\n Parameters\n ----------\n projs: np.ndarray\n Tomo imagestack with [axis_omega, axis_row, axis_col]\n omegas: np.ndarray\n rotary position array\n tor: int\n tolerance for horizontal shift in pixels\n nchunk: int\n number of subdivisions used to identify the rotation axis tilt\n \n Returns\n -------\n np.ndarray\n Correct projection images.\n \"\"\"\n # calculate slab thickness (allow overlap)\n _st = int(np.ceil(projs.shape[1]/nchunk))\n\n _err = 10 #\n _cnt = 0 #\n while(_err > tor):\n cnt_cols = [\n detect_rotation_center(projs[:,n*_st:min((n+1)*_st, projs.shape[2]),:], omegas)\n for n in range(nchunk)\n ]\n \n cnt_col = np.average(cnt_cols)\n\n # update the error\n _err = np.max([abs(me-cnt_col) for me in cnt_cols])\n _cnt = _cnt + 1\n\n # safe guard and lazy update\n if _cnt > 10000: break\n if _err < tor: break\n\n # calcualte the correction matrix\n pts_src = np.array([[(n+0.5)*_st, cnt_cols[n]] for n in range(nchunk)])\n pts_tgt = np.array([[(n+0.5)*_st, cnt_col ] for n in range(nchunk)])\n _afm = calc_affine_transform(pts_src, pts_tgt)\n\n # -- apply the affine transformation to each frame\n with cf.ProcessPoolExecutor() as e:\n _jobs = [\n e.submit(\n affine_transform,\n projs[n_omega,:,:],\n _afm[0:2,0:2], # rotation\n offset=_afm[0:2,2] # translation\n )\n for n_omega in range(projs.shape[0])\n ]\n projs = np.stack([me.result() for me in _jobs], axis=0)\n \n return projs\n\n\nif __name__ == \"__main__\":\n testimg = np.random.random((500,500))\n sino = denoise(testimg, method='svd')\n"
]
| [
[
"numpy.array",
"numpy.ceil",
"scipy.ndimage.shift",
"numpy.ones",
"scipy.signal.medfilt2d",
"numpy.where",
"numpy.sqrt",
"numpy.absolute",
"numpy.average",
"numpy.random.random",
"numpy.linspace",
"numpy.linalg.svd"
]
]
|
AJTYNAN/rlcard | [
"7370c4c81bd5fc3e087df29d1d73cfc4514c081a"
]
| [
"rlcard/games/nolimitholdem/game.py"
]
| [
"from enum import Enum\n\nimport numpy as np\nfrom copy import deepcopy\nfrom rlcard.games.limitholdem import Game\nfrom rlcard.games.limitholdem import PlayerStatus\n\nfrom rlcard.games.nolimitholdem import Dealer\nfrom rlcard.games.nolimitholdem import Player\nfrom rlcard.games.nolimitholdem import Judger\nfrom rlcard.games.nolimitholdem import Round, Action\n\n\nclass Stage(Enum):\n\n PREFLOP = 0\n FLOP = 1\n TURN = 2\n RIVER = 3\n END_HIDDEN = 4\n SHOWDOWN = 5\n\n\nclass NolimitholdemGame(Game):\n\n def __init__(self, allow_step_back=False, num_players=2):\n ''' Initialize the class nolimitholdem Game\n '''\n self.allow_step_back = allow_step_back\n self.np_random = np.random.RandomState()\n\n # small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # config players\n self.num_players = num_players\n self.init_chips = [100] * num_players\n\n # If None, the dealer will be randomly chosen\n self.dealer_id = None\n\n def configure(self, game_config):\n ''' Specifiy some game specific parameters, such as number of players, initial chips, and dealer id.\n If dealer_id is None, he will be randomly chosen\n '''\n self.num_players = game_config['game_num_players']\n # must have num_players length\n self.init_chips = [game_config['chips_for_each']] * game_config[\"game_num_players\"]\n self.dealer_id = game_config['dealer_id']\n\n def init_game(self):\n ''' Initialilze the game of Limit Texas Hold'em\n\n This version supports two-player limit texas hold'em\n\n Returns:\n (tuple): Tuple containing:\n\n (dict): The first state of the game\n (int): Current player's id\n '''\n if self.dealer_id is None:\n self.dealer_id = self.np_random.randint(0, self.num_players)\n\n # Initilize a dealer that can deal cards\n self.dealer = Dealer(self.np_random)\n\n # Initilize players to play the game\n self.players = [Player(i, self.init_chips[i], self.np_random) for i in range(self.num_players)]\n\n # Initialize a judger class which will decide who wins in the end\n self.judger = Judger(self.np_random)\n\n # Deal cards to each player to prepare for the first round\n for i in range(2 * self.num_players):\n self.players[i % self.num_players].hand.append(self.dealer.deal_card())\n\n # Initilize public cards\n self.public_cards = []\n self.stage = Stage.PREFLOP\n\n # Big blind and small blind\n s = (self.dealer_id + 1) % self.num_players\n b = (self.dealer_id + 2) % self.num_players\n self.players[b].bet(chips=self.big_blind)\n self.players[s].bet(chips=self.small_blind)\n\n # The player next to the small blind plays the first\n self.game_pointer = (b + 1) % self.num_players\n\n # Initilize a bidding round, in the first round, the big blind and the small blind needs to\n # be passed to the round for processing.\n self.round = Round(self.num_players, self.big_blind, dealer=self.dealer, np_random=self.np_random)\n\n self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])\n\n # Count the round. There are 4 rounds in each game.\n self.round_counter = 0\n\n # Save the hisory for stepping back to the last state.\n self.history = []\n\n state = self.get_state(self.game_pointer)\n\n return state, self.game_pointer\n\n def get_legal_actions(self):\n ''' Return the legal actions for current player\n\n Returns:\n (list): A list of legal actions\n '''\n return self.round.get_nolimit_legal_actions(players=self.players)\n\n def step(self, action):\n ''' Get the next state\n\n Args:\n action (str): a specific action. 
(call, raise, fold, or check)\n\n Returns:\n (tuple): Tuple containing:\n\n (dict): next player's state\n (int): next plater's id\n '''\n\n if action not in self.get_legal_actions():\n print(action, self.get_legal_actions())\n print(self.get_state(self.game_pointer))\n raise Exception('Action not allowed')\n\n if self.allow_step_back:\n # First snapshot the current state\n r = deepcopy(self.round)\n b = self.game_pointer\n r_c = self.round_counter\n d = deepcopy(self.dealer)\n p = deepcopy(self.public_cards)\n ps = deepcopy(self.players)\n self.history.append((r, b, r_c, d, p, ps))\n\n # Then we proceed to the next round\n self.game_pointer = self.round.proceed_round(self.players, action)\n\n players_in_bypass = [1 if player.status in (PlayerStatus.FOLDED, PlayerStatus.ALLIN) else 0 for player in self.players]\n if self.num_players - sum(players_in_bypass) == 1:\n last_player = players_in_bypass.index(0)\n if self.round.raised[last_player] >= max(self.round.raised):\n # If the last player has put enough chips, he is also bypassed\n players_in_bypass[last_player] = 1\n\n # If a round is over, we deal more public cards\n if self.round.is_over():\n # Game pointer goes to the first player not in bypass after the dealer, if there is one\n self.game_pointer = (self.dealer_id + 1) % self.num_players\n if sum(players_in_bypass) < self.num_players:\n while players_in_bypass[self.game_pointer]:\n self.game_pointer = (self.game_pointer + 1) % self.num_players\n\n # For the first round, we deal 3 cards\n if self.round_counter == 0:\n self.stage = Stage.FLOP\n self.public_cards.append(self.dealer.deal_card())\n self.public_cards.append(self.dealer.deal_card())\n self.public_cards.append(self.dealer.deal_card())\n if len(self.players) == np.sum(players_in_bypass):\n self.round_counter += 1\n # For the following rounds, we deal only 1 card\n if self.round_counter == 1:\n self.stage = Stage.TURN\n self.public_cards.append(self.dealer.deal_card())\n if len(self.players) == np.sum(players_in_bypass):\n self.round_counter += 1\n if self.round_counter == 2:\n self.stage = Stage.RIVER\n self.public_cards.append(self.dealer.deal_card())\n if len(self.players) == np.sum(players_in_bypass):\n self.round_counter += 1\n\n self.round_counter += 1\n self.round.start_new_round(self.game_pointer)\n\n state = self.get_state(self.game_pointer)\n\n return state, self.game_pointer\n\n def get_state(self, player_id):\n ''' Return player's state\n\n Args:\n player_id (int): player id\n\n Returns:\n (dict): The state of the player\n '''\n self.dealer.pot = np.sum([player.in_chips for player in self.players])\n\n chips = [self.players[i].in_chips for i in range(self.num_players)]\n legal_actions = self.get_legal_actions()\n state = self.players[player_id].get_state(self.public_cards, chips, legal_actions)\n state['stakes'] = [self.players[i].remained_chips for i in range(self.num_players)]\n state['current_player'] = self.game_pointer\n state['pot'] = self.dealer.pot\n state['stage'] = self.stage\n return state\n\n def step_back(self):\n ''' Return to the previous state of the game\n\n Returns:\n (bool): True if the game steps back successfully\n '''\n if len(self.history) > 0:\n self.round, self.game_pointer, self.round_counter, self.dealer, self.public_cards, self.players = self.history.pop()\n self.stage = Stage(self.round_counter)\n return True\n return False\n\n def get_num_players(self):\n ''' Return the number of players in No Limit Texas Hold'em\n\n Returns:\n (int): The number of players in the game\n '''\n return 
self.num_players\n\n def get_payoffs(self):\n ''' Return the payoffs of the game\n\n Returns:\n (list): Each entry corresponds to the payoff of one player\n '''\n hands = [p.hand + self.public_cards if p.status in (PlayerStatus.ALIVE, PlayerStatus.ALLIN) else None for p in self.players]\n chips_payoffs = self.judger.judge_game(self.players, hands)\n return chips_payoffs\n\n @staticmethod\n def get_num_actions():\n ''' Return the number of applicable actions\n\n Returns:\n (int): The number of actions. There are 6 actions (call, raise_half_pot, raise_pot, all_in, check and fold)\n '''\n return len(Action)\n"
]
| [
[
"numpy.sum",
"numpy.random.RandomState"
]
]
|
ricoai/ricar_ryc | [
"811aefaf9893f3fe9c61d2070d8dc3a949f36697"
]
| [
"manage.py"
]
| [
"#!/usr/bin/env python3\n\"\"\"\nScripts to drive a donkey 2 car and train a model for it. \n\nUsage:\n manage.py drive [--model=<model>] [--web=<True/False>] [--throttle=<Throttle 0.0-1.0>] [--js]\n manage.py train (--tub=<tub>) (--model=<model>)\n manage.py calibrate\n manage.py (calibrate)\n manage.py (check) [--tub=<tub1,tub2,..tubn>] [--fix]\n manage.py (analyze) [--tub=<tub1,tub2,..tubn>] (--op=<histogram>) (--rec=<\"user/angle\">)\n\"\"\"\n\n\nimport os\nfrom docopt import docopt\nfrom vehicle import Vehicle, Lambda\nfrom cameras import PiCamera\nfrom joystick import JoystickController\nfrom keras import KerasRicar\nfrom actuators import PCA9685, PWMSteering, PWMThrottle\nfrom tub import TubHandler, Tub\nfrom utils import linear_bin\nfrom config import load_config\n\n\ndef drive(cfg, model_path=None, use_joystick=True):\n \"\"\"\n Drive the car.\n You will either drive to record data for training or drive to test the autonomous mode.\n Either use Web controls or Joystick to control the vehicle.\n If driving autonomous, give the model to load.\n :param cfg: Configuration for user defined values.\n :param model_path: Path to load the model.\n :param use_joystick Use parameter in startup to use joystick.\n \"\"\"\n #Initialized car\n V = Vehicle()\n\n # Setup camera\n cam = PiCamera()\n V.add(cam, outputs=['cam/image_array'], threaded=True)\n\n # Select if only use bluetooth PS3 controller\n ctr = JoystickController(max_throttle=cfg.JOYSTICK_MAX_THROTTLE,\n steering_scale=cfg.JOYSTICK_STEERING_SCALE,\n auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)\n\n V.add(ctr,\n inputs=['cam/image_array'],\n outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],\n threaded=True)\n \n # See if we should even run the pilot module.\n # This is only needed because the part run_contion only accepts boolean\n def pilot_condition(mode):\n if mode == 'user':\n return False\n else:\n return True\n \n pilot_condition_part = Lambda(pilot_condition)\n V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])\n\n # Load the model configuration\n kl = KerasRicar()\n\n if model_path:\n print(model_path)\n kl.load(model_path)\n \n V.add(kl, inputs=['cam/image_array'], \n outputs=['pilot/angle', 'pilot/throttle'],\n run_condition='run_pilot')\n\n # Choose what inputs should change the car.\n def drive_mode(mode, \n user_angle, user_throttle,\n pilot_angle, pilot_throttle):\n if mode == 'user':\n return user_angle, user_throttle\n \n elif mode == 'local_angle':\n return pilot_angle, user_throttle\n \n else: \n return pilot_angle, pilot_throttle\n \n drive_mode_part = Lambda(drive_mode)\n V.add(drive_mode_part, \n inputs=['user/mode', 'user/angle', 'user/throttle',\n 'pilot/angle', 'pilot/throttle'], \n outputs=['angle', 'throttle'])\n\n # Configure the throttle and angle control hardware\n # Calibrate min/max for steering angle\n # Calibrate min/max/zero for throttle\n steering_controller = PCA9685(1)\n steering = PWMSteering(controller=steering_controller,\n left_pulse=460, right_pulse=260,\n invert_steering_angle=cfg.INVERT_STEERING_ANGLE)\n \n throttle_controller = PCA9685(0)\n throttle = PWMThrottle(controller=throttle_controller,\n max_pulse=500, zero_pulse=370, min_pulse=220)\n \n V.add(steering, inputs=['angle'])\n V.add(throttle, inputs=['throttle'])\n \n # Add tub to save data\n inputs = ['cam/image_array',\n 'user/angle', 'user/throttle',\n 'pilot/angle', 'pilot/throttle',\n 'user/mode']\n types = ['image_array',\n 'float', 'float',\n 'float', 'float',\n 'str']\n \n th = 
TubHandler(path=cfg.DATA_PATH)\n tub_writer = th.new_tub_writer(inputs=inputs, types=types)\n V.add(tub_writer, inputs=inputs, run_condition='recording')\n \n # Run the vehicle for 20 seconds\n V.start(rate_hz=cfg.FPS, max_loop_count=100000)\n \n print(\"You can now go to <your pi ip address>:8887 to drive your car.\")\n\n\ndef expand_path_masks(paths):\n '''\n take a list of paths and expand any wildcards\n returns a new list of paths fully expanded\n '''\n import glob\n expanded_paths = []\n for path in paths:\n if '*' in path or '?' in path:\n mask_paths = glob.glob(path)\n expanded_paths += mask_paths\n else:\n expanded_paths.append(path)\n\n return expanded_paths\n\n\ndef gather_tubs(cfg, tub_names):\n if tub_names:\n tub_paths = [os.path.expanduser(n) for n in tub_names.split(',')]\n tub_paths = expand_path_masks(tub_paths)\n else:\n tub_paths = [os.path.join(cfg.DATA_PATH, n) for n in os.listdir(cfg.DATA_PATH)]\n\n tubs = [Tub(p) for p in tub_paths]\n return tubs\n\n\ndef train(cfg, tub_names, model_name):\n \"\"\"\n Train the model using the neural network based off the tubs given.\n The tubs contain the recorded data.\n :param cfg: Configuration for user settings.\n :param tub_names: Tubs to load. This must be the full path.\n :param model_name: Name of the model to create.\n \"\"\"\n # Get the configuration\n is_tensorboard = cfg.IS_TENSORBOARD\n is_plot = cfg.IS_PLOT_RESULTS\n epochs = cfg.EPOCHS\n lr = cfg.LEARNING_RATE\n is_stop_early = cfg.IS_EARLY_STOP\n early_stop_count = cfg.EARLY_STOP_COUNT\n dropout_1 = cfg.DROPOUT_1\n dropout_2 = cfg.DROPOUT_2\n optimizer = cfg.OPTIMIZER\n loss_weight_angle = cfg.LOSS_WEIGHT_ANGLE\n loss_weight_throttle = cfg.LOSS_WEIGHT_THROTTLE\n is_categorical = cfg.IS_CATEGORICAL\n is_lr_decay = cfg.IS_LR_DECAY\n\n X_keys = ['cam/image_array']\n y_keys = ['user/angle', 'user/throttle']\n\n def rt(record):\n record['user/angle'] = linear_bin(record['user/angle'])\n return record\n\n # Load the model\n kl = KerasRicar(dropout_1=dropout_1,\n dropout_2=dropout_2,\n optimizer=optimizer,\n learning_rate=lr,\n loss_weight_angle=loss_weight_angle,\n loss_weight_throttle=loss_weight_throttle,\n is_categorical=is_categorical,\n is_lr_decay=is_lr_decay)\n\n tubs = gather_tubs(cfg, tub_names)\n\n import itertools\n\n gens = [tub.train_val_gen(X_keys, y_keys, record_transform=rt, batch_size=cfg.BATCH_SIZE, train_split=cfg.TRAIN_TEST_SPLIT) for tub in tubs]\n\n # Training data generator is the one that keeps cycling through training data generator of all tubs chained together\n # The same for validation generator\n train_gens = itertools.cycle(itertools.chain(*[gen[0] for gen in gens]))\n val_gens = itertools.cycle(itertools.chain(*[gen[1] for gen in gens]))\n\n model_path = os.path.expanduser(model_name)\n\n total_records = sum([t.get_num_records() for t in tubs])\n total_train = int(total_records * cfg.TRAIN_TEST_SPLIT)\n total_val = total_records - total_train\n print('train: %d, validation: %d' %(total_train, total_val))\n steps_per_epoch = total_train // cfg.BATCH_SIZE\n print('steps_per_epoch', steps_per_epoch)\n\n # Train with the data loaded from the tubs\n kl.train(train_gens,\n val_gens,\n epochs=epochs,\n saved_model_path=model_path,\n is_early_stop=is_stop_early,\n early_stop_count=early_stop_count,\n is_tensorboard=is_tensorboard,\n is_plot_results=is_plot)\n\n\ndef calibrate():\n channel = int(input('Enter the channel your actuator uses (0-15).'))\n c = PCA9685(channel)\n \n for i in range(10):\n pmw = int(input('Enter a PWM setting to 
test(100-600)'))\n c.run(pmw)\n\n\ndef check(cfg, tub_names, fix=True):\n '''\n Check for any problems. Looks at tubs and find problems in any records or images that won't open.\n If fix is True, then delete images and records that cause problems.\n '''\n tubs = gather_tubs(cfg, tub_names)\n\n for tub in tubs:\n tub.check(fix=fix)\n\n\ndef anaylze(cfg, tub_names, op, record):\n '''\n look at the tub data and produce some analysis\n '''\n tubs = gather_tubs(cfg, tub_names)\n\n if op == 'histogram':\n import matplotlib.pyplot as plt\n samples = []\n for tub in tubs:\n num_records = tub.get_num_records()\n for iRec in range(0, num_records):\n json_data = tub.get_json_record(iRec)\n sample = json_data[record]\n samples.append(float(sample))\n\n plt.hist(samples, 50)\n plt.xlabel(record)\n plt.show()\n\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n\n # Put config.py in the same location as manage.py\n my_cfg = load_config()\n\n if args['drive']:\n drive(my_cfg, model_path=args['--model'])\n\n elif args['calibrate']:\n calibrate()\n\n elif args['train']:\n tub = args['--tub']\n model = args['--model']\n train(my_cfg, tub, model)\n\n elif args['check']:\n tub = args['--tub']\n fix = args['--fix']\n check(my_cfg, tub, fix)\n\n elif args['analyze']:\n tub = args['--tub']\n op = args['--op']\n rec = args['--rec']\n anaylze(my_cfg, tub, op, rec)\n\n\n\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show"
]
]
|
fy-meng/lunarlander-saliency | [
"294a2008ad010a42ebbde4fa039c711611e96044"
]
| [
"lunarLander.py"
]
| [
"\"\"\"\nFile name: lunarLander.py\n Agent for landing successfully the 'Lunar Lander' which is implemented in\n OpenAI gym (reference [1]).\n\nUsage: python lunarLander.py -h\n\nusage: lunarLander.py [-h] [-v {0,1,2}] -e {train,test} [-a A]\n\nLunar Lander with DQN\n\noptional arguments:\n -h, --help show this help message and exit\n -v {0,1,2} verbose level (0: None, 1: INFO, 2: DEBUG)\n -e {train,test} execute (train, test)\n -a A trained agent file\n\nNote: Default convergence criteria met when for 150 consecutive episodes the average reward is > 200.\n\nusage example: Execute step 1.1 with rendering enabled and verbose level set to INFO.\n python project2.py -e 1.1 -r -v 1\n\nAuthor: Vasileios Saveris\nenail: [email protected]\n\nLicense: MIT\n\nDate last modified: 02.12.2019\n\nReferences:\n [1] arXiv:1312.5602 [cs.LG]\n\nPython Version: 3.6\n\"\"\"\n\nimport argparse\nimport os\nimport time\n\n# Other classes\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport deepNeuralNetwork\nimport deepQNetwork\n# My classes\nimport emulator as em\nimport sarfa_saliency\n\nplt.switch_backend('agg')\n\n'''\nConstants\n'''\nC_VERBOSE_NONE = 0 # Printing is disabled\nC_VERBOSE_INFO = 1 # Only information printouts (constructor)\nC_VERBOSE_DEBUG = 2 # Debugging printing level (all printouts)\n\nNUM_STATE = 8\nNUM_ACTION = 4\nNUM_SALIENCY_TESTS = 100\nSALIENCY_PERTURBATION = 0.1\nSTATE_LABELS = [\n 'x_pos', 'y_pos', 'x_vel', 'y_vel',\n 'angle', 'ang_vel', 'left_leg', 'right_leg'\n]\n\n\ndef executionTimeToString(execution_time, digits_precision=3):\n \"\"\"\n Summary:\n Creates a printable string regarding the execution time\n\n Args:\n execution_time: float\n Execution time in seconds\n\n digits_precision: integer\n Defines the precision in the decimal digits\n\n Raises:\n -\n\n Returns:\n time_string: string\n Printable time string\n i.e. 
1 hour 2 minutes 12 seconds 113 milliseconds (3732.113 seconds)\n\n notes:\n -\n \"\"\"\n\n # Three decimal digits accuracy\n execution_time = round(execution_time, digits_precision)\n\n hours = int(execution_time / 3600)\n minutes = int((execution_time - hours * 3600) / 60)\n seconds = int(execution_time - hours * 3600 - minutes * 60)\n milliseconds = int(round(execution_time - int(execution_time), 3) * 1000)\n\n time_string = ''\n\n if hours > 1:\n time_string += str(hours) + ' hours '\n elif hours == 1:\n time_string += str(hours) + ' hour '\n\n if minutes > 1:\n time_string += str(minutes) + ' minutes '\n elif minutes == 1:\n time_string += str(minutes) + ' minute '\n\n if seconds > 1:\n time_string += str(seconds) + ' seconds '\n elif seconds == 1:\n time_string += str(seconds) + ' second '\n\n if milliseconds > 0:\n time_string += str(milliseconds) + ' milliseconds '\n\n time_string += '(' + str(execution_time) + ' seconds)'\n\n return time_string\n\n\ndef applySeed(seed, verbose):\n \"\"\"\n Summary:\n Applies the given seed to the numpy and core python random functions.\n\n Args:\n seed: int\n Seed value.\n\n verbose: int\n Verbose level (0: None, 1: INFO, 2: DEBUG)\n\n Raises:\n -\n\n Returns:\n -\n\n notes:\n -\n \"\"\"\n\n if verbose != C_VERBOSE_NONE:\n print('Apply Random Seed to the execution environment (seed = ', seed, ')', sep='')\n\n # Numpy random function\n import numpy\n numpy.random.seed(seed)\n\n # Python random function\n import random\n random.seed(seed)\n\n\ndef trial(model_file_name, scenario, number_of_trials, rendering=False, graphs_suffix='', verbose=C_VERBOSE_NONE,\n store_history=False, compute_saliency=False, history_save_path='./output/history_test.pkl'):\n \"\"\"\n Summary:\n Evaluate the trained DQN for a number of trials (number_of_trials).\n\n Args:\n model_file_name: string\n The saved trained DQN (Keras DNN h5 file).\n\n scenario: string\n The OpenAI gym scenario to be loaded by the Emulator.\n\n number_of_trials: int\n How many trials to execute.\n\n rendering: boolean\n If True, OpenAI gym environment rendering is enabled.\n\n graphs_suffix: string\n A suffix added in the graphs file names. 
To be used in case of multiple trials.\n\n verbose: int\n Verbose level (0: None, 1: INFO, 2: DEBUG)\n\n store_history: bool\n Store history data or not.\n\n compute_saliency: bool\n Computes saliency or not.\n\n history_save_path: str\n Where to store the history file.\n\n Raises:\n -\n\n Returns:\n trials_average_reward: float\n The average reward for the trial-episode (100 episodes)\n\n notes:\n -\n \"\"\"\n\n if verbose > C_VERBOSE_NONE:\n print('\\nEvaluate the trained DQN in ', str(number_of_trials), ' trials (episodes).', sep='')\n print('- model_file_name = ', model_file_name, ', scenario = ', scenario, ', number_of_trials = ',\n number_of_trials,\n ', rendering = ', rendering, ', graphs_suffix = ', graphs_suffix, sep='')\n\n # Create a Emulator object instance (without a seed)\n emulator = em.Emulator(scenario=scenario, average_reward_episodes=number_of_trials, statistics=True,\n rendering=rendering, seed=42, verbose=verbose)\n\n # Create a Deep Neural Network object instance and load the trained model (model_file_name)\n dnn = deepNeuralNetwork.DeepNeuralNetwork(file_name=model_file_name, verbose=verbose)\n\n # Start measuring Trials time\n start_time = time.time()\n\n history = {\n 'trial': [],\n 'state': [],\n 'action': [],\n 'reward': [],\n 'next_state': [],\n 'done': [],\n 'q_values': []\n }\n if compute_saliency:\n history['saliency'] = []\n\n # Trials\n # used as baseline for perturbation\n # for each feature, apply a random noise of 0.2 * (max(feature) - min(feature))\n state_min = np.array([-0.354871, -0.10391249, -0.468456, -0.89336216, -0.15218297, -0.4017307, 0, 0])\n state_max = np.array([-0.00462484, 1.4088593, 0.12988918, 0.05392841, 0.5564749, 0.8584606, 1, 1])\n for i in range(number_of_trials):\n\n current_state = emulator.start()\n\n while emulator.emulator_started:\n q_values = dnn.predict(current_state)\n action = np.argmax(q_values)\n\n if compute_saliency:\n # compute saliency\n saliency = np.zeros(NUM_STATE)\n for _ in range(NUM_SALIENCY_TESTS):\n for j in range(NUM_STATE):\n # perturb state\n perturbed_state = np.array(current_state)\n if j < 6: # numerical states\n perturbed_state[j] = SALIENCY_PERTURBATION * np.random.rand() \\\n * (state_max[j] - state_min[j]) + state_min[j]\n else: # boolean states\n perturbed_state = current_state.copy()\n perturbed_state[j] = 1 - perturbed_state[j]\n q_values_preturbed = dnn.predict(perturbed_state)\n\n max_q = np.max(q_values)\n q_values /= max_q\n q_values_preturbed /= max_q\n\n q_value_dict = {a: q_values[0, a].astype(np.float64) for a in range(4)}\n q_value_preturbed_dict = {a: q_values_preturbed[0, a].astype(np.float64) for a in range(4)}\n saliency[j] = sarfa_saliency.computeSaliencyUsingSarfa(action,\n q_value_dict,\n q_value_preturbed_dict)[0]\n saliency /= NUM_SALIENCY_TESTS\n\n # Experience [s, a, r, s']\n experience = emulator.applyAction(action)\n\n # save data\n if store_history:\n history['trial'].append(i)\n history['state'].append(current_state)\n history['action'].append(action)\n history['reward'].append(experience[2])\n if experience[3] is not None:\n history['next_state'].append(experience[3])\n history['done'].append(False)\n else:\n history['next_state'].append(current_state)\n history['done'].append(True)\n history['q_values'].append(q_values)\n if compute_saliency:\n history['saliency'].append(saliency)\n\n current_state = experience[3]\n\n if store_history:\n for k in history.keys():\n history[k] = np.array(history[k])\n history_save_dir = os.path.split(history_save_path)[0]\n if not 
os.path.exists(history_save_dir):\n os.makedirs(history_save_dir)\n pd.to_pickle(history, history_save_path)\n\n if verbose > C_VERBOSE_NONE:\n print('\\nDQN ', str(number_of_trials), ' trials average = ', emulator.execution_statistics.values[-1, 3],\n ', in ',\n executionTimeToString(time.time() - start_time), sep='')\n\n return emulator.execution_statistics.values[-1, 3]\n\n\ndef train(scenario, average_reward_episodes, rendering, hidden_layers, hidden_layers_size, memory_size, minibatch_size,\n optimizer_learning_rate, gamma, epsilon_decay_factor, maximum_episodes, model_file_name,\n converge_criteria=None, graphs_suffix='', seed=None, verbose=C_VERBOSE_NONE, store_history=False,\n history_save_path='./output/history_train.pkl'):\n \"\"\"\n Summary:\n Trains a DQN model for solving the given OpenAI gym scenario.\n\n Args:\n scenario: string\n The OpenAI gym scenario to be solved.\n\n average_reward_episodes: int\n On how many concecutive episodes the averaged reward should be calculated.\n\n rendering: boolean\n If True, OpenAI gym environment rendering is enabled.\n\n hidden_layers: int\n The number of hidden layers of the Deep Neural Network. Not including the first\n and last layer.\n\n hidden_layers_size: int\n The size of each hidden layer of the Neural Network.\n\n memory_size: int\n The size of the replay memory feature which will be used by the DQN.\n\n minibatch_size: int\n The minibatch size which will be retrieved randomly from the memory in each\n iteration in the DQN.\n\n optimizer_learning_rate: float\n The Adam optimizer learning rate used in the DNN.\n\n gamma: float\n The discount factor to be used in the equation (3) of [1].\n\n epsilon_decay_factor: float\n The decay factor of epsilon parameter, for each iteration step.\n\n maximum_episodes: int\n The maximum number of episodes to be executed. If DQN converges earlier the training stops.\n\n model_file_name: string\n The file in which the DQN trained model (DNN Keras) should be saved.\n\n converge_criteria: int or None\n The DQN converge criteria (when for converge_criteria concecutive episodes average reward\n is > 200, the DQN assumed that has been converged).\n If None, the training continues till the maximum_episodes is reached.\n\n graphs_suffix: string\n A suffix added in the graphs file names. 
To be used in case of multiple trains.\n\n seed: int\n Optional Seed to be used with the OpenAI gym environment, for results reproducability.\n\n verbose: int\n Verbose level (0: None, 1: INFO, 2: DEBUG)\n\n store_history: bool\n Store history or not.\n\n history_save_path: str\n Where to store the history file.\n\n Raises:\n -\n\n Returns:\n convergence_episode: int\n In which episode the DQN convergences\n\n convergence_time: string (time)\n On how much time the DQN convergences\n\n Rturns None if converge_criteria is None\n\n notes:\n -\n \"\"\"\n\n if verbose > C_VERBOSE_NONE:\n print('\\nDQN Training Starts (scenario = ', scenario, ', average_reward_episodes = ', average_reward_episodes,\n ', rendering = ', rendering,\n ', hidden_layers = ', hidden_layers, ', hidden_layers_size = ', hidden_layers_size, ', memory_size = ',\n memory_size,\n ', minibatch_size = ', minibatch_size, ', optimizer_learning_rate = ', optimizer_learning_rate,\n ', gamma = ', gamma,\n ', epsilon_decay_factor = ', epsilon_decay_factor, ', maximum_episodes = ', maximum_episodes,\n ', model_file_name = ', model_file_name,\n ', converge_criteria = ', converge_criteria, ', graphs_suffix = ', graphs_suffix, ', seed = ', seed, ')',\n sep='')\n\n # If seed is given the apply it\n if seed is not None:\n applySeed(seed, verbose)\n\n # Create a Emulator object instance\n emulator = em.Emulator(scenario, average_reward_episodes, statistics=True, rendering=rendering, seed=seed,\n verbose=verbose)\n\n # Create a Deep Neural Network object instance (Keras with Tensor Flow backend)\n dnn = deepNeuralNetwork.DeepNeuralNetwork(inputs=emulator.state_size, outputs=emulator.actions_number,\n hidden_layers=hidden_layers,\n hidden_layers_size=hidden_layers_size,\n optimizer_learning_rate=optimizer_learning_rate, seed=seed,\n verbose=verbose)\n\n # Create a DQN object instance (we start always from epsilon = 1.0, we control each value with the\n # epsilon_decay_factor\n dqn = deepQNetwork.DeepQNetwork(emulator=emulator, dnn=dnn, states_size=emulator.state_size,\n actions_number=emulator.actions_number,\n memory_size=memory_size, minibatch_size=minibatch_size, gamma=gamma, epsilon=1.0,\n epsilon_decay_factor=epsilon_decay_factor,\n seed=seed, verbose=verbose)\n\n # Start measuring training time\n start_time = time.time()\n\n history = {\n 'trial': [],\n 'state': [],\n 'action': [],\n 'reward': [],\n 'next_state': [],\n 'done': [],\n 'q_values': []\n }\n\n if converge_criteria is not None:\n # Holds how many concecutive episodes average reward is > 200\n convergence_counter = 0\n episodes_convergence_counter = [] # Holds the convergence_counter for all episodes\n convergence_episode = 0\n\n # Training starts here\n for i in range(maximum_episodes):\n current_state = emulator.start()\n\n # See Algorithm 1 in [1]\n while emulator.emulator_started:\n q_values = dnn.predict(current_state)\n action = np.argmax(q_values)\n\n # Experience [s, a, r, s']\n experience = emulator.applyAction(action)\n\n # save data\n if store_history:\n history['trial'].append(i)\n history['state'].append(current_state)\n history['action'].append(action)\n history['reward'].append(experience[2])\n if experience[3] is not None:\n history['next_state'].append(experience[3])\n history['done'].append(False)\n else:\n history['next_state'].append(current_state)\n history['done'].append(True)\n history['q_values'].append(q_values)\n\n dqn.storeTransition(experience)\n dqn.sampleRandomMinibatch()\n\n # s = s' at the end of the step, before starting the new step\n 
current_state = experience[3]\n\n if converge_criteria is not None:\n # Check if convergence counter should be increased or to be reset\n if emulator.average_reward > 200:\n convergence_counter += 1\n else:\n convergence_counter = 0\n\n episodes_convergence_counter.append(convergence_counter)\n\n if verbose > C_VERBOSE_NONE:\n print('Convergence Counter: ', convergence_counter, sep='')\n\n # DQN model assumed that it has been converged\n if convergence_counter >= converge_criteria:\n convergence_episode = i\n break\n\n if store_history:\n for k in history.keys():\n history[k] = np.array(history[k])\n history_save_dir = os.path.split(history_save_path)[0]\n if not os.path.exists(history_save_dir):\n os.makedirs(history_save_dir)\n pd.to_pickle(history, history_save_path)\n\n if converge_criteria is not None:\n convergence_time = time.time() - start_time\n\n if verbose > C_VERBOSE_NONE and converge_criteria is not None:\n print('\\nDQN converged after ', convergence_episode, ' episodes in ', executionTimeToString(convergence_time),\n sep='')\n elif verbose > C_VERBOSE_NONE and converge_criteria is None:\n print('\\nDQN trained for ', maximum_episodes, ' episodes in ', executionTimeToString(time.time() - start_time),\n sep='')\n\n # Create Graphs\n # 1. Steps per Episode\n plt.plot(emulator.execution_statistics.values[:, 0], emulator.execution_statistics.values[:, 1], color='coral',\n linestyle='-')\n plt.grid(b=True, which='major', axis='y', linestyle='--')\n plt.xlabel('Episode', fontsize=12)\n plt.ylabel('Steps', fontsize=12)\n plt.title('Steps per Episode', fontsize=12)\n plt.savefig('Steps_Per_Episode' + graphs_suffix + '.png')\n plt.clf()\n\n # 2. Total Reward per Training Episode\n plt.plot(emulator.execution_statistics.values[:, 0], emulator.execution_statistics.values[:, 2], color='coral',\n linestyle='-',\n label='Total Reward')\n plt.plot(emulator.execution_statistics.values[:, 0], emulator.execution_statistics.values[:, 3],\n color='midnightblue', linestyle='--',\n label='Episodes Reward Average')\n plt.grid(b=True, which='major', axis='y', linestyle='--')\n plt.xlabel('Episode', fontsize=12)\n plt.ylabel('Reward', fontsize=12)\n plt.title('Total Reward per Training Episode', fontsize=12)\n plt.legend(loc='lower right', fontsize=12)\n plt.savefig('Total_Reward_Per_Training_Episode' + graphs_suffix + '.png')\n plt.clf()\n\n # Save the trained model\n dnn.saveModel(model_file_name)\n\n if converge_criteria is not None:\n return convergence_episode\n\n\ndef main():\n # Parse input arguments\n description_message = 'Lunar Lander with DQN'\n\n args_parser = argparse.ArgumentParser(description=description_message,\n formatter_class=argparse.RawTextHelpFormatter)\n\n args_parser.add_argument('-v', action='store', help='verbose level (0: None, 1: INFO, 2: DEBUG)',\n choices=('0', '1', '2'))\n args_parser.add_argument('-e', action='store', required=True, help='execute (train, test)',\n choices=('train', 'test'))\n args_parser.add_argument('-a', action='store', required=False, help='trained agent file')\n args_parser.add_argument('-n', action='store', required=False, default=1, help='number of trials during testing')\n args_parser.add_argument('--rendering', action='store_true', required=False, default=False,\n help='rendering during testing')\n args_parser.add_argument('--store_history', action='store_true', required=False, default=False,\n help='store history during testing')\n args_parser.add_argument('--compute_saliency', action='store_true', required=False, default=False,\n 
help='compute saliency during testing')\n\n args = args_parser.parse_args()\n\n if args.e == 'test' and args.a is None:\n args_parser.error('When executing \\'test\\', the trained agent file (-a) is required.')\n\n # Verbose level (0: None, 1: INFO, 2: DEBUG)\n verbose = C_VERBOSE_NONE if args.v is None else int(args.v)\n\n num_trials = int(args.n)\n rendering = args.rendering\n store_history = args.store_history\n compute_saliency = args.compute_saliency\n\n # Trigger the requested execution type\n if args.e == 'train':\n if verbose:\n print('\\nTrain a DQN using seed = 1 and default convergence criteria.')\n\n # TRAIN WITH SEED = 1, AND converge_criteria = 150\n\n seed = 1\n train(scenario='LunarLander-v2', average_reward_episodes=100, rendering=False, hidden_layers=1,\n hidden_layers_size=64, memory_size=None, minibatch_size=64, optimizer_learning_rate=0.001, gamma=0.99,\n epsilon_decay_factor=0.99995, maximum_episodes=10000, model_file_name='DQN_Trained.h5',\n converge_criteria=150, graphs_suffix='_Conv_150', seed=seed, verbose=verbose, store_history=store_history)\n\n else:\n if verbose:\n print('\\nTest once the trained DQN agent.')\n\n trial(model_file_name=args.a, scenario='LunarLander-v2', number_of_trials=num_trials, rendering=rendering,\n graphs_suffix='', verbose=verbose, store_history=store_history, compute_saliency=compute_saliency)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.pyplot.switch_backend",
"numpy.max",
"numpy.array",
"numpy.random.rand",
"pandas.to_pickle",
"numpy.zeros",
"numpy.random.seed",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"numpy.argmax",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf"
]
]
|
Gjain234/AdaptiveQLearning | [
"4bb9751a3cb76604bdc5ac2225da84e5daa32755"
]
| [
"create_fig_multiple_ambulance.py"
]
| [
"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.patches as patches\nimport pickle\nimport numpy as np\nfrom src import agent\nfrom adaptive_Agent import AdaptiveDiscretization\nfrom eNet_Agent import eNet\nimport pandas as pd\n\nepLen = 5\nnEps = 2000\n\nproblem_type = 'ambulance'\nproblem_list = ['uniform']\nalpha_list = ['0', '0.25', '1']\nplt.rc('font', family='serif')\nplt.rc('xtick', labelsize='x-small')\nplt.rc('ytick', labelsize='x-small')\nplt.rcParams.update({'font.size': 8})\n\nfor alpha in alpha_list:\n for problem in problem_list:\n name_adapt = './data/multiple_ambulance_'+problem+'_adapt_'+ alpha +'.csv'\n name_adapt_stochastic ='./data/multiple_ambulance_'+problem+'_adapt_stochastic_'+ alpha +'.csv'\n name_net = './data/multiple_ambulance_'+problem+'_enet_'+ alpha +'.csv'\n name_net_stochastic = './data/multiple_ambulance_'+problem+'_enet_stochastic_'+ alpha +'.csv'\n #name_obj = './data/multiple_ambulance_'+problem+'_agent_compare.obj'\n fig_name = './figures/multiple_ambulance_'+problem+'_'+alpha +'.png'\n\n #infile = open(name_obj,'rb')\n #agent = pickle.load(infile)\n #infile.close()\n\n\n dt_adapt = pd.read_csv(name_adapt).groupby(['episode']).mean()\n dt_net = pd.read_csv(name_net).groupby(['episode']).mean()\n dt_adapt_stochastic = pd.read_csv(name_adapt_stochastic).groupby(['episode']).mean()\n dt_net_stochastic = pd.read_csv(name_net_stochastic).groupby(['episode']).mean()\n #print(dt_adapt.index.values)\n dt_adapt['episode'] = dt_adapt.index.values\n dt_net['episode'] = dt_net.index.values\n dt_adapt_stochastic['episode'] = dt_adapt_stochastic.index.values\n dt_net_stochastic['episode'] = dt_net_stochastic.index.values\n\n dt_net = dt_net.iloc[::10, :]\n dt_net_stochastic = dt_net_stochastic.iloc[::10, :]\n dt_adapt = dt_adapt.iloc[::10, :]\n dt_adapt_stochastic = dt_adapt_stochastic.iloc[::10, :]\n\n fig = plt.figure(figsize=(10, 10))\n\n # Plot for Comparison of Observed Rewards of Adaptive vs E-Net\n plt.subplot(2,2,1)\n plt.plot(dt_adapt['episode'], dt_adapt['epReward'], label='Adaptive')\n plt.plot(dt_net['episode'], dt_net['epReward'], label = 'Epsilon Net', linestyle='--')\n\n plt.ylim(0,epLen+.1)\n plt.xlabel('Episode')\n plt.ylabel('Observed Reward')\n plt.legend()\n plt.title('Comparison of Observed Rewards of Adaptive vs E-Net')\n\n # Plot for Comparison of Size of Partition of Adaptive vs E-Net\n plt.subplot(2,2,2)\n plt.plot(dt_adapt['episode'], dt_adapt['Number of Balls'])\n plt.plot(dt_net['episode'], dt_net['Number of Balls'], linestyle = '--')\n\n plt.xlabel('Episode')\n plt.ylabel('Size of Partition')\n plt.legend()\n plt.title('Comparison of Size of Partition of Adaptive vs E-Net')\n\n # Plot for Comparison of Observed Rewards of Adaptive vs E-Net (Stochastic)\n plt.subplot(2,2,3)\n\n plt.plot(dt_adapt_stochastic['episode'], dt_adapt_stochastic['epReward'], label='Adaptive')\n plt.plot(dt_net_stochastic['episode'], dt_net_stochastic['epReward'], label = 'Epsilon Net', linestyle='--')\n\n plt.ylim(0,epLen+.1)\n plt.xlabel('Episode')\n plt.ylabel('Observed Reward')\n plt.legend()\n plt.title('Comparison of Observed Rewards of Adaptive vs E-Net (Stochastic)')\n\n # Plot for Comparison of Size of Partition of Adaptive vs E-Net (Stochastic)\n plt.subplot(2,2,4)\n plt.plot(dt_adapt_stochastic['episode'], dt_adapt_stochastic['Number of Balls'])\n plt.plot(dt_net_stochastic['episode'], dt_net_stochastic['Number of Balls'], linestyle = '--')\n\n plt.xlabel('Episode')\n plt.ylabel('Size of Partition')\n plt.legend()\n 
plt.title('Comparison of Size of Partition of Adaptive vs E-Net (Stochastic)')\n\n\n #plt.subplot(1,3,3)\n #tree = agent.tree_list[1]\n #tree.plot(fig)\n #plt.title('Adaptive Discretization for Step 2')\n plt.tight_layout()\n fig.savefig(fig_name, bbox_inches = 'tight',\n pad_inches = 0.01, dpi=900)\n"
]
| [
[
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"pandas.read_csv",
"matplotlib.pyplot.subplot"
]
]
|
ishipachev/UdacitySDCND-CarND-Advanced-Lane-Lines-P4 | [
"a34d728f0062acc29aee312cb3f7b739edcc5c08"
]
| [
"calibrate.py"
]
| [
"import numpy as np\nimport cv2\nimport glob\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nnx = 9\nny = 6\nobjp = np.zeros((ny*nx, 3), np.float32)\nobjp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('camera_cal/calibration*.jpg')\n\n# Step through the list and search for chessboard corners\nfor fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\n # If found, add object points, image points\n if ret is True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (nx, ny), corners, ret)\n # cv2.imshow('img', img)\n # cv2.waitKey(500)\n\ncv2.destroyAllWindows()\n\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\ncamera_params = {\"mtx\": mtx, \"dist\": dist}\nprint(mtx)\nprint(dist)\n\nparams_path = 'output/camera_params.pickle'\n\nwith open(params_path, 'wb') as f:\n # Pickle the 'data' dictionary using the highest protocol available.\n pickle.dump(camera_params, f, pickle.HIGHEST_PROTOCOL)\n\nwith open(params_path, 'rb') as f:\n # The protocol version used is detected automatically, so we do not\n # have to specify it.\n data = pickle.load(f)\n print(data[\"mtx\"])\n print(data[\"dist\"])\n\n\n\nfor fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n dst = cv2.undistort(img, mtx, dist, None, mtx)\n # cv2.imshow('img', dst)\n # cv2.waitKey(500)\n basefname = os.path.basename(fname)\n new_name = os.path.join('output_images', basefname)\n cv2.imwrite(new_name, dst)\n\n\n\ncv2.destroyAllWindows()\n\n"
]
| [
[
"numpy.zeros"
]
]
|
jukiewiczm/guildai | [
"478cc29cb102a8bd0bed693ce9626fe4949257a2"
]
| [
"guild/commands/tensorflow_impl.py"
]
| [
"# Copyright 2017-2019 TensorHub, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sys\n\nfrom guild import cli\n\ndef inspect_checkpoint(args):\n _check_tensorflow()\n if args.file_name.endswith(\".pb\"):\n _inspect_graph(args)\n else:\n _inspect_checkpoint(args)\n\ndef _check_tensorflow():\n try:\n import tensorflow as _\n except ImportError as e:\n _handle_tensorflow_import_error(e)\n\ndef _handle_tensorflow_import_error(e):\n if \"tensorflow\" in str(e):\n cli.out(\n \"TensorFlow is not installed.\\n\"\n \"Refer to https://www.tensorflow.org/install/ for help \"\n \"installing TensorFlow on your system.\", err=True)\n else:\n cli.out(\"Error loading TensorBoard: %s\" % e, err=True)\n cli.error()\n\n\ndef _inspect_graph(args):\n graph = _load_graph(args.file_name)\n for op in graph.get_operations():\n sys.stdout.write(\"%s\\n\" % op.name)\n for out in op.outputs:\n sys.stdout.write(\"%s\\n\" % out.name)\n\ndef _load_graph(filename):\n # pylint: disable=import-error\n import tensorflow as tf\n graph = tf.Graph()\n sess = tf.Session(graph=graph)\n with tf.gfile.FastGFile(filename, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n with sess.graph.as_default():\n tf.import_graph_def(graph_def)\n return graph\n\ndef _inspect_checkpoint(args):\n # pylint: disable=import-error,no-name-in-module\n from tensorflow.python.tools import inspect_checkpoint as inspect\n inspect.FLAGS = args\n inspect.main([])\n"
]
| [
[
"tensorflow.python.tools.inspect_checkpoint.main",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.import_graph_def",
"tensorflow.gfile.FastGFile"
]
]
|
abgcsc/CDANs | [
"7113ed836df1369895054b11c121071faa8392af"
]
| [
"KerasSupplementary.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 06 11:28:39 2016\n\n@author: Drew\n\"\"\"\n\nimport numpy as np\nimport os\nfrom keras.models import model_from_json, Model, Sequential\nfrom keras.layers import merge, Input\nfrom keras.layers.core import Lambda, Masking, Reshape, Dense, Flatten, Dropout\nfrom keras.layers.noise import GaussianNoise\nfrom keras.optimizers import RMSprop\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import backend as K, activations, regularizers, initializations\nfrom keras.engine.topology import Layer, InputSpec\nfrom keras.regularizers import Regularizer\nfrom queryUser import queryUser\nimport sys\nimport marshal\nimport types as python_types\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom theano.tensor.subtensor import take\nimport theano.tensor as T\nimport warnings\n\n\"\"\"\nNote: K.arange does not exist prior to Keras v1.2.0.\n\nTODO: Make compatible with latest version of Keras (2.0+).\n Make compatible with Tensorflow backend (if possible).\n\"\"\"\n\ndef addMaxoutLayer(model, layer, numPieces):\n \"\"\"\n Adds an layer with a maxout wrapper on the output. \n Maxout computes the piecewise maximum of multiple functions (possibly linear).\n The first (non-batch) dimension of the output of the layer must be divisible by\n numPieces, the number of functions whose maximum is being taken.\n\n Masking not supported.\n\n See http://arxiv.org/pdf/1302.4389.pdf. \n \"\"\"\n model = layer(model)\n if numPieces <= 0:\n raise ValueError(\"The number of pieces must be positive.\")\n if numPieces > 1: #otherwise, just do normal layer\n if model._keras_shape[1] % numPieces != 0:\n raise ValueError(\"The output_shape of the given layer must be divisible by numPieces.\")\n model = Reshape((numPieces, model._keras_shape[1]/numPieces)+ model._keras_shape[2:])(model)\n #use a mask-eating lambda instead of normal lambda since normal lambda is bugged in current version of Keras (1.2.1)\n model = MaskEatingLambda(lambda x: K.max(x, axis=1), output_shape = lambda input_shape: (input_shape[0],) + input_shape[2:])(model)\n return model\n\ndef Maxout(input_shape, layer, numPieces):\n \"\"\"\n Given a layer whose output_shape is divisible by numPieces, return a wrapper\n that computes the maximum of every 'numPieces' consecutive outputs. The wrapper\n may be treated as a normal layer, i.e. model = Maxout(Dense(), k)(model).\n\n Masking not supported. Layers with multiple inputs not supported.\n \"\"\"\n if numPieces > 1:\n inputLayer = Input(input_shape)\n maxout = addMaxoutLayer(inputLayer, layer, numPieces)\n maxout = Model(inputLayer, maxout)\n return maxout\n else:\n return layer\n\ndef addResidualLayer(model, layer, identityMapping=True, **kwargs):\n \"\"\"\n Add a residual connection between the model's current output layer and the output after the given\n layer is added to the model.\n\n If the output_shape does not change after adding the layer and the identityMapping argument is True,\n then the identity mapping is used.\n Otherwise, a linear projection is used and expected to be learned during training.\n\n Masking not supported. 
See addDenseResidualLayers().\n\n See https://arxiv.org/pdf/1512.03385.pdf.\n \"\"\"\n return addDenseResidualLayers(model, [layer], identityMapping, **kwargs)\n\ndef Residual(input_shape, layer, identityMapping=True, **kwargs):\n return DenseResidual(input_shape, [layer], identityMapping, **kwargs)\n\ndef addDenseResidualLayers(model, layers, identityMappings=[True], **kwargs):\n \"\"\"\n Add a series of layers such that there is a residual feed-forward connection between each pair of \n layers that are to be added to the given model.\n\n For n layers, results in n(n-1)/2 residual connections.\n \n If the output_shape does not change after adding a layer and the identityMapping argument is True,\n then the identity mapping is used.\n Otherwise, a linear projection is used and expected to be learned during training.\n\n If a given layer has a Dropout object as its final sublayer, then the residual connection is incorporated\n in such a way that it will share the same Dropout object.\n\n Masking is not supported, nor are layers with multiple inputs or outputs.\n\n TODO: Make general function that takes adjacency list between layers to determine residual connections.\n\n See https://arxiv.org/abs/1608.06993.\n \"\"\"\n if isinstance(identityMappings, bool):\n identityMappings = [identityMappings]\n try:\n identityMappings = list(identityMappings)\n except:\n raise AttributeError(\"identityMappings must be boolean or iterable.\")\n if len(identityMappings) == 1:\n identityMappings = [identityMappings[0] for layer in layers]\n elif len(identityMappings) != len(layers):\n raise ValueError(\"'identityMappings' must be the same length as 'layers'\")\n #keep a list of intermediate outputs for each layer of the model\n intermediateModels = [model]\n #model will keep track of the topmost output including all residual connections added so far\n for layer in layers:\n #try to transparently remove Dropout layer\n dropoutLayer = None\n if isinstance(layer.layers[-1], Dropout):\n dropoutLayer = layer.layers[-1]\n #should not be possible for Dropout to be preceded by multiple inbound nodes\n assert(len(layer.nodes_by_depth[1]) == 1)\n layer = Model(layer.input, layer.nodes_by_depth[1][0].output_tensors) \n normalModel = layer(model)\n #add residual connection to each previous layer\n residualModels = []\n for residualModel, identityMapping in zip(intermediateModels, identityMappings):\n if residualModel._keras_shape != normalModel._keras_shape or not identityMapping:\n reshape = False\n if residualModel.ndim >= 3:\n reshape = True\n residualModel = Flatten()(residualModel)\n residualModel = Dense(np.prod(normalModel._keras_shape[1:]), activation = 'linear', **kwargs)(residualModel)\n if reshape:\n residualModel = Reshape(normalModel._keras_shape[1:])(residualModel)\n residualModels += [residualModel]\n model = merge(residualModels + [normalModel], mode = 'sum')\n #reapply Dropout to sum if appropriate\n if dropoutLayer is not None:\n model = dropoutLayer(model)\n intermediateModels += [model]\n return model\n\ndef DenseResidual(input_shape, layers, identityMappings=[True], **kwargs):\n inputLayer = Input(input_shape)\n residual = addDenseResidualLayers(inputLayer, layers, identityMappings, **kwargs)\n residual = Model(inputLayer, residual)\n return residual\n\ndef addPermutationalLayer(model, f, pooling_mode = 'ave'):\n \"\"\"\n Add (the equivalent of) a permutational layer to the given model, which provides\n a permutation equivariant output based upon pairwise combinations of the input.\n\n Certain 
pairs are pooled to limit the size of the output and are controlled by the \n 'pooling_mode' parameter, which is one of 'ave', 'sum', 'max', or 'min'.\n\n The provided function 'f' is applied to each pairwise combination and should be a Keras model\n or layer that supports masking, provided that the given model has masked output.\n\n See http://arxiv.org/1612.04530.\n \"\"\"\n input_shape = model._keras_shape\n model = Pairwise()(model)\n model = f(model)\n model = MaskedPooling(pool_size = input_shape[1], stride = input_shape[1], mode = pooling_mode)(model)\n return model\n\ndef addRealGaussianNoise(model, sigma, masking, maskValue = 0):\n \"\"\"\n Add a layer to a model that adds Gaussian noise with standard deviation sigma \n and ignores timesteps that would normally be masked even if no mask is present.\n \"\"\"\n if not masking: #need to ensure that padded zeros do not get corrupted by noise\n model = Masking(maskValue)(model)\n model = GaussianNoise(sigma)(model)\n if not masking:\n model = MaskEatingLambda(lambda x, mask: K.switch(K.expand_dims(mask, -1), x, maskValue), \n lambda input_shape: input_shape)(model)\n return model\n\nclass MeanWeightRegularizer(Regularizer):\n \"\"\"\n Implements the version of l2 regularization that I was unwittingly using\n with Keras 1.0.6. This uses the mean instead of the sum and is therefore\n not technically l2 regularization but it is similar. Requires a significantly\n different range of values for l1 and l2. \n Pros: Values are mostly model independent.\n Cons: Effects probably not as well understood as actual l1/l2 regularization.\n \"\"\"\n def __init__(self, l1=0., l2=0.):\n self.l1 = K.cast_to_floatx(l1)\n self.l2 = K.cast_to_floatx(l2)\n self.uses_learning_phase = True\n\n def set_param(self, p):\n self.p = p\n\n def __call__(self, loss):\n if not hasattr(self, 'p'):\n raise Exception('Need to call `set_param` on '\n 'MeanWeightRegularizer instance '\n 'before calling the instance. '\n 'Check that you are not passing '\n 'a MeanWeightRegularizer instead of an '\n 'ActivityRegularizer '\n '(i.e. activity_regularizer=\"ml2\" instead '\n 'of activity_regularizer=\"activity_l2\".')\n regularized_loss = loss + K.mean(K.abs(self.p)) * self.l1\n regularized_loss += K.mean(K.square(self.p)) * self.l2\n return K.in_train_phase(regularized_loss, loss)\n\n def get_config(self):\n return {'name': self.__class__.__name__,\n 'l1': float(self.l1),\n 'l2': float(self.l2)}\n\ndef ml2(l=0.01):\n return MeanWeightRegularizer(l2=l)\n\nclass PermutationEquivariant(Layer):\n \"\"\"\n Implements the permutation equivariant layer of Ravanbahksh et al. 
for use with sets and point clouds.\n\n Specifically, implements Equation 5,\n\n \\sigma(\\beta + (x-\\mathbf{1}\\transpose{\\max_n x})\\Gamma),\n\n where x is n by k, \\max_n x is K by 1, \\mathbf{1} is n by 1, \\beta is k^\\prime by 1, and \\Gamma is k by k^\\prime.\n \\beta and \\Gamma are the trainable parameters\n\n Currently supports a built-in version of maxout with k pieces, specified by the keyword maxout = k.\n\n See http://arxiv.org/1611.04500 (version 3)\n \"\"\"\n def __init__(self, output_dim, init = 'glorot_uniform', activation = None, \n Gamma_regularizer = None, beta_regularizer = None,\n maxout = 1, **kwargs):\n self.init = initializations.get(init)\n self.supports_masking = True\n self.uses_learning_phase = True\n self.output_dim = output_dim\n self.activation = activations.get(activation)\n self.maxout = maxout\n self.Gamma_regularizer = regularizers.get(Gamma_regularizer)\n self.beta_regularizer = regularizers.get(beta_regularizer)\n return super(PermutationEquivariant, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n #output channel weights\n self.Gamma = self.add_weight(name = 'Gamma',\n shape = (input_shape[2], self.output_dim * self.maxout),\n initializer = self.init,\n regularizer = self.Gamma_regularizer,\n trainable = True)\n #bias\n self.beta = self.add_weight(name = 'beta',\n shape = (self.output_dim * self.maxout, ),\n initializer = self.init,\n regularizer = self.beta_regularizer,\n trainable = True)\n super(PermutationEquivariant, self).build(input_shape)\n \n def get_output_shape_for(self, input_shape):\n assert input_shape and len(input_shape) == 3\n return (input_shape[0], input_shape[1], self.output_dim)\n\n def compute_mask(self, input, input_mask = None):\n return input_mask\n\n def call(self, x, mask = None):\n if mask is None:\n output = x - K.repeat(TimeDistributedMerge(False, mode = 'max')(x), self.input_spec[0].shape[1])\n else:\n output = x - K.repeat(TimeDistributedMerge(True, mode = 'max')(x, mask), self.input_spec[0].shape[1])\n output = K.dot(output, self.Gamma)\n output = self.beta + output\n if self.activation is not None:\n output = self.activation(output)\n if self.maxout > 1:\n output = K.max(K.reshape(output, (-1, self.input_spec[0].shape[1], self.output_dim, self.maxout)), axis = -1)\n return output\n\n def get_config(self):\n config = {\n 'output_dim': self.output_dim,\n 'init': self.init.__name__,\n 'activation': self.activation.__name__,\n 'maxout': self.maxout,\n 'Gamma_regularizer': self.Gamma_regularizer.get_config() if self.Gamma_regularizer else None,\n 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None,\n }\n base_config = super(PermutationEquivariant, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\nclass Pairwise(Layer):\n \"\"\"\n Compute all pairwise combinations of timesteps (axis 1).\n A pairwise combination is a concatenation of the features of one timestep with those of another.\n If the input has shape (None, N, M, ...), returns shape (None, N*N, 2*M, ...).\n\n TODO: Add support for other axes.\n \"\"\"\n def __init__(self, **kwargs):\n self.supports_masking = True\n self.uses_learning_phase = False\n return super(Pairwise, self).__init__(**kwargs)\n \n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n if len(input_shape) < 3:\n raise ValueError(\"The number of dimensions of each sample must be greater than or equal to 2\")\n super(Pairwise, 
self).build(input_shape)\n\n def get_output_shape_for(self, input_shape):\n return (input_shape[0], input_shape[1]*input_shape[1], 2*input_shape[2]) + input_shape[3:]\n \n def compute_mask(self, x, input_mask = None):\n \"\"\"\n Treat each sample's mask as a vector and compute outer product with itself.\n Then flatten the resulting outer product.\n \"\"\"\n if input_mask is not None:\n return K.reshape(K.expand_dims(input_mask, 1)*K.expand_dims(input_mask, -1), \n (input_mask.shape[0], input_mask.shape[1]*input_mask.shape[1]))\n else:\n return None\n\n def call(self, x, mask = None):\n \"\"\"\n Compute all pairwise combinations of timesteps, ignoring the mask.\n \"\"\"\n input_shape = self.input_spec[0].shape\n x2 = K.repeat_elements(x, input_shape[1], axis=1)\n x2 = K.reshape(x2, (-1, input_shape[1], input_shape[1])+input_shape[2:])\n x3 = K.permute_dimensions(x2, [0, 2, 1] + range(3, K.ndim(x2)))\n x2 = K.reshape(x2, (x.shape[0], -1) + input_shape[2:])\n x3 = K.reshape(x3, (x.shape[0], -1) + input_shape[2:])\n x4 = K.concatenate([x2, x3], axis = 2)\n return x4\n\nclass Reverse(Layer):\n \"\"\"\n Reverse the given tensor along the specified axis. \n If a mask is given and reverseMask is True, the mask is also reversed along the same axis.\n Note that since masks only work along the time axis (axis 1),\n any other given axis has no practical effect on the mask.\n Not compatible with Tensorflow backend.\n \"\"\"\n def __init__(self, axis = 1, reverseMask = True, **kwargs):\n if not isinstance(axis, (int, long)):\n warnings.warn('Attempting to cast provided axis to integer.')\n axis = int(axis)\n if axis == 0:\n raise ValueError('Cannot reverse the batch dimension (axis 0).')\n elif axis < 0:\n raise ValueError('Provided axis must be a positive integer.')\n self.axis = axis\n self.reverseMask = reverseMask\n self.supports_masking = True\n self.uses_learning_phase = False\n super(Reverse, self).__init__(**kwargs)\n\n def compute_mask(self, x, input_mask = None):\n if input_mask is not None:\n if self.axis == 1 and self.reverseMask: #masks are per timestep (dimension 1)\n return take(input_mask, T.arange(input_mask.shape[self.axis]-1, -1, -1), self.axis)\n else: #therefore, reversing other dimensions has no effect on the mask\n return input_mask\n else:\n return None\n\n def call(self, x, mask = None):\n return take(x, T.arange(x.shape[self.axis]-1, -1, -1), self.axis)\n\n def get_config(self):\n config = {'axis': self.axis}\n base_config = super(Reverse, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\nclass MaskedPooling(Layer):\n \"\"\"\n The built-in pooling functions of Keras (as of 4/3/2017) do not appear\n to support masking. 
Input is implicitly padded with masked zeros.\n\n Pools exclusively along the time dimension for now.\n Not compatible with Tensorflow backend.\n \"\"\"\n def __init__(self, pool_size = 2, stride = 1, mode = 'max', **kwargs):\n modes = ['sum', 'ave', 'max', 'min']\n if pool_size >= 2:\n self.pool_size = pool_size\n else:\n raise ValueError(\"'pool_size' must be greater than or equal to 2.\")\n if stride >= 1:\n self.stride = stride\n else:\n raise ValueError(\"'stride' must be greater than or equal to 1.\")\n if mode in modes:\n self.mode = mode\n else:\n raise ValueError('Illegal pooling mode provided: ' + str(mode) + '.')\n self.supports_masking = True\n self.uses_learning_phase = False\n super(MaskedPooling, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n super(MaskedPooling, self).build(input_shape)\n\n def get_output_shape_for(self, input_shape):\n return (input_shape[0], (input_shape[1]-1)/self.stride+1) + input_shape[2:]\n\n def compute_mask(self, input, input_mask = None):\n if input_mask is not None:\n input_shape = self.input_spec[0].shape\n # If the pool contains at least one unmasked input (nonzero mask), \n # then the output is unmasked (nonzero).\n numPools = (input_shape[1]-1)/self.stride+1\n if numPools == 1:\n output_mask = K.expand_dims(K.sum(input_mask[:, T.arange(0, K.minimum(self.pool_size, input_shape[1]))], axis = 1), -1)\n else:\n poolStarts = K.expand_dims(K.expand_dims(T.arange(0, input_shape[1], self.stride), -1), 0)\n _, pools, _ = K.rnn(lambda p, t: (K.expand_dims(K.sum(t[-1][:, T.arange(p[0,0],K.minimum(p[0,0]+self.pool_size,input_shape[1]))], axis = 1), 0), []),\n poolStarts, [], False, None, [input_mask], unroll = True, input_length = numPools)\n output_mask = pools\n #for poolStart in poolStarts:\n # pools += [K.sum(input_mask[:,poolStart:(poolStart+self.pool_size)], axis = 1)]\n #output_mask = K.concatenate(pools, axis = 1)\n return K.switch(output_mask, 1, 0)\n else:\n return None\n\n def call(self, x, mask = None):\n input_shape = self.input_spec[0].shape\n x = K.reshape(x, (x.shape[0], x.shape[1], -1))\n numPools = (input_shape[1]-1)/self.stride+1\n poolStarts = K.expand_dims(K.expand_dims(T.arange(0, input_shape[1], self.stride), -1), 0)\n if mask is not None:\n _, pools, _ = K.rnn(lambda p, t: (K.expand_dims(TimeDistributedMerge(True, self.mode)(\n t[-2][:, T.arange(p[0,0],K.minimum(p[0,0]+self.pool_size,input_shape[1])), :],\n t[-1][:, T.arange(p[0,0],K.minimum(p[0,0]+self.pool_size,input_shape[1]))]\n ), \n 0), []),\n poolStarts, [], False, None, [x, mask], unroll = True, input_length = numPools)\n else:\n _, pools, _ = K.rnn(lambda p, t: (K.expand_dims(TimeDistributedMerge(False, self.mode)(\n t[-1][:, T.arange(p[0,0],K.minimum(p[0,0]+self.pool_size,input_shape[1])), :]\n ), \n 0), []),\n poolStarts, [], False, None, [x], unroll = True, input_length = numPools)\n if numPools == 1:\n pools = K.expand_dims(pools, -1)\n output = pools\n #if mask is not None:\n # # If the pool contains at least one unmasked input (nonzero mask), \n # # then the output is unmasked (nonzero).\n # masks = []\n # for poolStart in poolStarts:\n # masks += [mask[:,poolStart:(poolStart+self.pool_size)]]\n #else:\n # masks = [K.cast_to_floatx(1) for poolStart in poolStarts]\n #pools = []\n #for poolStart, poolmask in zip(poolStarts, masks):\n # pool = x[:, poolStart:(poolStart+self.pool_size), :]\n # pools += [TimeDistributedMerge(mask is not None, self.mode)(pool, poolmask)]\n #output = K.concatenate(pools, axis 
= 1)\n return K.reshape(output, (x.shape[0], numPools)+input_shape[2:])\n \n def get_config(self):\n config = {'stride': self.stride,\n 'pool_size': self.pool_size,\n 'mode': self.mode}\n base_config = super(MaskedPooling, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\nclass Sort(Layer):\n \"\"\"\n Sort the input along the second to last axis according to the \n sorted order of input[:,:,:,0]. \n For sorting 3D marker positions by their X value.\n Support for other axes and sorting by lambdas may be added in the future.\n Masked inputs are treated as though they are larger than any unmasked inputs,\n although this only applies if the input is a 3D array.\n Not compatible with the TensorFlow backend.\n \"\"\"\n def __init__(self, **kwargs):\n self.supports_masking = True\n self.uses_learning_phase = False\n super(Sort, self).__init__(**kwargs)\n \n def compute_mask(self, x, input_mask = None):\n if input_mask is not None:\n if K.ndim(x) == 3: #masks are per timestep (dimension 1)\n #all masked values are moved to the end\n #based on http://stackoverflow.com/questions/43010969/numpy-matrix-move-all-0s-to-the-end-of-each-row\n flipped_mask = T.gt(K.sum(input_mask, axis = 1, keepdims = 1), T.arange(input_mask.shape[1]-1, -1, -1))\n flipped_mask = take(flipped_mask, T.arange(flipped_mask.shape[1]-1, -1, -1), 1)\n input_mask = T.set_subtensor(input_mask[flipped_mask.nonzero()], input_mask[input_mask.nonzero()])\n input_mask = T.set_subtensor(input_mask[K.switch(flipped_mask, 0, 1).nonzero()], 0)\n return input_mask\n else: #therefore, sorting other dimensions has no effect on the mask\n return input_mask\n else:\n return None\n\n def call(self, x, mask = None):\n if mask is not None and K.ndim(x) == 3:\n #replace masked values with large values\n modX = K.max(x)+K.cast_to_floatx(1)\n modX = K.switch(K.expand_dims(mask, -1), x, modX)\n indices = T.argsort(modX, axis = -2)\n else: #mask has no effect even if it exists\n indices = T.argsort(x, axis = -2)\n # don't know how to do this without reshaping\n input_shape = x.shape\n indices = K.reshape(take(indices, 0, -1), (-1, input_shape[-2]))\n x = K.reshape(x, (-1, input_shape[-2], input_shape[-1]))\n x = x[K.expand_dims(T.arange(input_shape[0]), -1), indices]\n return K.reshape(x, input_shape)\n\n def get_config(self):\n return super(Sort, self).get_config()\n\ndef func_dump(func):\n \"\"\"\n Adapted from Keras fix #2814 (can be found in generic_utils in a later release). \n Didn't want to upgrade at this time (1.0.7) in case of compatibility issues.\n Serializes a lambda function.\n \"\"\"\n py3 = sys.version_info[0] == 3\n if py3:\n code = marshal.dumps(func.__code__).replace(b'\\\\',b'/').decode('raw_unicode_escape')\n defaults = func.__defaults__\n if func.__closure__:\n closure = tuple([c.cell_contents for c in func.__closure__])\n else:\n closure = None\n else:\n code = marshal.dumps(func.func_code).replace(b'\\\\',b'/').decode('raw_unicode_escape')\n defaults = func.func_defaults\n if func.func_closure:\n closure = tuple([c.cell_contents for c in func.func_closure])\n else:\n closure = None\n return code, defaults, closure\n\ndef func_load(code, defaults=None, closure=None, globs=None):\n \"\"\"\n Adapted from Keras fix #2814 (can be found in generic_utils in a later release). 
\n Didn't want to upgrade at this time (1.0.7) in case of compatibility issues.\n Deserializes a lambda function.\n \"\"\"\n py3 = sys.version_info[0] == 3\n if isinstance(code, (tuple, list)): # unpack previous dump\n code, defaults, closure = code\n code = marshal.loads(code.encode('raw_unicode_escape'))\n if closure is not None:\n closure = func_reconstruct_closure(closure)\n if globs is None:\n globs = globals()\n return python_types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure)\n\ndef func_reconstruct_closure(values):\n '''Deserialization helper that reconstructs a closure.'''\n nums = range(len(values))\n src = [\"def func(arg):\"]\n src += [\" _%d = arg[%d]\" % (n, n) for n in nums]\n src += [\" return lambda:(%s)\" % ','.join([\"_%d\" % n for n in nums]), \"\"]\n src = '\\n'.join(src)\n try:\n exec(src)\n except:\n raise SyntaxError(src)\n py3 = sys.version_info[0] == 3\n return func(values).__closure__ if py3 else func(values).func_closure\n\nclass MaskEatingLambda(Layer):\n \"\"\"\n Saw references to this class, but cannot find it anywhere.\n Try to recreate. \n Removes a mask from the pipeline while performing a custom operation.\n Takes two functions as arguments. The first, 'function', computes the output of \n the layer given the input and an optional mask (see definition of call).\n The second, 'output_shape', computes the shape of the output as a function of \n the shape of the input. If not provided, the input_shape is the default value.\n \"\"\"\n def __init__(self, function, output_shape = None, **kwargs):\n self.function = function\n self.supports_masking = True\n self.uses_learning_phase = False\n self._output_shape = output_shape\n super(MaskEatingLambda, self).__init__(**kwargs)\n\n # do not return a mask. 
Eat it.\n def compute_mask(self, x, mask=None):\n return None\n\n def call(self, x, mask=None):\n if mask is None:\n return self.function(x)\n else:\n return self.function(x, mask)\n \n #from Keras source for normal Lambda layer\n def get_output_shape_for(self, input_shape):\n if self._output_shape is None:\n # if TensorFlow, we can infer the output shape directly:\n if K._BACKEND == 'tensorflow':\n if type(input_shape) is list:\n xs = [K.placeholder(shape=shape) for shape in input_shape]\n x = self.call(xs)\n else:\n x = K.placeholder(shape=input_shape)\n x = self.call(x)\n if type(x) is list:\n return [K.int_shape(x_elem) for x_elem in x]\n else:\n return K.int_shape(x)\n # otherwise, we default to the input shape\n return input_shape\n elif type(self._output_shape) in {tuple, list}:\n nb_samples = input_shape[0] if input_shape else None\n return (nb_samples,) + tuple(self._output_shape)\n else:\n shape = self._output_shape(input_shape)\n if type(shape) not in {list, tuple}:\n raise Exception('output_shape function must return a tuple')\n return tuple(shape)\n \n def get_config(self):\n if isinstance(self.function, python_types.LambdaType):\n function = func_dump(self.function)\n function_type = 'lambda'\n else:\n function = self.function.__name__\n function_type = 'function'\n\n if isinstance(self._output_shape, python_types.LambdaType):\n output_shape = func_dump(self._output_shape)\n output_shape_type = 'lambda'\n elif callable(self._output_shape):\n output_shape = self._output_shape.__name__\n output_shape_type = 'function'\n else:\n output_shape = self._output_shape\n output_shape_type = 'raw'\n\n config = {'function': function,\n 'function_type': function_type,\n 'output_shape': output_shape,\n 'output_shape_type': output_shape_type}\n base_config = super(MaskEatingLambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n function_type = config.pop('function_type')\n if function_type == 'function':\n function = globals()[config['function']]\n elif function_type == 'lambda':\n function = func_load(config['function'], globs = globals())\n else:\n raise Exception('Unknown function type: ' + function_type)\n\n output_shape_type = config.pop('output_shape_type')\n if output_shape_type == 'function':\n output_shape = globals()[config['output_shape']]\n elif output_shape_type == 'lambda':\n output_shape = func_load(config['output_shape'], globs = globals())\n else:\n output_shape = config['output_shape']\n\n config['function'] = function\n config['output_shape'] = output_shape\n return cls(**config)\n\ndef TimeDistributedMerge(isMasked = False, mode = 'ave'):\n \"\"\"\n Merge along the time-axis (dimension 1, 0-indexed). Takes a mask to \n determine which time-samples to ignore in the merge. 
\n\n This effectively performs a sort of pooling operation depending on the mode.\n \"\"\"\n modes = ['sum', 'ave', 'max', 'min', 'concat']\n if mode in modes:\n if isMasked:\n if mode == 'ave':\n # make so that when the mask is all 0, the output is 0.\n def ave(x, mask):\n sums = K.cast(K.sum(mask, axis=-1, keepdims=True), 'float32')\n sums = K.switch(sums, sums, K.ones_like(sums))\n return K.batch_dot(x,mask, axes=1) / sums\n return ave\n #return lambda x, mask: K.batch_dot(x,mask, axes=1) / K.cast(K.sum(mask, axis=-1, keepdims=True), 'float32')\n elif mode =='concat':\n return lambda x, mask: K.batch_flatten(x) #ignores mask so that output is fixed size\n elif mode == 'sum':\n return lambda x, mask: K.batch_dot(x, mask, axes=1)\n elif mode == 'max':\n def max(x, mask):\n #wherever the input is masked, set it equal to the minimum\n mins = K.min(x, axis=1, keepdims = True)\n # expand mask (replace zeros in x with 1 so that 0 = masked)\n mask = K.expand_dims(mask)*K.switch(x, x, K.ones_like(x))\n mins = K.switch(mask, x, mins) # replace values\n #now masked values have no effect on the maximum\n return K.max(mins, axis=1)\n return max\n else:\n def min(x, mask):\n #wherever the input is masked, set it equal to the maximum\n maxes = K.max(x, axis=1, keepdims = True)\n # expand mask (replace zeros in x with 1 so that 0 = masked)\n mask = K.expand_dims(mask)*K.switch(x, x, K.ones_like(x))\n maxes = K.switch(mask, x, maxes)#replace values\n #now masked values have no effect on the minimum\n return K.min(maxes, axis=1)\n return min\n else:\n if mode == 'ave':\n return lambda x: K.mean(x, axis=1)\n elif mode == 'concat':\n return lambda x: K.batch_flatten(x)\n elif mode == 'sum':\n return lambda x: K.cast(K.sum(x, axis=1), 'float32')\n elif mode == 'max':\n return lambda x: K.max(x, axis=1)\n else:\n return lambda x: K.min(x, axis=1)\n else:\n raise ValueError('Illegal merge mode provided:' + str(mode))\n \n \n \nclass SimultaneousDropout(Layer):\n \"\"\"\n Applies Dropout to the input on a temporal basis. Dropout consists in randomly setting\n a fraction `p` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n Simultaneous dropout is an extension of this idea which drops random features across all timesteps.\n # Arguments\n p: float between 0 and 1. Fraction of the features to drop.\n rescale: boolean. Scale remaining timesteps by the percentage dropped. \n If there were n timesteps and r were dropped, multiplies remaining\n timesteps by n/(n-r). \n If r=n, does nothing (activation is zero regardless).\n # References\n - [Deep Unordered Composition Rivals Syntactic Methods for Text Classification]\n \"\"\"\n def __init__(self, p, seed=None, **kwargs):\n self.p = p\n self.seed = seed\n if 0. < self.p < 1.:\n self.uses_learning_phase = True\n self.supports_masking = True\n super(SimultaneousDropout, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape = input_shape)]\n return super(SimultaneousDropout, self).build(input_shape)\n\n def call(self, x, mask=None):\n if 0. 
< self.p < 1.:\n noise_shape = (x.shape[0], 1) + self.input_spec[0].shape[2:]\n\n def dropped_inputs():\n return K.dropout(x, self.p, noise_shape, seed=self.seed)\n x = K.in_train_phase(dropped_inputs, lambda: x)\n return x\n\n def get_config(self):\n config = {'p': self.p}\n base_config = super(SimultaneousDropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n \nclass WordDropout(Layer):\n \"\"\"\n Applies Dropout to the input on a temporal basis. Dropout consists in randomly setting\n a fraction `p` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n Word dropout is an extension of this idea which drops random timesteps or \n word embeddings, especially for use with deep averaging networks.\n # Arguments\n p: float between 0 and 1. Fraction of the input units to drop.\n rescale: boolean. Scale remaining timesteps by the percentage dropped. \n If there were n timesteps and r were dropped, multiplies remaining\n timesteps by n/(n-r). \n If r=n, does nothing (activation is zero regardless).\n # References\n - [Deep Unordered Composition Rivals Syntactic Methods for Text Classification]\n \"\"\"\n def __init__(self, p, rescale, **kwargs):\n self.p = p\n self.rescale = rescale \n if 0. < self.p < 1.:\n self.uses_learning_phase = True\n self.supports_masking = True\n super(WordDropout, self).__init__(**kwargs)\n\n def call(self, x, mask=None):\n # adapt normal dropout code (Theano only)\n def wdropout(x, level, mask=None, seed=None, rescale=False):\n if level < 0. or level >= 1:\n raise Exception('Dropout level must be in interval [0, 1[.')\n if seed is None:\n seed = np.random.randint(1, 10e6)\n rng = RandomStreams(seed=seed)\n retain_prob = 1. - level\n dropoutMask = K.expand_dims(rng.binomial((x.shape[0],x.shape[1]), p=retain_prob, dtype=x.dtype))\n x *= dropoutMask\n #x /= retain_prob # this rescaling is part of the dropout algorithm,\n # but we aren't exactly doing real dropout with the same purpose in mind;\n # therefore...don't do this? \n if rescale:\n # scale so that overall activation per sample is maintained\n # first calculate n-r\n # now calculate n\n if mask is not None:\n #restrict to dropped real outputs\n emask = K.expand_dims(mask)\n scale = K.cast(K.sum(dropoutMask*emask, axis=1, keepdims = True), 'float32')\n scale /= K.cast(K.sum(emask, axis=1, keepdims=True), 'float32')\n else:\n scale = K.cast(K.sum(dropoutMask, axis=1, keepdims = True), 'float32')\n scale /= x.shape[1]\n #avoid division by 0. If not zero divide. Otherwise do nothing.\n x = K.switch(scale, x/scale, x)\n return x\n if 0. < self.p < 1.:\n x = K.in_train_phase(wdropout(x, level=self.p, mask=mask), x)\n return x\n \n\n def get_config(self):\n config = {'p': self.p, 'rescale': self.rescale}\n base_config = super(WordDropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\ndef accuracy(y_true, y_pred, binaryClassMatrix=False, sample_mask=None):\n \"\"\"\n Compute the accuracy given true and predicted class labels.\n \n If 'binaryClassMatrix' is true, then both y_true and y_pred are matrices\n of shape (numSamples, ..., numClasses). 
A binary array 'sample_mask' must\n also be provided to determine which samples to ignore as filler data, where\n a weight of 0 (or False) indicates filler data.\n \"\"\"\n if binaryClassMatrix:\n if sample_mask is not None:\n y_true = np.argmax(y_true, axis=-1)\n y_pred = np.argmax(y_pred, axis=-1)\n y_true[sample_mask == 0] = -1\n else:\n raise ValueError('Sample masks must be provided for each target if binaryClassMatrix is True.')\n return float((y_true == y_pred).sum())/float((y_true>=0).sum())\n \ndef balancedAccuracy(y_true, y_pred, binaryClassMatrix=False, sample_mask=None):\n \"\"\"\n Compute the accuracy given true and predicted class labels. Normalizes each\n sample to have the same weight.\n \n If 'binaryClassMatrix' is true, then both y_true and y_pred are matrices\n of shape (numSamples, ..., numClasses). A binary array 'sample_mask' must\n also be provided to determine which samples to ignore as filler data, where\n a weight of 0 (or False) indicates filler data.\n \"\"\"\n if binaryClassMatrix:\n if sample_mask is not None:\n y_true = np.argmax(y_true, axis=-1)\n y_pred = np.argmax(y_pred, axis=-1)\n y_true[sample_mask == 0] = -1\n else:\n raise ValueError('Sample weights must be provided for each target if binaryClassMatrix is True.')\n weights = (y_true>=0).sum(axis=1)\n return np.mean(((y_true == y_pred).T.astype('float32')/weights).sum(axis=0))\n \ndef weightedAccuracy(y_true, y_pred, binaryClassMatrix=False, sample_mask=None,\n forgetFactor = 0, initialWeight = 0.01):\n \"\"\"\n The last term in each sequence is given a weight of 1. Prior terms are \n weighted by their successors' weight multiplied by the forget factor.\n If no forgetFactor is provided, then it is automatically determined for \n each sequence by the initialWeight, which is the weight given to the first\n term in each sequence.\n \n By default (with a forget factor of 0), the accuracy only considers the \n final frame of each sequence and thus serves as a measure of how well the\n network eventually gets the right answer.\n \"\"\"\n if binaryClassMatrix:\n if sample_mask is not None:\n y_true = np.argmax(y_true, axis=-1)\n y_pred = np.argmax(y_pred, axis=-1)\n y_true[sample_mask == 0] = -1\n else:\n raise ValueError('Sample weights must be provided for each target if binaryClassMatrix is True.')\n lengths = (y_true>=0).sum(axis=1)\n if forgetFactor is None:\n weights = [np.concatenate((np.power(np.power(initialWeight, 1.0/length), np.array(range(length-1, -1, -1))),\n np.zeros((y_true.shape[-1]-length,)))) for length in lengths]\n weights = np.asarray(weights)\n else:\n weights = [np.concatenate((np.power(forgetFactor, np.array(range(length-1, -1, -1))),\n np.zeros((y_true.shape[-1]-length,)))) for length in lengths]\n weights = np.asarray(weights)\n return ((y_true == y_pred).T.astype('float32')*weights.T).sum()/weights.sum()\n \ndef weightSamplesByTimeDecay(y_true, binaryClassMatrix=False, sample_mask=None,\n forgetFactor = 0, initialWeight = 0.01):\n \"\"\"\n The last term in each sequence is given a weight of 1. 
Prior terms are \n weighted by their successors' weight multiplied by the forget factor.\n If no forgetFactor is provided, then it is automatically determined for \n each sequence by the initialWeight, which is the weight given to the first\n term in each sequence.\n \n By default (with a forget factor of 0), the accuracy only considers the \n final frame of each sequence and thus serves as a measure of how well the\n network eventually gets the right answer.\n \"\"\"\n if abs(forgetFactor) > 1:\n raise ValueError(\"Argument 'forgetFactor' must be a number in the range [0,1].\")\n if binaryClassMatrix:\n if sample_mask is not None:\n y_true = np.argmax(y_true, axis=-1)\n y_true[sample_mask == 0] = -1\n else:\n raise ValueError('Sample weights must be provided for each target if binaryClassMatrix is True.')\n lengths = (y_true>=0).sum(axis=1)\n if forgetFactor is None:\n weights = [np.concatenate((np.power(np.power(initialWeight, 1.0/length), np.array(range(length-1, -1, -1))),\n np.zeros((y_true.shape[-1]-length,)))) for length in lengths]\n weights = np.asarray(weights)\n else:\n weights = [np.concatenate((np.power(forgetFactor, np.array(range(length-1, -1, -1))),\n np.zeros((y_true.shape[-1]-length,)))) for length in lengths]\n weights = np.asarray(weights)\n return weights\n\ndef prepareData(sequences, classes, numClasses, sampleWeights):\n numSamples = [sequence.shape[1] for sequence in sequences]\n if np.unique(numSamples).shape[0] > 1:\n raise ValueError('Each input must have the same number of samples. Received inputs with sample counts ' + str(numSamples))\n isTemporal = classes.shape[0] > 1\n isTemporal2 = [sequence.shape[0] > 1 for sequence in sequences]\n #class labels must be made into binary arrays\n binaryClasses = np.zeros((classes.shape[0], classes.shape[1], numClasses))\n # tell cost function which timesteps to ignore\n if sampleWeights is None:\n calcSampleWeights = True\n sampleWeights = np.ones((classes.shape[0], classes.shape[1]))\n else:\n calcSampleWeights = False\n #eh...just use for loops\n for i in range(classes.shape[0]):\n for j in range(classes.shape[1]):\n if classes[i,j] >= 0:\n binaryClasses[i,j, classes[i,j]] = 1\n elif calcSampleWeights:\n sampleWeights[i,j] = 0\n #range over samples (sequences) in first dimension, time in second, features in third\n sequences = [sequence.transpose([1,0] + range(2, len(sequence.shape))) for sequence in sequences]\n binaryClasses = binaryClasses.transpose((1,0,2))\n if not isTemporal:\n binaryClasses = binaryClasses.reshape((binaryClasses.shape[0], binaryClasses.shape[2]))\n if calcSampleWeights:\n sampleWeights = sampleWeights.T\n return isTemporal, isTemporal2, sequences, binaryClasses, sampleWeights\n\ndef getModelOutput(model):\n \"\"\"\n Utility function for examining the training output of a model.\n Assumes the model has only one output.\n \"\"\"\n O = K.function(model.inputs + [K.learning_phase()], model.output)\n return lambda x: O(x, True)\n\ndef getModelGradients(model):\n \"\"\"\n Utility function for examining the gradients during training.\n Returns functions capable of computing gradients with respect to different parts of the network.\n\n To get i-th gradient: GF[i].__call__(x, y, sample_weights, 1)\n \"\"\"\n #get symbolic representations of the gradient updates\n #updates = model.optimizer.get_updates(collect_trainable_weights(model), model.constraints, model.total_loss)\n grads = model.optimizer.get_gradients(model.total_loss, collect_trainable_weights(model))\n GF = [K.function(model.inputs + 
model.targets + model.sample_weights + [K.learning_phase()], grad) for grad in grads]\n return GF\n\n\ndef trainKerasModel(model, batchSize,\n numEpochs, \n sequences, classes, trainRange, \n valRange, testRange,\n numClasses, modelFile, \n callbacks = None, sampleWeights = None, \n outDirectory = '', trainMode = 'continue',\n custom_objects = {},\n loss_function = 'categorical_crossentropy',\n optimizer = RMSprop(0.001)):\n \"\"\"\n Returns True if training was completed, False if interrupted.\n \n # sequences: List of arrays of sequences. \n Each array of sequences must be the same length (i.e. same number of samples = shape[1]).\n This allows for multiple inputs.\n # classes: List of target values. Integers. Multiple outputs not currently supported.\n # custom_objects: Dictionary of name-object pairs for custom layers and \n such used in the provided model. \n Example:\n \n custom_objects = {'MaskEatingLambda': MaskEatingLambda}\n \"\"\"\n trainModes = ['continue', 'overwrite', 'skip']\n \n if trainMode.lower() not in trainModes:\n raise ValueError(\"Parameter 'trainMode' must be one of 'continue', 'overwrite', or 'skip'\")\n \n if outDirectory is not None and outDirectory != '':\n outDirectory = outDirectory + '\\\\'\n else:\n outDirectory = ''\n\n isTemporal, isTemporal2, sequences, binaryClasses, sampleWeights = prepareData(sequences, classes, numClasses, sampleWeights)\n \n trainData = [[sequence[trainRange,:,:] if isTemp2 else sequence[trainRange,:].squeeze() for sequence, isTemp2 in zip(sequences, isTemporal2)], \n binaryClasses[trainRange,:,:] if isTemporal else binaryClasses[trainRange,:], \n sampleWeights[trainRange, :]]\n valData = [[sequence[valRange,:,:] if isTemp2 else sequence[valRange,0,:].squeeze() for sequence, isTemp2 in zip(sequences, isTemporal2)], \n binaryClasses[valRange,:,:] if isTemporal else binaryClasses[valRange,:], \n sampleWeights[valRange, :]]\n testData = [[sequence[testRange,:,:] if isTemp2 else sequence[testRange,0,:].squeeze() for sequence, isTemp2 in zip(sequences, isTemporal2)], \n binaryClasses[testRange, :, :] if isTemporal else binaryClasses[testRange,:], \n sampleWeights[testRange, :]]\n \n modelFile = outDirectory + 'Keras'+modelFile\n weightsFile = modelFile+'_Weights'\n completedEpochs = 0\n # if a pre-trained model exists and we are not set to overwrite, load it.\n # otherwise, we use the provided model\n if not ((trainMode == 'overwrite') \n or (not os.path.isfile(modelFile+'.json') \n or not os.path.isfile(weightsFile+'.h5'))):\n model = model_from_json(open(modelFile+'.json', 'rb').read(), custom_objects)\n model.load_weights(weightsFile+'.h5')\n \n #compile model and training objective function\n #sgd = SGD(lr=learningRate)\n #adagrad = Adagrad(lr=learningRate)\n model.compile(loss=loss_function, optimizer=optimizer,\n sample_weight_mode='temporal' if isTemporal else 'none', \n metrics=['accuracy'])\n checkp = [ModelCheckpoint(weightsFile + '.h5', save_best_only = True)]\n if callbacks is None:\n callbacks = checkp\n else:\n callbacks += checkp\n try:\n if trainMode != 'skip':\n completedEpochs = model.fit(x=trainData[0], y=trainData[1], \n sample_weight=trainData[2] if isTemporal else None,\n validation_data = valData if isTemporal else (valData[0], valData[1]), \n batch_size = batchSize, \n nb_epoch = numEpochs, callbacks = callbacks,\n verbose = 2)\n completedEpochs = completedEpochs.history\n #completedEpochs = completedEpochs.history['loss']\n except KeyboardInterrupt:\n if(not queryUser('Training interrupted. 
Compute test statistics?')):\n if isTemporal:\n return 0, float('nan'), float('nan'), float('nan') \n else:\n return 0, float('nan'), float('nan')\n #retrieve the best weights based upon validation set loss\n if os.path.isfile(weightsFile+'.h5'):\n model.load_weights(weightsFile+'.h5')\n scores = model.test_on_batch(x=testData[0], y=testData[1], \n sample_weight=testData[2] if isTemporal else None)\n predictedClasses = predict_classes(model, testData[0], isTemporal)\n if not isTemporal:\n predictedClasses = predictedClasses.reshape((predictedClasses.shape[0], 1))\n scores[1] = accuracy(classes[:, testRange].T, predictedClasses)\n if not isTemporal:\n print(\"Test loss of %.5f\\nAccuracy of %.5f\" % (scores[0], scores[1]))\n else:\n scores.append(balancedAccuracy(classes[:, testRange].T, predictedClasses))\n scores.append(weightedAccuracy(classes[:, testRange].T, predictedClasses, forgetFactor=0))\n print(\"Test loss of %.5f\\nFrame-wise accuracy of %.5f\\nSequence-wise accuracy of %.5f\\nFinal frame accuracy of %0.5f\" % (scores[0], scores[1], scores[2], scores[3]))\n if trainMode != 'skip':\n modelString = model.to_json()\n open(modelFile + '.json', 'wb').write(modelString)\n model.save_weights(weightsFile + '.h5', overwrite=True)\n print('Model and weights saved to %s and %s.' % (modelFile+'.json', weightsFile+'.h5'))\n\n if isTemporal:\n return completedEpochs, scores[0], scores[1], scores[2], scores[3]\n else:\n return completedEpochs, scores[0], scores[1]\n\ndef evaluateEnsemble(modelDicts, \n sequences, classes, \n testRange, numClasses, sampleWeights):\n \"\"\"\n Takes a list of dictionaries (composed of parameters to trainKerasModel()) \n that define models that will be combined into an ensemble by simple averaging\n of their output layers. If the model does not yet exist (indicated by a 'None'\n in the model given in the modelDict), then it is loaded from the modelFile \n given in modelDicts.\n If the model exists but is not trained (indicated by modelFile not existing), \n then it is trained via trainKerasModel(). \n\n If both the model and modelFile exist, then parameters in the corresponding\n modelDicts entry can be ignored. The only required entries are 'outDirectory',\n 'modelFile', and 'model'.\n \"\"\"\n models = []\n for modelDict in modelDicts:\n #does the model exist?\n outDirectory = modelDict['outDirectory'] \n modelFile = ('\\\\Keras' if outDirectory != '' else 'Keras') + modelDict['modelFile']\n if modelDict['model'] is None:\n if (os.path.isfile(modelFile+'.json') \n and os.path.isfile(modelFile+'_Weights.h5')):\n model = model_from_json(open(modelFile+'.json', 'rb').read(), modelDict['custom_objects'])\n model.load_weights(modelFile+'_Weights.h5')\n else:\n raise ValueError('If the model is None, then the modelFile must exist (i.e. 
the model must be pretrained)!'\n +'The architecture of the model is not known here.')\n elif (not os.path.isfile(modelFile+'.json') \n or not os.path.isfile(modelFile+'_Weights.h5')):\n #does the modelFile exist?\n trainKerasModel(modelDict['model'], modelDict['batchSize'],\n modelDict['numEpochs'], modelDict['learningRate'],\n modelDict['sequences'], modelDict['classes'], \n modelDict['trainRange'], modelDict['valRange'], modelDict['testRange'],\n numClasses, modelDict['modelFile'], modelDict['callbacks'],\n modelDict['sampleWeights'], modelDict['outDirectory'],\n 'continue', modelDict['custom_objects'])\n model = model_from_json(open(modelFile+'.json', 'rb').read(), modelDict['custom_objects'])\n model.load_weights(modelFile+'_Weights.h5')\n else:\n model = modelDict['model']\n models += [model]\n #ensemble parts loaded. evaluate it on the given test set\n #first prepare the test set\n isTemporal, isTemporal2, sequences, binaryClasses, sampleWeights = prepareData(sequences, classes, numClasses, sampleWeights)\n \n testData = [sequences[testRange, :, :] if isTemporal2 else sequences[testRange,:].squeeze(), \n binaryClasses[testRange, :, :] if isTemporal else binaryClasses[testRange,:], \n sampleWeights[testRange, :]]\n #make actual ensemble model and compile\n inputLayer = Input(shape = testData[0].shape[1:])\n if len(models) > 1:\n outputLayer = merge([model(inputLayer) for model in models], mode = 'ave')\n else:\n outputLayer = models[0](inputLayer)\n ensemble = Model(input = inputLayer, output = outputLayer)\n ensemble.compile(loss='categorical_crossentropy', optimizer='rmsprop',\n sample_weight_mode='temporal' if isTemporal else 'none', \n metrics=['accuracy'])\n #test\n scores = ensemble.test_on_batch(x=testData[0], y=testData[1], \n sample_weight=testData[2] if isTemporal else None)\n predictedClasses = predict_classes(ensemble, testData[0], isTemporal)\n scores[1] = accuracy(classes[:, testRange].T, predictedClasses)\n if not isTemporal:\n print(\"Test loss of %.5f\\nAccuracy of %.5f\" % (scores[0], scores[1]))\n else:\n scores.append(balancedAccuracy(classes[:, testRange].T, predictedClasses))\n scores.append(weightedAccuracy(classes[:, testRange].T, predictedClasses, forgetFactor=0))\n print(\"Test loss of %.5f\\nFrame-wise accuracy of %.5f\\nSequence-wise accuracy of %.5f\\nFinal frame accuracy of %0.5f\" % (scores[0], scores[1], scores[2], scores[3]))\n \ndef predict_classes(model, x, isTemporal = False):\n if isinstance(model, Sequential):\n return model.predict_classes(x)\n elif isinstance(model, Model):\n probs = model.predict(x)\n if probs.shape[-1] > 1:\n predictedClasses = probs.argmax(axis=-1)\n else:\n predictedClasses = (probs > 0.5).astype('int32')\n if not isTemporal:\n predictedClasses = predictedClasses.reshape((predictedClasses.shape[0], 1))\n return predictedClasses\n else:\n raise ValueError(\"Unknown type: 'model' must be a Keras 'Model' or 'Sequential' object.\")\n\n\"\"\"\nPotentially could use this instead of my hack by changing supports_masking to True.\nFrom Keras source code.\nclass Lambda(Layer):\n '''Used for evaluating an arbitrary Theano / TensorFlow expression\n on the output of the previous layer.\n # Examples\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = 
K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n def antirectifier_output_shape(input_shape):\n shape = list(input_shape)\n assert len(shape) == 2 # only valid for 2D tensors\n shape[-1] *= 2\n return tuple(shape)\n model.add(Lambda(antirectifier, output_shape=antirectifier_output_shape))\n ```\n # Arguments\n function: The function to be evaluated.\n Takes one argument: the output of previous layer\n output_shape: Expected output shape from function.\n Can be a tuple or function.\n If a tuple, it only specifies the first dimension onward; \n sample dimension is assumed either the same as the input:\n `output_shape = (input_shape[0], ) + output_shape`\n or, the input is `None` and the sample dimension is also `None`:\n `output_shape = (None, ) + output_shape`\n If a function, it specifies the entire shape as a function of \n the input shape: `output_shape = f(input_shape)`\n arguments: optional dictionary of keyword arguments to be passed\n to the function.\n # Input shape\n Arbitrary. Use the keyword argument input_shape\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n # Output shape\n Specified by `output_shape` argument.\n '''\n def __init__(self, function, output_shape=None, arguments={}, **kwargs):\n self.function = function\n self.arguments = arguments\n self.supports_masking = False\n\n if output_shape is None:\n self._output_shape = None\n elif type(output_shape) in {tuple, list}:\n self._output_shape = tuple(output_shape)\n else:\n if not hasattr(output_shape, '__call__'):\n raise Exception('In Lambda, `output_shape` '\n 'must be a list, a tuple, or a function.')\n self._output_shape = output_shape\n super(Lambda, self).__init__(**kwargs)\n\n def get_output_shape_for(self, input_shape):\n if self._output_shape is None:\n # if TensorFlow, we can infer the output shape directly:\n if K._BACKEND == 'tensorflow':\n if type(input_shape) is list:\n xs = [K.placeholder(shape=shape) for shape in input_shape]\n x = self.call(xs)\n else:\n x = K.placeholder(shape=input_shape)\n x = self.call(x)\n if type(x) is list:\n return [K.int_shape(x_elem) for x_elem in x]\n else:\n return K.int_shape(x)\n # otherwise, we default to the input shape\n return input_shape\n elif type(self._output_shape) in {tuple, list}:\n nb_samples = input_shape[0] if input_shape else None\n return (nb_samples,) + tuple(self._output_shape)\n else:\n shape = self._output_shape(input_shape)\n if type(shape) not in {list, tuple}:\n raise Exception('output_shape function must return a tuple')\n return tuple(shape)\n\n def call(self, x, mask=None):\n arguments = self.arguments\n arg_spec = inspect.getargspec(self.function)\n if 'mask' in arg_spec.args:\n arguments['mask'] = mask\n return self.function(x, **arguments)\n\n def get_config(self):\n py3 = sys.version_info[0] == 3\n\n if isinstance(self.function, python_types.LambdaType):\n if py3:\n function = marshal.dumps(self.function.__code__).decode('raw_unicode_escape')\n else:\n function = marshal.dumps(self.function.func_code).decode('raw_unicode_escape')\n function_type = 'lambda'\n else:\n function = self.function.__name__\n function_type = 'function'\n\n if isinstance(self._output_shape, python_types.LambdaType):\n if py3:\n output_shape = marshal.dumps(self._output_shape.__code__).decode('raw_unicode_escape')\n else:\n output_shape = marshal.dumps(self._output_shape.func_code).decode('raw_unicode_escape')\n output_shape_type = 'lambda'\n elif callable(self._output_shape):\n output_shape = 
self._output_shape.__name__\n output_shape_type = 'function'\n else:\n output_shape = self._output_shape\n output_shape_type = 'raw'\n\n config = {'function': function,\n 'function_type': function_type,\n 'output_shape': output_shape,\n 'output_shape_type': output_shape_type,\n 'arguments': self.arguments}\n base_config = super(Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n function_type = config.pop('function_type')\n if function_type == 'function':\n function = globals()[config['function']]\n elif function_type == 'lambda':\n function = marshal.loads(config['function'].encode('raw_unicode_escape'))\n function = python_types.FunctionType(function, globals())\n else:\n raise Exception('Unknown function type: ' + function_type)\n\n output_shape_type = config.pop('output_shape_type')\n if output_shape_type == 'function':\n output_shape = globals()[config['output_shape']]\n elif output_shape_type == 'lambda':\n output_shape = marshal.loads(config['output_shape'].encode('raw_unicode_escape'))\n output_shape = python_types.FunctionType(output_shape, globals())\n else:\n output_shape = config['output_shape']\n\n config['function'] = function\n config['output_shape'] = output_shape\n return cls(**config)\n\"\"\""
]
| [
[
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.prod",
"numpy.random.randint",
"numpy.argmax",
"numpy.power",
"numpy.unique"
]
]
|
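
The 'ave' branch of TimeDistributedMerge in the file above averages over the time axis while ignoring masked (padded) timesteps, guarding against all-masked rows by substituting a count of one. As a rough NumPy sketch of that idea (the function name and toy data below are illustrative, not part of the file's API):

import numpy as np

def masked_time_average(x, mask):
    # x: (batch, time, features); mask: (batch, time), 1 = real timestep, 0 = padding
    m = mask.astype(x.dtype)[:, :, None]
    counts = np.maximum(m.sum(axis=1), 1.0)      # guard against all-masked rows, as the Keras code does
    return (x * m).sum(axis=1) / counts          # (batch, features)

x = np.arange(12, dtype=np.float32).reshape(2, 3, 2)
mask = np.array([[1, 1, 1], [1, 0, 0]])
print(masked_time_average(x, mask))              # row means: [2. 3.] and [6. 7.]
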
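
Similarly, WordDropout zeroes out whole timesteps rather than individual units, and can rescale the surviving timesteps so the per-sample activation stays comparable. A minimal sketch of that behaviour, assuming no padding mask and using NumPy names of my own choosing rather than the layer's interface:

import numpy as np

def word_dropout(x, p, rescale=True, seed=None):
    # x: (batch, time, features); drop each timestep with probability p (training time only)
    rng = np.random.default_rng(seed)
    keep = (rng.random((x.shape[0], x.shape[1], 1)) >= p).astype(x.dtype)
    out = x * keep
    if rescale:
        frac = keep.mean(axis=1, keepdims=True)                  # fraction of timesteps kept, per sample
        out = np.where(frac > 0, out / np.maximum(frac, 1e-8), out)
    return out

x = np.ones((2, 5, 3))
print(word_dropout(x, p=0.4, seed=0).sum(axis=(1, 2)))           # each sum is 15.0 whenever at least one timestep survives
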
haquebd/robotic-warehouse | [
"55036332f82e79cf3f60c377c73d2d39733ec9e0"
]
| [
"rware/warehouse.py"
]
| [
"import logging\n\nfrom collections import defaultdict, OrderedDict\nimport gym\nfrom gym import spaces\n\nfrom rware.utils import MultiAgentActionSpace, MultiAgentObservationSpace\n\nfrom enum import Enum\nimport numpy as np\n\nfrom typing import List, Tuple, Optional, Dict\n\nimport networkx as nx\nimport astar\n\n_AXIS_Z = 0\n_AXIS_Y = 1\n_AXIS_X = 2\n\n_COLLISION_LAYERS = 2\n\n_LAYER_AGENTS = 0\n_LAYER_SHELFS = 1\n\n\nclass _VectorWriter:\n def __init__(self, size: int):\n self.vector = np.zeros(size, dtype=np.float32)\n self.idx = 0\n\n def write(self, data):\n data_size = len(data)\n self.vector[self.idx : self.idx + data_size] = data\n self.idx += data_size\n\n def skip(self, bits):\n self.idx += bits\n\n\nclass Action(Enum):\n NOOP = 0\n FORWARD = 1\n LEFT = 2\n RIGHT = 3\n TOGGLE_LOAD = 4\n\n\nclass Direction(Enum):\n UP = 0\n DOWN = 1\n LEFT = 2\n RIGHT = 3\n\n\nclass RewardType(Enum):\n GLOBAL = 0\n INDIVIDUAL = 1\n TWO_STAGE = 2\n\n\nclass ObserationType(Enum):\n DICT = 0\n FLATTENED = 1\n IMAGE = 2\n\nclass ImageLayer(Enum):\n \"\"\"\n Input layers of image-style observations\n \"\"\"\n SHELVES = 0 # binary layer indicating shelves (also indicates carried shelves)\n REQUESTS = 1 # binary layer indicating requested shelves\n AGENTS = 2 # binary layer indicating agents in the environment (no way to distinguish agents)\n AGENT_DIRECTION = 3 # layer indicating agent directions as int (see Direction enum + 1 for values)\n AGENT_LOAD = 4 # binary layer indicating agents with load\n GOALS = 5 # binary layer indicating goal/ delivery locations\n ACCESSIBLE = 6 # binary layer indicating accessible cells (all but occupied cells/ out of map)\n\n\nclass Entity:\n def __init__(self, id_: int, x: int, y: int):\n self.id = id_\n self.prev_x = None\n self.prev_y = None\n self.x = x\n self.y = y\n\n\nclass Agent(Entity):\n counter = 0\n\n def __init__(self, x: int, y: int, dir_: Direction, msg_bits: int):\n Agent.counter += 1\n super().__init__(Agent.counter, x, y)\n self.dir = dir_\n self.message = np.zeros(msg_bits)\n self.req_action: Optional[Action] = None\n self.carrying_shelf: Optional[Shelf] = None\n self.canceled_action = None\n self.has_delivered = False\n\n @property\n def collision_layers(self):\n if self.loaded:\n return (_LAYER_AGENTS, _LAYER_SHELFS)\n else:\n return (_LAYER_AGENTS,)\n\n def req_location(self, grid_size) -> Tuple[int, int]:\n if self.req_action != Action.FORWARD:\n return self.x, self.y\n elif self.dir == Direction.UP:\n return self.x, max(0, self.y - 1)\n elif self.dir == Direction.DOWN:\n return self.x, min(grid_size[0] - 1, self.y + 1)\n elif self.dir == Direction.LEFT:\n return max(0, self.x - 1), self.y\n elif self.dir == Direction.RIGHT:\n return min(grid_size[1] - 1, self.x + 1), self.y\n\n raise ValueError(\n f\"Direction is {self.dir}. 
Should be one of {[v for v in Direction]}\"\n )\n\n def req_direction(self) -> Direction:\n wraplist = [Direction.UP, Direction.RIGHT, Direction.DOWN, Direction.LEFT]\n if self.req_action == Action.RIGHT:\n return wraplist[(wraplist.index(self.dir) + 1) % len(wraplist)]\n elif self.req_action == Action.LEFT:\n return wraplist[(wraplist.index(self.dir) - 1) % len(wraplist)]\n else:\n return self.dir\n\n\nclass Shelf(Entity):\n counter = 0\n\n def __init__(self, x, y):\n Shelf.counter += 1\n super().__init__(Shelf.counter, x, y)\n\n @property\n def collision_layers(self):\n return (_LAYER_SHELFS,)\n\n\nclass Warehouse(gym.Env):\n\n metadata = {\"render.modes\": [\"human\", \"rgb_array\"]}\n\n def __init__(\n self,\n shelf_columns: int,\n column_height: int,\n shelf_rows: int,\n n_agents: int,\n msg_bits: int,\n sensor_range: int,\n request_queue_size: int,\n max_inactivity_steps: Optional[int],\n max_steps: Optional[int],\n reward_type: RewardType,\n layout: str = None,\n observation_type: ObserationType=ObserationType.FLATTENED,\n image_observation_layers: List[ImageLayer]=[\n ImageLayer.SHELVES,\n ImageLayer.REQUESTS,\n ImageLayer.AGENTS,\n ImageLayer.GOALS,\n ImageLayer.ACCESSIBLE\n ],\n image_observation_directional: bool=True,\n normalised_coordinates: bool=False,\n ):\n \"\"\"The robotic warehouse environment\n\n Creates a grid world where multiple agents (robots)\n are supposed to collect shelfs, bring them to a goal\n and then return them.\n .. note:\n The grid looks like this:\n\n shelf\n columns\n vv\n ----------\n -XX-XX-XX- ^\n -XX-XX-XX- Column Height\n -XX-XX-XX- v\n ----------\n -XX----XX- <\\\n -XX----XX- <- Shelf Rows\n -XX----XX- </\n ----------\n ----GG----\n\n G: is the goal positions where agents are rewarded if\n they bring the correct shelfs.\n\n The final grid size will be\n height: (column_height + 1) * shelf_rows + 2\n width: (2 + 1) * shelf_columns + 1\n\n The bottom-middle column will be removed to allow for\n robot queuing next to the goal locations\n\n :param shelf_columns: Number of columns in the warehouse\n :type shelf_columns: int\n :param column_height: Column height in the warehouse\n :type column_height: int\n :param shelf_rows: Number of columns in the warehouse\n :type shelf_rows: int\n :param n_agents: Number of spawned and controlled agents\n :type n_agents: int\n :param msg_bits: Number of communication bits for each agent\n :type msg_bits: int\n :param sensor_range: Range of each agents observation\n :type sensor_range: int\n :param request_queue_size: How many shelfs are simultaneously requested\n :type request_queue_size: int\n :param max_inactivity: Number of steps without a delivered shelf until environment finishes\n :type max_inactivity: Optional[int]\n :param reward_type: Specifies if agents are rewarded individually or globally\n :type reward_type: RewardType\n :param layout: A string for a custom warehouse layout. X are shelve locations, dots are corridors, and g are the goal locations. 
Ignores shelf_columns, shelf_height and shelf_rows when used.\n :type layout: str\n :param observation_type: Specifies type of observations\n :param image_observation_layers: Specifies types of layers observed if image-observations\n are used\n :type image_observation_layers: List[ImageLayer]\n :param image_observation_directional: Specifies whether image observations should be\n rotated to be directional (agent perspective) if image-observations are used\n :type image_observation_directional: bool\n :param normalised_coordinates: Specifies whether absolute coordinates should be normalised\n with respect to total warehouse size\n :type normalised_coordinates: bool\n \"\"\"\n\n self.goals: List[Tuple[int, int]] = []\n\n if not layout:\n self._make_layout_from_params(shelf_columns, shelf_rows, column_height)\n else:\n self._make_layout_from_str(layout)\n\n self.n_agents = n_agents\n self.msg_bits = msg_bits\n self.sensor_range = sensor_range\n self.max_inactivity_steps: Optional[int] = max_inactivity_steps\n self.reward_type = reward_type\n self.reward_range = (0, 1)\n\n self._cur_inactive_steps = None\n self._cur_steps = 0\n self.max_steps = max_steps\n \n self.normalised_coordinates = normalised_coordinates\n\n sa_action_space = [len(Action), *msg_bits * (2,)]\n if len(sa_action_space) == 1:\n sa_action_space = spaces.Discrete(sa_action_space[0])\n else:\n sa_action_space = spaces.MultiDiscrete(sa_action_space)\n self.action_space = spaces.Tuple(tuple(n_agents * [sa_action_space]))\n\n self.request_queue_size = request_queue_size\n self.request_queue = []\n\n self.agents: List[Agent] = []\n\n # default values:\n self.fast_obs = None\n self.image_obs = None\n self.observation_space = None\n if observation_type == ObserationType.IMAGE:\n self._use_image_obs(image_observation_layers, image_observation_directional)\n else:\n # used for DICT observation type and needed as preceeding stype to generate\n # FLATTENED observations as well\n self._use_slow_obs()\n\n # for performance reasons we\n # can flatten the obs vector\n if observation_type == ObserationType.FLATTENED:\n self._use_fast_obs()\n\n self.renderer = None\n\n def _make_layout_from_params(self, shelf_columns, shelf_rows, column_height):\n assert shelf_columns % 2 == 1, \"Only odd number of shelf columns is supported\"\n\n self.grid_size = (\n (column_height + 1) * shelf_rows + 2,\n (2 + 1) * shelf_columns + 1,\n )\n self.column_height = column_height\n self.grid = np.zeros((_COLLISION_LAYERS, *self.grid_size), dtype=np.int32)\n self.goals = [\n (self.grid_size[1] // 2 - 1, self.grid_size[0] - 1),\n (self.grid_size[1] // 2, self.grid_size[0] - 1),\n ]\n\n self.highways = np.zeros(self.grid_size, dtype=np.int32)\n\n highway_func = lambda x, y: (\n (x % 3 == 0) # vertical highways\n or (y % (self.column_height + 1) == 0) # horizontal highways\n or (y == self.grid_size[0] - 1) # delivery row\n or ( # remove a box for queuing\n (y > self.grid_size[0] - (self.column_height + 3))\n and ((x == self.grid_size[1] // 2 - 1) or (x == self.grid_size[1] // 2))\n )\n )\n for x in range(self.grid_size[1]):\n for y in range(self.grid_size[0]):\n self.highways[y, x] = highway_func(x, y)\n\n def _make_layout_from_str(self, layout):\n layout = layout.strip()\n layout = layout.replace(\" \", \"\")\n grid_height = layout.count(\"\\n\") + 1\n lines = layout.split(\"\\n\")\n grid_width = len(lines[0])\n for line in lines:\n assert len(line) == grid_width, \"Layout must be rectangular\"\n\n self.grid_size = (grid_height, grid_width)\n self.grid = 
np.zeros((_COLLISION_LAYERS, *self.grid_size), dtype=np.int32)\n self.highways = np.zeros(self.grid_size, dtype=np.int32)\n\n for y, line in enumerate(lines):\n for x, char in enumerate(line):\n assert char.lower() in \"gx.\"\n if char.lower() == \"g\":\n self.goals.append((x, y))\n self.highways[y, x] = 1\n elif char.lower() == \".\":\n self.highways[y, x] = 1\n\n assert len(self.goals) >= 1, \"At least one goal is required\"\n\n def _use_image_obs(self, image_observation_layers, directional=True):\n \"\"\"\n Set image observation space\n :param image_observation_layers (List[ImageLayer]): list of layers to use as image channels\n :param directional (bool): flag whether observations should be directional (pointing in\n direction of agent or north-wise)\n \"\"\"\n self.image_obs = True\n self.fast_obs = False\n self.image_observation_directional = directional\n self.image_observation_layers = image_observation_layers\n\n observation_shape = (1 + 2 * self.sensor_range, 1 + 2 * self.sensor_range)\n\n layers_min = []\n layers_max = []\n for layer in image_observation_layers:\n if layer == ImageLayer.AGENT_DIRECTION:\n # directions as int\n layer_min = np.zeros(observation_shape, dtype=np.float32)\n layer_max = np.ones(observation_shape, dtype=np.float32) * max([d.value + 1 for d in Direction])\n else:\n # binary layer\n layer_min = np.zeros(observation_shape, dtype=np.float32)\n layer_max = np.ones(observation_shape, dtype=np.float32)\n layers_min.append(layer_min)\n layers_max.append(layer_max)\n\n # total observation\n min_obs = np.stack(layers_min)\n max_obs = np.stack(layers_max)\n self.observation_space = spaces.Tuple(\n tuple([spaces.Box(min_obs, max_obs, dtype=np.float32)] * self.n_agents)\n )\n\n def _use_slow_obs(self):\n self.fast_obs = False\n\n self._obs_bits_for_self = 4 + len(Direction)\n self._obs_bits_per_agent = 1 + len(Direction) + self.msg_bits\n self._obs_bits_per_shelf = 2\n self._obs_bits_for_requests = 2\n\n self._obs_sensor_locations = (1 + 2 * self.sensor_range) ** 2\n\n self._obs_length = (\n self._obs_bits_for_self\n + self._obs_sensor_locations * self._obs_bits_per_agent\n + self._obs_sensor_locations * self._obs_bits_per_shelf\n )\n\n if self.normalised_coordinates:\n location_space = spaces.Box(\n low=0.0,\n high=1.0,\n shape=(2,),\n dtype=np.float32,\n )\n else:\n location_space = spaces.MultiDiscrete(\n [self.grid_size[1], self.grid_size[0]]\n )\n\n self.observation_space = spaces.Tuple(\n tuple(\n [\n spaces.Dict(\n OrderedDict(\n {\n \"self\": spaces.Dict(\n OrderedDict(\n {\n \"location\": location_space,\n \"carrying_shelf\": spaces.MultiDiscrete([2]),\n \"direction\": spaces.Discrete(4),\n \"on_highway\": spaces.MultiBinary(1),\n }\n )\n ),\n \"sensors\": spaces.Tuple(\n self._obs_sensor_locations\n * (\n spaces.Dict(\n OrderedDict(\n {\n \"has_agent\": spaces.MultiBinary(1),\n \"direction\": spaces.Discrete(4),\n \"local_message\": spaces.MultiBinary(\n self.msg_bits\n ),\n \"has_shelf\": spaces.MultiBinary(1),\n \"shelf_requested\": spaces.MultiBinary(\n 1\n ),\n }\n )\n ),\n )\n ),\n }\n )\n )\n for _ in range(self.n_agents)\n ]\n )\n )\n\n def _use_fast_obs(self):\n if self.fast_obs:\n return\n\n self.fast_obs = True\n ma_spaces = []\n for sa_obs in self.observation_space:\n flatdim = spaces.flatdim(sa_obs)\n ma_spaces += [\n spaces.Box(\n low=-float(\"inf\"),\n high=float(\"inf\"),\n shape=(flatdim,),\n dtype=np.float32,\n )\n ]\n\n self.observation_space = spaces.Tuple(tuple(ma_spaces))\n\n def _is_highway(self, x: int, y: int) -> bool:\n return 
self.highways[y, x]\n\n def _make_obs(self, agent):\n if self.image_obs:\n # write image observations\n if agent.id == 1:\n layers = []\n # first agent's observation --> update global observation layers\n for layer_type in self.image_observation_layers:\n if layer_type == ImageLayer.SHELVES:\n layer = self.grid[_LAYER_SHELFS].copy().astype(np.float32)\n # set all occupied shelf cells to 1.0 (instead of shelf ID)\n layer[layer > 0.0] = 1.0\n # print(\"SHELVES LAYER\")\n elif layer_type == ImageLayer.REQUESTS:\n layer = np.zeros(self.grid_size, dtype=np.float32)\n for requested_shelf in self.request_queue:\n layer[requested_shelf.y, requested_shelf.x] = 1.0\n # print(\"REQUESTS LAYER\")\n elif layer_type == ImageLayer.AGENTS:\n layer = self.grid[_LAYER_AGENTS].copy().astype(np.float32)\n # set all occupied agent cells to 1.0 (instead of agent ID)\n layer[layer > 0.0] = 1.0\n # print(\"AGENTS LAYER\")\n elif layer_type == ImageLayer.AGENT_DIRECTION:\n layer = np.zeros(self.grid_size, dtype=np.float32)\n for ag in self.agents:\n agent_direction = ag.dir.value + 1\n layer[ag.x, ag.y] = float(agent_direction)\n # print(\"AGENT DIRECTIONS LAYER\")\n elif layer_type == ImageLayer.AGENT_LOAD:\n layer = np.zeros(self.grid_size, dtype=np.float32)\n for ag in self.agents:\n if ag.carrying_shelf is not None:\n layer[ag.x, ag.y] = 1.0\n # print(\"AGENT LOAD LAYER\")\n elif layer_type == ImageLayer.GOALS:\n layer = np.zeros(self.grid_size, dtype=np.float32)\n for goal_y, goal_x in self.goals:\n layer[goal_x, goal_y] = 1.0\n # print(\"GOALS LAYER\")\n elif layer_type == ImageLayer.ACCESSIBLE:\n layer = np.ones(self.grid_size, dtype=np.float32)\n for ag in self.agents:\n layer[ag.y, ag.x] = 0.0\n # print(\"ACCESSIBLE LAYER\")\n # print(layer)\n # print()\n # pad with 0s for out-of-map cells\n layer = np.pad(layer, self.sensor_range, mode=\"constant\")\n layers.append(layer)\n self.global_layers = np.stack(layers)\n\n # global information was generated --> get information for agent\n start_x = agent.y\n end_x = agent.y + 2 * self.sensor_range + 1\n start_y = agent.x\n end_y = agent.x + 2 * self.sensor_range + 1\n obs = self.global_layers[:, start_x:end_x, start_y:end_y]\n\n if self.image_observation_directional:\n # rotate image to be in direction of agent\n if agent.dir == Direction.DOWN:\n # rotate by 180 degrees (clockwise)\n obs = np.rot90(obs, k=2, axes=(1,2))\n elif agent.dir == Direction.LEFT:\n # rotate by 90 degrees (clockwise)\n obs = np.rot90(obs, k=3, axes=(1,2))\n elif agent.dir == Direction.RIGHT:\n # rotate by 270 degrees (clockwise)\n obs = np.rot90(obs, k=1, axes=(1,2))\n # no rotation needed for UP direction\n return obs\n\n min_x = agent.x - self.sensor_range\n max_x = agent.x + self.sensor_range + 1\n\n min_y = agent.y - self.sensor_range\n max_y = agent.y + self.sensor_range + 1\n\n # sensors\n if (\n (min_x < 0)\n or (min_y < 0)\n or (max_x > self.grid_size[1])\n or (max_y > self.grid_size[0])\n ):\n padded_agents = np.pad(\n self.grid[_LAYER_AGENTS], self.sensor_range, mode=\"constant\"\n )\n padded_shelfs = np.pad(\n self.grid[_LAYER_SHELFS], self.sensor_range, mode=\"constant\"\n )\n # + self.sensor_range due to padding\n min_x += self.sensor_range\n max_x += self.sensor_range\n min_y += self.sensor_range\n max_y += self.sensor_range\n\n else:\n padded_agents = self.grid[_LAYER_AGENTS]\n padded_shelfs = self.grid[_LAYER_SHELFS]\n\n agents = padded_agents[min_y:max_y, min_x:max_x].reshape(-1)\n shelfs = padded_shelfs[min_y:max_y, min_x:max_x].reshape(-1)\n\n if self.fast_obs:\n # 
write flattened observations\n obs = _VectorWriter(self.observation_space[agent.id - 1].shape[0])\n\n if self.normalised_coordinates:\n agent_x = agent.x / (self.grid_size[1] - 1)\n agent_y = agent.y / (self.grid_size[0] - 1)\n else:\n agent_x = agent.x\n agent_y = agent.y\n\n obs.write([agent_x, agent_y, int(agent.carrying_shelf is not None)])\n direction = np.zeros(4)\n direction[agent.dir.value] = 1.0\n obs.write(direction)\n obs.write([int(self._is_highway(agent.x, agent.y))])\n\n for i, (id_agent, id_shelf) in enumerate(zip(agents, shelfs)):\n if id_agent == 0:\n obs.skip(1)\n obs.write([1.0])\n obs.skip(3 + self.msg_bits)\n else:\n obs.write([1.0])\n direction = np.zeros(4)\n direction[self.agents[id_agent - 1].dir.value] = 1.0\n obs.write(direction)\n if self.msg_bits > 0:\n obs.write(self.agents[id_agent - 1].message)\n if id_shelf == 0:\n obs.skip(2)\n else:\n obs.write(\n [1.0, int(self.shelfs[id_shelf - 1] in self.request_queue)]\n )\n\n return obs.vector\n \n # write dictionary observations\n obs = {}\n if self.normalised_coordinates:\n agent_x = agent.x / (self.grid_size[1] - 1)\n agent_y = agent.y / (self.grid_size[0] - 1)\n else:\n agent_x = agent.x\n agent_y = agent.y\n # --- self data\n obs[\"self\"] = {\n \"location\": np.array([agent_x, agent_y]),\n \"carrying_shelf\": [int(agent.carrying_shelf is not None)],\n \"direction\": agent.dir.value,\n \"on_highway\": [int(self._is_highway(agent.x, agent.y))],\n }\n # --- sensor data\n obs[\"sensors\"] = tuple({} for _ in range(self._obs_sensor_locations))\n\n # find neighboring agents\n for i, id_ in enumerate(agents):\n if id_ == 0:\n obs[\"sensors\"][i][\"has_agent\"] = [0]\n obs[\"sensors\"][i][\"direction\"] = 0\n obs[\"sensors\"][i][\"local_message\"] = self.msg_bits * [0]\n else:\n obs[\"sensors\"][i][\"has_agent\"] = [1]\n obs[\"sensors\"][i][\"direction\"] = self.agents[id_ - 1].dir.value\n obs[\"sensors\"][i][\"local_message\"] = self.agents[id_ - 1].message\n\n # find neighboring shelfs:\n for i, id_ in enumerate(shelfs):\n if id_ == 0:\n obs[\"sensors\"][i][\"has_shelf\"] = [0]\n obs[\"sensors\"][i][\"shelf_requested\"] = [0]\n else:\n obs[\"sensors\"][i][\"has_shelf\"] = [1]\n obs[\"sensors\"][i][\"shelf_requested\"] = [\n int(self.shelfs[id_ - 1] in self.request_queue)\n ]\n\n return obs\n\n def _recalc_grid(self):\n self.grid[:] = 0\n for s in self.shelfs:\n self.grid[_LAYER_SHELFS, s.y, s.x] = s.id\n\n for a in self.agents:\n self.grid[_LAYER_AGENTS, a.y, a.x] = a.id\n\n def reset(self):\n Shelf.counter = 0\n Agent.counter = 0\n self._cur_inactive_steps = 0\n self._cur_steps = 0\n\n # n_xshelf = (self.grid_size[1] - 1) // 3\n # n_yshelf = (self.grid_size[0] - 2) // 9\n\n # make the shelfs\n self.shelfs = [\n Shelf(x, y)\n for y, x in zip(\n np.indices(self.grid_size)[0].reshape(-1),\n np.indices(self.grid_size)[1].reshape(-1),\n )\n if not self._is_highway(x, y)\n ]\n\n # spawn agents at random locations\n agent_locs = np.random.choice(\n np.arange(self.grid_size[0] * self.grid_size[1]),\n size=self.n_agents,\n replace=False,\n )\n agent_locs = np.unravel_index(agent_locs, self.grid_size)\n # and direction\n agent_dirs = np.random.choice([d for d in Direction], size=self.n_agents)\n self.agents = [\n Agent(x, y, dir_, self.msg_bits)\n for y, x, dir_ in zip(*agent_locs, agent_dirs)\n ]\n\n self._recalc_grid()\n\n self.request_queue = list(\n np.random.choice(self.shelfs, size=self.request_queue_size, replace=False)\n )\n\n return tuple([self._make_obs(agent) for agent in self.agents])\n # for s in self.shelfs:\n # 
self.grid[0, s.y, s.x] = 1\n # print(self.grid[0])\n\n def step(\n self, actions: List[Action]\n ) -> Tuple[List[np.ndarray], List[float], List[bool], Dict]:\n assert len(actions) == len(self.agents)\n\n for agent, action in zip(self.agents, actions):\n if self.msg_bits > 0:\n agent.req_action = Action(action[0])\n agent.message[:] = action[1:]\n else:\n agent.req_action = Action(action)\n\n # # stationary agents will certainly stay where they are\n # stationary_agents = [agent for agent in self.agents if agent.action != Action.FORWARD]\n\n # # forward agents will move only if they avoid collisions\n # forward_agents = [agent for agent in self.agents if agent.action == Action.FORWARD]\n commited_agents = set()\n\n G = nx.DiGraph()\n\n for agent in self.agents:\n start = agent.x, agent.y\n target = agent.req_location(self.grid_size)\n\n if (\n agent.carrying_shelf\n and start != target\n and self.grid[_LAYER_SHELFS, target[1], target[0]]\n and not (\n self.grid[_LAYER_AGENTS, target[1], target[0]]\n and self.agents[\n self.grid[_LAYER_AGENTS, target[1], target[0]] - 1\n ].carrying_shelf\n )\n ):\n # there's a standing shelf at the target location\n # our agent is carrying a shelf so there's no way\n # this movement can succeed. Cancel it.\n agent.req_action = Action.NOOP\n G.add_edge(start, start)\n else:\n G.add_edge(start, target)\n\n wcomps = [G.subgraph(c).copy() for c in nx.weakly_connected_components(G)]\n\n for comp in wcomps:\n try:\n # if we find a cycle in this component we have to\n # commit all nodes in that cycle, and nothing else\n cycle = nx.algorithms.find_cycle(comp)\n if len(cycle) == 2:\n # we have a situation like this: [A] <-> [B]\n # which is physically impossible. so skip\n continue\n for edge in cycle:\n start_node = edge[0]\n agent_id = self.grid[_LAYER_AGENTS, start_node[1], start_node[0]]\n if agent_id > 0:\n commited_agents.add(agent_id)\n except nx.NetworkXNoCycle:\n\n longest_path = nx.algorithms.dag_longest_path(comp)\n for x, y in longest_path:\n agent_id = self.grid[_LAYER_AGENTS, y, x]\n if agent_id:\n commited_agents.add(agent_id)\n\n commited_agents = set([self.agents[id_ - 1] for id_ in commited_agents])\n failed_agents = set(self.agents) - commited_agents\n\n for agent in failed_agents:\n assert agent.req_action == Action.FORWARD\n agent.req_action = Action.NOOP\n\n rewards = np.zeros(self.n_agents)\n\n for agent in self.agents:\n agent.prev_x, agent.prev_y = agent.x, agent.y\n\n if agent.req_action == Action.FORWARD:\n agent.x, agent.y = agent.req_location(self.grid_size)\n if agent.carrying_shelf:\n agent.carrying_shelf.x, agent.carrying_shelf.y = agent.x, agent.y\n elif agent.req_action in [Action.LEFT, Action.RIGHT]:\n agent.dir = agent.req_direction()\n elif agent.req_action == Action.TOGGLE_LOAD and not agent.carrying_shelf:\n shelf_id = self.grid[_LAYER_SHELFS, agent.y, agent.x]\n if shelf_id:\n agent.carrying_shelf = self.shelfs[shelf_id - 1]\n elif agent.req_action == Action.TOGGLE_LOAD and agent.carrying_shelf:\n if not self._is_highway(agent.x, agent.y):\n agent.carrying_shelf = None\n if agent.has_delivered and self.reward_type == RewardType.TWO_STAGE:\n rewards[agent.id - 1] += 0.5\n\n agent.has_delivered = False\n\n self._recalc_grid()\n\n shelf_delivered = False\n for y, x in self.goals:\n shelf_id = self.grid[_LAYER_SHELFS, x, y]\n if not shelf_id:\n continue\n shelf = self.shelfs[shelf_id - 1]\n\n if shelf not in self.request_queue:\n continue\n # a shelf was successfully delived.\n shelf_delivered = True\n # remove from queue and 
replace it\n new_request = np.random.choice(\n list(set(self.shelfs) - set(self.request_queue))\n )\n self.request_queue[self.request_queue.index(shelf)] = new_request\n # also reward the agents\n if self.reward_type == RewardType.GLOBAL:\n rewards += 1\n elif self.reward_type == RewardType.INDIVIDUAL:\n agent_id = self.grid[_LAYER_AGENTS, x, y]\n rewards[agent_id - 1] += 1\n elif self.reward_type == RewardType.TWO_STAGE:\n agent_id = self.grid[_LAYER_AGENTS, x, y]\n self.agents[agent_id - 1].has_delivered = True\n rewards[agent_id - 1] += 0.5\n\n if shelf_delivered:\n self._cur_inactive_steps = 0\n else:\n self._cur_inactive_steps += 1\n self._cur_steps += 1\n\n if (\n self.max_inactivity_steps\n and self._cur_inactive_steps >= self.max_inactivity_steps\n ) or (self.max_steps and self._cur_steps >= self.max_steps):\n dones = self.n_agents * [True]\n else:\n dones = self.n_agents * [False]\n\n new_obs = tuple([self._make_obs(agent) for agent in self.agents])\n info = {}\n return new_obs, list(rewards), dones, info\n\n def render(self, mode=\"human\"):\n if not self.renderer:\n from rware.rendering import Viewer\n\n self.renderer = Viewer(self.grid_size)\n return self.renderer.render(self, return_rgb_array=mode == \"rgb_array\")\n\n def close(self):\n if self.renderer:\n self.renderer.close()\n\n def seed(self, seed=None):\n ...\n \n def optimal_returns(self, steps=None, output=False):\n \"\"\"\n Compute optimal returns for environment for all agents given steps\n NOTE: Needs to be called on reset environment with shelves in their initial locations\n\n :param steps (int): number of steps available to agents\n :param output (bool): whether steps should be printed\n :return (List[int]): returns for all agents\n\n This function initially positions agents randomly in the warehouse and assumes\n full observability with agents directly moving to closest possible shelf to deliver\n or closest \"open space\" to return. Required steps for movement (including rotations)\n are computed using A* only moving on highways if shelves are loaded. This serves as a\n crude approximation. 
Observability with agents directly moving towards requested shelves/\n goals without search significantly simplifies the problem.\n \"\"\"\n # if already computed --> return computed value\n if hasattr(self, 'calculated_optimal_returns'):\n return self.calculated_optimal_returns\n \n if steps is None:\n steps = self.max_steps\n \n def neighbore_locations(state):\n # given location get neighbours\n x, y, direction, loaded, empty_shelf_loc = state\n # neighbours for rotating\n neighbours = [\n (x, y, (direction - 1) % 4, loaded, empty_shelf_loc),\n (x, y, (direction + 1) % 4, loaded, empty_shelf_loc)\n ]\n # neighbour for forward movement\n if direction == 0:\n # going down\n target_x = x\n target_y = y + 1\n elif direction == 1:\n # going left\n target_x = x - 1\n target_y = y\n elif direction == 2:\n # going up\n target_x = x\n target_y = y - 1\n elif direction == 3:\n # going right\n target_x = x + 1\n target_y = y\n else:\n raise ValueError(f\"Invalid direction {direction} for optimal return computation!\")\n\n if target_x >= 0 and target_x < self.grid_size[1] and target_y >= 0 and target_y < self.grid_size[0]:\n # valid location\n if not loaded or (self._is_highway(target_x, target_y) or (target_x, target_y) == empty_shelf_loc):\n neighbours.append((target_x, target_y, direction, loaded, empty_shelf_loc))\n # else:\n # print(f\"({target_x}, {target_y}) out of bounds\")\n # print(state, neighbours)\n return neighbours\n\n def hamming_distance(state1, state2):\n x1, y1, _, _, _ = state1\n x2, y2, _, _, _ = state2\n return abs(x1 - x2) + abs(y1 - y2)\n \n def is_goal(state, goal):\n x, y, _, _, _ = state\n goal_x, goal_y, _, _, _ = goal\n return x == goal_x and y == goal_y\n\n def pathfinder(state1, state2):\n # pathfinder between two warehouse locations\n # print()\n # print(\"\\tFind path:\", state1, state2)\n return list(astar.find_path(\n state1,\n state2,\n neighbore_locations,\n reversePath=False,\n heuristic_cost_estimate_fnct = hamming_distance,\n distance_between_fnct = lambda a, b: 1.0,\n is_goal_reached_fnct = is_goal,\n ))\n \n # count delivered shelves\n agent_deliveries = [0] * self.n_agents\n agent_directions = list(np.random.randint(0, 4, self.n_agents))\n agent_locations = [(np.random.choice(self.grid_size[1]), np.random.choice(self.grid_size[0])) for _ in range(self.n_agents)]\n # agent goal location with remaining distances to goal\n agent_goals = [loc for loc in agent_locations]\n agent_goal_distances = [0] * self.n_agents\n # original locations of collected shelves\n agent_shelf_original_locations = [None] * self.n_agents\n # agent status (0 - go to requested shelf, 1 - go to goal, 2 - bring back shelf)\n agent_status = [2] * self.n_agents\n\n # print(self.grid_size)\n # print(self.goals)\n \n for t in range(0, steps):\n if output:\n print()\n print(f\"STEP {t}\")\n for i in range(self.n_agents):\n agent_direction = agent_directions[i]\n goal = agent_goals[i]\n goal_distance = agent_goal_distances[i]\n agent_stat = agent_status[i]\n agent_shelf_orig_location = agent_shelf_original_locations[i]\n if output:\n print(f\"\\tAgent {i}: {agent_locations[i]} --> {goal} ({goal_distance}) with stat={agent_stat}\")\n if goal_distance == 0:\n # reached goal\n if agent_stat == 0:\n # goal is to collect shelf --> now will be loaded\n # new goal: go to goal location\n agent_locations[i] = goal\n agent_shelf_original_locations[i] = goal\n # find closest goal\n state = (goal[0], goal[1], agent_direction, True, goal)\n closest_goal = None\n closest_goal_distance = None\n 
closest_goal_direction = None\n for possible_goal in self.goals:\n goal_state = (possible_goal[0], possible_goal[1], None, True, goal)\n path = pathfinder(state, goal_state)\n distance = len(path)\n direction = path[-1][2]\n if closest_goal_distance is None or distance < closest_goal_distance:\n closest_goal = possible_goal\n closest_goal_distance = distance\n closest_goal_direction = direction\n agent_goals[i] = closest_goal\n agent_goal_distances[i] = closest_goal_distance\n agent_directions[i] = closest_goal_direction\n agent_status[i] = 1\n elif agent_stat == 1:\n # goal is to deliver shelf at goal --> now delivered\n # new goal: bring back shelf\n agent_deliveries[i] += 1\n # for new goal: return to original location\n assert agent_shelf_orig_location is not None\n agent_locations[i] = goal\n agent_goals[i] = agent_shelf_orig_location\n state = (goal[0], goal[1], agent_direction, True, agent_shelf_orig_location)\n goal_state = (agent_goals[i][0], agent_goals[i][1], None, True, agent_shelf_orig_location)\n path = pathfinder(state, goal_state)\n agent_goal_distances[i] = len(path)\n agent_directions[i] = path[-1][2]\n agent_shelf_original_locations[i] = None\n agent_status[i] = 2\n elif agent_stat == 2:\n # goal is to bring back shelf --> now succeeded\n # new goal: identify new random unrequested shelf to collect\n # find unrequested shelf\n shelf = np.random.choice(self.shelfs)\n agent_locations[i] = goal\n agent_goals[i] = (shelf.x, shelf.y)\n agent_shelf_original_locations[i] = None\n state = (goal[0], goal[1], agent_direction, False, (-1, -1))\n goal_state = (agent_goals[i][0], agent_goals[i][1], None, False, (-1, -1))\n path = pathfinder(state, goal_state)\n agent_goal_distances[i] = len(path)\n agent_status[i] = 0\n agent_directions[i] = path[-1][2]\n else:\n # not yet reached goal --> get one closer to goal\n agent_goal_distances[i] -= 1\n \n if self.reward_type == RewardType.GLOBAL:\n total_returns = sum(agent_deliveries)\n self.calculated_optimal_returns = [total_returns] * self.n_agents\n else:\n self.calculated_optimal_returns = agent_deliveries\n return self.calculated_optimal_returns\n\n\nif __name__ == \"__main__\":\n env = Warehouse(9, 8, 3, 10, 3, 1, 5, None, None, RewardType.GLOBAL)\n env.reset()\n import time\n from tqdm import tqdm\n\n time.sleep(2)\n # env.render()\n # env.step(18 * [Action.LOAD] + 2 * [Action.NOOP])\n\n for _ in tqdm(range(1000000)):\n # time.sleep(2)\n # env.render()\n actions = env.action_space.sample()\n env.step(actions)"
]
| [
[
"numpy.rot90",
"numpy.pad",
"numpy.array",
"numpy.random.choice",
"numpy.zeros",
"numpy.ones",
"numpy.unravel_index",
"numpy.stack",
"numpy.arange",
"numpy.random.randint",
"numpy.indices"
]
]
|
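The optimal_returns helper in the rware snippet above estimates step counts with A* over grid states, using a Manhattan heuristic (named hamming_distance there) and restricting loaded agents to highway cells. Below is a minimal, self-contained sketch of that path-length idea, assuming only NumPy and the standard library; the names manhattan and astar_path_length and the toy grid are illustrative assumptions, not part of the rware API.

import heapq
import numpy as np

def manhattan(a, b):
    # L1 distance on grid coordinates (the snippet above calls this "hamming_distance").
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

def astar_path_length(grid, start, goal):
    """Return the number of 4-neighbour steps from start to goal, or None if unreachable.
    Cells with grid value 1 are blocked (e.g. non-highway cells for a loaded agent)."""
    open_heap = [(manhattan(start, goal), 0, start)]
    best = {start: 0}
    while open_heap:
        _, g, node = heapq.heappop(open_heap)
        if node == goal:
            return g
        x, y = node
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            nx, ny = nxt
            if 0 <= nx < grid.shape[0] and 0 <= ny < grid.shape[1] and grid[nx, ny] == 0:
                if g + 1 < best.get(nxt, np.inf):
                    best[nxt] = g + 1
                    heapq.heappush(open_heap, (g + 1 + manhattan(nxt, goal), g + 1, nxt))
    return None

grid = np.zeros((5, 5), dtype=int)
grid[1:5, 2] = 1  # block column 2 everywhere except the top row
print(astar_path_length(grid, (4, 0), (4, 4)))  # 12: detour through the open cell (0, 2)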
GChen-ai/simple-deeplearning-framework | [
"0ad48f107457f576e4952e75637d68fa3da4f5a7"
]
| [
"test_pool.py"
]
| [
"from src.layers import*\nimport numpy as np\npool=Avgpool2D(2)\nimg=np.array([[[[3,0,4,2],[6,5,4,1],[3,0,2,2],[1,1,1,1]]\n ],\n [[[3,0,4,2],[6,5,4,1],[3,0,2,2],[1,1,1,1]]\n ]])\nprint(img.shape)\nout=pool.forward(img)\nprint(out)\nback=pool.backward(out)"
]
| [
[
"numpy.array"
]
]
|
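The test_pool.py row above runs Avgpool2D(2).forward on a (2, 1, 4, 4) array. As a point of reference, here is a minimal NumPy sketch of non-overlapping 2x2 average pooling; the helper name avgpool2d_forward is an assumption for illustration and is not the src.layers implementation.

import numpy as np

def avgpool2d_forward(x, k=2):
    # x has shape (batch, channels, height, width); height and width are assumed divisible by k.
    n, c, h, w = x.shape
    # Split each spatial axis into (blocks, k) and average over the intra-block axes.
    return x.reshape(n, c, h // k, k, w // k, k).mean(axis=(3, 5))

img = np.array([[[[3, 0, 4, 2], [6, 5, 4, 1], [3, 0, 2, 2], [1, 1, 1, 1]]],
                [[[3, 0, 4, 2], [6, 5, 4, 1], [3, 0, 2, 2], [1, 1, 1, 1]]]])
print(avgpool2d_forward(img))  # each entry is a 2x2 block mean, e.g. (3 + 0 + 6 + 5) / 4 = 3.5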
pelperscience/arctic-connectivity | [
"946e90c5310682d7e2499e2ca9e2fd5ace71535c"
]
| [
"community_detection/cooc.py"
]
| [
"\"\"\"Create a matrix that shows how often bin pairs fall in the same degenerate solution.\"\"\"\n\nimport numpy as np\nfrom scipy import sparse\nimport xarray as xr ######################### xr\n\nfrom itertools import combinations\n\nimport sys\nimport pickle\nfrom glob import glob\nfrom importlib import reload\n\nsys.path.append('/science/users/4302001/arctic-connectivity/tools')\nsys.path.append('/Users/daanreijnders/surfdrive/Thesis/repository/tools')\nimport community\n\nreadDir = \"/data/oceanparcels/input_data/CMEMS/GLOBAL_REANALYSIS_PHY_001_030/\"\nmeanDir = \"/data/oceanparcels/input_data/CMEMS/GLOBAL_REANALYSIS_PHY_001_030_monthly/\"\nfieldFiles = sorted(glob(readDir + \"mercatorglorys12v1_gl12_mean_*.nc\"))\n\nwritedir = '/scratch/DaanR/psets/'\n\npsetdir = \"/data/oceanparcels/output_data/data_Daan/psets/\"\nmatdir = \"/data/oceanparcels/output_data/data_Daan/matrices/\"\nnetdir = \"/data/oceanparcels/output_data/data_Daan/networks/\"\ncomdir = \"/data/oceanparcels/output_data/data_Daan/communities/\"\n\nwith open('/scratch/DaanR/meshes/ico_mesh_hex_r7.pickle', 'rb') as meshPick:\n meshDict = pickle.load(meshPick)\nmyBins = community.hexCountBins(meshDict['points'], \n np.degrees(meshDict['lons']), \n np.degrees(meshDict['lats']), \n meshDict['permutation'], \n meshDict['simplices'])\nmyMask = community.hexMask(myBins, -180, 180, 60, 90)\nmyMask.growToLevel(4)\nmyBins.calculate_voronoi(myMask, innerMaskLevel=2, outerMaskLevel=3)\n#myBins.add_regular_rim()\nmyBins.calculate_neighbors()\ndel meshDict\nwith open(\"oceanMask_no_rim.pickle\", 'rb') as pickFile:\n myBins.oceanMask = pickle.load(pickFile)\n \nmyParts = community.particles.from_pickle('/scratch/DaanR/meshes/ico_mesh_parts_deg_arctic_r11_delland.pickle')\nprint(\"Number of particles:\", myParts.n)\n\nensembleCommunityID = {}\ncodelengths = []\nfor run in range(1, 101):\n myBins.load_communities(comdir + f\"infomap_ensemble/masked_network_Rcmems_Pico11_S2018-3-1_D90_DT20_ODT24_Cico7_mt2_multirunN{run}.clu\")\n ensembleCommunityID[run-1] = myBins.communityID\n codelengths.append(myBins.codelength)\n \nN = np.sum(myBins.oceanMask)\ncooc = np.zeros((N, N))\nfor run in range(0,100):\n for comm in np.unique(ensembleCommunityID[run].data[~ensembleCommunityID[run].mask]):\n coocCurrCom = np.expand_dims((ensembleCommunityID[run].data[~ensembleCommunityID[run].mask] == comm).astype('int'), axis=1)\n cooc = cooc + np.matmul(coocCurrCom, coocCurrCom.T)\n \nwith open(\"cooccurance.pickle\", \"wb\") as pickFile:\n pickle.dump(cooc, pickFile)\n"
]
| [
[
"numpy.matmul",
"numpy.zeros",
"numpy.sum",
"numpy.degrees",
"numpy.unique"
]
]
|
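The cooc.py row above builds a bin-pair co-occurrence matrix by summing outer products of community-membership indicator vectors over 100 Infomap runs. The following toy sketch shows that accumulation pattern in isolation; the ensemble_labels array is made-up example data, not output of the CMEMS pipeline.

import numpy as np

# Three ensemble runs assigning five bins to communities (labels are arbitrary integers).
ensemble_labels = np.array([
    [0, 0, 1, 1, 2],
    [0, 0, 0, 1, 1],
    [2, 2, 1, 1, 1],
])

n_bins = ensemble_labels.shape[1]
cooc = np.zeros((n_bins, n_bins))
for labels in ensemble_labels:
    for comm in np.unique(labels):
        member = np.expand_dims((labels == comm).astype(int), axis=1)  # column indicator vector
        cooc += np.matmul(member, member.T)  # +1 for every bin pair sharing this community

print(cooc)  # cooc[i, j] = number of runs in which bins i and j fall in the same community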
rongou/cudf | [
"23cafcf0ae1a2fe5e6b7138f4c92c2dbfa2ec93b"
]
| [
"python/cudf/cudf/core/series.py"
]
| [
"# Copyright (c) 2018-2021, NVIDIA CORPORATION.\n\nfrom __future__ import annotations\n\nimport functools\nimport inspect\nimport pickle\nimport warnings\nfrom collections import abc as abc\nfrom hashlib import sha256\nfrom numbers import Number\nfrom shutil import get_terminal_size\nfrom typing import Any, MutableMapping, Optional, Set, Union\n\nimport cupy\nimport numpy as np\nimport pandas as pd\nfrom numba import cuda\nfrom pandas._config import get_option\n\nimport cudf\nfrom cudf import _lib as libcudf\nfrom cudf._lib.scalar import _is_null_host_scalar\nfrom cudf._lib.transform import bools_to_mask\nfrom cudf._typing import ColumnLike, DataFrameOrSeries, ScalarLike\nfrom cudf.api.types import (\n _is_non_decimal_numeric_dtype,\n _is_scalar_or_zero_d_array,\n is_bool_dtype,\n is_categorical_dtype,\n is_decimal_dtype,\n is_dict_like,\n is_dtype_equal,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_dtype,\n is_list_like,\n is_scalar,\n is_struct_dtype,\n)\nfrom cudf.core.abc import Serializable\nfrom cudf.core.column import (\n DatetimeColumn,\n TimeDeltaColumn,\n arange,\n as_column,\n column,\n column_empty_like,\n full,\n)\nfrom cudf.core.column.categorical import (\n CategoricalAccessor as CategoricalAccessor,\n)\nfrom cudf.core.column.column import concat_columns\nfrom cudf.core.column.lists import ListMethods\nfrom cudf.core.column.string import StringMethods\nfrom cudf.core.column.struct import StructMethods\nfrom cudf.core.column_accessor import ColumnAccessor\nfrom cudf.core.frame import Frame, _drop_rows_by_labels\nfrom cudf.core.groupby.groupby import SeriesGroupBy\nfrom cudf.core.index import BaseIndex, RangeIndex, as_index\nfrom cudf.core.indexed_frame import (\n IndexedFrame,\n _FrameIndexer,\n _get_label_range_or_mask,\n _indices_from_labels,\n)\nfrom cudf.core.single_column_frame import SingleColumnFrame\nfrom cudf.utils import cudautils, docutils\nfrom cudf.utils.docutils import copy_docstring\nfrom cudf.utils.dtypes import (\n can_convert_to_column,\n find_common_type,\n is_mixed_with_object_dtype,\n min_scalar_type,\n)\nfrom cudf.utils.utils import (\n get_appropriate_dispatched_func,\n get_relevant_submodule,\n to_cudf_compatible_scalar,\n)\n\n\ndef _append_new_row_inplace(col: ColumnLike, value: ScalarLike):\n \"\"\"Append a scalar `value` to the end of `col` inplace.\n Cast to common type if possible\n \"\"\"\n to_type = find_common_type([type(value), col.dtype])\n val_col = as_column(value, dtype=to_type)\n old_col = col.astype(to_type)\n\n col._mimic_inplace(concat_columns([old_col, val_col]), inplace=True)\n\n\nclass _SeriesIlocIndexer(_FrameIndexer):\n \"\"\"\n For integer-location based selection.\n \"\"\"\n\n def __getitem__(self, arg):\n if isinstance(arg, tuple):\n arg = list(arg)\n data = self._frame._column[arg]\n\n if (\n isinstance(data, (dict, list))\n or _is_scalar_or_zero_d_array(data)\n or _is_null_host_scalar(data)\n ):\n return data\n return self._frame._from_data(\n {self._frame.name: data}, index=cudf.Index(self._frame.index[arg]),\n )\n\n def __setitem__(self, key, value):\n from cudf.core.column import column\n\n if isinstance(key, tuple):\n key = list(key)\n\n # coerce value into a scalar or column\n if is_scalar(value):\n value = to_cudf_compatible_scalar(value)\n elif not (\n isinstance(value, (list, dict))\n and isinstance(\n self._frame._column.dtype, (cudf.ListDtype, cudf.StructDtype)\n )\n ):\n value = column.as_column(value)\n\n if (\n not isinstance(\n self._frame._column.dtype,\n (\n cudf.Decimal64Dtype,\n 
cudf.Decimal32Dtype,\n cudf.CategoricalDtype,\n ),\n )\n and hasattr(value, \"dtype\")\n and _is_non_decimal_numeric_dtype(value.dtype)\n ):\n # normalize types if necessary:\n if not is_integer(key):\n to_dtype = np.result_type(\n value.dtype, self._frame._column.dtype\n )\n value = value.astype(to_dtype)\n self._frame._column._mimic_inplace(\n self._frame._column.astype(to_dtype), inplace=True\n )\n\n self._frame._column[key] = value\n\n\nclass _SeriesLocIndexer(_FrameIndexer):\n \"\"\"\n Label-based selection\n \"\"\"\n\n def __getitem__(self, arg: Any) -> Union[ScalarLike, DataFrameOrSeries]:\n if isinstance(arg, pd.MultiIndex):\n arg = cudf.from_pandas(arg)\n\n if isinstance(self._frame.index, cudf.MultiIndex) and not isinstance(\n arg, cudf.MultiIndex\n ):\n result = self._frame.index._get_row_major(self._frame, arg)\n if (\n isinstance(arg, tuple)\n and len(arg) == self._frame._index.nlevels\n and not any((isinstance(x, slice) for x in arg))\n ):\n result = result.iloc[0]\n return result\n try:\n arg = self._loc_to_iloc(arg)\n except (TypeError, KeyError, IndexError, ValueError):\n raise KeyError(arg)\n\n return self._frame.iloc[arg]\n\n def __setitem__(self, key, value):\n try:\n key = self._loc_to_iloc(key)\n except KeyError as e:\n if (\n is_scalar(key)\n and not isinstance(self._frame.index, cudf.MultiIndex)\n and is_scalar(value)\n ):\n _append_new_row_inplace(self._frame.index._values, key)\n _append_new_row_inplace(self._frame._column, value)\n return\n else:\n raise e\n if isinstance(value, (pd.Series, cudf.Series)):\n value = cudf.Series(value)\n value = value._align_to_index(self._frame.index, how=\"right\")\n self._frame.iloc[key] = value\n\n def _loc_to_iloc(self, arg):\n if _is_scalar_or_zero_d_array(arg):\n if not _is_non_decimal_numeric_dtype(self._frame.index.dtype):\n # TODO: switch to cudf.utils.dtypes.is_integer(arg)\n if isinstance(arg, cudf.Scalar) and is_integer_dtype(\n arg.dtype\n ):\n found_index = arg.value\n return found_index\n elif is_integer(arg):\n found_index = arg\n return found_index\n try:\n found_index = self._frame.index._values.find_first_value(\n arg, closest=False\n )\n return found_index\n except (TypeError, KeyError, IndexError, ValueError):\n raise KeyError(\"label scalar is out of bound\")\n\n elif isinstance(arg, slice):\n return _get_label_range_or_mask(\n self._frame.index, arg.start, arg.stop, arg.step\n )\n elif isinstance(arg, (cudf.MultiIndex, pd.MultiIndex)):\n if isinstance(arg, pd.MultiIndex):\n arg = cudf.MultiIndex.from_pandas(arg)\n\n return _indices_from_labels(self._frame, arg)\n\n else:\n arg = cudf.core.series.Series(cudf.core.column.as_column(arg))\n if arg.dtype in (bool, np.bool_):\n return arg\n else:\n indices = _indices_from_labels(self._frame, arg)\n if indices.null_count > 0:\n raise KeyError(\"label scalar is out of bound\")\n return indices\n\n\nclass Series(SingleColumnFrame, IndexedFrame, Serializable):\n \"\"\"\n One-dimensional GPU array (including time series).\n\n Labels need not be unique but must be a hashable type. The object\n supports both integer- and label-based indexing and provides a\n host of methods for performing operations involving the index.\n Statistical methods from ndarray have been overridden to\n automatically exclude missing data (currently represented\n as null/NaN).\n\n Operations between Series (`+`, `-`, `/`, `*`, `**`) align\n values based on their associated index values-– they need\n not be the same length. 
The result index will be the\n sorted union of the two indexes.\n\n ``Series`` objects are used as columns of ``DataFrame``.\n\n Parameters\n ----------\n data : array-like, Iterable, dict, or scalar value\n Contains data stored in Series.\n\n index : array-like or Index (1d)\n Values must be hashable and have the same length\n as data. Non-unique index values are allowed. Will\n default to RangeIndex (0, 1, 2, …, n) if not provided.\n If both a dict and index sequence are used, the index will\n override the keys found in the dict.\n\n dtype : str, numpy.dtype, or ExtensionDtype, optional\n Data type for the output Series. If not specified,\n this will be inferred from data.\n\n name : str, optional\n The name to give to the Series.\n\n nan_as_null : bool, Default True\n If ``None``/``True``, converts ``np.nan`` values to\n ``null`` values.\n If ``False``, leaves ``np.nan`` values as is.\n \"\"\"\n\n _accessors: Set[Any] = set()\n _loc_indexer_type = _SeriesLocIndexer\n _iloc_indexer_type = _SeriesIlocIndexer\n\n # The `constructor*` properties are used by `dask` (and `dask_cudf`)\n @property\n def _constructor(self):\n return Series\n\n @property\n def _constructor_sliced(self):\n raise NotImplementedError(\n \"_constructor_sliced not supported for Series!\"\n )\n\n @property\n def _constructor_expanddim(self):\n return cudf.DataFrame\n\n @classmethod\n def from_categorical(cls, categorical, codes=None):\n \"\"\"Creates from a pandas.Categorical\n\n Parameters\n ----------\n categorical : pandas.Categorical\n Contains data stored in a pandas Categorical.\n\n codes : array-like, optional.\n The category codes of this categorical. If ``codes`` are\n defined, they are used instead of ``categorical.codes``\n\n Returns\n -------\n Series\n A cudf categorical series.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> pd_categorical = pd.Categorical(pd.Series(['a', 'b', 'c', 'a'], dtype='category'))\n >>> pd_categorical\n ['a', 'b', 'c', 'a']\n Categories (3, object): ['a', 'b', 'c']\n >>> series = cudf.Series.from_categorical(pd_categorical)\n >>> series\n 0 a\n 1 b\n 2 c\n 3 a\n dtype: category\n Categories (3, object): ['a', 'b', 'c']\n \"\"\" # noqa: E501\n col = cudf.core.column.categorical.pandas_categorical_as_column(\n categorical, codes=codes\n )\n return Series(data=col)\n\n @classmethod\n def from_masked_array(cls, data, mask, null_count=None):\n \"\"\"Create a Series with null-mask.\n This is equivalent to:\n\n Series(data).set_mask(mask, null_count=null_count)\n\n Parameters\n ----------\n data : 1D array-like\n The values. Null values must not be skipped. They can appear\n as garbage values.\n mask : 1D array-like\n The null-mask. 
Valid values are marked as ``1``; otherwise ``0``.\n The mask bit given the data index ``idx`` is computed as::\n\n (mask[idx // 8] >> (idx % 8)) & 1\n null_count : int, optional\n The number of null values.\n If None, it is calculated automatically.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> import cudf\n >>> a = cudf.Series([1, 2, 3, None, 4, None])\n >>> a\n 0 1\n 1 2\n 2 3\n 3 <NA>\n 4 4\n 5 <NA>\n dtype: int64\n >>> b = cudf.Series([10, 11, 12, 13, 14])\n >>> cudf.Series.from_masked_array(data=b, mask=a._column.mask)\n 0 10\n 1 11\n 2 12\n 3 <NA>\n 4 14\n dtype: int64\n \"\"\"\n col = column.as_column(data).set_mask(mask)\n return cls(data=col)\n\n def __init__(\n self, data=None, index=None, dtype=None, name=None, nan_as_null=True,\n ):\n if isinstance(data, pd.Series):\n if name is None:\n name = data.name\n if isinstance(data.index, pd.MultiIndex):\n index = cudf.from_pandas(data.index)\n else:\n index = as_index(data.index)\n elif isinstance(data, pd.Index):\n name = data.name\n data = data.values\n elif isinstance(data, BaseIndex):\n name = data.name\n data = data._values\n if dtype is not None:\n data = data.astype(dtype)\n elif isinstance(data, ColumnAccessor):\n name, data = data.names[0], data.columns[0]\n\n if isinstance(data, Series):\n index = data._index if index is None else index\n if name is None:\n name = data.name\n data = data._column\n if dtype is not None:\n data = data.astype(dtype)\n\n if isinstance(data, dict):\n index = data.keys()\n data = column.as_column(\n list(data.values()), nan_as_null=nan_as_null, dtype=dtype\n )\n\n if data is None:\n if index is not None:\n data = column.column_empty(\n row_count=len(index), dtype=None, masked=True\n )\n else:\n data = {}\n\n if not isinstance(data, column.ColumnBase):\n data = column.as_column(data, nan_as_null=nan_as_null, dtype=dtype)\n else:\n if dtype is not None:\n data = data.astype(dtype)\n\n if index is not None and not isinstance(index, BaseIndex):\n index = as_index(index)\n\n assert isinstance(data, column.ColumnBase)\n\n super().__init__({name: data})\n self._index = RangeIndex(len(data)) if index is None else index\n\n @classmethod\n def _from_data(\n cls,\n data: MutableMapping,\n index: Optional[BaseIndex] = None,\n name: Any = None,\n ) -> Series:\n \"\"\"\n Construct the Series from a ColumnAccessor\n \"\"\"\n out: Series = super()._from_data(data, index, name)\n if index is None:\n out._index = RangeIndex(out._data.nrows)\n return out\n\n def __contains__(self, item):\n return item in self._index\n\n @classmethod\n def from_pandas(cls, s, nan_as_null=None):\n \"\"\"\n Convert from a Pandas Series.\n\n Parameters\n ----------\n s : Pandas Series object\n A Pandas Series object which has to be converted\n to cuDF Series.\n nan_as_null : bool, Default None\n If ``None``/``True``, converts ``np.nan`` values to\n ``null`` values.\n If ``False``, leaves ``np.nan`` values as is.\n\n Raises\n ------\n TypeError for invalid input type.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> import numpy as np\n >>> data = [10, 20, 30, np.nan]\n >>> pds = pd.Series(data)\n >>> cudf.Series.from_pandas(pds)\n 0 10.0\n 1 20.0\n 2 30.0\n 3 <NA>\n dtype: float64\n >>> cudf.Series.from_pandas(pds, nan_as_null=False)\n 0 10.0\n 1 20.0\n 2 30.0\n 3 NaN\n dtype: float64\n \"\"\"\n return cls(s, nan_as_null=nan_as_null)\n\n @property\n def dt(self):\n \"\"\"\n Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n >>> s.dt.hour\n >>> 
s.dt.second\n >>> s.dt.day\n\n Returns\n -------\n A Series indexed like the original Series.\n\n Raises\n ------\n TypeError if the Series does not contain datetimelike values.\n \"\"\"\n if isinstance(self._column, DatetimeColumn):\n return DatetimeProperties(self)\n elif isinstance(self._column, TimeDeltaColumn):\n return TimedeltaProperties(self)\n else:\n raise AttributeError(\n \"Can only use .dt accessor with datetimelike values\"\n )\n\n def serialize(self):\n header, frames = super().serialize()\n\n header[\"index\"], index_frames = self._index.serialize()\n header[\"index_frame_count\"] = len(index_frames)\n # For backwards compatibility with older versions of cuDF, index\n # columns are placed before data columns.\n frames = index_frames + frames\n\n return header, frames\n\n @classmethod\n def deserialize(cls, header, frames):\n if \"column\" in header:\n warnings.warn(\n \"Series objects serialized in cudf version \"\n \"21.10 or older will no longer be deserializable \"\n \"after version 21.12. Please load and resave any \"\n \"pickles before upgrading to version 22.02.\",\n FutureWarning,\n )\n header[\"columns\"] = [header.pop(\"column\")]\n header[\"column_names\"] = pickle.dumps(\n [pickle.loads(header[\"name\"])]\n )\n\n index_nframes = header[\"index_frame_count\"]\n obj = super().deserialize(\n header, frames[header[\"index_frame_count\"] :]\n )\n\n idx_typ = pickle.loads(header[\"index\"][\"type-serialized\"])\n index = idx_typ.deserialize(header[\"index\"], frames[:index_nframes])\n obj._index = index\n\n return obj\n\n def _get_columns_by_label(self, labels, downcast=False):\n \"\"\"Return the column specified by `labels`\n\n For cudf.Series, either the column, or an empty series is returned.\n Parameter `downcast` does not have effects.\n \"\"\"\n new_data = super()._get_columns_by_label(labels, downcast)\n\n return (\n self.__class__(data=new_data, index=self.index)\n if len(new_data) > 0\n else self.__class__(dtype=self.dtype, name=self.name)\n )\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace=False,\n errors=\"raise\",\n ):\n \"\"\"\n Return Series with specified index labels removed.\n\n Remove elements of a Series based on specifying the index labels.\n When using a multi-index, labels on different levels can be removed by\n specifying the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index labels to drop.\n axis : 0, default 0\n Redundant for application on Series.\n index : single label or list-like\n Redundant for application on Series. But ``index`` can be used\n instead of ``labels``\n columns : single label or list-like\n This parameter is ignored. Use ``index`` or ``labels`` to specify.\n level : int or level name, optional\n For MultiIndex, level from which the labels will be removed.\n inplace : bool, default False\n If False, return a copy. 
Otherwise, do operation\n inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are\n dropped.\n\n Returns\n -------\n Series or None\n Series with specified index labels removed or None if\n ``inplace=True``\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n ``error='raise'``\n\n See Also\n --------\n Series.reindex\n Return only specified index labels of Series\n Series.dropna\n Return series without null values\n Series.drop_duplicates\n Return series with duplicate values removed\n cudf.DataFrame.drop\n Drop specified labels from rows or columns in dataframe\n\n Examples\n --------\n >>> s = cudf.Series([1,2,3], index=['x', 'y', 'z'])\n >>> s\n x 1\n y 2\n z 3\n dtype: int64\n\n Drop labels x and z\n\n >>> s.drop(labels=['x', 'z'])\n y 2\n dtype: int64\n\n Drop a label from the second level in MultiIndex Series.\n\n >>> midx = cudf.MultiIndex.from_product([[0, 1, 2], ['x', 'y']])\n >>> s = cudf.Series(range(6), index=midx)\n >>> s\n 0 x 0\n y 1\n 1 x 2\n y 3\n 2 x 4\n y 5\n >>> s.drop(labels='y', level=1)\n 0 x 0\n 1 x 2\n 2 x 4\n \"\"\"\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\n \"Cannot specify both 'labels' and 'index'/'columns'\"\n )\n if axis == 1:\n raise ValueError(\"No axis named 1 for object type Series\")\n target = labels\n elif index is not None:\n target = index\n elif columns is not None:\n target = [] # Ignore parameter columns\n else:\n raise ValueError(\n \"Need to specify at least one of 'labels', \"\n \"'index' or 'columns'\"\n )\n\n if inplace:\n out = self\n else:\n out = self.copy()\n\n dropped = _drop_rows_by_labels(out, target, level, errors)\n\n out._data = dropped._data\n out._index = dropped._index\n\n if not inplace:\n return out\n\n def append(self, to_append, ignore_index=False, verify_integrity=False):\n \"\"\"Append values from another ``Series`` or array-like object.\n If ``ignore_index=True``, the index is reset.\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n Series to append with self.\n ignore_index : boolean, default False.\n If True, do not use the index.\n verify_integrity : bool, default False\n This Parameter is currently not supported.\n\n Returns\n -------\n Series\n A new concatenated series\n\n See Also\n --------\n cudf.concat : General function to concatenate DataFrame or\n Series objects.\n\n Examples\n --------\n >>> import cudf\n >>> s1 = cudf.Series([1, 2, 3])\n >>> s2 = cudf.Series([4, 5, 6])\n >>> s1\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s2\n 0 4\n 1 5\n 2 6\n dtype: int64\n >>> s1.append(s2)\n 0 1\n 1 2\n 2 3\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s3 = cudf.Series([4, 5, 6], index=[3, 4, 5])\n >>> s3\n 3 4\n 4 5\n 5 6\n dtype: int64\n >>> s1.append(s3)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With `ignore_index` set to True:\n\n >>> s1.append(s2, ignore_index=True)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n \"\"\"\n if verify_integrity not in (None, False):\n raise NotImplementedError(\n \"verify_integrity parameter is not supported yet.\"\n )\n\n if is_list_like(to_append):\n to_concat = [self]\n to_concat.extend(to_append)\n else:\n to_concat = [self, to_append]\n\n return cudf.concat(to_concat, ignore_index=ignore_index)\n\n def reindex(self, index=None, copy=True):\n \"\"\"Return a Series that conforms to a new index\n\n Parameters\n ----------\n index : Index, Series-convertible, default None\n copy : 
boolean, default True\n\n Returns\n -------\n A new Series that conforms to the supplied index\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([10, 20, 30, 40], index=['a', 'b', 'c', 'd'])\n >>> series\n a 10\n b 20\n c 30\n d 40\n dtype: int64\n >>> series.reindex(['a', 'b', 'y', 'z'])\n a 10\n b 20\n y <NA>\n z <NA>\n dtype: int64\n \"\"\"\n name = self.name or 0\n idx = self._index if index is None else index\n series = self.to_frame(name).reindex(idx, copy=copy)[name]\n series.name = self.name\n return series\n\n def reset_index(self, drop=False, inplace=False):\n \"\"\"\n Reset index to RangeIndex\n\n Parameters\n ----------\n drop : bool, default False\n Just reset the index, without inserting it as a column in\n the new DataFrame.\n inplace : bool, default False\n Modify the Series in place (do not create a new object).\n\n Returns\n -------\n Series or DataFrame or None\n When `drop` is False (the default), a DataFrame is returned.\n The newly created columns will come first in the DataFrame,\n followed by the original Series values.\n When `drop` is True, a `Series` is returned.\n In either case, if ``inplace=True``, no value is returned.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series(['a', 'b', 'c', 'd'], index=[10, 11, 12, 13])\n >>> series\n 10 a\n 11 b\n 12 c\n 13 d\n dtype: object\n >>> series.reset_index()\n index 0\n 0 10 a\n 1 11 b\n 2 12 c\n 3 13 d\n >>> series.reset_index(drop=True)\n 0 a\n 1 b\n 2 c\n 3 d\n dtype: object\n \"\"\"\n if not drop:\n if inplace is True:\n raise TypeError(\n \"Cannot reset_index inplace on a Series \"\n \"to create a DataFrame\"\n )\n return self.to_frame().reset_index(drop=drop)\n else:\n if inplace is True:\n self._index = RangeIndex(len(self))\n else:\n return self._from_data(self._data, index=RangeIndex(len(self)))\n\n def set_index(self, index):\n \"\"\"Returns a new Series with a different index.\n\n Parameters\n ----------\n index : Index, Series-convertible\n the new index or values for the new index\n\n Returns\n -------\n Series\n A new Series with assigned index.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([10, 11, 12, 13, 14])\n >>> series\n 0 10\n 1 11\n 2 12\n 3 13\n 4 14\n dtype: int64\n >>> series.set_index(['a', 'b', 'c', 'd', 'e'])\n a 10\n b 11\n c 12\n d 13\n e 14\n dtype: int64\n \"\"\"\n warnings.warn(\n \"Series.set_index is deprecated and will be removed in the future\",\n FutureWarning,\n )\n index = index if isinstance(index, BaseIndex) else as_index(index)\n return self._from_data(self._data, index, self.name)\n\n def to_frame(self, name=None):\n \"\"\"Convert Series into a DataFrame\n\n Parameters\n ----------\n name : str, default None\n Name to be used for the column\n\n Returns\n -------\n DataFrame\n cudf DataFrame\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series(['a', 'b', 'c', None, 'd'], name='sample', index=[10, 11, 12, 13, 15])\n >>> series\n 10 a\n 11 b\n 12 c\n 13 <NA>\n 15 d\n Name: sample, dtype: object\n >>> series.to_frame()\n sample\n 10 a\n 11 b\n 12 c\n 13 <NA>\n 15 d\n \"\"\" # noqa: E501\n\n if name is not None:\n col = name\n elif self.name is None:\n col = 0\n else:\n col = self.name\n\n return cudf.DataFrame({col: self._column}, index=self.index)\n\n def set_mask(self, mask, null_count=None):\n warnings.warn(\n \"Series.set_mask is deprecated and will be removed in the future.\",\n FutureWarning,\n )\n return self._from_data(\n {self.name: self._column.set_mask(mask)}, self._index\n )\n\n def 
memory_usage(self, index=True, deep=False):\n \"\"\"\n Return the memory usage of the Series.\n\n The memory usage can optionally include the contribution of\n the index and of elements of `object` dtype.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the Series index.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned value.\n\n Returns\n -------\n int\n Bytes of memory consumed.\n\n See Also\n --------\n cudf.DataFrame.memory_usage : Bytes consumed by\n a DataFrame.\n\n Examples\n --------\n >>> s = cudf.Series(range(3), index=['a','b','c'])\n >>> s.memory_usage()\n 48\n\n Not including the index gives the size of the rest of the data, which\n is necessarily smaller:\n\n >>> s.memory_usage(index=False)\n 24\n \"\"\"\n if deep:\n warnings.warn(\n \"The deep parameter is ignored and is only included \"\n \"for pandas compatibility.\"\n )\n n = self._column.memory_usage()\n if index:\n n += self._index.memory_usage()\n return n\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if method == \"__call__\":\n return get_appropriate_dispatched_func(\n cudf, cudf.Series, cupy, ufunc, inputs, kwargs\n )\n else:\n return NotImplemented\n\n def __array_function__(self, func, types, args, kwargs):\n handled_types = [cudf.Series]\n for t in types:\n if t not in handled_types:\n return NotImplemented\n\n cudf_submodule = get_relevant_submodule(func, cudf)\n cudf_ser_submodule = get_relevant_submodule(func, cudf.Series)\n cupy_submodule = get_relevant_submodule(func, cupy)\n\n return get_appropriate_dispatched_func(\n cudf_submodule,\n cudf_ser_submodule,\n cupy_submodule,\n func,\n args,\n kwargs,\n )\n\n def map(self, arg, na_action=None) -> \"Series\":\n \"\"\"\n Map values of Series according to input correspondence.\n\n Used for substituting each value in a Series with another value,\n that may be derived from a function, a ``dict`` or\n a :class:`Series`.\n\n Parameters\n ----------\n arg : function, collections.abc.Mapping subclass or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}, default None\n If 'ignore', propagate NaN values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n Series\n Same index as caller.\n\n Examples\n --------\n >>> s = cudf.Series(['cat', 'dog', np.nan, 'rabbit'])\n >>> s\n 0 cat\n 1 dog\n 2 <NA>\n 3 rabbit\n dtype: object\n\n ``map`` accepts a ``dict`` or a ``Series``. 
Values that are not found\n in the ``dict`` are converted to ``NaN``, default values in dicts are\n currently not supported.:\n\n >>> s.map({'cat': 'kitten', 'dog': 'puppy'})\n 0 kitten\n 1 puppy\n 2 <NA>\n 3 <NA>\n dtype: object\n\n It also accepts numeric functions:\n\n >>> s = cudf.Series([1, 2, 3, 4, np.nan])\n >>> s.map(lambda x: x ** 2)\n 0 1\n 1 4\n 2 9\n 3 16\n 4 <NA>\n dtype: int64\n\n Notes\n -----\n Please note map currently only supports fixed-width numeric\n type functions.\n \"\"\"\n if isinstance(arg, dict):\n if hasattr(arg, \"__missing__\"):\n raise NotImplementedError(\n \"default values in dicts are currently not supported.\"\n )\n lhs = cudf.DataFrame({\"x\": self, \"orig_order\": arange(len(self))})\n rhs = cudf.DataFrame(\n {\n \"x\": arg.keys(),\n \"s\": arg.values(),\n \"bool\": full(len(arg), True, dtype=self.dtype),\n }\n )\n res = lhs.merge(rhs, on=\"x\", how=\"left\").sort_values(\n by=\"orig_order\"\n )\n result = res[\"s\"]\n result.name = self.name\n result.index = self.index\n elif isinstance(arg, cudf.Series):\n if not arg.index.is_unique:\n raise ValueError(\n \"Reindexing only valid with\"\n \" uniquely valued Index objects\"\n )\n lhs = cudf.DataFrame({\"x\": self, \"orig_order\": arange(len(self))})\n rhs = cudf.DataFrame(\n {\n \"x\": arg.keys(),\n \"s\": arg,\n \"bool\": full(len(arg), True, dtype=self.dtype),\n }\n )\n res = lhs.merge(rhs, on=\"x\", how=\"left\").sort_values(\n by=\"orig_order\"\n )\n result = res[\"s\"]\n result.name = self.name\n result.index = self.index\n else:\n result = self.applymap(arg)\n return result\n\n def __getitem__(self, arg):\n if isinstance(arg, slice):\n return self.iloc[arg]\n else:\n return self.loc[arg]\n\n iteritems = SingleColumnFrame.__iter__\n\n items = SingleColumnFrame.__iter__\n\n def __setitem__(self, key, value):\n if isinstance(key, slice):\n self.iloc[key] = value\n else:\n self.loc[key] = value\n\n def take(self, indices, axis=0, keep_index=True):\n # Validate but don't use the axis.\n _ = self._get_axis_from_axis_arg(axis)\n return super().take(indices, keep_index)\n\n def __repr__(self):\n _, height = get_terminal_size()\n max_rows = (\n height\n if get_option(\"display.max_rows\") == 0\n else get_option(\"display.max_rows\")\n )\n if max_rows not in (0, None) and len(self) > max_rows:\n top = self.head(int(max_rows / 2 + 1))\n bottom = self.tail(int(max_rows / 2 + 1))\n preprocess = cudf.concat([top, bottom])\n else:\n preprocess = self.copy()\n preprocess.index = preprocess.index._clean_nulls_from_index()\n if (\n preprocess.nullable\n and not isinstance(\n preprocess._column, cudf.core.column.CategoricalColumn\n )\n and not is_list_dtype(preprocess.dtype)\n and not is_struct_dtype(preprocess.dtype)\n and not is_decimal_dtype(preprocess.dtype)\n and not is_struct_dtype(preprocess.dtype)\n ) or isinstance(\n preprocess._column, cudf.core.column.timedelta.TimeDeltaColumn\n ):\n output = (\n preprocess.astype(\"O\")\n .fillna(cudf._NA_REP)\n .to_pandas()\n .__repr__()\n )\n elif isinstance(\n preprocess._column, cudf.core.column.CategoricalColumn\n ):\n min_rows = (\n height\n if get_option(\"display.min_rows\") == 0\n else get_option(\"display.min_rows\")\n )\n show_dimensions = get_option(\"display.show_dimensions\")\n if preprocess._column.categories.dtype.kind == \"f\":\n pd_series = (\n preprocess.astype(\"str\")\n .to_pandas()\n .astype(\n dtype=pd.CategoricalDtype(\n categories=preprocess.dtype.categories.astype(\n \"str\"\n ).to_pandas(),\n ordered=preprocess.dtype.ordered,\n )\n )\n )\n 
else:\n if is_categorical_dtype(self):\n if is_interval_dtype(\n self.dtype.categories\n ) and is_struct_dtype(preprocess._column.categories):\n # with a series input the IntervalIndex's are turn\n # into a struct dtype this line will change the\n # categories back to an intervalIndex.\n preprocess.dtype._categories = self.dtype.categories\n pd_series = preprocess.to_pandas()\n output = pd_series.to_string(\n name=self.name,\n dtype=self.dtype,\n min_rows=min_rows,\n max_rows=max_rows,\n length=show_dimensions,\n na_rep=cudf._NA_REP,\n )\n else:\n output = preprocess.to_pandas().__repr__()\n\n lines = output.split(\"\\n\")\n\n if isinstance(preprocess._column, cudf.core.column.CategoricalColumn):\n category_memory = lines[-1]\n if preprocess._column.categories.dtype.kind == \"f\":\n category_memory = category_memory.replace(\"'\", \"\").split(\": \")\n category_memory = (\n category_memory[0].replace(\n \"object\", preprocess._column.categories.dtype.name\n )\n + \": \"\n + category_memory[1]\n )\n lines = lines[:-1]\n if len(lines) > 1:\n if lines[-1].startswith(\"Name: \"):\n lines = lines[:-1]\n lines.append(\"Name: %s\" % str(self.name))\n if len(self) > len(preprocess):\n lines[-1] = lines[-1] + \", Length: %d\" % len(self)\n lines[-1] = lines[-1] + \", \"\n elif lines[-1].startswith(\"Length: \"):\n lines = lines[:-1]\n lines.append(\"Length: %d\" % len(self))\n lines[-1] = lines[-1] + \", \"\n else:\n lines = lines[:-1]\n lines[-1] = lines[-1] + \"\\n\"\n lines[-1] = lines[-1] + \"dtype: %s\" % self.dtype\n else:\n lines = output.split(\",\")\n lines[-1] = \" dtype: %s)\" % self.dtype\n return \",\".join(lines)\n if isinstance(preprocess._column, cudf.core.column.CategoricalColumn):\n lines.append(category_memory)\n return \"\\n\".join(lines)\n\n def _binaryop(\n self,\n other: Frame,\n fn: str,\n fill_value: Any = None,\n reflect: bool = False,\n can_reindex: bool = False,\n *args,\n **kwargs,\n ):\n # Specialize binops to align indices.\n if isinstance(other, SingleColumnFrame):\n if (\n # TODO: The can_reindex logic also needs to be applied for\n # DataFrame (the methods that need it just don't exist yet).\n not can_reindex\n and fn in cudf.utils.utils._EQUALITY_OPS\n and (\n isinstance(other, Series)\n # TODO: mypy doesn't like this line because the index\n # property is not defined on SingleColumnFrame (or Index,\n # for that matter). 
Ignoring is the easy solution for now,\n # a cleaner fix requires reworking the type hierarchy.\n and not self.index.equals(other.index) # type: ignore\n )\n ):\n raise ValueError(\n \"Can only compare identically-labeled Series objects\"\n )\n lhs, other = _align_indices([self, other], allow_non_unique=True)\n else:\n lhs = self\n\n operands = lhs._make_operands_for_binop(other, fill_value, reflect)\n return (\n lhs._from_data(\n data=lhs._colwise_binop(operands, fn), index=lhs._index,\n )\n if operands is not NotImplemented\n else NotImplemented\n )\n\n def logical_and(self, other):\n return self._binaryop(other, \"l_and\").astype(np.bool_)\n\n def remainder(self, other):\n return self._binaryop(other, \"mod\")\n\n def logical_or(self, other):\n return self._binaryop(other, \"l_or\").astype(np.bool_)\n\n def logical_not(self):\n return self._unaryop(\"not\")\n\n @copy_docstring(CategoricalAccessor) # type: ignore\n @property\n def cat(self):\n return CategoricalAccessor(parent=self)\n\n @copy_docstring(StringMethods) # type: ignore\n @property\n def str(self):\n return StringMethods(parent=self)\n\n @copy_docstring(ListMethods) # type: ignore\n @property\n def list(self):\n return ListMethods(parent=self)\n\n @copy_docstring(StructMethods) # type: ignore\n @property\n def struct(self):\n return StructMethods(parent=self)\n\n @property\n def dtype(self):\n \"\"\"dtype of the Series\"\"\"\n return self._column.dtype\n\n @classmethod\n def _concat(cls, objs, axis=0, index=True):\n # Concatenate index if not provided\n if index is True:\n if isinstance(objs[0].index, cudf.MultiIndex):\n index = cudf.MultiIndex._concat([o.index for o in objs])\n else:\n index = cudf.core.index.GenericIndex._concat(\n [o.index for o in objs]\n )\n\n names = {obj.name for obj in objs}\n if len(names) == 1:\n [name] = names\n else:\n name = None\n\n if len(objs) > 1:\n dtype_mismatch = False\n for obj in objs[1:]:\n if (\n obj.null_count == len(obj)\n or len(obj) == 0\n or isinstance(\n obj._column, cudf.core.column.CategoricalColumn\n )\n or isinstance(\n objs[0]._column, cudf.core.column.CategoricalColumn\n )\n ):\n continue\n\n if (\n not dtype_mismatch\n and (\n not isinstance(\n objs[0]._column, cudf.core.column.CategoricalColumn\n )\n and not isinstance(\n obj._column, cudf.core.column.CategoricalColumn\n )\n )\n and objs[0].dtype != obj.dtype\n ):\n dtype_mismatch = True\n\n if is_mixed_with_object_dtype(objs[0], obj):\n raise TypeError(\n \"cudf does not support mixed types, please type-cast \"\n \"both series to same dtypes.\"\n )\n\n if dtype_mismatch:\n common_dtype = find_common_type([obj.dtype for obj in objs])\n objs = [obj.astype(common_dtype) for obj in objs]\n\n col = concat_columns([o._column for o in objs])\n\n if isinstance(col, cudf.core.column.Decimal64Column):\n col = col._with_type_metadata(objs[0]._column.dtype)\n\n if isinstance(col, cudf.core.column.StructColumn):\n col = col._with_type_metadata(objs[0].dtype)\n\n return cls(data=col, index=index, name=name)\n\n @property\n def valid_count(self):\n \"\"\"Number of non-null values\"\"\"\n return self._column.valid_count\n\n @property\n def null_count(self):\n \"\"\"Number of null values\"\"\"\n return self._column.null_count\n\n @property\n def nullable(self):\n \"\"\"A boolean indicating whether a null-mask is needed\"\"\"\n return self._column.nullable\n\n @property\n def has_nulls(self):\n \"\"\"\n Indicator whether Series contains null values.\n\n Returns\n -------\n out : bool\n If Series has atleast one null value, return True, if 
not\n return False.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([1, 2, None, 3, 4])\n >>> series\n 0 1\n 1 2\n 2 <NA>\n 3 3\n 4 4\n dtype: int64\n >>> series.has_nulls\n True\n >>> series.dropna().has_nulls\n False\n \"\"\"\n return self._column.has_nulls()\n\n def dropna(self, axis=0, inplace=False, how=None):\n \"\"\"\n Return a Series with null values removed.\n\n Parameters\n ----------\n axis : {0 or ‘index’}, default 0\n There is only one axis to drop values from.\n inplace : bool, default False\n If True, do operation inplace and return None.\n how : str, optional\n Not in use. Kept for compatibility.\n\n Returns\n -------\n Series\n Series with null entries dropped from it.\n\n See Also\n --------\n Series.isna : Indicate null values.\n\n Series.notna : Indicate non-null values.\n\n Series.fillna : Replace null values.\n\n cudf.DataFrame.dropna : Drop rows or columns which\n contain null values.\n\n cudf.Index.dropna : Drop null indices.\n\n Examples\n --------\n >>> import cudf\n >>> ser = cudf.Series([1, 2, None])\n >>> ser\n 0 1\n 1 2\n 2 null\n dtype: int64\n\n Drop null values from a Series.\n\n >>> ser.dropna()\n 0 1\n 1 2\n dtype: int64\n\n Keep the Series with valid entries in the same variable.\n\n >>> ser.dropna(inplace=True)\n >>> ser\n 0 1\n 1 2\n dtype: int64\n\n Empty strings are not considered null values.\n `None` is considered a null value.\n\n >>> ser = cudf.Series(['', None, 'abc'])\n >>> ser\n 0\n 1 <NA>\n 2 abc\n dtype: object\n >>> ser.dropna()\n 0\n 2 abc\n dtype: object\n \"\"\"\n if axis not in (0, \"index\"):\n raise ValueError(\n \"Series.dropna supports only one axis to drop values from\"\n )\n\n result = super().dropna(axis=axis)\n\n return self._mimic_inplace(result, inplace=inplace)\n\n def drop_duplicates(self, keep=\"first\", inplace=False, ignore_index=False):\n \"\"\"\n Return Series with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n Method to handle dropping duplicates:\n\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n inplace : bool, default ``False``\n If ``True``, performs operation inplace and returns None.\n\n Returns\n -------\n Series or None\n Series with duplicates dropped or None if ``inplace=True``.\n\n Examples\n --------\n >>> s = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],\n ... name='animal')\n >>> s\n 0 lama\n 1 cow\n 2 lama\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n With the `keep` parameter, the selection behaviour of duplicated\n values can be changed. The value ‘first’ keeps the first\n occurrence for each set of duplicated entries.\n The default value of keep is ‘first’. Note that order of\n the rows being returned is not guaranteed\n to be sorted.\n\n >>> s.drop_duplicates()\n 3 beetle\n 1 cow\n 5 hippo\n 0 lama\n Name: animal, dtype: object\n\n The value ‘last’ for parameter `keep` keeps the last occurrence\n for each set of duplicated entries.\n\n >>> s.drop_duplicates(keep='last')\n 3 beetle\n 1 cow\n 5 hippo\n 4 lama\n Name: animal, dtype: object\n\n The value `False` for parameter `keep` discards all sets\n of duplicated entries. 
Setting the value of ‘inplace’ to\n `True` performs the operation inplace and returns `None`.\n\n >>> s.drop_duplicates(keep=False, inplace=True)\n >>> s\n 3 beetle\n 1 cow\n 5 hippo\n Name: animal, dtype: object\n \"\"\"\n result = super().drop_duplicates(keep=keep, ignore_index=ignore_index)\n\n return self._mimic_inplace(result, inplace=inplace)\n\n def fill(self, fill_value, begin=0, end=-1, inplace=False):\n return self._fill([fill_value], begin, end, inplace)\n\n def fillna(\n self, value=None, method=None, axis=None, inplace=False, limit=None\n ):\n if isinstance(value, pd.Series):\n value = Series.from_pandas(value)\n\n if not (is_scalar(value) or isinstance(value, (abc.Mapping, Series))):\n raise TypeError(\n f'\"value\" parameter must be a scalar, dict '\n f\"or Series, but you passed a \"\n f'\"{type(value).__name__}\"'\n )\n\n if isinstance(value, (abc.Mapping, Series)):\n value = Series(value)\n if not self.index.equals(value.index):\n value = value.reindex(self.index)\n value = value._column\n\n return super().fillna(\n value=value, method=method, axis=axis, inplace=inplace, limit=limit\n )\n\n # TODO: When this method is removed we can also remove ColumnBase.to_array.\n def to_array(self, fillna=None):\n warnings.warn(\n \"The to_array method will be removed in a future cuDF \"\n \"release. Consider using `to_numpy` instead.\",\n FutureWarning,\n )\n return self._column.to_array(fillna=fillna)\n\n def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):\n if bool_only not in (None, True):\n raise NotImplementedError(\n \"The bool_only parameter is not supported for Series.\"\n )\n return super().all(axis, skipna, level, **kwargs)\n\n def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):\n if bool_only not in (None, True):\n raise NotImplementedError(\n \"The bool_only parameter is not supported for Series.\"\n )\n return super().any(axis, skipna, level, **kwargs)\n\n def to_pandas(self, index=True, nullable=False, **kwargs):\n \"\"\"\n Convert to a Pandas Series.\n\n Parameters\n ----------\n index : Boolean, Default True\n If ``index`` is ``True``, converts the index of cudf.Series\n and sets it to the pandas.Series. If ``index`` is ``False``,\n no index conversion is performed and pandas.Series will assign\n a default index.\n nullable : Boolean, Default False\n If ``nullable`` is ``True``, the resulting series will be\n having a corresponding nullable Pandas dtype. 
If ``nullable``\n is ``False``, the resulting series will either convert null\n values to ``np.nan`` or ``None`` depending on the dtype.\n\n Returns\n -------\n out : Pandas Series\n\n Examples\n --------\n >>> import cudf\n >>> ser = cudf.Series([-3, 2, 0])\n >>> pds = ser.to_pandas()\n >>> pds\n 0 -3\n 1 2\n 2 0\n dtype: int64\n >>> type(pds)\n <class 'pandas.core.series.Series'>\n\n ``nullable`` parameter can be used to control\n whether dtype can be Pandas Nullable or not:\n\n >>> ser = cudf.Series([10, 20, None, 30])\n >>> ser\n 0 10\n 1 20\n 2 <NA>\n 3 30\n dtype: int64\n >>> ser.to_pandas(nullable=True)\n 0 10\n 1 20\n 2 <NA>\n 3 30\n dtype: Int64\n >>> ser.to_pandas(nullable=False)\n 0 10.0\n 1 20.0\n 2 NaN\n 3 30.0\n dtype: float64\n \"\"\"\n if index is True:\n index = self.index.to_pandas()\n s = self._column.to_pandas(index=index, nullable=nullable)\n s.name = self.name\n return s\n\n @property\n def data(self):\n \"\"\"The gpu buffer for the data\n\n Returns\n -------\n out : The GPU buffer of the Series.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([1, 2, 3, 4])\n >>> series\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n >>> series.data\n <cudf.core.buffer.Buffer object at 0x7f23c192d110>\n >>> series.data.to_host_array()\n array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,\n 0, 0, 4, 0, 0, 0, 0, 0, 0, 0], dtype=uint8)\n \"\"\" # noqa: E501\n return self._column.data\n\n @property\n def nullmask(self):\n \"\"\"The gpu buffer for the null-mask\"\"\"\n return cudf.Series(self._column.nullmask)\n\n def as_mask(self):\n \"\"\"Convert booleans to bitmask\n\n Returns\n -------\n device array\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([True, False, True])\n >>> s.as_mask()\n <cudf.core.buffer.Buffer object at 0x7f23c3eed0d0>\n >>> s.as_mask().to_host_array()\n array([ 5, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 181, 164,\n 188, 1, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255,\n 127, 253, 214, 62, 241, 1, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n dtype=uint8)\n \"\"\"\n if not is_bool_dtype(self.dtype):\n raise TypeError(\n f\"Series must of boolean dtype, found: {self.dtype}\"\n )\n\n return self._column.as_mask()\n\n def astype(self, dtype, copy=False, errors=\"raise\"):\n \"\"\"\n Cast the Series to the given dtype\n\n Parameters\n ----------\n\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast Series object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n series name and dtype is a numpy.dtype or Python type to cast to.\n copy : bool, default False\n Return a deep-copy when ``copy=True``. Note by default\n ``copy=False`` setting is used and hence changes to\n values then may propagate to other cudf objects.\n errors : {'raise', 'ignore', 'warn'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. 
On error return original\n object.\n - ``warn`` : prints last exceptions as warnings and\n return original object.\n\n Returns\n -------\n out : Series\n Returns ``self.copy(deep=copy)`` if ``dtype`` is the same\n as ``self.dtype``.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([1, 2], dtype='int32')\n >>> series\n 0 1\n 1 2\n dtype: int32\n >>> series.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> series.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = cudf.CategoricalDtype(categories=[2, 1], ordered=True)\n >>> series.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` (enabled by default)\n and changing data on a new Series will\n propagate changes:\n\n >>> s1 = cudf.Series([1, 2])\n >>> s1\n 0 1\n 1 2\n dtype: int64\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1\n 0 10\n 1 2\n dtype: int64\n \"\"\"\n if errors not in (\"ignore\", \"raise\", \"warn\"):\n raise ValueError(\"invalid error value specified\")\n\n if is_dict_like(dtype):\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError(\n \"Only the Series name can be used for \"\n \"the key in Series dtype mappings.\"\n )\n dtype = dtype[self.name]\n\n if is_dtype_equal(dtype, self.dtype):\n return self.copy(deep=copy)\n try:\n data = self._column.astype(dtype)\n\n return self._from_data(\n {self.name: (data.copy(deep=True) if copy else data)},\n index=self._index,\n )\n\n except Exception as e:\n if errors == \"raise\":\n raise e\n elif errors == \"warn\":\n import traceback\n\n tb = traceback.format_exc()\n warnings.warn(tb)\n elif errors == \"ignore\":\n pass\n return self\n\n def sort_index(self, axis=0, *args, **kwargs):\n if axis not in (0, \"index\"):\n raise ValueError(\"Only axis=0 is valid for Series.\")\n return super().sort_index(axis=axis, *args, **kwargs)\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace=False,\n kind=\"quicksort\",\n na_position=\"last\",\n ignore_index=False,\n ):\n \"\"\"Sort by the values along either axis.\n\n Parameters\n ----------\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. 
If this is a list of bools, must match the length of the\n by.\n na_position : {‘first’, ‘last’}, default ‘last’\n 'first' puts nulls at the beginning, 'last' puts nulls at the end\n ignore_index : bool, default False\n If True, index will not be sorted.\n\n Returns\n -------\n Series : Series with sorted values.\n\n Notes\n -----\n Difference from pandas:\n * Support axis='index' only.\n * Not supporting: inplace, kind\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([1, 5, 2, 4, 3])\n >>> s.sort_values()\n 0 1\n 2 2\n 4 3\n 3 4\n 1 5\n dtype: int64\n \"\"\"\n return super().sort_values(\n by=self.name,\n axis=axis,\n ascending=ascending,\n inplace=inplace,\n kind=kind,\n na_position=na_position,\n ignore_index=ignore_index,\n )\n\n def nlargest(self, n=5, keep=\"first\"):\n \"\"\"Returns a new Series of the *n* largest element.\n\n Parameters\n ----------\n n : int, default 5\n Return this many descending sorted values.\n keep : {'first', 'last'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n\n Returns\n -------\n Series\n The `n` largest values in the Series, sorted in decreasing order.\n\n Examples\n --------\n >>> import cudf\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Malta\": 434000, \"Maldives\": 434000,\n ... \"Brunei\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... \"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> series = cudf.Series(countries_population)\n >>> series\n Italy 59000000\n France 65000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n >>> series.nlargest()\n France 65000000\n Italy 59000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n dtype: int64\n >>> series.nlargest(3)\n France 65000000\n Italy 59000000\n Malta 434000\n dtype: int64\n >>> series.nlargest(3, keep='last')\n France 65000000\n Italy 59000000\n Brunei 434000\n dtype: int64\n \"\"\"\n return self._n_largest_or_smallest(True, n, [self.name], keep)\n\n def nsmallest(self, n=5, keep=\"first\"):\n \"\"\"\n Returns a new Series of the *n* smallest element.\n\n Parameters\n ----------\n n : int, default 5\n Return this many ascending sorted values.\n keep : {'first', 'last'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n\n Returns\n -------\n Series\n The `n` smallest values in the Series, sorted in increasing order.\n\n Examples\n --------\n >>> import cudf\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Brunei\": 434000, \"Malta\": 434000,\n ... \"Maldives\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... 
\"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> s = cudf.Series(countries_population)\n >>> s\n Italy 59000000\n France 65000000\n Brunei 434000\n Malta 434000\n Maldives 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n\n The `n` smallest elements where ``n=5`` by default.\n\n >>> s.nsmallest()\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Iceland 337000\n dtype: int64\n\n The `n` smallest elements where ``n=3``. Default `keep` value is\n 'first' so Nauru and Tuvalu will be kept.\n\n >>> s.nsmallest(3)\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n dtype: int64\n\n The `n` smallest elements where ``n=3`` and keeping the last\n duplicates. Anguilla and Tuvalu will be kept since they are the last\n with value 11300 based on the index order.\n\n >>> s.nsmallest(3, keep='last')\n Montserrat 5200\n Anguilla 11300\n Tuvalu 11300\n dtype: int64\n \"\"\"\n return self._n_largest_or_smallest(False, n, [self.name], keep)\n\n def argsort(\n self,\n axis=0,\n kind=\"quicksort\",\n order=None,\n ascending=True,\n na_position=\"last\",\n ):\n obj = self.__class__._from_data(\n {\n None: super().argsort(\n axis=axis,\n kind=kind,\n order=order,\n ascending=ascending,\n na_position=na_position,\n )\n }\n )\n obj.name = self.name\n return obj\n\n def replace(self, to_replace=None, value=None, *args, **kwargs):\n if is_dict_like(to_replace) and value is not None:\n raise ValueError(\n \"Series.replace cannot use dict-like to_replace and non-None \"\n \"value\"\n )\n\n return super().replace(to_replace, value, *args, **kwargs)\n\n def update(self, other):\n \"\"\"\n Modify Series in place using values from passed Series.\n Uses non-NA values from passed Series to make updates. Aligns\n on index.\n\n Parameters\n ----------\n other : Series, or object coercible into Series\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.update(cudf.Series([4, 5, 6]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n >>> s = cudf.Series(['a', 'b', 'c'])\n >>> s\n 0 a\n 1 b\n 2 c\n dtype: object\n >>> s.update(cudf.Series(['d', 'e'], index=[0, 2]))\n >>> s\n 0 d\n 1 b\n 2 e\n dtype: object\n >>> s = cudf.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.update(cudf.Series([4, 5, 6, 7, 8]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n If ``other`` contains NaNs the corresponding values are not updated\n in the original Series.\n\n >>> s = cudf.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.update(cudf.Series([4, np.nan, 6], nan_as_null=False))\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n\n ``other`` can also be a non-Series object type\n that is coercible into a Series\n\n >>> s = cudf.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.update([4, np.nan, 6])\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n >>> s = cudf.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.update({1: 9})\n >>> s\n 0 1\n 1 9\n 2 3\n dtype: int64\n \"\"\"\n\n if not isinstance(other, cudf.Series):\n other = cudf.Series(other)\n\n if not self.index.equals(other.index):\n other = other.reindex(index=self.index)\n mask = other.notna()\n\n self.mask(mask, other, inplace=True)\n\n def reverse(self):\n warnings.warn(\n \"Series.reverse is deprecated and will be removed in the future.\",\n FutureWarning,\n )\n rinds = column.arange((self._column.size - 1), -1, -1, dtype=np.int32)\n return self._from_data(\n {self.name: self._column[rinds]}, 
self.index._values[rinds]\n )\n\n def one_hot_encoding(self, cats, dtype=\"float64\"):\n \"\"\"Perform one-hot-encoding\n\n Parameters\n ----------\n cats : sequence of values\n values representing each category.\n dtype : numpy.dtype\n specifies the output dtype.\n\n Returns\n -------\n Sequence\n A sequence of new series for each category. Its length is\n determined by the length of ``cats``.\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series(['a', 'b', 'c', 'a'])\n >>> s\n 0 a\n 1 b\n 2 c\n 3 a\n dtype: object\n >>> s.one_hot_encoding(['a', 'c', 'b'])\n [0 1.0\n 1 0.0\n 2 0.0\n 3 1.0\n dtype: float64, 0 0.0\n 1 0.0\n 2 1.0\n 3 0.0\n dtype: float64, 0 0.0\n 1 1.0\n 2 0.0\n 3 0.0\n dtype: float64]\n \"\"\"\n\n warnings.warn(\n \"Series.one_hot_encoding is deprecated and will be removed in \"\n \"future, use `get_dummies` instead.\",\n FutureWarning,\n )\n\n if hasattr(cats, \"to_arrow\"):\n cats = cats.to_pandas()\n else:\n cats = pd.Series(cats, dtype=\"object\")\n dtype = cudf.dtype(dtype)\n\n try:\n cats_col = as_column(cats, nan_as_null=False, dtype=self.dtype)\n except TypeError:\n raise ValueError(\"Cannot convert `cats` as cudf column.\")\n\n if self._column.size * cats_col.size >= np.iinfo(\"int32\").max:\n raise ValueError(\n \"Size limitation exceeded: series.size * category.size < \"\n \"np.iinfo('int32').max. Consider reducing size of category\"\n )\n\n res = libcudf.transform.one_hot_encode(self._column, cats_col)\n if dtype.type == np.bool_:\n return [\n Series._from_data({None: x}, index=self._index)\n for x in list(res.values())\n ]\n else:\n return [\n Series._from_data({None: x.astype(dtype)}, index=self._index)\n for x in list(res.values())\n ]\n\n def label_encoding(self, cats, dtype=None, na_sentinel=-1):\n \"\"\"Perform label encoding.\n\n Parameters\n ----------\n values : sequence of input values\n dtype : numpy.dtype; optional\n Specifies the output dtype. If `None` is given, the\n smallest possible integer dtype (starting with np.int8)\n is used.\n na_sentinel : number, default -1\n Value to indicate missing category.\n\n Returns\n -------\n A sequence of encoded labels with value between 0 and n-1 classes(cats)\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([1, 2, 3, 4, 10])\n >>> s.label_encoding([2, 3])\n 0 -1\n 1 0\n 2 1\n 3 -1\n 4 -1\n dtype: int8\n\n `na_sentinel` parameter can be used to\n control the value when there is no encoding.\n\n >>> s.label_encoding([2, 3], na_sentinel=10)\n 0 10\n 1 0\n 2 1\n 3 10\n 4 10\n dtype: int8\n\n When none of `cats` values exist in s, entire\n Series will be `na_sentinel`.\n\n >>> s.label_encoding(['a', 'b', 'c'])\n 0 -1\n 1 -1\n 2 -1\n 3 -1\n 4 -1\n dtype: int8\n \"\"\"\n\n warnings.warn(\n \"Series.label_encoding is deprecated and will be removed in the \"\n \"future. 
Consider using cuML's LabelEncoder instead.\",\n FutureWarning,\n )\n\n return self._label_encoding(cats, dtype, na_sentinel)\n\n def _label_encoding(self, cats, dtype=None, na_sentinel=-1):\n # Private implementation of deprecated public label_encoding method\n def _return_sentinel_series():\n return Series(\n cudf.core.column.full(\n size=len(self), fill_value=na_sentinel, dtype=dtype\n ),\n index=self.index,\n name=None,\n )\n\n if dtype is None:\n dtype = min_scalar_type(max(len(cats), na_sentinel), 8)\n\n cats = column.as_column(cats)\n if is_mixed_with_object_dtype(self, cats):\n return _return_sentinel_series()\n\n try:\n # Where there is a type-cast failure, we have\n # to catch the exception and return encoded labels\n # with na_sentinel values as there would be no corresponding\n # encoded values of cats in self.\n cats = cats.astype(self.dtype)\n except ValueError:\n return _return_sentinel_series()\n\n order = column.arange(len(self))\n codes = column.arange(len(cats), dtype=dtype)\n\n value = cudf.DataFrame({\"value\": cats, \"code\": codes})\n codes = cudf.DataFrame(\n {\"value\": self._data.columns[0].copy(deep=False), \"order\": order}\n )\n\n codes = codes.merge(value, on=\"value\", how=\"left\")\n codes = codes.sort_values(\"order\")[\"code\"].fillna(na_sentinel)\n\n codes.name = None\n codes.index = self._index\n return codes\n\n # UDF related\n def apply(self, func, convert_dtype=True, args=(), **kwargs):\n \"\"\"\n Apply a scalar function to the values of a Series.\n\n Similar to `pandas.Series.apply. Applies a user\n defined function elementwise over a series.\n\n Parameters\n ----------\n func : function\n Scalar Python function to apply.\n convert_dtype : bool, default True\n In cuDF, this parameter is always True. Because\n cuDF does not support arbitrary object dtypes,\n the result will always be the common type as determined\n by numba based on the function logic and argument types.\n See examples for details.\n args : tuple\n Not supported\n **kwargs\n Not supported\n\n Notes\n -----\n UDFs are cached in memory to avoid recompilation. The first\n call to the UDF will incur compilation overhead. `func` may\n call nested functions that are decorated with the decorator\n `numba.cuda.jit(device=True)`, otherwise numba will raise a\n typing error.\n\n Examples\n --------\n\n Apply a basic function to a series\n >>> sr = cudf.Series([1,2,3])\n >>> def f(x):\n ... return x + 1\n >>> sr.apply(f)\n 0 2\n 1 3\n 2 4\n dtype: int64\n\n Apply a basic function to a series with nulls\n\n >>> sr = cudf.Series([1,cudf.NA,3])\n >>> def f(x):\n ... return x + 1\n >>> sr.apply(f)\n 0 2\n 1 <NA>\n 2 4\n dtype: int64\n\n Use a function that does something conditionally,\n based on if the value is or is not null\n\n >>> sr = cudf.Series([1,cudf.NA,3])\n >>> def f(x):\n ... if x is cudf.NA:\n ... return 42\n ... else:\n ... return x - 1\n >>> sr.apply(f)\n 0 0\n 1 42\n 2 2\n dtype: int64\n\n Results will be upcast to the common dtype required\n as derived from the UDFs logic. Note that this means\n the common type will be returned even if such data\n is passed that would not result in any values of that\n dtype.\n\n >>> sr = cudf.Series([1,cudf.NA,3])\n >>> def f(x):\n ... return x + 1.5\n >>> sr.apply(f)\n 0 2.5\n 1 <NA>\n 2 4.5\n dtype: float64\n \"\"\"\n if args or kwargs:\n raise ValueError(\n \"UDFs using *args or **kwargs are not yet supported.\"\n )\n\n # these functions are generally written as functions of scalar\n # values rather than rows. 
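A sketch of what this\n        # builds for a series named \"x\" (the name and values are illustrative\n        # only; the pattern matches the statements just below):\n        #\n        #   sr = cudf.Series([1, 2, 3], name=\"x\")\n        #   def f(x):\n        #       return x + 1\n        #   f_ = cuda.jit(device=True)(f)\n        #   cudf.DataFrame({\"x\": sr}).apply(lambda row: f_(row[\"x\"]))\n        #\n        # 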
Rather than writing an entirely separate\n # numba kernel that is not built around a row object, its simpler\n # to just turn this into the equivalent single column dataframe case\n name = self.name or \"__temp_srname\"\n df = cudf.DataFrame({name: self})\n f_ = cuda.jit(device=True)(func)\n\n return df.apply(lambda row: f_(row[name]))\n\n def applymap(self, udf, out_dtype=None):\n \"\"\"Apply an elementwise function to transform the values in the Column.\n\n The user function is expected to take one argument and return the\n result, which will be stored to the output Series. The function\n cannot reference globals except for other simple scalar objects.\n\n Parameters\n ----------\n udf : function\n Either a callable python function or a python function already\n decorated by ``numba.cuda.jit`` for call on the GPU as a device\n\n out_dtype : numpy.dtype; optional\n The dtype for use in the output.\n Only used for ``numba.cuda.jit`` decorated udf.\n By default, the result will have the same dtype as the source.\n\n Returns\n -------\n result : Series\n The mask and index are preserved.\n\n Notes\n -----\n The supported Python features are listed in\n\n https://numba.pydata.org/numba-doc/dev/cuda/cudapysupported.html\n\n with these exceptions:\n\n * Math functions in `cmath` are not supported since `libcudf` does not\n have complex number support and output of `cmath` functions are most\n likely complex numbers.\n\n * These five functions in `math` are not supported since numba\n generates multiple PTX functions from them\n\n * math.sin()\n * math.cos()\n * math.tan()\n * math.gamma()\n * math.lgamma()\n\n * Series with string dtypes are not supported in `applymap` method.\n\n * Global variables need to be re-defined explicitly inside\n the udf, as numba considers them to be compile-time constants\n and there is no known way to obtain value of the global variable.\n\n Examples\n --------\n Returning a Series of booleans using only a literal pattern.\n\n >>> import cudf\n >>> s = cudf.Series([1, 10, -10, 200, 100])\n >>> s.applymap(lambda x: x)\n 0 1\n 1 10\n 2 -10\n 3 200\n 4 100\n dtype: int64\n >>> s.applymap(lambda x: x in [1, 100, 59])\n 0 True\n 1 False\n 2 False\n 3 False\n 4 True\n dtype: bool\n >>> s.applymap(lambda x: x ** 2)\n 0 1\n 1 100\n 2 100\n 3 40000\n 4 10000\n dtype: int64\n >>> s.applymap(lambda x: (x ** 2) + (x / 2))\n 0 1.5\n 1 105.0\n 2 95.0\n 3 40100.0\n 4 10050.0\n dtype: float64\n >>> def cube_function(a):\n ... return a ** 3\n ...\n >>> s.applymap(cube_function)\n 0 1\n 1 1000\n 2 -1000\n 3 8000000\n 4 1000000\n dtype: int64\n >>> def custom_udf(x):\n ... if x > 0:\n ... return x + 5\n ... else:\n ... 
return x - 5\n ...\n >>> s.applymap(custom_udf)\n 0 6\n 1 15\n 2 -15\n 3 205\n 4 105\n dtype: int64\n \"\"\"\n if not callable(udf):\n raise ValueError(\"Input UDF must be a callable object.\")\n return self._from_data({self.name: self._unaryop(udf)}, self._index)\n\n #\n # Stats\n #\n def count(self, level=None, **kwargs):\n \"\"\"\n Return number of non-NA/null observations in the Series\n\n Returns\n -------\n int\n Number of non-null values in the Series.\n\n Notes\n -----\n Parameters currently not supported is `level`.\n\n Examples\n --------\n >>> import cudf\n >>> ser = cudf.Series([1, 5, 2, 4, 3])\n >>> ser.count()\n 5\n \"\"\"\n\n if level is not None:\n raise NotImplementedError(\"level parameter is not implemented yet\")\n\n return self.valid_count\n\n def mode(self, dropna=True):\n \"\"\"\n Return the mode(s) of the dataset.\n\n Always returns Series even if only one value is returned.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't consider counts of NA/NaN/NaT.\n\n Returns\n -------\n Series\n Modes of the Series in sorted order.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([7, 6, 5, 4, 3, 2, 1])\n >>> series\n 0 7\n 1 6\n 2 5\n 3 4\n 4 3\n 5 2\n 6 1\n dtype: int64\n >>> series.mode()\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n 6 7\n dtype: int64\n\n We can include ``<NA>`` values in mode by\n passing ``dropna=False``.\n\n >>> series = cudf.Series([7, 4, 3, 3, 7, None, None])\n >>> series\n 0 7\n 1 4\n 2 3\n 3 3\n 4 7\n 5 <NA>\n 6 <NA>\n dtype: int64\n >>> series.mode()\n 0 3\n 1 7\n dtype: int64\n >>> series.mode(dropna=False)\n 0 3\n 1 7\n 2 <NA>\n dtype: int64\n \"\"\"\n val_counts = self.value_counts(ascending=False, dropna=dropna)\n if len(val_counts) > 0:\n val_counts = val_counts[val_counts == val_counts.iloc[0]]\n\n return Series(val_counts.index.sort_values(), name=self.name)\n\n def round(self, decimals=0, how=\"half_even\"):\n if not is_integer(decimals):\n raise ValueError(\n f\"decimals must be an int, got {type(decimals).__name__}\"\n )\n decimals = int(decimals)\n return super().round(decimals, how)\n\n def cov(self, other, min_periods=None):\n \"\"\"\n Compute covariance with Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the covariance.\n\n Returns\n -------\n float\n Covariance between Series and other normalized by N-1\n (unbiased estimator).\n\n Notes\n -----\n `min_periods` parameter is not yet supported.\n\n Examples\n --------\n >>> import cudf\n >>> ser1 = cudf.Series([0.9, 0.13, 0.62])\n >>> ser2 = cudf.Series([0.12, 0.26, 0.51])\n >>> ser1.cov(ser2)\n -0.015750000000000004\n \"\"\"\n\n if min_periods is not None:\n raise NotImplementedError(\n \"min_periods parameter is not implemented yet\"\n )\n\n if self.empty or other.empty:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n lhs = self.nans_to_nulls().dropna()\n rhs = other.nans_to_nulls().dropna()\n\n lhs, rhs = _align_indices([lhs, rhs], how=\"inner\")\n\n return lhs._column.cov(rhs._column)\n\n def transpose(self):\n \"\"\"Return the transpose, which is by definition self.\n \"\"\"\n\n return self\n\n T = property(transpose, doc=transpose.__doc__)\n\n def corr(self, other, method=\"pearson\", min_periods=None):\n \"\"\"Calculates the sample correlation between two Series,\n excluding missing values.\n\n Examples\n --------\n >>> import cudf\n >>> ser1 = cudf.Series([0.9, 0.13, 0.62])\n >>> ser2 = cudf.Series([0.12, 0.26, 0.51])\n >>> ser1.corr(ser2)\n -0.20454263717316112\n \"\"\"\n\n 
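# The core of this method amounts to the following sketch, where ser1\n        # and ser2 are the inputs from the docstring example above and the\n        # quoted result is the value shown there:\n        #\n        #   lhs = ser1.nans_to_nulls().dropna()\n        #   rhs = ser2.nans_to_nulls().dropna()\n        #   lhs, rhs = _align_indices([lhs, rhs], how=\"inner\")\n        #   lhs._column.corr(rhs._column)  # -> -0.20454263717316112\n        #\n        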
if method not in (\"pearson\",):\n raise ValueError(f\"Unknown method {method}\")\n\n if min_periods not in (None,):\n raise NotImplementedError(\"Unsupported argument 'min_periods'\")\n\n if self.empty or other.empty:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n lhs = self.nans_to_nulls().dropna()\n rhs = other.nans_to_nulls().dropna()\n lhs, rhs = _align_indices([lhs, rhs], how=\"inner\")\n\n return lhs._column.corr(rhs._column)\n\n def autocorr(self, lag=1):\n \"\"\"Compute the lag-N autocorrelation. This method computes the Pearson\n correlation between the Series and its shifted self.\n\n Parameters\n ----------\n lag : int, default 1\n Number of lags to apply before performing autocorrelation.\n\n Returns\n -------\n result : float\n The Pearson correlation between self and self.shift(lag).\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([0.25, 0.5, 0.2, -0.05])\n >>> s.autocorr()\n 0.10355263309024071\n >>> s.autocorr(lag=2)\n -0.9999999999999999\n \"\"\"\n return self.corr(self.shift(lag))\n\n def isin(self, values):\n \"\"\"Check whether values are contained in Series.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of values to test. Passing in a single string will\n raise a TypeError. Instead, turn a single string into a list\n of one element.\n\n Returns\n -------\n result : Series\n Series of booleans indicating if each element is in values.\n\n Raises\n -------\n TypeError\n If values is a string\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. 
Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Strings and integers are distinct and are therefore not comparable:\n\n >>> cudf.Series([1]).isin(['1'])\n 0 False\n dtype: bool\n >>> cudf.Series([1.1]).isin(['1.1'])\n 0 False\n dtype: bool\n \"\"\"\n\n if is_scalar(values):\n raise TypeError(\n \"only list-like objects are allowed to be passed \"\n f\"to isin(), you passed a [{type(values).__name__}]\"\n )\n\n return Series(\n self._column.isin(values), index=self.index, name=self.name\n )\n\n def unique(self):\n \"\"\"\n Returns unique values of this Series.\n\n Returns\n -------\n Series\n A series with only the unique values.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series(['a', 'a', 'b', None, 'b', None, 'c'])\n >>> series\n 0 a\n 1 a\n 2 b\n 3 <NA>\n 4 b\n 5 <NA>\n 6 c\n dtype: object\n >>> series.unique()\n 0 <NA>\n 1 a\n 2 b\n 3 c\n dtype: object\n \"\"\"\n res = self._column.unique()\n return Series(res, name=self.name)\n\n def nunique(self, method=\"sort\", dropna=True):\n \"\"\"Returns the number of unique values of the Series: approximate version,\n and exact version to be moved to libcudf\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NA values in the count.\n\n Returns\n -------\n int\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([1, 3, 5, 7, 7])\n >>> s\n 0 1\n 1 3\n 2 5\n 3 7\n 4 7\n dtype: int64\n >>> s.nunique()\n 4\n \"\"\"\n if method != \"sort\":\n msg = \"non sort based distinct_count() not implemented yet\"\n raise NotImplementedError(msg)\n if self.null_count == len(self):\n return 0\n return self._column.distinct_count(method, dropna)\n\n def value_counts(\n self,\n normalize=False,\n sort=True,\n ascending=False,\n bins=None,\n dropna=True,\n ):\n \"\"\"Return a Series containing counts of unique values.\n\n The resulting object will be in descending order so that\n the first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : bool, default False\n If True then the object returned will contain\n the relative frequencies of the unique values.\n\n sort : bool, default True\n Sort by frequencies.\n\n ascending : bool, default False\n Sort in ascending order.\n\n bins : int, optional\n Rather than count values, group them into half-open bins,\n works with numeric data. 
This Parameter is not yet supported.\n\n dropna : bool, default True\n Don’t include counts of NaN and None.\n\n Returns\n -------\n result : Series containing counts of unique values.\n\n See also\n --------\n Series.count\n Number of non-NA elements in a Series.\n\n cudf.DataFrame.count\n Number of non-NA elements in a DataFrame.\n\n Examples\n --------\n >>> import cudf\n >>> sr = cudf.Series([1.0, 2.0, 2.0, 3.0, 3.0, 3.0, None])\n >>> sr\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 3.0\n 5 3.0\n 6 <NA>\n dtype: float64\n >>> sr.value_counts()\n 3.0 3\n 2.0 2\n 1.0 1\n dtype: int32\n\n The order of the counts can be changed by passing ``ascending=True``:\n\n >>> sr.value_counts(ascending=True)\n 1.0 1\n 2.0 2\n 3.0 3\n dtype: int32\n\n With ``normalize`` set to True, returns the relative frequency\n by dividing all values by the sum of values.\n\n >>> sr.value_counts(normalize=True)\n 3.0 0.500000\n 2.0 0.333333\n 1.0 0.166667\n dtype: float64\n\n To include ``NA`` value counts, pass ``dropna=False``:\n\n >>> sr = cudf.Series([1.0, 2.0, 2.0, 3.0, None, 3.0, 3.0, None])\n >>> sr\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 <NA>\n 5 3.0\n 6 3.0\n 7 <NA>\n dtype: float64\n >>> sr.value_counts(dropna=False)\n 3.0 3\n 2.0 2\n <NA> 2\n 1.0 1\n dtype: int32\n \"\"\"\n if bins is not None:\n raise NotImplementedError(\"bins is not yet supported\")\n\n if dropna and self.null_count == len(self):\n return Series(\n [],\n dtype=np.int32,\n name=self.name,\n index=cudf.Index([], dtype=self.dtype),\n )\n\n res = self.groupby(self, dropna=dropna).count(dropna=dropna)\n res.index.name = None\n\n if sort:\n res = res.sort_values(ascending=ascending)\n\n if normalize:\n res = res / float(res._column.sum())\n return res\n\n def hash_values(self, method=\"murmur3\"):\n \"\"\"Compute the hash of values in this column.\n\n Parameters\n ----------\n method : {'murmur3', 'md5'}, default 'murmur3'\n Hash function to use:\n * murmur3: MurmurHash3 hash function.\n * md5: MD5 hash function.\n\n Returns\n -------\n Series\n A Series with hash values.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([10, 120, 30])\n >>> series\n 0 10\n 1 120\n 2 30\n dtype: int64\n >>> series.hash_values(method=\"murmur3\")\n 0 -1930516747\n 1 422619251\n 2 -941520876\n dtype: int32\n >>> series.hash_values(method=\"md5\")\n 0 7be4bbacbfdb05fb3044e36c22b41e8b\n 1 947ca8d2c5f0f27437f156cfbfab0969\n 2 d0580ef52d27c043c8e341fd5039b166\n dtype: object\n \"\"\"\n return Series._from_data(\n {None: self._hash(method=method)}, index=self.index\n )\n\n def hash_encode(self, stop, use_name=False):\n \"\"\"Encode column values as ints in [0, stop) using hash function.\n\n This method is deprecated. Replace ``series.hash_encode(stop,\n use_name=False)`` with ``series.hash_values(method=\"murmur3\") % stop``.\n\n Parameters\n ----------\n stop : int\n The upper bound on the encoding range.\n use_name : bool\n If ``True`` then combine hashed column values\n with hashed column name. 
This is useful for when the same\n values in different columns should be encoded\n with different hashed values.\n\n Returns\n -------\n result : Series\n The encoded Series.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([10, 120, 30])\n >>> series.hash_encode(stop=200)\n 0 53\n 1 51\n 2 124\n dtype: int32\n\n You can choose to include name while hash\n encoding by specifying `use_name=True`\n\n >>> series.hash_encode(stop=200, use_name=True)\n 0 131\n 1 29\n 2 76\n dtype: int32\n \"\"\"\n warnings.warn(\n \"The `hash_encode` method will be removed in a future cuDF \"\n \"release. Replace `series.hash_encode(stop, use_name=False)` \"\n 'with `series.hash_values(method=\"murmur3\") % stop`.',\n FutureWarning,\n )\n\n if not stop > 0:\n raise ValueError(\"stop must be a positive integer.\")\n\n if use_name:\n name_hasher = sha256()\n name_hasher.update(str(self.name).encode())\n name_hash_bytes = name_hasher.digest()[:4]\n name_hash_int = (\n int.from_bytes(name_hash_bytes, \"little\", signed=False)\n & 0xFFFFFFFF\n )\n initial_hash = [name_hash_int]\n else:\n initial_hash = None\n\n hashed_values = Series._from_data(\n {\n self.name: self._hash(\n method=\"murmur3\", initial_hash=initial_hash\n )\n },\n self.index,\n )\n\n if hashed_values.has_nulls:\n raise ValueError(\"Column must have no nulls.\")\n\n return hashed_values % stop\n\n def quantile(\n self, q=0.5, interpolation=\"linear\", exact=True, quant_index=True\n ):\n \"\"\"\n Return values at the given quantile.\n\n Parameters\n ----------\n\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute\n interpolation : {’linear’, ‘lower’, ‘higher’, ‘midpoint’, ‘nearest’}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points i and j:\n columns : list of str\n List of column names to include.\n exact : boolean\n Whether to use approximate or exact quantile algorithm.\n quant_index : boolean\n Whether to use the list of quantiles as index.\n\n Returns\n -------\n float or Series\n If ``q`` is an array, a Series will be returned where the\n index is ``q`` and the values are the quantiles, otherwise\n a float will be returned.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([1, 2, 3, 4])\n >>> series\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n >>> series.quantile(0.5)\n 2.5\n >>> series.quantile([0.25, 0.5, 0.75])\n 0.25 1.75\n 0.50 2.50\n 0.75 3.25\n dtype: float64\n \"\"\"\n\n result = self._column.quantile(q, interpolation, exact)\n\n if isinstance(q, Number):\n return result\n\n if quant_index:\n index = np.asarray(q)\n if len(self) == 0:\n result = column_empty_like(\n index, dtype=self.dtype, masked=True, newsize=len(index),\n )\n else:\n index = None\n\n return Series(result, index=index, name=self.name)\n\n @docutils.doc_describe()\n def describe(\n self,\n percentiles=None,\n include=None,\n exclude=None,\n datetime_is_numeric=False,\n ):\n \"\"\"{docstring}\"\"\"\n\n def _prepare_percentiles(percentiles):\n percentiles = list(percentiles)\n\n if not all(0 <= x <= 1 for x in percentiles):\n raise ValueError(\n \"All percentiles must be between 0 and 1, \" \"inclusive.\"\n )\n\n # describe always includes 50th percentile\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n\n percentiles = np.sort(percentiles)\n return percentiles\n\n def _format_percentile_names(percentiles):\n return [\"{0}%\".format(int(x * 100)) for x in percentiles]\n\n def _format_stats_values(stats_data):\n 
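# Round each summary statistic to 6 decimal places; note that ``map``\n            # returns a lazy iterator, consumed when the result Series is built.\n            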
return map(lambda x: round(x, 6), stats_data)\n\n def _describe_numeric(self):\n # mimicking pandas\n data = {\n \"count\": self.count(),\n \"mean\": self.mean(),\n \"std\": self.std(),\n \"min\": self.min(),\n **dict(\n zip(\n _format_percentile_names(percentiles),\n self.quantile(percentiles)\n .to_numpy(na_value=np.nan)\n .tolist(),\n )\n ),\n \"max\": self.max(),\n }\n\n return Series(\n data=_format_stats_values(data.values()),\n index=data.keys(),\n nan_as_null=False,\n name=self.name,\n )\n\n def _describe_timedelta(self):\n # mimicking pandas\n data = {\n \"count\": str(self.count()),\n \"mean\": str(self.mean()),\n \"std\": str(self.std()),\n \"min\": str(pd.Timedelta(self.min())),\n **dict(\n zip(\n _format_percentile_names(percentiles),\n self.quantile(percentiles)\n .astype(\"str\")\n .to_numpy(na_value=np.nan)\n .tolist(),\n )\n ),\n \"max\": str(pd.Timedelta(self.max())),\n }\n\n return Series(\n data=data.values(),\n index=data.keys(),\n dtype=\"str\",\n nan_as_null=False,\n name=self.name,\n )\n\n def _describe_categorical(self):\n # blocked by StringColumn/DatetimeColumn support for\n # value_counts/unique\n data = {\n \"count\": self.count(),\n \"unique\": len(self.unique()),\n \"top\": None,\n \"freq\": None,\n }\n if data[\"count\"] > 0:\n # In case there's a tie, break the tie by sorting the index\n # and take the top.\n val_counts = self.value_counts(ascending=False)\n tied_val_counts = val_counts[\n val_counts == val_counts.iloc[0]\n ].sort_index()\n data.update(\n {\n \"top\": tied_val_counts.index[0],\n \"freq\": tied_val_counts.iloc[0],\n }\n )\n\n return Series(\n data=data.values(),\n dtype=\"str\",\n index=data.keys(),\n nan_as_null=False,\n name=self.name,\n )\n\n def _describe_timestamp(self):\n data = {\n \"count\": str(self.count()),\n \"mean\": str(pd.Timestamp(self.mean())),\n \"min\": str(pd.Timestamp(self.min())),\n **dict(\n zip(\n _format_percentile_names(percentiles),\n self.quantile(percentiles)\n .astype(self.dtype)\n .astype(\"str\")\n .to_numpy(na_value=np.nan),\n )\n ),\n \"max\": str(pd.Timestamp((self.max()))),\n }\n\n return Series(\n data=data.values(),\n dtype=\"str\",\n index=data.keys(),\n nan_as_null=False,\n name=self.name,\n )\n\n if percentiles is not None:\n percentiles = _prepare_percentiles(percentiles)\n else:\n # pandas defaults\n percentiles = np.array([0.25, 0.5, 0.75])\n\n if is_bool_dtype(self.dtype):\n return _describe_categorical(self)\n elif isinstance(self._column, cudf.core.column.NumericalColumn):\n return _describe_numeric(self)\n elif isinstance(self._column, cudf.core.column.TimeDeltaColumn):\n return _describe_timedelta(self)\n elif isinstance(self._column, cudf.core.column.DatetimeColumn):\n return _describe_timestamp(self)\n else:\n return _describe_categorical(self)\n\n def digitize(self, bins, right=False):\n \"\"\"Return the indices of the bins to which each value in series belongs.\n\n Notes\n -----\n Monotonicity of bins is assumed and not checked.\n\n Parameters\n ----------\n bins : np.array\n 1-D monotonically, increasing array with same type as this series.\n right : bool\n Indicates whether interval contains the right or left bin edge.\n\n Returns\n -------\n A new Series containing the indices.\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([0.2, 6.4, 3.0, 1.6])\n >>> bins = cudf.Series([0.0, 1.0, 2.5, 4.0, 10.0])\n >>> inds = s.digitize(bins)\n >>> inds\n 0 1\n 1 4\n 2 3\n 3 2\n dtype: int32\n \"\"\"\n return Series(\n cudf.core.column.numerical.digitize(self._column, bins, right)\n 
)\n\n def diff(self, periods=1):\n \"\"\"Calculate the difference between values at positions i and i - N in\n an array and store the output in a new array.\n\n Returns\n -------\n Series\n First differences of the Series.\n\n Notes\n -----\n Diff currently only supports float and integer dtype columns with\n no null values.\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([1, 1, 2, 3, 5, 8])\n >>> series\n 0 1\n 1 1\n 2 2\n 3 3\n 4 5\n 5 8\n dtype: int64\n\n Difference with previous row\n\n >>> series.diff()\n 0 <NA>\n 1 0\n 2 1\n 3 1\n 4 2\n 5 3\n dtype: int64\n\n Difference with 3rd previous row\n\n >>> series.diff(periods=3)\n 0 <NA>\n 1 <NA>\n 2 <NA>\n 3 2\n 4 4\n 5 6\n dtype: int64\n\n Difference with following row\n\n >>> series.diff(periods=-1)\n 0 0\n 1 -1\n 2 -1\n 3 -2\n 4 -3\n 5 <NA>\n dtype: int64\n \"\"\"\n if self.has_nulls:\n raise AssertionError(\n \"Diff currently requires columns with no null values\"\n )\n\n if not np.issubdtype(self.dtype, np.number):\n raise NotImplementedError(\n \"Diff currently only supports numeric dtypes\"\n )\n\n # TODO: move this libcudf\n input_col = self._column\n output_col = column_empty_like(input_col)\n output_mask = column_empty_like(input_col, dtype=\"bool\")\n if output_col.size > 0:\n cudautils.gpu_diff.forall(output_col.size)(\n input_col, output_col, output_mask, periods\n )\n\n output_col = column.build_column(\n data=output_col.data,\n dtype=output_col.dtype,\n mask=bools_to_mask(output_mask),\n )\n\n return Series(output_col, name=self.name, index=self.index)\n\n @copy_docstring(SeriesGroupBy)\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index=True,\n sort=False,\n group_keys=True,\n squeeze=False,\n observed=False,\n dropna=True,\n ):\n import cudf.core.resample\n\n if axis not in (0, \"index\"):\n raise NotImplementedError(\"axis parameter is not yet implemented\")\n\n if group_keys is not True:\n raise NotImplementedError(\n \"The group_keys keyword is not yet implemented\"\n )\n\n if squeeze is not False:\n raise NotImplementedError(\n \"squeeze parameter is not yet implemented\"\n )\n\n if observed is not False:\n raise NotImplementedError(\n \"observed parameter is not yet implemented\"\n )\n\n if by is None and level is None:\n raise TypeError(\n \"groupby() requires either by or level to be specified.\"\n )\n\n return (\n cudf.core.resample.SeriesResampler(self, by=by)\n if isinstance(by, cudf.Grouper) and by.freq\n else SeriesGroupBy(\n self, by=by, level=level, dropna=dropna, sort=sort\n )\n )\n\n def rename(self, index=None, copy=True):\n \"\"\"\n Alter Series name\n\n Change Series.name with a scalar value\n\n Parameters\n ----------\n index : Scalar, optional\n Scalar to alter the Series.name attribute\n copy : boolean, default True\n Also copy underlying data\n\n Returns\n -------\n Series\n\n Notes\n -----\n Difference from pandas:\n - Supports scalar values only for changing name attribute\n - Not supporting : inplace, level\n\n Examples\n --------\n >>> import cudf\n >>> series = cudf.Series([10, 20, 30])\n >>> series\n 0 10\n 1 20\n 2 30\n dtype: int64\n >>> series.name\n >>> renamed_series = series.rename('numeric_series')\n >>> renamed_series\n 0 10\n 1 20\n 2 30\n Name: numeric_series, dtype: int64\n >>> renamed_series.name\n 'numeric_series'\n \"\"\"\n out = self.copy(deep=False)\n out = out.set_index(self.index)\n if index:\n out.name = index\n\n return out.copy(deep=copy)\n\n def merge(\n self,\n other,\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n 
right_index=False,\n how=\"inner\",\n sort=False,\n lsuffix=None,\n rsuffix=None,\n method=\"hash\",\n suffixes=(\"_x\", \"_y\"),\n ):\n if left_on not in (self.name, None):\n raise ValueError(\n \"Series to other merge uses series name as key implicitly\"\n )\n\n if lsuffix or rsuffix:\n raise ValueError(\n \"The lsuffix and rsuffix keywords have been replaced with the \"\n \"``suffixes=`` keyword. \"\n \"Please provide the following instead: \\n\\n\"\n \" suffixes=('%s', '%s')\"\n % (lsuffix or \"_x\", rsuffix or \"_y\")\n )\n else:\n lsuffix, rsuffix = suffixes\n\n result = super()._merge(\n other,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n how=how,\n sort=sort,\n indicator=False,\n suffixes=suffixes,\n )\n\n return result\n\n def add_prefix(self, prefix):\n result = self.copy(deep=True)\n result.index = prefix + self.index.astype(str)\n return result\n\n def add_suffix(self, suffix):\n result = self.copy(deep=True)\n result.index = self.index.astype(str) + suffix\n return result\n\n def keys(self):\n \"\"\"\n Return alias for index.\n\n Returns\n -------\n Index\n Index of the Series.\n\n Examples\n --------\n >>> import cudf\n >>> sr = cudf.Series([10, 11, 12, 13, 14, 15])\n >>> sr\n 0 10\n 1 11\n 2 12\n 3 13\n 4 14\n 5 15\n dtype: int64\n\n >>> sr.keys()\n RangeIndex(start=0, stop=6)\n >>> sr = cudf.Series(['a', 'b', 'c'])\n >>> sr\n 0 a\n 1 b\n 2 c\n dtype: object\n >>> sr.keys()\n RangeIndex(start=0, stop=3)\n >>> sr = cudf.Series([1, 2, 3], index=['a', 'b', 'c'])\n >>> sr\n a 1\n b 2\n c 3\n dtype: int64\n >>> sr.keys()\n StringIndex(['a' 'b' 'c'], dtype='object')\n \"\"\"\n return self.index\n\n def explode(self, ignore_index=False):\n \"\"\"\n Transform each element of a list-like to a row, replicating index\n values.\n\n Parameters\n ----------\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([[1, 2, 3], [], None, [4, 5]])\n >>> s\n 0 [1, 2, 3]\n 1 []\n 2 None\n 3 [4, 5]\n dtype: list\n >>> s.explode()\n 0 1\n 0 2\n 0 3\n 1 <NA>\n 2 <NA>\n 3 4\n 3 5\n dtype: int64\n \"\"\"\n if not is_list_dtype(self._column.dtype):\n data = self._data.copy(deep=True)\n idx = None if ignore_index else self._index.copy(deep=True)\n return self.__class__._from_data(data, index=idx)\n\n return super()._explode(self._column_names[0], ignore_index)\n\n def pct_change(\n self, periods=1, fill_method=\"ffill\", limit=None, freq=None\n ):\n \"\"\"\n Calculates the percent change between sequential elements\n in the Series.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'ffill'\n How to handle NAs before computing percent changes.\n limit : int, optional\n The number of consecutive NAs to fill before stopping.\n Not yet implemented.\n freq : str, optional\n Increment to use from time series API.\n Not yet implemented.\n\n Returns\n -------\n Series\n \"\"\"\n if limit is not None:\n raise NotImplementedError(\"limit parameter not supported yet.\")\n if freq is not None:\n raise NotImplementedError(\"freq parameter not supported yet.\")\n elif fill_method not in {\"ffill\", \"pad\", \"bfill\", \"backfill\"}:\n raise ValueError(\n \"fill_method must be one of 'ffill', 'pad', \"\n \"'bfill', or 'backfill'.\"\n )\n\n data = self.fillna(method=fill_method, limit=limit)\n diff = data.diff(periods=periods)\n change = diff 
/ data.shift(periods=periods, freq=freq)\n return change\n\n\ndef make_binop_func(op):\n # This function is used to wrap binary operations in Frame with an\n # appropriate API for Series as required for pandas compatibility. The\n # main effect is reordering and error-checking parameters in\n # Series-specific ways.\n wrapped_func = getattr(Frame, op)\n\n @functools.wraps(wrapped_func)\n def wrapper(self, other, level=None, fill_value=None, axis=0):\n if axis != 0:\n raise NotImplementedError(\"Only axis=0 supported at this time.\")\n return wrapped_func(self, other, axis, level, fill_value)\n\n # functools.wraps copies module level attributes to `wrapper` and sets\n # __wrapped__ attributes to `wrapped_func`. Cpython looks up the signature\n # string of a function by recursively delving into __wrapped__ until\n # it hits the first function that has __signature__ attribute set. To make\n # the signature string of `wrapper` matches with its actual parameter list,\n # we directly set the __signature__ attribute of `wrapper` below.\n\n new_sig = inspect.signature(\n lambda self, other, level=None, fill_value=None, axis=0: None\n )\n wrapper.__signature__ = new_sig\n return wrapper\n\n\n# Wrap all Frame binop functions with the expected API for Series.\nfor binop in (\n \"add\",\n \"radd\",\n \"subtract\",\n \"sub\",\n \"rsub\",\n \"multiply\",\n \"mul\",\n \"rmul\",\n \"mod\",\n \"rmod\",\n \"pow\",\n \"rpow\",\n \"floordiv\",\n \"rfloordiv\",\n \"truediv\",\n \"div\",\n \"divide\",\n \"rtruediv\",\n \"rdiv\",\n \"eq\",\n \"ne\",\n \"lt\",\n \"le\",\n \"gt\",\n \"ge\",\n):\n setattr(Series, binop, make_binop_func(binop))\n\n\nclass DatetimeProperties(object):\n \"\"\"\n Accessor object for datetimelike properties of the Series values.\n\n Returns\n -------\n Returns a Series indexed like the original Series.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> seconds_series = cudf.Series(pd.date_range(\"2000-01-01\", periods=3,\n ... freq=\"s\"))\n >>> seconds_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:00:01\n 2 2000-01-01 00:00:02\n dtype: datetime64[ns]\n >>> seconds_series.dt.second\n 0 0\n 1 1\n 2 2\n dtype: int16\n >>> hours_series = cudf.Series(pd.date_range(\"2000-01-01\", periods=3,\n ... freq=\"h\"))\n >>> hours_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 01:00:00\n 2 2000-01-01 02:00:00\n dtype: datetime64[ns]\n >>> hours_series.dt.hour\n 0 0\n 1 1\n 2 2\n dtype: int16\n >>> weekday_series = cudf.Series(pd.date_range(\"2000-01-01\", periods=3,\n ... freq=\"q\"))\n >>> weekday_series\n 0 2000-03-31\n 1 2000-06-30\n 2 2000-09-30\n dtype: datetime64[ns]\n >>> weekday_series.dt.weekday\n 0 4\n 1 4\n 2 5\n dtype: int16\n \"\"\"\n\n def __init__(self, series):\n self.series = series\n\n @property\n def year(self):\n \"\"\"\n The year of the datetime.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> datetime_series = cudf.Series(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"Y\"))\n >>> datetime_series\n 0 2000-12-31\n 1 2001-12-31\n 2 2002-12-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.year\n 0 2000\n 1 2001\n 2 2002\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"year\")\n\n @property\n def month(self):\n \"\"\"\n The month as January=1, December=12.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range(\"2000-01-01\",\n ... 
periods=3, freq=\"M\"))\n >>> datetime_series\n 0 2000-01-31\n 1 2000-02-29\n 2 2000-03-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.month\n 0 1\n 1 2\n 2 3\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"month\")\n\n @property\n def day(self):\n \"\"\"\n The day of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"D\"))\n >>> datetime_series\n 0 2000-01-01\n 1 2000-01-02\n 2 2000-01-03\n dtype: datetime64[ns]\n >>> datetime_series.dt.day\n 0 1\n 1 2\n 2 3\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"day\")\n\n @property\n def hour(self):\n \"\"\"\n The hours of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"h\"))\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 01:00:00\n 2 2000-01-01 02:00:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.hour\n 0 0\n 1 1\n 2 2\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"hour\")\n\n @property\n def minute(self):\n \"\"\"\n The minutes of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"T\"))\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:01:00\n 2 2000-01-01 00:02:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.minute\n 0 0\n 1 1\n 2 2\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"minute\")\n\n @property\n def second(self):\n \"\"\"\n The seconds of the datetime.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range(\"2000-01-01\",\n ... periods=3, freq=\"s\"))\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:00:01\n 2 2000-01-01 00:00:02\n dtype: datetime64[ns]\n >>> datetime_series.dt.second\n 0 0\n 1 1\n 2 2\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"second\")\n\n @property\n def weekday(self):\n \"\"\"\n The day of the week with Monday=0, Sunday=6.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range('2016-12-31',\n ... '2017-01-08', freq='D'))\n >>> datetime_series\n 0 2016-12-31\n 1 2017-01-01\n 2 2017-01-02\n 3 2017-01-03\n 4 2017-01-04\n 5 2017-01-05\n 6 2017-01-06\n 7 2017-01-07\n 8 2017-01-08\n dtype: datetime64[ns]\n >>> datetime_series.dt.weekday\n 0 5\n 1 6\n 2 0\n 3 1\n 4 2\n 5 3\n 6 4\n 7 5\n 8 6\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"weekday\")\n\n @property\n def dayofweek(self):\n \"\"\"\n The day of the week with Monday=0, Sunday=6.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range('2016-12-31',\n ... '2017-01-08', freq='D'))\n >>> datetime_series\n 0 2016-12-31\n 1 2017-01-01\n 2 2017-01-02\n 3 2017-01-03\n 4 2017-01-04\n 5 2017-01-05\n 6 2017-01-06\n 7 2017-01-07\n 8 2017-01-08\n dtype: datetime64[ns]\n >>> datetime_series.dt.dayofweek\n 0 5\n 1 6\n 2 0\n 3 1\n 4 2\n 5 3\n 6 4\n 7 5\n 8 6\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"weekday\")\n\n @property\n def dayofyear(self):\n \"\"\"\n The day of the year, from 1-365 in non-leap years and\n from 1-366 in leap years.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range('2016-12-31',\n ... 
'2017-01-08', freq='D'))\n >>> datetime_series\n 0 2016-12-31\n 1 2017-01-01\n 2 2017-01-02\n 3 2017-01-03\n 4 2017-01-04\n 5 2017-01-05\n 6 2017-01-06\n 7 2017-01-07\n 8 2017-01-08\n dtype: datetime64[ns]\n >>> datetime_series.dt.dayofyear\n 0 366\n 1 1\n 2 2\n 3 3\n 4 4\n 5 5\n 6 6\n 7 7\n 8 8\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"day_of_year\")\n\n @property\n def day_of_year(self):\n \"\"\"\n The day of the year, from 1-365 in non-leap years and\n from 1-366 in leap years.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> datetime_series = cudf.Series(pd.date_range('2016-12-31',\n ... '2017-01-08', freq='D'))\n >>> datetime_series\n 0 2016-12-31\n 1 2017-01-01\n 2 2017-01-02\n 3 2017-01-03\n 4 2017-01-04\n 5 2017-01-05\n 6 2017-01-06\n 7 2017-01-07\n 8 2017-01-08\n dtype: datetime64[ns]\n >>> datetime_series.dt.day_of_year\n 0 366\n 1 1\n 2 2\n 3 3\n 4 4\n 5 5\n 6 6\n 7 7\n 8 8\n dtype: int16\n \"\"\"\n return self._get_dt_field(\"day_of_year\")\n\n @property\n def is_leap_year(self):\n \"\"\"\n Boolean indicator if the date belongs to a leap year.\n\n A leap year is a year, which has 366 days (instead of 365) including\n 29th of February as an intercalary day. Leap years are years which are\n multiples of four with the exception of years divisible by 100 but not\n by 400.\n\n Returns\n -------\n Series\n Booleans indicating if dates belong to a leap year.\n\n Example\n -------\n >>> import pandas as pd, cudf\n >>> s = cudf.Series(\n ... pd.date_range(start='2000-02-01', end='2013-02-01', freq='1Y'))\n >>> s\n 0 2000-12-31\n 1 2001-12-31\n 2 2002-12-31\n 3 2003-12-31\n 4 2004-12-31\n 5 2005-12-31\n 6 2006-12-31\n 7 2007-12-31\n 8 2008-12-31\n 9 2009-12-31\n 10 2010-12-31\n 11 2011-12-31\n 12 2012-12-31\n dtype: datetime64[ns]\n >>> s.dt.is_leap_year\n 0 True\n 1 False\n 2 False\n 3 False\n 4 True\n 5 False\n 6 False\n 7 False\n 8 True\n 9 False\n 10 False\n 11 False\n 12 True\n dtype: bool\n \"\"\"\n res = libcudf.datetime.is_leap_year(self.series._column).fillna(False)\n return Series._from_data(\n ColumnAccessor({None: res}),\n index=self.series._index,\n name=self.series.name,\n )\n\n @property\n def quarter(self):\n \"\"\"\n Integer indicator for which quarter of the year the date belongs in.\n\n There are 4 quarters in a year. With the first quarter being from\n January - March, second quarter being April - June, third quarter\n being July - September and fourth quarter being October - December.\n\n Returns\n -------\n Series\n Integer indicating which quarter the date belongs to.\n\n Examples\n -------\n >>> import cudf\n >>> s = cudf.Series([\"2020-05-31 08:00:00\",\"1999-12-31 18:40:00\"],\n ... dtype=\"datetime64[ms]\")\n >>> s.dt.quarter\n 0 2\n 1 4\n dtype: int8\n \"\"\"\n res = libcudf.datetime.extract_quarter(self.series._column).astype(\n np.int8\n )\n return Series._from_data(\n {None: res}, index=self.series._index, name=self.series.name,\n )\n\n def isocalendar(self):\n \"\"\"\n Returns a DataFrame with the year, week, and day\n calculated according to the ISO 8601 standard.\n\n Returns\n -------\n DataFrame\n with columns year, week and day\n\n Examples\n --------\n >>> ser = cudf.Series(pd.date_range(start=\"2021-07-25\",\n ... 
end=\"2021-07-30\"))\n >>> ser.dt.isocalendar()\n year week day\n 0 2021 29 7\n 1 2021 30 1\n 2 2021 30 2\n 3 2021 30 3\n 4 2021 30 4\n 5 2021 30 5\n >>> ser.dt.isocalendar().week\n 0 29\n 1 30\n 2 30\n 3 30\n 4 30\n 5 30\n Name: week, dtype: object\n\n >>> serIndex = cudf.to_datetime(pd.Series([\"2010-01-01\", pd.NaT]))\n >>> serIndex.dt.isocalendar()\n year week day\n 0 2009 53 5\n 1 <NA> <NA> <NA>\n >>> serIndex.dt.isocalendar().year\n 0 2009\n 1 <NA>\n Name: year, dtype: object\n \"\"\"\n return cudf.core.tools.datetimes._to_iso_calendar(self)\n\n @property\n def is_month_start(self):\n \"\"\"\n Booleans indicating if dates are the first day of the month.\n \"\"\"\n return (self.day == 1).fillna(False)\n\n @property\n def days_in_month(self):\n \"\"\"\n Get the total number of days in the month that the date falls on.\n\n Returns\n -------\n Series\n Integers representing the number of days in month\n\n Example\n -------\n >>> import pandas as pd, cudf\n >>> s = cudf.Series(\n ... pd.date_range(start='2000-08-01', end='2001-08-01', freq='1M'))\n >>> s\n 0 2000-08-31\n 1 2000-09-30\n 2 2000-10-31\n 3 2000-11-30\n 4 2000-12-31\n 5 2001-01-31\n 6 2001-02-28\n 7 2001-03-31\n 8 2001-04-30\n 9 2001-05-31\n 10 2001-06-30\n 11 2001-07-31\n dtype: datetime64[ns]\n >>> s.dt.days_in_month\n 0 31\n 1 30\n 2 31\n 3 30\n 4 31\n 5 31\n 6 28\n 7 31\n 8 30\n 9 31\n 10 30\n 11 31\n dtype: int16\n \"\"\"\n res = libcudf.datetime.days_in_month(self.series._column)\n return Series._from_data(\n ColumnAccessor({None: res}),\n index=self.series._index,\n name=self.series.name,\n )\n\n @property\n def is_month_end(self):\n \"\"\"\n Boolean indicator if the date is the last day of the month.\n\n Returns\n -------\n Series\n Booleans indicating if dates are the last day of the month.\n\n Example\n -------\n >>> import pandas as pd, cudf\n >>> s = cudf.Series(\n ... pd.date_range(start='2000-08-26', end='2000-09-03', freq='1D'))\n >>> s\n 0 2000-08-26\n 1 2000-08-27\n 2 2000-08-28\n 3 2000-08-29\n 4 2000-08-30\n 5 2000-08-31\n 6 2000-09-01\n 7 2000-09-02\n 8 2000-09-03\n dtype: datetime64[ns]\n >>> s.dt.is_month_end\n 0 False\n 1 False\n 2 False\n 3 False\n 4 False\n 5 True\n 6 False\n 7 False\n 8 False\n dtype: bool\n \"\"\" # noqa: E501\n last_day = libcudf.datetime.last_day_of_month(self.series._column)\n last_day = Series._from_data(\n ColumnAccessor({None: last_day}),\n index=self.series._index,\n name=self.series.name,\n )\n return (self.day == last_day.dt.day).fillna(False)\n\n @property\n def is_quarter_start(self):\n \"\"\"\n Boolean indicator if the date is the first day of a quarter.\n\n Returns\n -------\n Series\n Booleans indicating if dates are the begining of a quarter\n\n Example\n -------\n >>> import pandas as pd, cudf\n >>> s = cudf.Series(\n ... 
pd.date_range(start='2000-09-26', end='2000-10-03', freq='1D'))\n >>> s\n 0 2000-09-26\n 1 2000-09-27\n 2 2000-09-28\n 3 2000-09-29\n 4 2000-09-30\n 5 2000-10-01\n 6 2000-10-02\n 7 2000-10-03\n dtype: datetime64[ns]\n >>> s.dt.is_quarter_start\n 0 False\n 1 False\n 2 False\n 3 False\n 4 False\n 5 True\n 6 False\n 7 False\n dtype: bool\n \"\"\"\n day = self.series._column.get_dt_field(\"day\")\n first_month = self.series._column.get_dt_field(\"month\").isin(\n [1, 4, 7, 10]\n )\n\n result = ((day == cudf.Scalar(1)) & first_month).fillna(False)\n return Series._from_data(\n {None: result}, index=self.series._index, name=self.series.name,\n )\n\n @property\n def is_quarter_end(self):\n \"\"\"\n Boolean indicator if the date is the last day of a quarter.\n\n Returns\n -------\n Series\n Booleans indicating if dates are the end of a quarter\n\n Example\n -------\n >>> import pandas as pd, cudf\n >>> s = cudf.Series(\n ... pd.date_range(start='2000-09-26', end='2000-10-03', freq='1D'))\n >>> s\n 0 2000-09-26\n 1 2000-09-27\n 2 2000-09-28\n 3 2000-09-29\n 4 2000-09-30\n 5 2000-10-01\n 6 2000-10-02\n 7 2000-10-03\n dtype: datetime64[ns]\n >>> s.dt.is_quarter_end\n 0 False\n 1 False\n 2 False\n 3 False\n 4 True\n 5 False\n 6 False\n 7 False\n dtype: bool\n \"\"\"\n day = self.series._column.get_dt_field(\"day\")\n last_day = libcudf.datetime.last_day_of_month(self.series._column)\n last_day = last_day.get_dt_field(\"day\")\n last_month = self.series._column.get_dt_field(\"month\").isin(\n [3, 6, 9, 12]\n )\n\n result = ((day == last_day) & last_month).fillna(False)\n return Series._from_data(\n {None: result}, index=self.series._index, name=self.series.name,\n )\n\n @property\n def is_year_start(self):\n \"\"\"\n Boolean indicator if the date is the first day of the year.\n\n Returns\n -------\n Series\n Booleans indicating if dates are the first day of the year.\n\n Example\n -------\n >>> import pandas as pd, cudf\n >>> s = cudf.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n >>> dates.dt.is_year_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n outcol = self.series._column.get_dt_field(\n \"day_of_year\"\n ) == cudf.Scalar(1)\n return Series._from_data(\n {None: outcol.fillna(False)},\n index=self.series._index,\n name=self.series.name,\n )\n\n @property\n def is_year_end(self):\n \"\"\"\n Boolean indicator if the date is the last day of the year.\n\n Returns\n -------\n Series\n Booleans indicating if dates are the last day of the year.\n\n Example\n -------\n >>> import pandas as pd, cudf\n >>> dates = cudf.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n >>> dates.dt.is_year_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n day_of_year = self.series._column.get_dt_field(\"day_of_year\")\n leap_dates = libcudf.datetime.is_leap_year(self.series._column)\n\n leap = day_of_year == cudf.Scalar(366)\n non_leap = day_of_year == cudf.Scalar(365)\n result = cudf._lib.copying.copy_if_else(leap, non_leap, leap_dates)\n result = result.fillna(False)\n return Series._from_data(\n {None: result}, index=self.series._index, name=self.series.name,\n )\n\n def _get_dt_field(self, field):\n out_column = self.series._column.get_dt_field(field)\n return Series(\n data=out_column, index=self.series._index, name=self.series.name\n )\n\n def ceil(self, freq):\n \"\"\"\n Perform ceil operation on the data to the specified freq.\n\n 
Parameters\n ----------\n freq : str\n One of [\"D\", \"H\", \"T\", \"min\", \"S\", \"L\", \"ms\", \"U\", \"us\", \"N\"].\n Must be a fixed frequency like 'S' (second) not 'ME' (month end).\n See `frequency aliases <https://pandas.pydata.org/docs/\\\n user_guide/timeseries.html#timeseries-offset-aliases>`__\n for more details on these aliases.\n\n Returns\n -------\n Series\n Series with all timestamps rounded up to the specified frequency.\n The index is preserved.\n\n Examples\n --------\n >>> import cudf\n >>> t = cudf.Series([\"2001-01-01 00:04:45\", \"2001-01-01 00:04:58\",\n ... \"2001-01-01 00:05:04\"], dtype=\"datetime64[ns]\")\n >>> t.dt.ceil(\"T\")\n 0 2001-01-01 00:05:00\n 1 2001-01-01 00:05:00\n 2 2001-01-01 00:06:00\n dtype: datetime64[ns]\n \"\"\"\n out_column = self.series._column.ceil(freq)\n\n return Series._from_data(\n data={self.series.name: out_column}, index=self.series._index\n )\n\n def floor(self, freq):\n \"\"\"\n Perform floor operation on the data to the specified freq.\n\n Parameters\n ----------\n freq : str\n One of [\"D\", \"H\", \"T\", \"min\", \"S\", \"L\", \"ms\", \"U\", \"us\", \"N\"].\n Must be a fixed frequency like 'S' (second) not 'ME' (month end).\n See `frequency aliases <https://pandas.pydata.org/docs/\\\n user_guide/timeseries.html#timeseries-offset-aliases>`__\n for more details on these aliases.\n\n Returns\n -------\n Series\n Series with all timestamps rounded up to the specified frequency.\n The index is preserved.\n\n Examples\n --------\n >>> import cudf\n >>> t = cudf.Series([\"2001-01-01 00:04:45\", \"2001-01-01 00:04:58\",\n ... \"2001-01-01 00:05:04\"], dtype=\"datetime64[ns]\")\n >>> t.dt.floor(\"T\")\n 0 2001-01-01 00:04:00\n 1 2001-01-01 00:04:00\n 2 2001-01-01 00:05:00\n dtype: datetime64[ns]\n \"\"\"\n out_column = self.series._column.floor(freq)\n\n return Series._from_data(\n data={self.series.name: out_column}, index=self.series._index\n )\n\n def round(self, freq):\n \"\"\"\n Perform round operation on the data to the specified freq.\n\n Parameters\n ----------\n freq : str\n One of [\"D\", \"H\", \"T\", \"min\", \"S\", \"L\", \"ms\", \"U\", \"us\", \"N\"].\n Must be a fixed frequency like 'S' (second) not 'ME' (month end).\n See `frequency aliases <https://pandas.pydata.org/docs/\\\n user_guide/timeseries.html#timeseries-offset-aliases>`__\n for more details on these aliases.\n\n Returns\n -------\n Series\n Series with all timestamps rounded to the specified frequency.\n The index is preserved.\n\n Examples\n --------\n >>> import cudf\n >>> dt_sr = cudf.Series([\n ... \"2001-01-01 00:04:45\",\n ... \"2001-01-01 00:04:58\",\n ... \"2001-01-01 00:05:04\",\n ... ], dtype=\"datetime64[ns]\")\n >>> dt_sr.dt.round(\"T\")\n 0 2001-01-01 00:05:00\n 1 2001-01-01 00:05:00\n 2 2001-01-01 00:05:00\n dtype: datetime64[ns]\n \"\"\"\n out_column = self.series._column.round(freq)\n\n return Series._from_data(\n data={self.series.name: out_column}, index=self.series._index\n )\n\n def strftime(self, date_format, *args, **kwargs):\n \"\"\"\n Convert to Series using specified ``date_format``.\n\n Return a Series of formatted strings specified by ``date_format``,\n which supports the same string format as the python standard library.\n Details of the string format can be found in `python string format doc\n <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`_.\n\n Parameters\n ----------\n date_format : str\n Date format string (e.g. 
“%Y-%m-%d”).\n\n Returns\n -------\n Series\n Series of formatted strings.\n\n Notes\n -----\n\n The following date format identifiers are not yet\n supported: ``%c``, ``%x``,``%X``\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> weekday_series = cudf.Series(pd.date_range(\"2000-01-01\", periods=3,\n ... freq=\"q\"))\n >>> weekday_series.dt.strftime(\"%Y-%m-%d\")\n >>> weekday_series\n 0 2000-03-31\n 1 2000-06-30\n 2 2000-09-30\n dtype: datetime64[ns]\n 0 2000-03-31\n 1 2000-06-30\n 2 2000-09-30\n dtype: object\n >>> weekday_series.dt.strftime(\"%Y %d %m\")\n 0 2000 31 03\n 1 2000 30 06\n 2 2000 30 09\n dtype: object\n >>> weekday_series.dt.strftime(\"%Y / %d / %m\")\n 0 2000 / 31 / 03\n 1 2000 / 30 / 06\n 2 2000 / 30 / 09\n dtype: object\n \"\"\"\n\n if not isinstance(date_format, str):\n raise TypeError(\n f\"'date_format' must be str, not {type(date_format)}\"\n )\n\n # TODO: Remove following validations\n # once https://github.com/rapidsai/cudf/issues/5991\n # is implemented\n not_implemented_formats = {\n \"%c\",\n \"%x\",\n \"%X\",\n }\n for d_format in not_implemented_formats:\n if d_format in date_format:\n raise NotImplementedError(\n f\"{d_format} date-time format is not \"\n f\"supported yet, Please follow this issue \"\n f\"https://github.com/rapidsai/cudf/issues/5991 \"\n f\"for tracking purposes.\"\n )\n str_col = self.series._column.as_string_column(\n dtype=\"str\", format=date_format\n )\n return Series(\n data=str_col, index=self.series._index, name=self.series.name\n )\n\n\nclass TimedeltaProperties(object):\n \"\"\"\n Accessor object for timedeltalike properties of the Series values.\n\n Returns\n -------\n Returns a Series indexed like the original Series.\n\n Examples\n --------\n >>> import cudf\n >>> seconds_series = cudf.Series([1, 2, 3], dtype='timedelta64[s]')\n >>> seconds_series\n 0 00:00:01\n 1 00:00:02\n 2 00:00:03\n dtype: timedelta64[s]\n >>> seconds_series.dt.seconds\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> series = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,\n ... 3244334234], dtype='timedelta64[ms]')\n >>> series\n 0 141 days 13:35:12.123\n 1 14 days 06:00:31.231\n 2 13000 days 10:12:48.712\n 3 0 days 00:35:35.656\n 4 37 days 13:12:14.234\n dtype: timedelta64[ms]\n >>> series.dt.components\n days hours minutes seconds milliseconds microseconds nanoseconds\n 0 141 13 35 12 123 0 0\n 1 14 6 0 31 231 0 0\n 2 13000 10 12 48 712 0 0\n 3 0 0 35 35 656 0 0\n 4 37 13 12 14 234 0 0\n >>> series.dt.days\n 0 141\n 1 14\n 2 13000\n 3 0\n 4 37\n dtype: int64\n >>> series.dt.seconds\n 0 48912\n 1 21631\n 2 36768\n 3 2135\n 4 47534\n dtype: int64\n >>> series.dt.microseconds\n 0 123000\n 1 231000\n 2 712000\n 3 656000\n 4 234000\n dtype: int64\n >>> s.dt.nanoseconds\n 0 0\n 1 0\n 2 0\n 3 0\n 4 0\n dtype: int64\n \"\"\"\n\n def __init__(self, series):\n self.series = series\n\n @property\n def days(self):\n \"\"\"\n Number of days.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,\n ... 
3244334234], dtype='timedelta64[ms]')\n >>> s\n 0 141 days 13:35:12.123\n 1 14 days 06:00:31.231\n 2 13000 days 10:12:48.712\n 3 0 days 00:35:35.656\n 4 37 days 13:12:14.234\n dtype: timedelta64[ms]\n >>> s.dt.days\n 0 141\n 1 14\n 2 13000\n 3 0\n 4 37\n dtype: int64\n \"\"\"\n return self._get_td_field(\"days\")\n\n @property\n def seconds(self):\n \"\"\"\n Number of seconds (>= 0 and less than 1 day).\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,\n ... 3244334234], dtype='timedelta64[ms]')\n >>> s\n 0 141 days 13:35:12.123\n 1 14 days 06:00:31.231\n 2 13000 days 10:12:48.712\n 3 0 days 00:35:35.656\n 4 37 days 13:12:14.234\n dtype: timedelta64[ms]\n >>> s.dt.seconds\n 0 48912\n 1 21631\n 2 36768\n 3 2135\n 4 47534\n dtype: int64\n >>> s.dt.microseconds\n 0 123000\n 1 231000\n 2 712000\n 3 656000\n 4 234000\n dtype: int64\n \"\"\"\n return self._get_td_field(\"seconds\")\n\n @property\n def microseconds(self):\n \"\"\"\n Number of microseconds (>= 0 and less than 1 second).\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,\n ... 3244334234], dtype='timedelta64[ms]')\n >>> s\n 0 141 days 13:35:12.123\n 1 14 days 06:00:31.231\n 2 13000 days 10:12:48.712\n 3 0 days 00:35:35.656\n 4 37 days 13:12:14.234\n dtype: timedelta64[ms]\n >>> s.dt.microseconds\n 0 123000\n 1 231000\n 2 712000\n 3 656000\n 4 234000\n dtype: int64\n \"\"\"\n return self._get_td_field(\"microseconds\")\n\n @property\n def nanoseconds(self):\n \"\"\"\n Return the number of nanoseconds (n), where 0 <= n < 1 microsecond.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> import cudf\n >>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,\n ... 
3244334234], dtype='timedelta64[ns]')\n >>> s\n 0 00:00:12.231312123\n 1 00:00:01.231231231\n 2 00:18:43.236768712\n 3 00:00:00.002135656\n 4 00:00:03.244334234\n dtype: timedelta64[ns]\n >>> s.dt.nanoseconds\n 0 123\n 1 231\n 2 712\n 3 656\n 4 234\n dtype: int64\n \"\"\"\n return self._get_td_field(\"nanoseconds\")\n\n @property\n def components(self):\n \"\"\"\n Return a Dataframe of the components of the Timedeltas.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656, 3244334234], dtype='timedelta64[ms]')\n >>> s\n 0 141 days 13:35:12.123\n 1 14 days 06:00:31.231\n 2 13000 days 10:12:48.712\n 3 0 days 00:35:35.656\n 4 37 days 13:12:14.234\n dtype: timedelta64[ms]\n >>> s.dt.components\n days hours minutes seconds milliseconds microseconds nanoseconds\n 0 141 13 35 12 123 0 0\n 1 14 6 0 31 231 0 0\n 2 13000 10 12 48 712 0 0\n 3 0 0 35 35 656 0 0\n 4 37 13 12 14 234 0 0\n \"\"\" # noqa: E501\n return self.series._column.components(index=self.series._index)\n\n def _get_td_field(self, field):\n out_column = getattr(self.series._column, field)\n return Series(\n data=out_column, index=self.series._index, name=self.series.name\n )\n\n\ndef _align_indices(series_list, how=\"outer\", allow_non_unique=False):\n \"\"\"\n Internal util to align the indices of a list of Series objects\n\n series_list : list of Series objects\n how : {\"outer\", \"inner\"}\n If \"outer\", the values of the resulting index are the\n unique values of the index obtained by concatenating\n the indices of all the series.\n If \"inner\", the values of the resulting index are\n the values common to the indices of all series.\n allow_non_unique : bool\n Whether or not to allow non-unique valued indices in the input\n series.\n \"\"\"\n if len(series_list) <= 1:\n return series_list\n\n # check if all indices are the same\n head = series_list[0].index\n\n all_index_equal = True\n for sr in series_list[1:]:\n if not sr.index.equals(head):\n all_index_equal = False\n break\n\n # check if all names are the same\n all_names_equal = True\n for sr in series_list[1:]:\n if not sr.index.names == head.names:\n all_names_equal = False\n new_index_names = [None] * head.nlevels\n if all_names_equal:\n new_index_names = head.names\n\n if all_index_equal:\n return series_list\n\n if how == \"outer\":\n combined_index = cudf.core.reshape.concat(\n [sr.index for sr in series_list]\n ).unique()\n combined_index.names = new_index_names\n else:\n combined_index = series_list[0].index\n for sr in series_list[1:]:\n combined_index = (\n cudf.DataFrame(index=sr.index).join(\n cudf.DataFrame(index=combined_index),\n sort=True,\n how=\"inner\",\n )\n ).index\n combined_index.names = new_index_names\n\n # align all Series to the combined index\n result = [\n sr._align_to_index(\n combined_index, how=how, allow_non_unique=allow_non_unique\n )\n for sr in series_list\n ]\n\n return result\n\n\ndef isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n \"\"\"Returns a boolean array where two arrays are equal within a tolerance.\n\n Two values in ``a`` and ``b`` are considered equal when the following\n equation is satisfied.\n\n .. 
math::\n |a - b| \\\\le \\\\mathrm{atol} + \\\\mathrm{rtol} |b|\n\n Parameters\n ----------\n a : list-like, array-like or cudf.Series\n Input sequence to compare.\n b : list-like, array-like or cudf.Series\n Input sequence to compare.\n rtol : float\n The relative tolerance.\n atol : float\n The absolute tolerance.\n equal_nan : bool\n If ``True``, null's in ``a`` will be considered equal\n to null's in ``b``.\n\n Returns\n -------\n Series\n\n See Also\n --------\n np.isclose : Returns a boolean array where two arrays are element-wise\n equal within a tolerance.\n\n Examples\n --------\n >>> import cudf\n >>> s1 = cudf.Series([1.9876543, 2.9876654, 3.9876543, None, 9.9, 1.0])\n >>> s2 = cudf.Series([1.987654321, 2.987654321, 3.987654321, None, 19.9,\n ... None])\n >>> s1\n 0 1.9876543\n 1 2.9876654\n 2 3.9876543\n 3 <NA>\n 4 9.9\n 5 1.0\n dtype: float64\n >>> s2\n 0 1.987654321\n 1 2.987654321\n 2 3.987654321\n 3 <NA>\n 4 19.9\n 5 <NA>\n dtype: float64\n >>> cudf.isclose(s1, s2)\n 0 True\n 1 True\n 2 True\n 3 False\n 4 False\n 5 False\n dtype: bool\n >>> cudf.isclose(s1, s2, equal_nan=True)\n 0 True\n 1 True\n 2 True\n 3 True\n 4 False\n 5 False\n dtype: bool\n >>> cudf.isclose(s1, s2, equal_nan=False)\n 0 True\n 1 True\n 2 True\n 3 False\n 4 False\n 5 False\n dtype: bool\n \"\"\"\n\n if not can_convert_to_column(a):\n raise TypeError(\n f\"Parameter `a` is expected to be a \"\n f\"list-like or Series object, found:{type(a)}\"\n )\n if not can_convert_to_column(b):\n raise TypeError(\n f\"Parameter `b` is expected to be a \"\n f\"list-like or Series object, found:{type(a)}\"\n )\n\n if isinstance(a, pd.Series):\n a = Series.from_pandas(a)\n if isinstance(b, pd.Series):\n b = Series.from_pandas(b)\n\n index = None\n\n if isinstance(a, cudf.Series) and isinstance(b, cudf.Series):\n b = b.reindex(a.index)\n index = as_index(a.index)\n\n a_col = column.as_column(a)\n a_array = cupy.asarray(a_col.data_array_view)\n\n b_col = column.as_column(b)\n b_array = cupy.asarray(b_col.data_array_view)\n\n result = cupy.isclose(\n a=a_array, b=b_array, rtol=rtol, atol=atol, equal_nan=equal_nan\n )\n result_col = column.as_column(result)\n\n if a_col.null_count and b_col.null_count:\n a_nulls = a_col.isnull()\n b_nulls = b_col.isnull()\n null_values = a_nulls | b_nulls\n\n if equal_nan is True:\n equal_nulls = a_nulls & b_nulls\n\n del a_nulls, b_nulls\n elif a_col.null_count:\n null_values = a_col.isnull()\n elif b_col.null_count:\n null_values = b_col.isnull()\n else:\n return Series(result_col, index=index)\n\n result_col[null_values] = False\n if equal_nan is True and a_col.null_count and b_col.null_count:\n result_col[equal_nulls] = True\n\n return Series(result_col, index=index)\n"
]
| [
[
"numpy.result_type",
"numpy.array",
"numpy.asarray",
"pandas._config.get_option",
"numpy.sort",
"pandas.Series",
"numpy.issubdtype",
"numpy.iinfo"
]
]
|
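The code field above ends with cudf's `DatetimeProperties.ceil` / `floor` / `round` accessors, whose docstrings mirror the pandas API. Below is a minimal sketch of that behaviour using plain pandas as a stand-in for cudf (an assumption so the docstring examples can be reproduced without a GPU stack):

```python
# Sketch of the ceil/floor/round behaviour documented in the row above,
# using plain pandas instead of cudf (the accessor API is the same).
import pandas as pd

t = pd.Series(
    ["2001-01-01 00:04:45", "2001-01-01 00:04:58", "2001-01-01 00:05:04"],
    dtype="datetime64[ns]",
)

print(t.dt.ceil("min"))   # each timestamp rounded up to the next minute
print(t.dt.floor("min"))  # each timestamp rounded down to the minute
print(t.dt.round("min"))  # each timestamp rounded to the nearest minute
```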
anhaidgroup/py_entitymatching | [
"6724081d7d95c547e5a51625b4a8207c6c1737f8"
]
| [
"py_entitymatching/dask/daskmlmatcher.py"
]
| [
"\"\"\"\nThis module contains functions related to ML-matcher, that is common across\nall the ML-matchers.\n\"\"\"\nimport logging\n\nimport pandas as pd\nimport numpy as np\n\n# import dask\nimport dask\nfrom dask import delayed\nfrom dask.diagnostics import ProgressBar\n\nimport py_entitymatching.catalog.catalog_manager as cm\nfrom py_entitymatching.matcher.matcher import Matcher\nfrom py_entitymatching.matcher.matcherutils import get_true_lbl_index\nimport py_entitymatching.utils.catalog_helper as ch\nimport py_entitymatching.utils.generic_helper as gh\n\nfrom py_entitymatching.utils.validation_helper import validate_object_type\nfrom py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \\\n get_num_cores, wrap\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DaskMLMatcher(Matcher):\n \"\"\"\n ML Matcher class.\n \"\"\"\n\n def _fit_sklearn(self, x, y, check_rem=True):\n \"\"\"\n This function mimics fit method supported by sk-learn.\n \"\"\"\n # From the given input, derive the data that can be used for sk-learn\n # methods.\n x, y = self._get_data_for_sklearn(x, y, check_rem=check_rem)\n # Call the fit method from the underlying classifier.\n self.clf.fit(x, y)\n return True\n\n def _fit_ex_attrs(self, table, exclude_attrs, target_attr):\n \"\"\"\n This function supports the fit method, where the DataFrame can be\n given as input along with what attributes must be excluded and the\n target attribute.\n \"\"\"\n # Validate the input parameters.\n # # We expect the input table to be of type pandas DataFrame.\n if not isinstance(table, pd.DataFrame):\n logger.error('Input table is not of type DataFrame')\n raise AssertionError('Input table is not of type DataFrame')\n\n # Convert the exclude attributes into list (if the input is not of list)\n if not isinstance(exclude_attrs, list):\n exclude_attrs = [exclude_attrs]\n\n # Check if the exclude attributes are present in the input table. If\n # not, raise an error.\n if not ch.check_attrs_present(table, exclude_attrs):\n logger.error(\n 'The attributes mentioned in exclude_attrs is not present ' \\\n 'in the input table')\n raise AssertionError(\n 'The attributes mentioned in exclude_attrs is not present ' \\\n 'in the input table')\n\n # Check if the target attribute is present in the input table. 
If\n # not, raise an error.\n if not ch.check_attrs_present(table, target_attr):\n logger.error('The target_attr is not present in the input table')\n raise AssertionError(\n 'The target_attr is not present in the input table')\n\n # We now remove duplicate attributes from the exclude_attrs\n exclude_attrs = gh.list_drop_duplicates(exclude_attrs)\n\n # We explicitly append target attribute to exclude attributes\n if target_attr not in exclude_attrs:\n exclude_attrs.append(target_attr)\n\n # Now, we get the attributes to project\n attributes_to_project = gh.list_diff(list(table.columns), exclude_attrs)\n\n # Get the predictors and the target attribute from the input table\n # based on the exclude attrs and the target attribute.\n x = table[attributes_to_project]\n y = table[target_attr]\n\n self._fit_sklearn(x, y, check_rem=False)\n\n def fit(self, x=None, y=None, table=None, exclude_attrs=None,\n target_attr=None):\n \"\"\"\n Fit interface for the matcher.\n\n Specifically, there are two ways the user can call the fit method.\n First, interface similar to scikit-learn where the feature vectors\n and target attribute given as projected DataFrame.\n Second, give the DataFrame and explicitly specify the feature vectors\n (by specifying the attributes to be excluded) and the target attribute.\n\n A point to note is all the input parameters have a default value of\n None. This is done to support both the interfaces in a single function.\n\n Args:\n x (DataFrame): The input feature vectors given as pandas\n DataFrame (defaults to None).\n y (DatFrame): The input target attribute given as pandas\n DataFrame with a single column (defaults to None).\n table (DataFrame): The input pandas DataFrame containing feature\n vectors and target attribute (defaults to None).\n exclude_attrs (list): The list of attributes that should be\n excluded from the input table to get the feature vectors.\n target_attr (string): The target attribute in the input table.\n \"\"\"\n # Check if x and y is given, then call a function that handles\n # sk-learn like interface input.\n if x is not None and y is not None:\n self._fit_sklearn(x, y)\n # Check if table and its associated attributes, then call the\n # appropriate function that handles it.\n elif (\n table is not None and exclude_attrs is not None) \\\n and target_attr is not None:\n self._fit_ex_attrs(table, exclude_attrs, target_attr)\n else:\n # If the syntax is not what we expect, raise an syntax error.\n raise SyntaxError(\n 'The arguments supplied does not match the signatures '\n 'supported !!!')\n\n def _predict_sklearn(self, x, check_rem=True, return_prob=False):\n # Function that implements, predict interface mimic-ing sk-learn's\n # predict interface.\n\n # Here check_rem parameter requires a bit of explanation. 
The\n # check_rem flag checks if the input table has '_id' attribute if so\n # and if check_rem is True then we remove the '_id' attribute from\n # the table.\n # Note: Here check_rem is just passing what is coming in i.e it can be\n # true or false based up on who is calling it.\n x = self._get_data_for_sklearn(x, check_rem=check_rem)\n # Call the underlying predict function.\n y = self.clf.predict(x)\n if not return_prob:\n # Return the predictions\n return y\n else:\n _p = self.clf.predict_proba(x)\n true_index = get_true_lbl_index(self.clf)\n return y, _p[:, true_index]\n\n def _predict_ex_attrs(self, table, exclude_attrs, return_prob=False):\n \"\"\"\n Variant of predict method, where data is derived based on exclude\n attributes.\n \"\"\"\n # Validate input parameters\n # # We expect input table to be a pandas DataFrame.\n if not isinstance(table, pd.DataFrame):\n logger.error('Input table is not of type DataFrame')\n raise AssertionError('Input table is not of type DataFrame')\n\n # # We expect the exclude attributes to be a list, if not convert it\n # into a list.\n if not isinstance(exclude_attrs, list):\n exclude_attrs = [exclude_attrs]\n\n # Check if the input table contains the attributes to be excluded. If\n # not raise an error.\n if not ch.check_attrs_present(table, exclude_attrs):\n logger.error(\n 'The attributes mentioned in exclude_attrs is not present ' \\\n 'in the input table')\n raise AssertionError(\n 'The attributes mentioned in exclude_attrs is not present ' \\\n 'in the input table')\n\n # Get the attributes to project.\n attributes_to_project = gh.list_diff(list(table.columns), exclude_attrs)\n # Get feature vectors and the target attribute\n x = table[attributes_to_project]\n\n # Do the predictions and return the probabilities (if required)\n\n res = self._predict_sklearn(x, check_rem=False, return_prob=return_prob)\n return res\n\n # if not just do the predictions and return the result\n # if not return_prob:\n # # Do the predictions using the ML-based matcher.\n # y = self._predict_sklearn(x, check_rem=False)\n #\n # # Finally return the predictions\n # return y\n # else:\n # res = self._predict_sklearn()\n\n def predict(self, x=None, table=None, exclude_attrs=None, target_attr=None,\n append=False, return_probs=False, probs_attr=None, inplace=True,\n show_progress=False, n_chunks=1):\n \"\"\"\n WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.\n\n Predict interface for the matcher.\n\n Specifically, there are two ways the user can call the predict method.\n First, interface similar to scikit-learn where the feature vectors\n given as projected DataFrame.\n Second, give the DataFrame and explicitly specify the feature vectors\n (by specifying the attributes to be excluded) .\n\n A point to note is all the input parameters have a default value of\n None. This is done to support both the interfaces in a single function.\n\n Currently, the Dask implementation supports only the cases when the table is not \n None and the flags inplace, append are False. 
\n\n\n Args:\n x (DataFrame): The input pandas DataFrame containing only feature\n vectors (defaults to None).\n table (DataFrame): The input pandas DataFrame containing feature\n vectors, and may be other attributes (defaults to None).\n exclude_attrs (list): A list of attributes to be excluded from the\n input table to get the feature vectors (defaults to None).\n target_attr (string): The attribute name where the predictions\n need to be stored in the input table (defaults to None).\n probs_attr (string): The attribute name where the prediction probabilities \n need to be stored in the input table (defaults to None).\n append (boolean): A flag to indicate whether the predictions need\n to be appended in the input DataFrame (defaults to False).\n return_probs (boolean): A flag to indicate where the prediction probabilities\n need to be returned (defaults to False). If set to True, returns the \n probability if the pair was a match.\n inplace (boolean): A flag to indicate whether the append needs to be\n done inplace (defaults to True).\n show_progress (boolean): A flag to indicate whether the progress of\n extracting feature vectors must be displayed (defaults to True).\n n_chunks (int): The number of partitions to split the candidate set. If it \n is set to -1, the number of partitions will be set to the \n number of cores in the machine. \n\n\n Returns:\n An array of predictions or a DataFrame with predictions updated.\n\n \"\"\"\n\n logger.warning(\n \"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.\")\n\n if x is not None:\n return self._predict(x, table, exclude_attrs, target_attr, append,\n return_probs, probs_attr, inplace)\n else:\n n_chunks = get_num_partitions(n_chunks, len(table))\n if n_chunks == 1 or inplace == True or append == False:\n # When the inplace flag is True, the predictions (and probs) are added\n # in place. If he have to use Dask then we have to modify _predict (\n # specifically _predict_sk_learn) function.\n # So, to keep things simple, we support Dask only when\n # inplace=False\n\n # Similarly, when append=False, the return value from _predict will be\n # different for different cases (for example, when return_probs is True\n # or False). 
If we have to use Dask then we have to careful in\n # recording the return values for each chunk.\n # So, to keep things simple, we support Dask only when\n # append=True\n\n\n result = self._predict(table=table, exclude_attrs=exclude_attrs,\n target_attr=target_attr, append=append,\n return_probs=return_probs, probs_attr=probs_attr,\n inplace=inplace, copy_props=True)\n\n else:\n predicted_results = []\n splitted_tables = np.array_split(table, n_chunks)\n for i in range(len(splitted_tables)):\n partial_result = delayed(self._predict)(table=splitted_tables[i],\n exclude_attrs=exclude_attrs, target_attr=target_attr,\n append=append,\n return_probs=return_probs,\n probs_attr=probs_attr,\n inplace=inplace,\n copy_props=False)\n predicted_results.append(partial_result)\n predicted_results = delayed(wrap)(predicted_results)\n if show_progress:\n with ProgressBar():\n predicted_results = predicted_results.compute(\n scheduler=\"processes\", num_workers=get_num_cores())\n else:\n predicted_results = predicted_results.compute(\n scheduler=\"processes\", num_workers=get_num_cores())\n\n\n result = pd.concat(predicted_results)\n cm.copy_properties(table, result)\n return result\n\n # predict method\n def _predict(self, x=None, table=None, exclude_attrs=None, target_attr=None,\n append=False, return_probs=False,\n probs_attr=None, inplace=True, copy_props=True):\n \"\"\"\n Delegated function from predict.\n \"\"\"\n # If x is not none, call the predict method that mimics sk-learn\n # predict method.\n if x is not None:\n y = self._predict_sklearn(x, return_prob=return_probs)\n # If the input table and the exclude attributes are not None,\n # then call the appropriate predict method.\n elif table is not None and exclude_attrs is not None:\n y = self._predict_ex_attrs(table, exclude_attrs, return_prob=return_probs)\n # If the append is True, update the table\n if target_attr is not None and append is True:\n # If inplace is True, then update the input table.\n if inplace:\n if return_probs:\n table[target_attr] = y[0]\n table[probs_attr] = y[1]\n # Return the updated table\n return table\n else:\n # Return the updated table\n table[target_attr] = y\n return table\n else:\n # else, create a copy and update it.\n table_copy = table.copy()\n if return_probs:\n table_copy[target_attr] = y[0]\n table_copy[probs_attr] = y[1]\n else:\n table_copy[target_attr] = y\n # copy the properties from the input table to the output\n # table.\n if copy_props:\n cm.copy_properties(table, table_copy)\n # Return the new table.\n return table_copy\n\n else:\n # else, raise a syntax error\n raise SyntaxError(\n 'The arguments supplied does not match '\n 'the signatures supported !!!')\n # Return the predictions\n return y\n\n # get and set name of matcher\n def get_name(self):\n # Return the name of the matcher\n return self.name\n\n def set_name(self, name):\n # Set the name of the matcher\n self.name = name\n\n # helper functions\n def _get_data_for_sklearn(self, x, y=None, check_rem=True):\n \"\"\"\n Gets data in a format that can be used to call sk-learn methods such\n as fit and predict.\n \"\"\"\n # Validate input parameters.\n # # We expect the input object (x) to be of type pandas DataFrame.\n if not isinstance(x, pd.DataFrame):\n logger.error('Input table is not of type DataFrame')\n raise AssertionError('Input table is not of type DataFrame')\n\n # Check to see if we have to remove id column\n if x.columns[0] == '_id' and check_rem == True:\n logger.warning(\n 'Input table contains \"_id\". 
'\n 'Removing this column for processing')\n # Get the values from the DataFrame\n x = x.values\n # Remove the first column ('_id')\n x = np.delete(x, 0, 1)\n else:\n # Get the values from the DataFrame\n x = x.values\n if y is not None:\n # Remove the _id column from the input.\n if not isinstance(y, pd.Series) and y.columns[0] == '_id' \\\n and check_rem == True:\n logger.warning(\n 'Input table contains \"_id\". '\n 'Removing this column for processing')\n # Get the values from the DataFrame\n y = y.values\n y = np.delete(y, 0, 1)\n else:\n # Get the values from the DataFrame\n y = y.values\n # Return both x and y\n return x, y\n else:\n # Return x\n return x\n"
]
| [
[
"pandas.concat",
"numpy.delete",
"numpy.array_split"
]
]
|
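The `apis` list above (numpy.array_split, pandas.concat, numpy.delete) corresponds to the chunked-prediction path in `DaskMLMatcher.predict` and the `_id`-column removal in `_get_data_for_sklearn`. The sketch below shows that chunk-then-concatenate pattern without the dask scheduling; `predict_chunk` is a hypothetical stand-in for `self._predict`, and row positions are split rather than the DataFrame itself:

```python
# Chunk a table, "predict" each chunk, and reassemble the results,
# mirroring the pattern used in DaskMLMatcher.predict (minus dask.delayed).
import numpy as np
import pandas as pd

table = pd.DataFrame({
    "_id": range(6),
    "f1": [0.1, 0.9, 0.4, 0.8, 0.2, 0.7],
    "f2": [1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
})

def predict_chunk(chunk):
    # Hypothetical stand-in for self._predict on one partition.
    values = chunk.values
    values = np.delete(values, 0, 1)              # drop the leading '_id' column
    out = chunk.copy()
    out["predicted"] = (values.sum(axis=1) > 1.0).astype(int)
    return out

n_chunks = 3
parts = [
    predict_chunk(table.iloc[idx])
    for idx in np.array_split(np.arange(len(table)), n_chunks)
]
result = pd.concat(parts)                          # reassemble the partitions
print(result)
```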
pyc-ycy/qtpandas | [
"5adf5f0cee8d3409e25342ecb390cce19beb6ff1"
]
| [
"tests/test_CustomDelegates.py"
]
| [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import super\nfrom builtins import str\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom qtpandas.compat import Qt, QtCore, QtGui\n\n\nimport pytest\nimport pytestqt\n\nimport numpy\nimport pandas\n\nfrom qtpandas.views.CustomDelegates import BigIntSpinboxDelegate, CustomDoubleSpinboxDelegate, TextDelegate, createDelegate\nfrom qtpandas.models.DataFrameModel import DataFrameModel\n\nclass DemoTableView(QtGui.QTableView):\n\n def __init__(self, parent=None):\n super(DemoTableView, self).__init__(parent)\n self.resize(800, 600)\n self.move(0, 0)\n\nclass TestCustomDelegates(object):\n\n @pytest.fixture\n def emptyTableView(self, qtbot):\n widget = DemoTableView()\n qtbot.addWidget(widget)\n return widget\n\n @pytest.fixture\n def dataFrame(self):\n return pandas.DataFrame(['abc'], columns=['A'])\n\n @pytest.fixture\n def model(self, dataFrame):\n return DataFrameModel(dataFrame)\n\n @pytest.fixture\n def tableView(self, model, emptyTableView):\n emptyTableView.setModel(model)\n return emptyTableView\n\n @pytest.fixture\n def index(self, model):\n index = model.index(0, 0)\n assert index.isValid()\n return index\n\n @pytest.mark.parametrize(\n \"widgetClass, model, exception, exceptionContains\", [\n (QtGui.QWidget, None, AttributeError, \"has no attribute 'model'\"),\n (DemoTableView, None, ValueError, \"no model set for the current view\"),\n (DemoTableView, QtGui.QStandardItemModel(), TypeError, 'model is not of type DataFrameModel'),\n ]\n )\n def test_tableViewMissing(self, widgetClass, qtbot, model, exception, exceptionContains):\n widget = widgetClass()\n qtbot.addWidget(widget)\n with pytest.raises(exception) as excinfo:\n if model:\n widget.setModel(QtGui.QStandardItemModel())\n createDelegate('foo', 'bar', widget)\n assert exceptionContains in str(excinfo.value)\n\n @pytest.mark.parametrize(\n \"value, singleStep\", [\n (numpy.int8(1), 1),\n (numpy.int16(1), 1),\n (numpy.int32(1), 1),\n (numpy.int64(1), 1),\n (numpy.uint8(1), 1),\n (numpy.uint16(1), 1),\n (numpy.uint32(1), 1),\n (numpy.uint64(1), 1),\n (numpy.float16(1.11111), 0.1),\n (numpy.float32(1.11111111), 0.1),\n (numpy.float64(1.1111111111111111), 0.1),\n #(numpy.float128(1.11111111111111111111), 0.1),\n ]\n )\n def test_setDelegates(self, qtbot, tableView, index, value, singleStep):\n dlg = createDelegate(numpy.dtype(value), 0, tableView)\n assert dlg is not None\n\n data = pandas.DataFrame([value], columns=['A'])\n data['A'] = data['A'].astype(value.dtype)\n model = tableView.model()\n model.setDataFrame(data)\n for i, delegate in enumerate([dlg]):\n assert tableView.itemDelegateForColumn(i) == delegate\n\n option = QtGui.QStyleOptionViewItem()\n option.rect = QtCore.QRect(0, 0, 100, 100)\n editor = delegate.createEditor(tableView, option, index)\n delegate.setEditorData(editor, index)\n assert editor.value() == index.data()\n delegate.setModelData(editor, model, index)\n\n delegate.updateEditorGeometry(editor, option, index)\n\n dtype = value.dtype\n if dtype in DataFrameModel._intDtypes:\n info = numpy.iinfo(dtype)\n assert isinstance(delegate, BigIntSpinboxDelegate)\n elif dtype in DataFrameModel._floatDtypes:\n info = numpy.finfo(dtype)\n assert isinstance(delegate, CustomDoubleSpinboxDelegate)\n assert delegate.decimals == DataFrameModel._float_precisions[str(value.dtype)]\n assert delegate.maximum 
== info.max\n assert editor.maximum() == info.max\n assert delegate.minimum == info.min\n assert editor.minimum() == info.min\n assert delegate.singleStep == singleStep\n assert editor.singleStep() == singleStep\n\n\n def clickEvent(index):\n assert index.isValid()\n\n tableView.clicked.connect(clickEvent)\n with qtbot.waitSignal(tableView.clicked) as blocker:\n qtbot.mouseClick(tableView.viewport(), QtCore.Qt.LeftButton, pos=QtCore.QPoint(10, 10))\n assert blocker.signal_triggered\n\n\nclass TestTextDelegate(object):\n\n @pytest.fixture\n def dataFrame(self):\n data = [['zero', 1, 2, 3, 4],\n ['five', 6, 7, 8, 9],\n ['ten', 11, 12, 13, 14]]\n\n columns = ['Foo', 'Bar', 'Spam', 'Eggs', 'Baz']\n dataFrame = pandas.DataFrame(data, columns=columns)\n return dataFrame\n\n def test_editing(self, dataFrame, qtbot):\n model = DataFrameModel(dataFrame)\n\n tableView = QtGui.QTableView()\n\n qtbot.addWidget(tableView)\n tableView.setModel(model)\n\n delegate = TextDelegate(tableView)\n createDelegate(numpy.dtype('O'), 0, tableView)\n tableView.show()\n\n index = model.index(0, 0)\n preedit_data = index.data()\n\n assert not model.editable\n model.enableEditing(True)\n tableView.edit(index)\n editor = tableView.findChildren(QtGui.QLineEdit)[0]\n qtbot.keyPress(editor, QtCore.Qt.Key_F)\n qtbot.keyPress(editor, QtCore.Qt.Key_Enter)\n QtGui.QApplication.processEvents()\n with qtbot.waitSignal(timeout=100):\n assert index.data(QtCore.Qt.DisplayRole) == 'f'\n"
]
| [
[
"numpy.int8",
"numpy.uint8",
"numpy.uint32",
"numpy.float16",
"numpy.uint16",
"pandas.DataFrame",
"numpy.int64",
"numpy.float64",
"numpy.finfo",
"numpy.float32",
"numpy.uint64",
"numpy.int32",
"numpy.iinfo",
"numpy.dtype",
"numpy.int16"
]
]
|
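The qtpandas test above checks that spinbox delegates take their bounds from the column dtype via numpy.iinfo / numpy.finfo. A rough sketch of just that dtype-limit step follows, with no Qt involved; `spinbox_limits` is a hypothetical helper, and `np.issubdtype` is used here as a stand-in for the test's membership check against `DataFrameModel._intDtypes`:

```python
# Derive editor limits from a numpy scalar's dtype: integer dtypes use
# np.iinfo, floating dtypes use np.finfo.
import numpy as np

def spinbox_limits(value):
    dtype = value.dtype                      # numpy scalars carry their dtype
    if np.issubdtype(dtype, np.integer):
        info = np.iinfo(dtype)
    else:
        info = np.finfo(dtype)
    return info.min, info.max

print(spinbox_limits(np.int16(1)))       # (-32768, 32767)
print(spinbox_limits(np.float32(1.0)))   # (~-3.4e38, ~3.4e38)
```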
JiehangXie/PaddleSpeech | [
"60090b49ec27437127ab62358026dd5bb95fccc7"
]
| [
"paddlespeech/text/exps/ernie_linear/avg_model.py"
]
| [
"#!/usr/bin/env python3\n# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nimport glob\nimport json\nimport os\n\nimport numpy as np\nimport paddle\n\n\ndef main(args):\n paddle.set_device('cpu')\n\n val_scores = []\n beat_val_scores = []\n selected_epochs = []\n if args.val_best:\n jsons = glob.glob(f'{args.ckpt_dir}/[!train]*.json')\n for y in jsons:\n with open(y, 'r') as f:\n dict_json = json.load(f)\n loss = dict_json['F1']\n epoch = dict_json['epoch']\n if epoch >= args.min_epoch and epoch <= args.max_epoch:\n val_scores.append((epoch, loss))\n\n val_scores = np.array(val_scores)\n sort_idx = np.argsort(-val_scores[:, 1])\n sorted_val_scores = val_scores[sort_idx]\n path_list = [\n args.ckpt_dir + '/{}.pdparams'.format(int(epoch))\n for epoch in sorted_val_scores[:args.num, 0]\n ]\n\n beat_val_scores = sorted_val_scores[:args.num, 1]\n selected_epochs = sorted_val_scores[:args.num, 0].astype(np.int64)\n print(\"best val scores = \" + str(beat_val_scores))\n print(\"selected epochs = \" + str(selected_epochs))\n else:\n path_list = glob.glob(f'{args.ckpt_dir}/[!avg][!final]*.pdparams')\n path_list = sorted(path_list, key=os.path.getmtime)\n path_list = path_list[-args.num:]\n\n print(path_list)\n\n avg = None\n num = args.num\n assert num == len(path_list)\n for path in path_list:\n print(f'Processing {path}')\n states = paddle.load(path)\n if avg is None:\n avg = states\n else:\n for k in avg.keys():\n avg[k] += states[k]\n # average\n for k in avg.keys():\n if avg[k] is not None:\n avg[k] /= num\n\n paddle.save(avg, args.dst_model)\n print(f'Saving to {args.dst_model}')\n\n meta_path = os.path.splitext(args.dst_model)[0] + '.avg.json'\n with open(meta_path, 'w') as f:\n data = json.dumps({\n \"avg_ckpt\": args.dst_model,\n \"ckpt\": path_list,\n \"epoch\": selected_epochs.tolist(),\n \"val_loss\": beat_val_scores.tolist(),\n })\n f.write(data + \"\\n\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='average model')\n parser.add_argument('--dst_model', required=True, help='averaged model')\n parser.add_argument(\n '--ckpt_dir', required=True, help='ckpt model dir for average')\n parser.add_argument(\n '--val_best', action=\"store_true\", help='averaged model')\n parser.add_argument(\n '--num', default=5, type=int, help='nums for averaged model')\n parser.add_argument(\n '--min_epoch',\n default=0,\n type=int,\n help='min epoch used for averaging model')\n parser.add_argument(\n '--max_epoch',\n default=65536, # Big enough\n type=int,\n help='max epoch used for averaging model')\n\n args = parser.parse_args()\n print(args)\n\n main(args)\n"
]
| [
[
"numpy.array",
"numpy.argsort"
]
]
|
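`avg_model.py` above selects which checkpoints to average by sorting an (epoch, F1) array with numpy.argsort. The sketch below reproduces that selection step with made-up scores; the parameter-averaging loop itself is omitted:

```python
# Pick the top `num` checkpoints by validation F1, as in avg_model.py.
# The (epoch, score) pairs are illustration values only.
import numpy as np

val_scores = np.array([(1, 0.71), (2, 0.74), (3, 0.69), (4, 0.78), (5, 0.73)])
num = 3

sort_idx = np.argsort(-val_scores[:, 1])          # sort by F1, descending
sorted_val_scores = val_scores[sort_idx]

best_val_scores = sorted_val_scores[:num, 1]
selected_epochs = sorted_val_scores[:num, 0].astype(np.int64)
print(best_val_scores)    # [0.78 0.74 0.73]
print(selected_epochs)    # [4 2 5]
```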
jchkoch/pycalculix | [
"c943c0408297873de104bd60c404c42eea66b895"
]
| [
"pycalculix/results_file.py"
]
| [
"\"\"\"This module stores the Results_File class.\"\"\"\n\nimport collections\nimport math # used for metric number conversion\nimport os #need to check if results file exists\nimport re # used to get info from frd file\nimport subprocess # used to check ccx version\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nimport numpy as np\nfrom numpy.lib.polynomial import roots # need to find S1-S3\nfrom numpy.core.function_base import linspace # need to make contours\n\n\nfrom . import base_classes # needed for RESFIELDS\nfrom . import environment\nfrom . import mesh\n\nCMAP = 'jet'\n\nclass ResultsFile(object):\n \"\"\"Makes a results file.\n\n Args:\n problem (Problem): problem that was solved\n\n Attributes:\n __problem (Problem): parent problem\n __steps (list): a list of float time steps\n __results (dict): a dict storing the results data\n results[step]['node'][nnum][field] --> value\n results[step]['element'][enum]['avg'][field] --> value\n results[step]['element'][enum]['max'][field] --> value\n results[step]['element'][enum]['min'][field] --> value\n results[step]['element'][enum]['ipoints'][ipnum][field] --> value\n field = 'ux' or 'Seqv' or 'ey' etc.\n __time (float): current time we are looking at, defaults to -1\n When a file is loaded in, the first time step is loaded.\n \"\"\"\n\n def __init__(self, problem):\n self.__problem = problem\n self.__steps = [] # this stores a list of time steps\n self.__results = {} # stores results, nested dicts\n self.__time = -1\n if self.__problem.solved:\n self.load()\n\n @property\n def steps(self):\n \"\"\"Returns a list of loaded time steps.\n\n Note: this is read only, you can not assign a value to it.\n \"\"\"\n return self.__steps\n\n @property\n def time(self):\n \"\"\"Returns the current time (float) in the results file.\n\n Note: this is read only, you can not assign a value to it.\n \"\"\"\n return self.__time\n\n @staticmethod\n def __metric_num(number, sig_figs=3, sci=False):\n \"\"\"Returns string of number, with only 10**3 suffixes.\n\n If 0 <= number < 1000 no suffix is added.\n This is useful for quantities usually given in metric: stress, displacement.\n\n Args:\n number (float or int): the number we want converted to __metric_number\n sig_figs (int): number of significant figures to use, right of decimal\n sci (bool): True means use scientific formatting, False use metric\n \"\"\"\n if sci:\n format_str = \"%.{}e\".format(sig_figs)\n my_str = format_str % number\n else:\n format_str = \"%.{}f\".format(sig_figs)\n my_str = format_str % number\n if number != 0:\n # get the scientific exponent\n exp = math.floor(math.log10(abs(number)))\n metric_exp = exp - (exp % 3)\n new_float = number/(10**metric_exp)\n if metric_exp != 0:\n format_str = \"%.{}fe%i\".format(sig_figs)\n my_str = format_str % (new_float, metric_exp)\n return my_str\n\n def load(self):\n \"\"\"Loads the results file with problem.fname prefix.\"\"\"\n self.__read_frd() # read nodal results\n self.__read_dat() # read element integration pt results\n\n def nplot(self, field, fname='', display=True, levels=21, gradient=False,\n gmult=1.0, max_val=None, min_val=None, title=''):\n \"\"\"Plots nodal results.\n\n Args:\n field (str): results item to plot, examples: 'ux', 'ey', 'Seqv'\n fname (str): prefix of png file name, if writing an image\n display (bool): True = interactively show the plot\n levels (int): number of levels to use in the colorbar\n gradient (bool): True = results plotted with gradient\n False = results 
plotted with filled areas\n gmult (int): geometric multiplier on displacement of nodes\n displayed_node_loc = model_node_loc + gmult*node_displacement\n max_val (float or None): max value in the colorbar\n\n - None: max from selected data used\n - float: use the passed float\n min_val (float): min value in the colorbar\n\n - None: min from selected data used\n - float: use the passed float\n title (str): third line in the plot title\n \"\"\"\n # store the selected nodes and elements\n sel = {}\n sel['nodes'] = self.__problem.fea.view.nodes\n sel['elements'] = self.__problem.fea.view.elements\n sel['faces'] = self.__problem.fea.view.faces\n\n # sort nodes low to high so index is correct\n # we have index to id below so showing subsets works\n sel['nodes'] = list(sel['nodes'])\n sel['nodes'] = sorted(sel['nodes'], key=lambda k: k.id)\n\n # store results at nodes\n axials = []\n radials = []\n zvals = []\n id_to_ind = {}\n for node in sel['nodes']:\n id_to_ind[node.id] = len(axials)\n axi = node.y + gmult*self.__results[self.__time]['node'][node.id]['uy']\n rad = node.x + gmult*self.__results[self.__time]['node'][node.id]['ux']\n axials.append(axi)\n radials.append(rad)\n zvals.append(self.__results[self.__time]['node'][node.id][field])\n\n # make a list of triangles, given by indices, looping anticlockwise\n triangles = []\n mylist = []\n if len(sel['elements']) > 0:\n mylist = sel['elements']\n elif len(sel['faces']) > 0:\n mylist = sel['faces']\n for element in mylist:\n tris = element.get_tris() # list of triangle nodes\n for tri in tris:\n for ind, nid in enumerate(tri):\n tri[ind] = id_to_ind[nid] # convert id to index\n triangles += tris\n\n # check to see if selected nodes and elements are\n # in the parent model's nodes and elements\n fig = plt.figure()\n ax_ = fig.add_subplot(111)\n\n # need to set tick list here\n vmin = min(zvals)\n vmax = max(zvals)\n stop_plot = False\n if max_val != None and min_val == None:\n if max_val < vmin:\n stop_plot = True\n print('Error:')\n print(' Only max was passed but it is < the data min!')\n print(' Pass a max_val that is > the data min of %f' % vmin)\n else:\n vmax = max_val\n elif min_val != None and max_val == None:\n if min_val > vmax:\n stop_plot = True\n print('Error:')\n print(' Only min was passed but it is > the data max!')\n print(' Pass a min_val that is < the data max of %f' % vmax)\n else:\n vmin = min_val\n elif max_val != None and min_val != None:\n if max_val < min_val:\n stop_plot = True\n print('Error:')\n print(' Min and max passed, but min > max!')\n print(' Pass a min_val that is < max_val')\n else:\n vmax = max_val\n vmin = min_val\n # exit if stop plot flag is on\n if stop_plot:\n return None\n\n tick_list = [vmin]\n if vmax != vmin:\n # we have a range of values we're plotting\n tick_list = linspace(vmin, vmax, levels+1)\n\n # plot using a gradient(shaded) or levels\n # code required for the colorbar, needs to go before plotting for colormap\n cnorm = colors.Normalize(vmin=vmin, vmax=vmax)\n cmap = colors.ListedColormap(['b', 'b']) # default to plot one val\n if vmax != vmin:\n # we have a range of values we're plotting\n if gradient:\n cmap = plt.get_cmap(CMAP)\n else:\n cmap = plt.get_cmap('jet', levels)\n cmap.set_under('0.3', 0.8)\n cmap.set_over('0.7', 0.8)\n if gradient or len(tick_list) == 1:\n # This one is shaded\n plt.tripcolor(axials, radials, triangles, zvals, shading='gouraud',\n cmap=cmap, norm=cnorm)\n else:\n # this one is not shaded\n plt.tricontourf(axials, radials, triangles, zvals, levels=tick_list,\n 
cmap=cmap, norm=cnorm, extend='both')\n\n scalarmap = cmx.ScalarMappable(norm=cnorm, cmap=cmap)\n scalarmap.set_array([])\n cbar = plt.colorbar(scalarmap, orientation='vertical', ticks=tick_list)\n\n scibool = False\n if field[0] == 'e':\n # strain plotting, use scientific numbering\n scibool = True\n met_max = self.__metric_num(max(zvals), sci=scibool)\n met_min = self.__metric_num(min(zvals), sci=scibool)\n label = 'Max: %s\\nMin: %s' % (met_max, met_min)\n tick_list = [self.__metric_num(tick, sci=scibool) for tick in tick_list]\n cbar.ax.set_yticklabels(tick_list)\n cbar.ax.set_xlabel(label, labelpad=10, x=0, ha='left')\n cbar.ax.xaxis.set_label_position('top')\n\n # set the horizontal and vertical axes\n base_classes.plot_set_bounds(plt, axials, radials)\n\n # set units\n alist = self.__problem.fea.get_units(field, 'dist', 'time')\n [f_unit, d_unit, t_unit] = alist\n\n # set plot axes\n plot_title = ('Node %s%s\\nTime=%f%s' %\n (field, f_unit, self.__time, t_unit))\n if title != '':\n plot_title += '\\n%s' % title\n plt.title(plot_title)\n plt.xlabel('axial, y'+d_unit)\n plt.ylabel('radial, x'+d_unit)\n ax_.set_aspect('equal')\n if gmult != 1:\n ax_.xaxis.set_ticklabels([])\n ax_.yaxis.set_ticklabels([])\n base_classes.plot_finish(plt, fname, display)\n\n def eplot(self, field, fname='', display=True, levels=21,\n gmult=1.0, mode='avg', max_val=None, min_val=None, title=''):\n \"\"\"Plots element results.\n\n Args:\n field (str): results item to plot. Only stresses supported.\n Examples: 'Sx', 'Sxy', 'S1', 'Seqv' etc.\n fname (str): prefix of png file name, if writing an image\n display (bool): True = interactively show the plot\n levels (int): number of levels to use in the colorbar\n gmult (int): geometric multiplier on displacement of nodes\n displayed_node_loc = model_node_loc + gmult*node_displacement\n mode (str): the type of element result to plot\n\n - 'avg': integration points averaged to avg element result\n - 'max': max value of field in the integration points plotted\n - 'min': min value of field in the integration points plotted\n max_val (float or None): max value in the colorbar\n\n - None: max from selected data used\n - float: use the passed float\n min_val (float): min value in the colorbar\n\n - None: min from selected data used\n - float: use the passed float\n title (str): third line in the plot title\n \"\"\"\n # store the selected nodes and elements\n sel = {}\n sel['nodes'] = self.__problem.fea.view.nodes\n sel['elements'] = self.__problem.fea.view.elements\n sel['faces'] = self.__problem.fea.view.faces\n\n # sort nodes low to high so index is correct\n # we have index to id below so showing subsets works\n sel['nodes'] = list(sel['nodes'])\n sel['nodes'] = sorted(sel['nodes'], key=lambda k: k.id)\n\n # store results at nodes\n axials = []\n radials = []\n zvals = []\n id_to_ind = {}\n for node in sel['nodes']:\n id_to_ind[node.id] = len(axials)\n axi = node.y + gmult*self.__results[self.__time]['node'][node.id]['uy']\n rad = node.x + gmult*self.__results[self.__time]['node'][node.id]['ux']\n axials.append(axi)\n radials.append(rad)\n\n # make a list of triangles, given by indices, looping anticlockwise\n triangles = []\n mylist = []\n if len(sel['elements']) > 0:\n mylist = sel['elements']\n elif len(sel['faces']) > 0:\n mylist = sel['faces']\n for ele in mylist:\n val = self.__results[self.__time]['element'][ele.id][mode][field]\n tris = ele.get_tris() # list of triangle nodes defined by node id\n for tri in tris:\n zvals.append(val)\n for ind, nid in 
enumerate(tri):\n tri[ind] = id_to_ind[nid] # convert id to index\n triangles += tris\n\n # check to see if selected nodes and elements are\n # in the parent model's nodes and elements\n\n fig = plt.figure()\n ax_ = fig.add_subplot(111)\n\n # need to set tick list here\n vmin = min(zvals)\n vmax = max(zvals)\n stop_plot = False\n if max_val != None and min_val == None:\n if max_val < vmin:\n stop_plot = True\n print('Error:')\n print(' Only max was passed but it is < the data min!')\n print(' Pass a max_val that is > the data min of %f' % vmin)\n else:\n vmax = max_val\n elif min_val != None and max_val == None:\n if min_val > vmax:\n stop_plot = True\n print('Error:')\n print(' Only min was passed but it is > the data max!')\n print(' Pass a min_val that is < the data max of %f' % vmax)\n else:\n vmin = min_val\n elif max_val != None and min_val != None:\n if max_val < min_val:\n stop_plot = True\n print('Error:')\n print(' Min and max passed, but min > max!')\n print(' Pass a min_val that is < max_val')\n else:\n vmax = max_val\n vmin = min_val\n # exit if stop plot flag is on\n if stop_plot:\n return None\n\n tick_list = [vmin]\n if vmax != vmin:\n # we have a range of values we're plotting\n tick_list = linspace(vmin, vmax, levels+1)\n\n # code required for the colorbar, needs to go before plotting for cmap\n cnorm = colors.Normalize(vmin=vmin, vmax=vmax)\n cmap = colors.ListedColormap(['b', 'b']) # default to plot one val\n if vmax != vmin:\n # we have a range of values we're plotting\n cmap = plt.get_cmap(CMAP, levels)\n cmap.set_under('0.3', 0.8)\n cmap.set_over('0.7', 0.8)\n\n # plot using levels\n plt.tripcolor(axials, radials, triangles, zvals,\n shading='flat', cmap=cmap, norm=cnorm)\n\n scalarmap = cmx.ScalarMappable(norm=cnorm, cmap=cmap)\n scalarmap.set_array([])\n cbar = plt.colorbar(scalarmap, orientation='vertical', ticks=tick_list)\n scibool = False\n if field[0] == 'e':\n # strain plotting, use scientific numbering\n scibool = True\n met_max = self.__metric_num(max(zvals), sci=scibool)\n met_min = self.__metric_num(min(zvals), sci=scibool)\n label = 'Max: %s\\nMin: %s' % (met_max, met_min)\n tick_list = [self.__metric_num(tick, sci=scibool) for tick in tick_list]\n cbar.ax.set_yticklabels(tick_list)\n cbar.ax.set_xlabel(label, labelpad=10, x=0, ha='left')\n cbar.ax.xaxis.set_label_position('top')\n\n # set the horizontal and vertical axes\n base_classes.plot_set_bounds(plt, axials, radials)\n\n # set units\n alist = self.__problem.fea.get_units(field, 'dist', 'time')\n [f_unit, d_unit, t_unit] = alist\n\n # set plot axes\n plot_title = ('Element %s %s%s\\nTime=%f%s' %\n (mode, field, f_unit, self.__time, t_unit))\n if title != '':\n plot_title += '\\n%s' % title\n plt.title(plot_title)\n plt.xlabel('axial, y'+d_unit)\n plt.ylabel('radial, x'+d_unit)\n ax_.set_aspect('equal')\n if gmult != 1:\n ax_.xaxis.set_ticklabels([])\n ax_.yaxis.set_ticklabels([])\n base_classes.plot_finish(plt, fname, display)\n\n def set_time(self, time):\n \"\"\"Sets the time point we're looking at in the results file.\n\n Args:\n time (float): time we are setting\n \"\"\"\n if time in self.steps:\n self.__time = time\n print('Results file time set to: %f' % (self.__time))\n else:\n print('Time %f is not in the loaded times. 
Valid times are:')\n print(self.steps)\n\n\n def plot_gradient(self, start_point, end_point, field, fname='', display=True, title='', max_val=None, min_val=None, curve_fitting=True, n_poly=3, n_subpoints=500, legend=True):\n \"\"\"Create diagram with data projected onto line on the undeformed geometry.\n\n Args:\n start_point [float, float]: starting point of line. [x, y]\n end_point [float, float]: end point of line. Example: [x, y]\n field (str): results item to plot, examples: 'ux', 'ey', 'Seqv'\n fname (str): prefix of png file name, if writing an image\n display (bool): True = interactively show the plot\n title (str): third line in the plot title\n max_val (float or None): max value in the y-axis\n - None: max from selected data used\n - float: use the passed float\n min_val (float or None): min value in the y-axis\n - None: min from selected data used\n - float: use the passed float\n curve_fitting (bool): True = a curve is fitted to the gradient\n n_poly (int): numbers of polygons for fitting\n n_subpoints (int): numbers of points the line is subdivided into\n legend (bool): True = legend with fitted equation is shown\n \"\"\"\n\n # store the selected nodes and elements\n sel = {}\n sel['nodes'] = self.__problem.fea.view.nodes\n\n # sort nodes low to high so index is correct\n # we have index to id below so showing subsets works\n sel['nodes'] = list(sel['nodes'])\n sel['nodes'] = sorted(sel['nodes'], key=lambda k: k.id)\n\n # store results at nodes\n node_position = np.zeros((len(sel['nodes']),2))\n field_values = np.zeros(len(sel['nodes']))\n\n for idx, node in enumerate(sel['nodes']):\n\n node_position[idx] = [node.x, node.y]\n field_values[idx] = self.__results[self.__time]['node'][node.id][field]\n\n\n #create subpoints on line\n subpoints = np.zeros((n_subpoints, 3)) #[x, y, line position]\n\n subpoints[:,0] = np.linspace(start_point[0], end_point[0], n_subpoints)\n subpoints[:,1] = np.linspace(start_point[1], end_point[1], n_subpoints)\n subpoints[:,2] = np.arange(n_subpoints) / n_subpoints * np.sqrt(np.sum( (np.array(start_point) - np.array(end_point))**2))\n\n #calculate weighted field value for every subpoint\n wfield = np.zeros(n_subpoints)\n\n for idx in range(n_subpoints):\n\n #calculate inverse of distance from nodes to subpoints\n dist = np.sqrt(np.sum((node_position-subpoints[idx,0:2])**2,axis=1))\n\n #calculte weighted field value\n #dist[dist < 1E-10] = 1E-10\n #inv_dist = 1. 
/ dist**3\n #wfield[idx] = np.average(field_values, weights=inv_dist)\n\n #use nearest value\n wfield[idx] = field_values[min(range(len(dist)),key=dist.__getitem__)]\n\n\n #plot diagram\n\n fig = plt.figure(figsize=(10,6))\n ax_ = fig.add_subplot(111)\n\n plt.plot(subpoints[:,2], wfield, '-r', linewidth=2.5, label=field)\n\n if curve_fitting==True:\n #execute curve fitting if needed\n poly = np.polyfit(subpoints[:,2], wfield, n_poly)\n\n #string for equation of fitted function\n funcstring = [str(np.round(poly[i]))+u'*x^'+str(np.arange(n_poly,0,-1)[i]) for i in range(n_poly)]\n funcstring.append(str(np.round(poly[-1])))\n funcstring = '+'.join(funcstring)\n\n func = np.poly1d(poly)\n\n plt.plot(subpoints[:,2], func(subpoints[:,2]), '--k', linewidth=1.5, label=funcstring)\n\n\n # set units\n alist = self.__problem.fea.get_units(field, 'dist', 'time')\n [f_unit, d_unit, t_unit] = alist\n\n # set plot axes\n plot_title = ('Gradient %s%s\\nTime=%f%s' %(field, f_unit, self.__time, t_unit))\n if title != '':\n plot_title += '\\n%s' % title\n plt.title(plot_title)\n plt.xlabel('path position'+d_unit)\n plt.ylabel(field + ' ' +f_unit)\n\n #show legend if needed\n if legend == True:\n plt.legend()\n\n #set limits on y-axis\n if min_val!=None:\n plt.gca().set_ylim(bottom=min_val)\n if max_val!=None:\n plt.gca().set_ylim(top=max_val)\n\n plt.grid()\n base_classes.plot_finish(plt, fname, display)\n\n def get_relative_gradient(self, start_point, end_point, field, n_poly=3, n_subpoints=500):\n \"\"\"Calculte relative stress gradient (gradient/start_value)\n\n Args:\n start_point [(float), (float)]: starting point of line. [x, y]\n end_point [(float), (float)]: end point of line. Example: [x, y]\n field (str): results item to plot, examples: 'ux', 'ey', 'Seqv'\n\n Kargs:\n n_poly (int): numbers of polygons for fitting, min=2\n n_subpoints (int): numbers of points the line is subdivided into\n \"\"\"\n\n # store the selected nodes and elements\n sel = {}\n sel['nodes'] = self.__problem.fea.view.nodes\n\n # sort nodes low to high so index is correct\n # we have index to id below so showing subsets works\n sel['nodes'] = list(sel['nodes'])\n sel['nodes'] = sorted(sel['nodes'], key=lambda k: k.id)\n\n # store results at nodes\n node_position = np.zeros((len(sel['nodes']),2))\n field_values = np.zeros(len(sel['nodes']))\n\n for idx, node in enumerate(sel['nodes']):\n\n node_position[idx] = [node.x, node.y]\n field_values[idx] = self.__results[self.__time]['node'][node.id][field]\n\n\n #create subpoints on line\n subpoints = np.zeros((n_subpoints, 3)) #[x, y, line position]\n\n subpoints[:,0] = np.linspace(start_point[0], end_point[0], n_subpoints)\n subpoints[:,1] = np.linspace(start_point[1], end_point[1], n_subpoints)\n subpoints[:,2] = np.arange(n_subpoints) / n_subpoints * np.sqrt(np.sum( (np.array(start_point) - np.array(end_point))**2))\n\n #calculate weighted field value for every subpoint\n wfield = np.zeros(n_subpoints)\n\n for idx in range(n_subpoints):\n\n #calculate inverse of distance from nodes to subpoints\n dist = np.sqrt(np.sum((node_position-subpoints[idx,0:2])**2,axis=1))\n\n #use nearest value\n wfield[idx] = field_values[min(range(len(dist)),key=dist.__getitem__)]\n\n\n #curve fitting\n poly = np.polyfit(subpoints[:,2], wfield, n_poly)\n\n rel_grad = abs(poly[-2])/abs(poly[-1])\n\n return rel_grad\n\n\n\n\n\n @staticmethod\n def __utot(vals):\n \"\"\"Returns the total displacement distance, given [dx,dy,dz].\n\n Args:\n vals (list): [dx, dy, dz] list of displacements in x, y, and z 
axes\n\n Returns:\n res (float): displacement\n \"\"\"\n # computes sum of the squares\n res = [a**2 for a in vals]\n res = (sum(res))**0.5\n return res\n\n @staticmethod\n def __seqv(vals):\n \"\"\"Returns the Von Mises stress, which will be stored as 'Seqv'.\n\n Args:\n vals (list): list of six stresses [s11,s22,s33,s12,s13,s23]\n\n Returns:\n res (float): Von Mises stress\n \"\"\"\n [s11, s22, s33, s12, s13, s23] = vals\n aval = s11 - s22\n bval = s22 - s33\n cval = s33 - s11\n dval = s12**2 + s23**2 +s13**2\n res = (0.5*(aval**2 + bval**2 + cval**2 +6*dval))**0.5\n return res\n\n @staticmethod\n def __principals(vals):\n \"\"\"Returns principal stresses [S1,S2,S3].\n\n Args:\n vals (list): six stresses [s11,s22,s33,s12,s13,s23]\n\n Returns:\n res (list): principal stresses [S1,S2,S3] stresses are high-to-low\n \"\"\"\n # calculates and returns principal stresses, S1, S2, S3\n [s11, s22, s33, s12, s13, s23] = vals\n aval = 1\n bval = (s11 + s22 + s33)*-1.0\n cval = (s11*s22 + s11*s33 + s22*s33 - s12**2 - s13**2 - s23**2)\n dval = (s11*s22*s33 + 2*s12*s13*s23 - s11*(s23**2) - s22*(s13**2)\n - s33*(s12**2))*-1.0\n res = list(roots([aval, bval, cval, dval]))\n res = sorted(res, reverse=True)\n return res\n\n def __get_data_dict(self, time, type_str):\n \"\"\"Returns the data dict at the correct time for element or node.\n\n Args:\n time (float): None or the time we want, if None use current time\n type_str: 'element' or 'node'\n\n Returns:\n res (dict or None): dictionary with field values in it\n None if the time was invalid\n \"\"\"\n res = self.__results[self.__time][type_str]\n if time != None:\n if time not in self.steps:\n print('Error: passed time is not in steps!')\n print(' Pass a time in the steps:')\n print(self.steps)\n return None\n else:\n res = self.__results[time][type_str]\n return res\n\n def get_nmax(self, field, time=None):\n \"\"\"Returns the max value of node results field in selected nodes.\n\n Reports results for the current time.\n\n Args:\n field (str): results field, for example 'ux', 'ey', 'S1', 'Seqv'\n time (None or float): the time to query\n\n - None: uses the current time\n - float: uses the passed float time\n Returns:\n res (float): max value\n \"\"\"\n nodes = self.__problem.fea.view.nodes\n node_ids = [node.id for node in nodes]\n data_dict = self.__get_data_dict(time, 'node')\n if data_dict == None:\n return None\n ndicts = [data_dict[nid] for nid in node_ids]\n res = [ndict[field] for ndict in ndicts]\n res = max(res)\n return res\n\n def get_nmin(self, field, time=None):\n \"\"\"Returns the min value of node results field in selected nodes.\n\n Reports results for the current time.\n\n Args:\n field (str): results field, for example 'ux', 'ey', 'S1', 'Seqv'\n time (None or float): the time to query\n\n - None: uses the current time\n - float: uses the passed float time\n Returns:\n res (float): min value\n \"\"\"\n nodes = self.__problem.fea.view.nodes\n node_ids = [node.id for node in nodes]\n data_dict = self.__get_data_dict(time, 'node')\n if data_dict == None:\n return None\n ndicts = [data_dict[nid] for nid in node_ids]\n res = [ndict[field] for ndict in ndicts]\n res = min(res)\n return res\n\n def get_nval(self, node, field, time=None):\n \"\"\"Returns the field result value under node.\n\n Result will be returned whether or not passed node is selected.\n\n Args:\n node (str or Node): node we are asking about\n field (str): the results item we want: 'ux', 'Sy', 'Seqv', 'fx'\n time (None or float): the time to query\n\n - None: uses the current 
time\n - float: uses the passed float time\n Returns:\n res (float or None): float value if field exists, None otherwise\n \"\"\"\n items = self.__problem.fea.get_item(node)\n if len(items) == 1:\n if isinstance(items[0], mesh.Node):\n nnum = items[0].id\n data_dict = self.__get_data_dict(time, 'node')\n if data_dict == None:\n return None\n ndict = data_dict[nnum]\n if field in ndict:\n res = ndict[field]\n return res\n else:\n print('Passed field is not in the results!')\n return None\n else:\n print('You did not pass in a node!')\n print('A single node or string node name must be passed in!')\n return None\n else:\n print('A single node or string node name must be passed in!')\n return None\n\n def get_fsum(self, item):\n \"\"\"Returns the force sum on nodes under a given point or line.\n\n Reports results for the current time.\n\n Args:\n item (Point or SignLine): item that has reaction forces on its nodes\n\n Returns:\n list: [fx, fy, fz] reaction forces in each axis, force units\n \"\"\"\n (fxx, fyy, fzz) = ([], [], [])\n nodes = item.nodes\n nodes = [n.id for n in nodes]\n for node in nodes:\n f_x = self.__results[self.__time]['node'][node]['fx']\n f_y = self.__results[self.__time]['node'][node]['fy']\n f_z = self.__results[self.__time]['node'][node]['fz']\n if f_x != 0 or f_y != 0 or f_z != 0:\n fxx.append(f_x)\n fyy.append(f_y)\n fzz.append(f_z)\n fxx = sum(fxx)\n fyy = sum(fyy)\n fzz = sum(fzz)\n return [fxx, fyy, fzz]\n\n def get_emax(self, field, time=None, mode='avg'):\n \"\"\"Returns the max results field value of selected elements at curent time.\n\n Args:\n field (str): results field, stresses supported 'S1', 'Sx', etc.\n time (None or float): the time to query\n\n - None: uses the current time\n - float: uses the passed float time\n mode (str): type of element result to give back\n\n - 'max': for each element only use the max value of field over\n all of its integration points\n - 'min': for each element only use the min value of field over\n all of its integration points\n - 'avg': for each element only use an average of all integration\n points in the eleemnt. Principal streses and Seqv are\n calculated after averaging 6 stress components.\n Returns:\n res (float): max value\n \"\"\"\n res = []\n elements = self.__problem.fea.view.elements\n data_dict = self.__get_data_dict(time, 'element')\n if data_dict == None:\n return None\n for element in elements:\n enum = element.id\n edict = data_dict[enum][mode]\n res.append(edict[field])\n res = max(res)\n return res\n\n def get_emin(self, field, time=None, mode='avg'):\n \"\"\"Returns the min results field value of selected elements at curent time.\n\n Args:\n field (str): results field, stresses supported 'S1', 'Sx', etc.\n time (None or float): the time to query\n\n - None: uses the current time\n - float: uses the passed float time\n mode (str): type of element result to give back\n\n - 'max': for each element only use the max value of field over\n all of its integration points\n - 'min': for each element only use the min value of field over\n all of its integration points\n - 'avg': for each element only use an average of all integration\n points in the eleemnt. 
Principal streses and Seqv are\n calculated after averaging 6 stress components.\n Returns:\n res (float): min value\n \"\"\"\n res = []\n elements = self.__problem.fea.view.elements\n data_dict = self.__get_data_dict(time, 'element')\n if data_dict == None:\n return None\n for element in elements:\n enum = element.id\n edict = data_dict[enum][mode]\n res.append(edict[field])\n res = min(res)\n return res\n\n def get_eval(self, element, field, time=None, mode='avg'):\n \"\"\"Returns the field result value under element.\n\n Result will be returned whether or not passed element is selected.\n\n Args:\n element (str or Element): element we are asking about\n field (str): the results item we want: 'Sy', 'Seqv'\n mode (str): the type of element result to get\n\n - 'avg': integration points averaged to avg element result\n - 'max': max value of field in the integration points plotted\n - 'min': min value of field in the integration points plotted\n Returns:\n res (float or None): float value if field exists, None otherwise\n \"\"\"\n items = self.__problem.fea.get_item(element)\n if len(items) == 1:\n if isinstance(items[0], mesh.Element):\n enum = items[0].id\n data_dict = self.__get_data_dict(time, 'element')\n if data_dict == None:\n return None\n edict = data_dict[enum][mode]\n if field in edict:\n res = edict[field]\n return res\n else:\n print('Passed field is not in the results!')\n return None\n else:\n print('You did not pass in a element!')\n print('A single element or string element name must be given!')\n return None\n else:\n print('A single element or string element name must be given!')\n return None\n\n @staticmethod\n def __get_vals(fstr, line):\n \"\"\"Returns a list of typed items based on an input format string.\n\n Args:\n fst (str): C format string, commas separate fields\n line (str): line string to parse\n\n Returns:\n res (list): list of typed items extracted from the line\n \"\"\"\n res = []\n fstr = fstr.split(',')\n thestr = str(line)\n for item in fstr:\n if item[0] == \"'\":\n # strip off the char quaotes\n item = item[1:-1]\n # this is a string entry, grab the val out of the line\n ind = len(item)\n fwd = thestr[:ind]\n thestr = thestr[ind:]\n res.append(fwd)\n else:\n # format is: 1X, A66, 5E12.5, I12\n # 1X is number of spaces\n (mult, ctype) = (1, None)\n m_pat = re.compile(r'^\\d+') # find multiplier\n c_pat = re.compile(r'[XIEA]') # find character\n if m_pat.findall(item) != []:\n mult = int(m_pat.findall(item)[0])\n ctype = c_pat.findall(item)[0]\n if ctype == 'X':\n # we are dealing with spaces, just reduce the line size\n thestr = thestr[mult:]\n elif ctype == 'A':\n # character string only, add it to results\n fwd = thestr[:mult].strip()\n thestr = thestr[mult:]\n res.append(fwd)\n else:\n # IE, split line into m pieces\n w_pat = re.compile(r'[IE](\\d+)') # find the num after char\n width = int(w_pat.findall(item)[0])\n while mult > 0:\n # only add items if we have enough line to look at\n if width <= len(thestr):\n substr = thestr[:width]\n thestr = thestr[width:]\n substr = substr.strip() # remove space padding\n if ctype == 'I':\n substr = int(substr)\n elif ctype == 'E':\n substr = float(substr)\n res.append(substr)\n mult -= 1\n return res\n\n @staticmethod\n def __get_first_dataline(infile):\n \"\"\"\n Reads infile until a line with data is found, then returns it\n A line that starts with ' -1' has data\n \"\"\"\n while True:\n line = infile.readline()\n if line[:3] == ' -1':\n return line\n\n def __store_time(self, time):\n \"\"\"Stores the passed 
time in the results steps\"\"\"\n if time not in self.__steps:\n self.__steps.append(time)\n if time not in self.__results:\n new_dict = {'node': collections.defaultdict(dict),\n 'element': collections.defaultdict(dict)}\n self.__results[time] = new_dict\n\n def __modearr_estrsresults(self, infile, line):\n \"\"\"Returns an array of line, mode, rfstr, time\"\"\"\n words = line.strip().split()\n # add time if not present\n time = float(words[-1])\n self.__store_time(time)\n # set mode\n rfstr = \"I10,2X,I2,6E14.2\"\n mode = 'stress'\n infile.readline()\n line = infile.readline()\n return [line, mode, rfstr, time]\n\n def __modearr_nresults(self, infile):\n \"\"\"Returns an array of line, mode, rfstr, time\"\"\"\n line = infile.readline()\n fstr = \"1X,' 100','C',6A1,E12.5,I12,20A1,I2,I5,10A1,I2\"\n tmp = self.__get_vals(fstr, line)\n #[key, code, setname, value, numnod, text, ictype, numstp, analys, format_]\n time, format_ = tmp[3], tmp[9]\n\n # set results format to short, long or binary\n # only short and long are parsed so far\n if format_ == 0:\n rfstr = \"1X,I2,I5,6E12.5\"\n elif format_ == 1:\n rfstr = \"1X,I2,I10,6E12.5\"\n elif format_ == 2:\n # binary\n pass\n\n # set the time\n self.__store_time(time)\n\n # get the name to determine if stress or displ\n line = infile.readline()\n fstr = \"1X,I2,2X,8A1,2I5\"\n # [key, name, ncomps, irtype]\n name = self.__get_vals(fstr, line)[1]\n\n mode_by_name = {'DISP': 'displ',\n 'STRESS': 'stress',\n 'TOSTRAIN': 'strain',\n 'FORC': 'force',\n 'ERROR': 'error'}\n mode = mode_by_name[name]\n print('Reading '+mode+' storing: '+\n ','.join(base_classes.RESFIELDS[mode]))\n\n line = self.__get_first_dataline(infile)\n return [line, mode, rfstr, time]\n\n def _save_node_displ(self, line, rfstr, time, mode='displ'):\n \"\"\"Saves node displacement\"\"\"\n node, ux_, uy_, uz_ = self.__get_vals(rfstr, line)[1:]\n labs = base_classes.RESFIELDS[mode]\n vals = [ux_, uy_, uz_]\n utot = self.__utot(vals)\n vals.append(utot)\n adict = self.__results[time]['node'][node]\n for (label, val) in zip(labs, vals):\n adict[label] = val\n\n def _save_node_stress(self, line, rfstr, time, mode='stress'):\n \"\"\"Saves node stress\"\"\"\n tmp = self.__get_vals(rfstr, line)\n # [key, node, sx, sy, sz, sxy, syz, szx]\n node, sxx, syy, szz, sxy, syz, szx = tmp[1:]\n labs = base_classes.RESFIELDS[mode]\n vals = [sxx, syy, szz, sxy, syz, szx]\n seqv = self.__seqv(vals)\n s_1, s_2, s_3 = self.__principals(vals)\n vals.append(seqv)\n vals += [s_1, s_2, s_3]\n adict = self.__results[time]['node'][node]\n for (label, val) in zip(labs, vals):\n adict[label] = val\n\n def _save_node_strain(self, line, rfstr, time, mode='strain'):\n \"\"\"Saves node strain\"\"\"\n tmp = self.__get_vals(rfstr, line)\n # [key, node, ex, ey, ez, exy, eyz, ezx]\n node, exx, eyy, ezz, exy, eyz, ezx = tmp[1:]\n labs = base_classes.RESFIELDS[mode]\n vals = [exx, eyy, ezz, exy, eyz, ezx]\n eeqv = self.__seqv(vals)\n e_1, e_2, e_3 = self.__principals(vals)\n vals.append(eeqv)\n vals += [e_1, e_2, e_3]\n adict = self.__results[time]['node'][node]\n for (label, val) in zip(labs, vals):\n adict[label] = val\n\n def _save_node_force(self, line, rfstr, time, mode='force'):\n \"\"\"Saves node force\"\"\"\n # [key, node, fx, fy, fz]\n node, f_x, f_y, f_z = self.__get_vals(rfstr, line)[1:]\n labs = base_classes.RESFIELDS[mode]\n vals = [f_x, f_y, f_z]\n adict = self.__results[time]['node'][node]\n for (label, val) in zip(labs, vals):\n adict[label] = val\n\n def _save_node_error(self, line, rfstr, time, 
mode='error'):\n \"\"\"Saves node error\"\"\"\n # [key, node, error]\n node, error = self.__get_vals(rfstr, line)[1:]\n labs = base_classes.RESFIELDS[mode]\n vals = [error]\n adict = self.__results[time]['node'][node]\n for (label, val) in zip(labs, vals):\n adict[label] = val\n\n def _save_ele_stress(self, line, rfstr, time,\n mode='stress'):\n \"\"\"Saves element integration point stresses\"\"\"\n labels = ['Sx', 'Sy', 'Sz', 'Sxy', 'Sxz', 'Syz']\n vals = self.__get_vals(rfstr, line)\n # element_number, integration_pt_number\n enum, ipnum = vals[0], vals[1]\n stress_vals = vals[2:]\n\n adict = {}\n for (label, val) in zip(labels, stress_vals):\n adict[label] = val\n if enum not in self.__results[time]['element']:\n start_val = {'ipoints': {},\n 'avg': {},\n 'min': {},\n 'max': {}}\n self.__results[time]['element'][enum] = start_val\n # each line is an integration point result\n self.__results[time]['element'][enum]['ipoints'][ipnum] = adict\n\n def check_ccx_version(self, timeout=1):\n \"\"\"Raises an exception of the calculix ccx version is too old\"\"\"\n runstr = \"%s -version\" % (environment.CCX)\n try:\n output_str = subprocess.check_output(runstr,\n timeout=timeout,\n shell=True)\n except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as ex:\n output_str = ex.output\n output_str = str(output_str, 'utf-8')\n matches = re.findall(r'\\d+\\.\\d+', output_str)\n version_number = matches[-1]\n print('Using Calculix ccx version=%s '\n '(trailing characters like the p in 2.8p are omitted)'\n % version_number)\n major_version, minor_version = [int(f) for f\n in version_number.split('.')]\n if major_version <= 2 and minor_version <= 8:\n raise Exception('Your version of calculix ccx is too old! '\n 'Please update it to version >=2.8 with '\n 'the command:\\npycalculix-add-feaprograms')\n version = float(output_str.strip().split()[-1])\n # extract version with regex\n\n def __read_frd(self):\n \"\"\"\n Reads a ccx frd results file which contains nodal results.\n The file format is desribed in the cgx docs\n \"\"\"\n fname = self.__problem.fname+'.frd'\n if not os.path.isfile(fname):\n print(\"Error: %s file not found\" % fname)\n return\n # frd reading uses formatting from ccx 2.8 or higher so\n # throw an exception if our version is too old\n self.check_ccx_version(timeout=1)\n infile = open(fname, 'r')\n print('Loading nodal results from file: '+fname)\n mode = None\n time = 0.0\n rfstr = ''\n while True:\n line = infile.readline()\n if not line:\n break\n\n # set the results mode\n if '1PSTEP' in line:\n # we are in a results block\n arr = self.__modearr_nresults(infile)\n line, mode, rfstr, time = arr\n\n # set mode to none if we hit the end of a resuls block\n if line[:3] == ' -3':\n mode = None\n if not mode:\n continue\n\n node_data_saver = getattr(self, '_save_node_' + mode)\n node_data_saver(line, rfstr, time)\n\n infile.close()\n print('The following times have been read:')\n print(self.__steps)\n print('Nodal results from file: %s have been read.' 
% fname)\n self.set_time(self.__steps[0])\n\n def __read_dat(self):\n \"\"\"\n Reads ccx dat results file.\n It has element integration point results.\n \"\"\"\n fname = self.__problem.fname+'.dat'\n if not os.path.isfile(fname):\n print('Error: %s file not found' % fname)\n return\n infile = open(fname, 'r')\n print('Loading element results from file: '+fname)\n mode = None\n rfstr = ''\n time = 0.0\n while True:\n line = infile.readline()\n if not line:\n break\n\n # check for stress, we skip down to the line data when\n # we call __modearr_estrsresults\n if 'stress' in line:\n arr = self.__modearr_estrsresults(infile, line)\n line, mode, rfstr, time = arr\n\n # reset the read type if we hit a blank line\n if line.strip() == '':\n mode = None\n if not mode:\n continue\n\n # store stress results\n self._save_ele_stress(line, rfstr, time)\n\n infile.close()\n\n # loop over all element results, calculating avg element result\n # by averaging integration point vals\n for time in self.__steps:\n for edict in self.__results[time]['element'].values():\n ipoints = edict['ipoints'].values()\n stress_types = ['Sx', 'Sy', 'Sz', 'Sxy', 'Sxz', 'Syz']\n strslist_by_strstype = collections.defaultdict(list)\n # set stress values in max, min, avg locations\n # of non-summary stress components\n for stress_type in stress_types:\n stress_vals = [ipt[stress_type] for ipt in ipoints]\n stress_avg = sum(stress_vals)/len(stress_vals)\n stress_max = max(stress_vals)\n stress_min = min(stress_vals)\n edict['avg'][stress_type] = stress_avg\n edict['max'][stress_type] = stress_max\n edict['min'][stress_type] = stress_min\n strslist_by_strstype[stress_type] = stress_vals\n # for each element, calc Seqv, S1, S2, S3\n # at each integration point\n for ipt in ipoints:\n stress_vals = [ipt[stress_type] for stress_type\n in stress_types]\n seqv = self.__seqv(stress_vals)\n [s_1, s_2, s_3] = self.__principals(stress_vals)\n strslist_by_strstype['Seqv'].append(seqv)\n strslist_by_strstype['S1'].append(s_1)\n strslist_by_strstype['S2'].append(s_2)\n strslist_by_strstype['S3'].append(s_3)\n # now at the element level, store the avg, max, and min\n # of the Seqv and principal stresses we found at\n # integration points\n for stress_type in ['Seqv', 'S1', 'S2', 'S3']:\n stress_vals = strslist_by_strstype[stress_type]\n stress_avg = sum(stress_vals)/len(stress_vals)\n stress_max = max(stress_vals)\n stress_min = min(stress_vals)\n edict['avg'][stress_type] = stress_avg\n edict['max'][stress_type] = stress_max\n edict['min'][stress_type] = stress_min\n\n\n print('The following times have been read:')\n print(self.__steps)\n print('Element results from file: %s have been read.' % fname)\n"
]
| [
[
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.tricontourf",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.tripcolor",
"numpy.arange",
"numpy.polyfit",
"numpy.core.function_base.linspace",
"numpy.poly1d",
"matplotlib.pyplot.gca",
"numpy.array",
"numpy.zeros",
"numpy.round",
"matplotlib.pyplot.title",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.figure",
"numpy.lib.polynomial.roots",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.sum",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.ylabel",
"numpy.linspace"
]
]
|
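The results-reader code in the entry above derives the Von Mises stress and the principal stresses from the six stress components, solving the characteristic cubic with numpy.roots (one of the APIs listed for this file). The self-contained sketch below reproduces just that arithmetic; the function names are illustrative, and the real part is taken because a symmetric stress tensor always has real eigenvalues.

```python
import numpy as np

def von_mises(s):
    """Von Mises stress from [s11, s22, s33, s12, s13, s23]."""
    s11, s22, s33, s12, s13, s23 = s
    return (0.5 * ((s11 - s22)**2 + (s22 - s33)**2 + (s33 - s11)**2
                   + 6.0 * (s12**2 + s13**2 + s23**2)))**0.5

def principal_stresses(s):
    """Principal stresses [S1, S2, S3], sorted high to low.

    Roots of the characteristic cubic det(sigma - lam*I) = 0,
    solved with numpy.roots as in the original helper.
    """
    s11, s22, s33, s12, s13, s23 = s
    b = -(s11 + s22 + s33)
    c = s11*s22 + s11*s33 + s22*s33 - s12**2 - s13**2 - s23**2
    d = -(s11*s22*s33 + 2*s12*s13*s23
          - s11*s23**2 - s22*s13**2 - s33*s12**2)
    return sorted(np.roots([1.0, b, c, d]).real, reverse=True)

# quick check: uniaxial tension of 100 along x
print(von_mises([100, 0, 0, 0, 0, 0]))           # 100.0
print(principal_stresses([100, 0, 0, 0, 0, 0]))  # [100.0, 0.0, 0.0]
```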
JerryX1110/VFS | [
"22b915318935f459c9ee2d854d741b3f01a2ce9a"
]
| [
"mmaction/models/backbones/resnet3d_slowfast.py"
]
| [
"import torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, kaiming_init\nfrom mmcv.runner import _load_checkpoint, load_checkpoint\nfrom mmcv.utils import print_log\n\nfrom ...utils import get_root_logger\nfrom ..registry import BACKBONES\nfrom .resnet3d import ResNet3d\n\n\nclass ResNet3dPathway(ResNet3d):\n \"\"\"A pathway of Slowfast based on ResNet3d.\n\n Args:\n *args (arguments): Arguments same as :class:``ResNet3d``.\n lateral (bool): Determines whether to enable the lateral connection\n from another pathway. Default: False.\n speed_ratio (int): Speed ratio indicating the ratio between time\n dimension of the fast and slow pathway, corresponding to the\n ``alpha`` in the paper. Default: 8.\n channel_ratio (int): Reduce the channel number of fast pathway\n by ``channel_ratio``, corresponding to ``beta`` in the paper.\n Default: 8.\n fusion_kernel (int): The kernel size of lateral fusion.\n Default: 5.\n **kwargs (keyword arguments): Keywork arguments for ResNet3d.\n \"\"\"\n\n def __init__(self,\n *args,\n lateral=False,\n speed_ratio=8,\n channel_ratio=8,\n fusion_kernel=5,\n **kwargs):\n self.lateral = lateral\n self.speed_ratio = speed_ratio\n self.channel_ratio = channel_ratio\n self.fusion_kernel = fusion_kernel\n super().__init__(*args, **kwargs)\n self.inplanes = self.base_channels\n if self.lateral:\n self.conv1_lateral = ConvModule(\n self.inplanes // self.channel_ratio,\n # https://arxiv.org/abs/1812.03982, the\n # third type of lateral connection has out_channel:\n # 2 * \\beta * C\n self.inplanes * 2 // self.channel_ratio,\n kernel_size=(fusion_kernel, 1, 1),\n stride=(self.speed_ratio, 1, 1),\n padding=((fusion_kernel - 1) // 2, 0, 0),\n bias=False,\n conv_cfg=self.conv_cfg,\n norm_cfg=None,\n act_cfg=None)\n\n self.lateral_connections = []\n for i in range(len(self.stage_blocks)):\n planes = self.base_channels * 2**i\n self.inplanes = planes * self.block.expansion\n\n if lateral and i != self.num_stages - 1:\n # no lateral connection needed in final stage\n lateral_name = f'layer{(i + 1)}_lateral'\n setattr(\n self, lateral_name,\n ConvModule(\n self.inplanes // self.channel_ratio,\n self.inplanes * 2 // self.channel_ratio,\n kernel_size=(fusion_kernel, 1, 1),\n stride=(self.speed_ratio, 1, 1),\n padding=((fusion_kernel - 1) // 2, 0, 0),\n bias=False,\n conv_cfg=self.conv_cfg,\n norm_cfg=None,\n act_cfg=None))\n self.lateral_connections.append(lateral_name)\n\n def make_res_layer(self,\n block,\n inplanes,\n planes,\n blocks,\n spatial_stride=1,\n temporal_stride=1,\n dilation=1,\n style='pytorch',\n inflate=1,\n inflate_style='3x1x1',\n non_local=0,\n non_local_cfg=dict(),\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=None,\n with_cp=False):\n \"\"\"Build residual layer for Slowfast.\n\n Args:\n block (nn.Module): Residual module to be built.\n inplanes (int): Number of channels for the input\n feature in each block.\n planes (int): Number of channels for the output\n feature in each block.\n blocks (int): Number of residual blocks.\n spatial_stride (int | Sequence[int]): Spatial strides\n in residual and conv layers. Default: 1.\n temporal_stride (int | Sequence[int]): Temporal strides in\n residual and conv layers. Default: 1.\n dilation (int): Spacing between kernel elements. Default: 1.\n style (str): ``pytorch`` or ``caffe``. 
If set to ``pytorch``,\n the stride-two layer is the 3x3 conv layer,\n otherwise the stride-two layer is the first 1x1 conv layer.\n Default: ``pytorch``.\n inflate (int | Sequence[int]): Determine whether to inflate\n for each block. Default: 1.\n inflate_style (str): ``3x1x1`` or ``1x1x1``. which determines\n the kernel sizes and padding strides for conv1 and\n conv2 in each block. Default: ``3x1x1``.\n non_local (int | Sequence[int]): Determine whether to apply\n non-local module in the corresponding block of each stages.\n Default: 0.\n non_local_cfg (dict): Config for non-local module.\n Default: ``dict()``.\n conv_cfg (dict): Config for conv layers. Default: None.\n norm_cfg (dict): Config for norm layers. Default: None.\n act_cfg (dict): Config for activate layers. Default: None.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save\n some memory while slowing down the training speed.\n Default: False.\n\n Returns:\n nn.Module: A residual layer for the given config.\n \"\"\"\n inflate = inflate if not isinstance(inflate,\n int) else (inflate, ) * blocks\n non_local = non_local if not isinstance(\n non_local, int) else (non_local, ) * blocks\n assert len(inflate) == blocks and len(non_local) == blocks\n if self.lateral:\n lateral_inplanes = inplanes * 2 // self.channel_ratio\n else:\n lateral_inplanes = 0\n if (spatial_stride != 1\n or (inplanes + lateral_inplanes) != planes * block.expansion):\n downsample = ConvModule(\n inplanes + lateral_inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=(temporal_stride, spatial_stride, spatial_stride),\n bias=False,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n else:\n downsample = None\n\n layers = []\n layers.append(\n block(\n inplanes + lateral_inplanes,\n planes,\n spatial_stride,\n temporal_stride,\n dilation,\n downsample,\n style=style,\n inflate=(inflate[0] == 1),\n inflate_style=inflate_style,\n non_local=(non_local[0] == 1),\n non_local_cfg=non_local_cfg,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n with_cp=with_cp))\n inplanes = planes * block.expansion\n\n for i in range(1, blocks):\n layers.append(\n block(\n inplanes,\n planes,\n 1,\n 1,\n dilation,\n style=style,\n inflate=(inflate[i] == 1),\n inflate_style=inflate_style,\n non_local=(non_local[i] == 1),\n non_local_cfg=non_local_cfg,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n with_cp=with_cp))\n\n return nn.Sequential(*layers)\n\n def inflate_weights(self, logger):\n \"\"\"Inflate the resnet2d parameters to resnet3d pathway.\n\n The differences between resnet3d and resnet2d mainly lie in an extra\n axis of conv kernel. To utilize the pretrained parameters in 2d model,\n the weight of conv2d models should be inflated to fit in the shapes of\n the 3d counterpart. 
For pathway the ``lateral_connection`` part should\n not be inflated from 2d weights.\n\n Args:\n logger (logging.Logger): The logger used to print\n debugging infomation.\n \"\"\"\n\n state_dict_r2d = _load_checkpoint(self.pretrained)\n if 'state_dict' in state_dict_r2d:\n state_dict_r2d = state_dict_r2d['state_dict']\n\n inflated_param_names = []\n for name, module in self.named_modules():\n if 'lateral' in name:\n continue\n if isinstance(module, ConvModule):\n # we use a ConvModule to wrap conv+bn+relu layers, thus the\n # name mapping is needed\n if 'downsample' in name:\n # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0\n original_conv_name = name + '.0'\n # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1\n original_bn_name = name + '.1'\n else:\n # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}\n original_conv_name = name\n # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}\n original_bn_name = name.replace('conv', 'bn')\n self._inflate_conv_params(module.conv, state_dict_r2d,\n original_conv_name,\n inflated_param_names)\n self._inflate_bn_params(module.bn, state_dict_r2d,\n original_bn_name, inflated_param_names)\n\n # check if any parameters in the 2d checkpoint are not loaded\n remaining_names = set(\n state_dict_r2d.keys()) - set(inflated_param_names)\n if remaining_names:\n logger.info(f'These parameters in the 2d checkpoint are not loaded'\n f': {remaining_names}')\n\n def _inflate_conv_params(self, conv3d, state_dict_2d, module_name_2d,\n inflated_param_names):\n \"\"\"Inflate a conv module from 2d to 3d.\n\n The differences of conv modules betweene 2d and 3d in Pathway\n mainly lie in the inplanes due to lateral connections. To fit the\n shapes of the lateral connection counterpart, it will expand\n parameters by concatting conv2d parameters and extra zero paddings.\n\n Args:\n conv3d (nn.Module): The destination conv3d module.\n state_dict_2d (OrderedDict): The state dict of pretrained 2d model.\n module_name_2d (str): The name of corresponding conv module in the\n 2d model.\n inflated_param_names (list[str]): List of parameters that have been\n inflated.\n \"\"\"\n weight_2d_name = module_name_2d + '.weight'\n conv2d_weight = state_dict_2d[weight_2d_name]\n old_shape = conv2d_weight.shape\n new_shape = conv3d.weight.data.shape\n kernel_t = new_shape[2]\n if new_shape[1] != old_shape[1]:\n # Inplanes may be different due to lateral connections\n new_channels = new_shape[1] - old_shape[1]\n pad_shape = old_shape\n pad_shape = pad_shape[:1] + (new_channels, ) + pad_shape[2:]\n # Expand parameters by concat extra channels\n conv2d_weight = torch.cat(\n (conv2d_weight,\n torch.zeros(pad_shape).type_as(conv2d_weight).to(\n conv2d_weight.device)),\n dim=1)\n new_weight = conv2d_weight.data.unsqueeze(2).expand_as(\n conv3d.weight) / kernel_t\n conv3d.weight.data.copy_(new_weight)\n inflated_param_names.append(weight_2d_name)\n\n if getattr(conv3d, 'bias') is not None:\n bias_2d_name = module_name_2d + '.bias'\n conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])\n inflated_param_names.append(bias_2d_name)\n\n def _freeze_stages(self):\n \"\"\"Prevent all the parameters from being optimized before\n `self.frozen_stages`.\"\"\"\n if self.frozen_stages >= 0:\n self.conv1.eval()\n for param in self.conv1.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n if (i != len(self.res_layers) and self.lateral):\n # No fusion needed in 
the final stage\n lateral_name = self.lateral_connections[i - 1]\n conv_lateral = getattr(self, lateral_name)\n conv_lateral.eval()\n for param in conv_lateral.parameters():\n param.requires_grad = False\n\n def init_weights(self):\n \"\"\"Initiate the parameters either from existing checkpoint or from\n scratch.\"\"\"\n # Override the init_weights of i3d\n super().init_weights()\n for module_name in self.lateral_connections:\n layer = getattr(self, module_name)\n for m in layer.modules():\n if isinstance(m, (nn.Conv3d, nn.Conv2d)):\n kaiming_init(m)\n\n\npathway_cfg = {\n 'resnet3d': ResNet3dPathway,\n # TODO: BNInceptionPathway\n}\n\n\ndef build_pathway(cfg, *args, **kwargs):\n \"\"\"Build pathway.\n\n Args:\n cfg (None or dict): cfg should contain:\n - type (str): identify conv layer type.\n\n Returns:\n nn.Module: Created pathway.\n \"\"\"\n if not (isinstance(cfg, dict) and 'type' in cfg):\n raise TypeError('cfg must be a dict containing the key \"type\"')\n cfg_ = cfg.copy()\n\n pathway_type = cfg_.pop('type')\n if pathway_type not in pathway_cfg:\n raise KeyError(f'Unrecognized pathway type {pathway_type}')\n else:\n pathway_cls = pathway_cfg[pathway_type]\n pathway = pathway_cls(*args, **kwargs, **cfg_)\n\n return pathway\n\n\[email protected]_module()\nclass ResNet3dSlowFast(nn.Module):\n \"\"\"Slowfast backbone.\n\n This module is proposed in `SlowFast Networks for Video Recognition\n <https://arxiv.org/abs/1812.03982>`_\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n pretrained (str): The file path to a pretrained model.\n resample_rate (int): A large temporal stride ``resample_rate``\n on input frames, corresponding to the :math:`\\\\tau` in the paper.\n i.e., it processes only one out of ``resample_rate`` frames.\n Default: 16.\n speed_ratio (int): Speed ratio indicating the ratio between time\n dimension of the fast and slow pathway, corresponding to the\n :math:`\\\\alpha` in the paper. Default: 8.\n channel_ratio (int): Reduce the channel number of fast pathway\n by ``channel_ratio``, corresponding to :math:`\\\\beta` in the paper.\n Default: 8.\n slow_pathway (dict): Configuration of slow branch, should contain\n necessary arguments for building the specific type of pathway\n and:\n type (str): type of backbone the pathway bases on.\n lateral (bool): determine whether to build lateral connection\n for the pathway.Default:\n\n .. code-block:: Python\n\n dict(type='ResNetPathway',\n lateral=True, depth=50, pretrained=None,\n conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1),\n conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1))\n\n fast_pathway (dict): Configuration of fast branch, similar to\n `slow_pathway`. Default:\n\n .. 
code-block:: Python\n\n dict(type='ResNetPathway',\n lateral=False, depth=50, pretrained=None, base_channels=8,\n conv1_kernel=(5, 7, 7), conv1_stride_t=1, pool1_stride_t=1)\n \"\"\"\n\n def __init__(self,\n pretrained,\n resample_rate=8,\n speed_ratio=8,\n channel_ratio=8,\n slow_pathway=dict(\n type='resnet3d',\n depth=50,\n pretrained=None,\n lateral=True,\n conv1_kernel=(1, 7, 7),\n dilations=(1, 1, 1, 1),\n conv1_stride_t=1,\n pool1_stride_t=1,\n inflate=(0, 0, 1, 1)),\n fast_pathway=dict(\n type='resnet3d',\n depth=50,\n pretrained=None,\n lateral=False,\n base_channels=8,\n conv1_kernel=(5, 7, 7),\n conv1_stride_t=1,\n pool1_stride_t=1)):\n super().__init__()\n self.pretrained = pretrained\n self.resample_rate = resample_rate\n self.speed_ratio = speed_ratio\n self.channel_ratio = channel_ratio\n\n if slow_pathway['lateral']:\n slow_pathway['speed_ratio'] = speed_ratio\n slow_pathway['channel_ratio'] = channel_ratio\n\n self.slow_path = build_pathway(slow_pathway)\n self.fast_path = build_pathway(fast_pathway)\n\n def init_weights(self):\n \"\"\"Initiate the parameters either from existing checkpoint or from\n scratch.\"\"\"\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n msg = f'load model from: {self.pretrained}'\n print_log(msg, logger=logger)\n # Directly load 3D model.\n load_checkpoint(self, self.pretrained, strict=True, logger=logger)\n elif self.pretrained is None:\n # Init two branch seperately.\n self.fast_path.init_weights()\n self.slow_path.init_weights()\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n \"\"\"Defines the computation performed at every call.\n\n Args:\n x (torch.Tensor): The input data.\n\n Returns:\n tuple[torch.Tensor]: The feature of the input\n samples extracted by the backbone.\n \"\"\"\n x_slow = x[:, :, ::self.resample_rate, :, :]\n x_slow = self.slow_path.conv1(x_slow)\n x_slow = self.slow_path.maxpool(x_slow)\n\n x_fast = x[:, :, ::self.resample_rate // self.speed_ratio, :, :]\n x_fast = self.fast_path.conv1(x_fast)\n x_fast = self.fast_path.maxpool(x_fast)\n\n if self.slow_path.lateral:\n x_fast_lateral = self.slow_path.conv1_lateral(x_fast)\n x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)\n\n for i, layer_name in enumerate(self.slow_path.res_layers):\n res_layer = getattr(self.slow_path, layer_name)\n x_slow = res_layer(x_slow)\n res_layer_fast = getattr(self.fast_path, layer_name)\n x_fast = res_layer_fast(x_fast)\n if (i != len(self.slow_path.res_layers) - 1\n and self.slow_path.lateral):\n # No fusion needed in the final stage\n lateral_name = self.slow_path.lateral_connections[i]\n conv_lateral = getattr(self.slow_path, lateral_name)\n x_fast_lateral = conv_lateral(x_fast)\n x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)\n\n out = (x_slow, x_fast)\n\n return out\n"
]
| [
[
"torch.nn.Sequential",
"torch.cat",
"torch.zeros"
]
]
|
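The ResNet3dPathway entry above inflates pretrained 2-D conv weights into 3-D kernels inside _inflate_conv_params, zero-padding the input-channel axis when lateral connections enlarge it and dividing by the temporal kernel size. A minimal sketch of that inflation step is shown below; the example shapes are invented for illustration.

```python
import torch

def inflate_conv_weight(w2d: torch.Tensor, w3d_shape, kernel_t: int) -> torch.Tensor:
    """Inflate a 2-D conv weight (out, in, kh, kw) to a 3-D one
    (out, in3d, kt, kh, kw), zero-padding any extra input channels."""
    out_c, in_c2d, kh, kw = w2d.shape
    _, in_c3d, kt, _, _ = w3d_shape
    assert kt == kernel_t
    if in_c3d > in_c2d:
        pad = torch.zeros(out_c, in_c3d - in_c2d, kh, kw).type_as(w2d)
        w2d = torch.cat((w2d, pad), dim=1)
    # repeat along the new temporal axis and rescale so the response to a
    # temporally constant input matches the original 2-D filter
    return w2d.unsqueeze(2).expand(out_c, in_c3d, kt, kh, kw) / kernel_t

w2d = torch.randn(64, 3, 7, 7)                         # hypothetical 2-D weight
w3d = inflate_conv_weight(w2d, (64, 6, 5, 7, 7), kernel_t=5)
print(w3d.shape)  # torch.Size([64, 6, 5, 7, 7])
```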
burgalon/spinningup | [
"6ae9e69d795919f8775ded5d2dd6d6b60ae8ffea"
]
| [
"spinup/utils/plot.py"
]
| [
"import seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport json\nimport os\nimport os.path as osp\nimport numpy as np\n\nDIV_LINE_WIDTH = 50\n\n# Global vars for tracking and labeling data at load time.\nexp_idx = 0\nunits = dict()\n\ndef plot_data(data, xaxis='Epoch', value=\"AverageEpRet\", condition=\"Condition1\", smooth=1, **kwargs):\n if smooth > 1:\n \"\"\"\n smooth data with moving window average.\n that is,\n smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])\n where the \"smooth\" param is width of that window (2k+1)\n \"\"\"\n y = np.ones(smooth)\n for datum in data:\n x = np.asarray(datum[value])\n z = np.ones(len(x))\n smoothed_x = np.convolve(x,y,'same') / np.convolve(z,y,'same')\n datum[value] = smoothed_x\n\n if isinstance(data, list):\n data = pd.concat(data, ignore_index=True)\n sns.set(style=\"darkgrid\", font_scale=1.5)\n sns.tsplot(data=data, time=xaxis, value=value, unit=\"Unit\", condition=condition, ci='sd', **kwargs)\n \"\"\"\n If you upgrade to any version of Seaborn greater than 0.8.1, switch from \n tsplot to lineplot replacing L29 with:\n\n sns.lineplot(data=data, x=xaxis, y=value, hue=condition, ci='sd', **kwargs)\n\n Changes the colorscheme and the default legend style, though.\n \"\"\"\n plt.legend(loc='best').set_draggable(True)\n #plt.legend(loc='upper center', ncol=3, handlelength=1,\n # borderaxespad=0., prop={'size': 13})\n\n \"\"\"\n For the version of the legend used in the Spinning Up benchmarking page, \n swap L38 with:\n\n plt.legend(loc='upper center', ncol=6, handlelength=1,\n mode=\"expand\", borderaxespad=0., prop={'size': 13})\n \"\"\"\n\n xscale = np.max(np.asarray(data[xaxis])) > 5e3\n if xscale:\n # Just some formatting niceness: x-axis scale in scientific notation if max x is large\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n\n plt.tight_layout(pad=0.5)\n\ndef get_datasets(logdir, condition=None):\n \"\"\"\n Recursively look through logdir for output files produced by\n spinup.logx.Logger. \n\n Assumes that any file \"progress.txt\" is a valid hit. 
\n \"\"\"\n global exp_idx\n global units\n datasets = []\n for root, _, files in os.walk(logdir):\n if 'progress.txt' in files:\n exp_name = None\n try:\n config_path = open(os.path.join(root,'config.json'))\n config = json.load(config_path)\n if 'exp_name' in config:\n exp_name = config['exp_name']\n except:\n print('No file named config.json')\n condition1 = condition or exp_name or 'exp'\n condition2 = condition1 + '-' + str(exp_idx)\n exp_idx += 1\n if condition1 not in units:\n units[condition1] = 0\n unit = units[condition1]\n units[condition1] += 1\n\n try:\n exp_data = pd.read_table(os.path.join(root,'progress.txt'))\n except:\n print('Could not read from %s'%os.path.join(root,'progress.txt'))\n continue\n performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'\n exp_data.insert(len(exp_data.columns),'Unit',unit)\n exp_data.insert(len(exp_data.columns),'Condition1',condition1)\n exp_data.insert(len(exp_data.columns),'Condition2',condition2)\n exp_data.insert(len(exp_data.columns),'Performance',exp_data[performance])\n datasets.append(exp_data)\n return datasets\n\n\ndef get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):\n \"\"\"\n For every entry in all_logdirs,\n 1) check if the entry is a real directory and if it is, \n pull data from it; \n\n 2) if not, check to see if the entry is a prefix for a \n real directory, and pull data from that.\n \"\"\"\n logdirs = []\n for logdir in all_logdirs:\n if osp.isdir(logdir) and logdir[-1]==os.sep:\n logdirs += [logdir]\n else:\n basedir = osp.dirname(logdir)\n fulldir = lambda x : osp.join(basedir, x)\n prefix = logdir.split(os.sep)[-1]\n listdir= os.listdir(basedir)\n logdirs += sorted([fulldir(x) for x in listdir if prefix in x])\n\n \"\"\"\n Enforce selection rules, which check logdirs for certain substrings.\n Makes it easier to look at graphs from particular ablations, if you\n launch many jobs at once with similar names.\n \"\"\"\n if select is not None:\n logdirs = [log for log in logdirs if all(x in log for x in select)]\n if exclude is not None:\n logdirs = [log for log in logdirs if all(not(x in log) for x in exclude)]\n\n # Verify logdirs\n print('Plotting from...\\n' + '='*DIV_LINE_WIDTH + '\\n')\n for logdir in logdirs:\n print(logdir)\n print('\\n' + '='*DIV_LINE_WIDTH)\n\n # Make sure the legend is compatible with the logdirs\n assert not(legend) or (len(legend) == len(logdirs)), \\\n \"Must give a legend title for each set of experiments.\"\n\n # Load data from logdirs\n data = []\n if legend:\n for log, leg in zip(logdirs, legend):\n data += get_datasets(log, leg)\n else:\n for log in logdirs:\n data += get_datasets(log)\n return data\n\n\ndef make_plots(all_logdirs, legend=None, xaxis=None, values=None, count=False, \n font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean'):\n data = get_all_datasets(all_logdirs, legend, select, exclude)\n values = values if isinstance(values, list) else [values]\n condition = 'Condition2' if count else 'Condition1'\n estimator = getattr(np, estimator) # choose what to show on main curve: mean? max? 
min?\n for value in values:\n plt.figure()\n plot_data(data, xaxis=xaxis, value=value, condition=condition, smooth=smooth, estimator=estimator)\n plt.show()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('logdir', nargs='*')\n parser.add_argument('--legend', '-l', nargs='*')\n parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts')\n parser.add_argument('--value', '-y', default='Performance', nargs='*')\n parser.add_argument('--count', action='store_true')\n parser.add_argument('--smooth', '-s', type=int, default=1)\n parser.add_argument('--select', nargs='*')\n parser.add_argument('--exclude', nargs='*')\n parser.add_argument('--est', default='mean')\n args = parser.parse_args()\n \"\"\"\n\n Args: \n logdir (strings): As many log directories (or prefixes to log \n directories, which the plotter will autocomplete internally) as \n you'd like to plot from.\n\n legend (strings): Optional way to specify legend for the plot. The \n plotter legend will automatically use the ``exp_name`` from the\n config.json file, unless you tell it otherwise through this flag.\n This only works if you provide a name for each directory that\n will get plotted. (Note: this may not be the same as the number\n of logdir args you provide! Recall that the plotter looks for\n autocompletes of the logdir args: there may be more than one \n match for a given logdir prefix, and you will need to provide a \n legend string for each one of those matches---unless you have \n removed some of them as candidates via selection or exclusion \n rules (below).)\n\n xaxis (string): Pick what column from data is used for the x-axis.\n Defaults to ``TotalEnvInteracts``.\n\n value (strings): Pick what columns from data to graph on the y-axis. \n Submitting multiple values will produce multiple graphs. Defaults\n to ``Performance``, which is not an actual output of any algorithm.\n Instead, ``Performance`` refers to either ``AverageEpRet``, the \n correct performance measure for the on-policy algorithms, or\n ``AverageTestEpRet``, the correct performance measure for the \n off-policy algorithms. The plotter will automatically figure out \n which of ``AverageEpRet`` or ``AverageTestEpRet`` to report for \n each separate logdir.\n\n count: Optional flag. By default, the plotter shows y-values which\n are averaged across all results that share an ``exp_name``, \n which is typically a set of identical experiments that only vary\n in random seed. But if you'd like to see all of those curves \n separately, use the ``--count`` flag.\n\n smooth (int): Smooth data by averaging it over a fixed window. This \n parameter says how wide the averaging window will be.\n\n select (strings): Optional selection rule: the plotter will only show\n curves from logdirs that contain all of these substrings.\n\n exclude (strings): Optional exclusion rule: plotter will only show \n curves from logdirs that do not contain these substrings.\n\n \"\"\"\n\n make_plots(args.logdir, args.legend, args.xaxis, args.value, args.count, \n smooth=args.smooth, select=args.select, exclude=args.exclude,\n estimator=args.est)\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"numpy.asarray",
"numpy.ones",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ticklabel_format",
"numpy.convolve"
]
]
|
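The plot.py entry above smooths learning curves with a moving-window average built from two numpy.convolve calls, one over the values and one over a vector of ones so the shrinking window at the ends is normalised correctly. The same idea, isolated into a tiny function with an illustrative name, looks like this:

```python
import numpy as np

def smooth_curve(y, smooth=1):
    """Moving-window average with window width `smooth`.

    Dividing by a convolution of ones handles the shrinking
    window at both ends of the series."""
    y = np.asarray(y, dtype=float)
    if smooth <= 1:
        return y
    window = np.ones(smooth)
    counts = np.ones(len(y))
    return np.convolve(y, window, 'same') / np.convolve(counts, window, 'same')

print(smooth_curve([0, 0, 10, 0, 0], smooth=3))  # [0.  3.33 3.33 3.33 0. ]
```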
hukkelas/full_body_anonymization | [
"c61745b137c84ffb742ef6ab2f4721db4acf22b7"
]
| [
"fba/data/build.py"
]
| [
"from .transforms import build_transforms\nfrom .utils import DataPrefetcher, InfiniteSampler\nfrom .datasets import build_dataset\nimport torch\nfrom fba import utils\nfrom torch.utils.data._utils.collate import default_collate\n\n\ndef get_dataloader(cfg, is_train: bool):\n imsize = cfg.imsize\n if is_train:\n cfg_data = cfg.data_train\n else:\n cfg_data = cfg.data_val\n\n gpu_transform = build_transforms(\n cfg_data.image_gpu_transforms, imsize, cfg.jit_transform)\n cpu_transform = build_transforms(cfg_data.cpu_transforms, imsize, False)\n dataset = build_dataset(cfg_data.dataset, imsize, cpu_transform, is_train=is_train)\n sampler = None\n if is_train:\n sampler = InfiniteSampler(\n dataset, rank=utils.rank(),\n num_replicas=utils.world_size(), \n **cfg_data.sampler)\n elif utils.world_size() > 1:\n sampler = torch.utils.data.DistributedSampler(\n dataset, **cfg_data.sampler)\n dataloader = torch.utils.data.DataLoader(\n dataset,\n **cfg_data.loader,\n sampler=sampler,\n collate_fn=collate_fn\n )\n dataloader = DataPrefetcher(\n dataloader,\n image_gpu_transforms=gpu_transform\n )\n return dataloader\n\n\ndef collate_fn(batch):\n elem = batch[0]\n ignore_keys = set([\"embed_map\", \"vertx2cat\"])\n batch_ = {key: default_collate([d[key] for d in batch]) for key in elem if key not in ignore_keys} \n if \"embed_map\" in elem:\n batch_[\"embed_map\"] = elem[\"embed_map\"]\n if \"vertx2cat\" in elem:\n batch_[\"vertx2cat\"] = elem[\"vertx2cat\"]\n return batch_\n\n\ndef build_dataloader_train(cfg):\n return get_dataloader(cfg, is_train=True)\n\n\ndef build_dataloader_val(cfg):\n return get_dataloader(cfg, is_train=False)\n\n"
]
| [
[
"torch.utils.data.DistributedSampler",
"torch.utils.data._utils.collate.default_collate",
"torch.utils.data.DataLoader"
]
]
|
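The data-loading entry above batches per-sample tensors with default_collate but passes dataset-wide lookup tensors ("embed_map", "vertx2cat") through once instead of stacking them per sample. A stripped-down sketch of that collate pattern follows; only the two key names come from the original, the toy batch is invented.

```python
import torch
from torch.utils.data._utils.collate import default_collate

SHARED_KEYS = {"embed_map", "vertx2cat"}  # identical for every sample

def collate_fn(batch):
    """Stack per-sample entries, copy shared entries from the first sample."""
    first = batch[0]
    out = {key: default_collate([sample[key] for sample in batch])
           for key in first if key not in SHARED_KEYS}
    for key in SHARED_KEYS & first.keys():
        out[key] = first[key]
    return out

batch = [{"img": torch.rand(3, 4, 4), "embed_map": torch.arange(5)} for _ in range(2)]
collated = collate_fn(batch)
print(collated["img"].shape, collated["embed_map"].shape)
# torch.Size([2, 3, 4, 4]) torch.Size([5])
```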
Knoxantropicen/rlkit | [
"c60fb3794bdd8a6fc4480e668dc3832c5f5f3ab5"
]
| [
"rlkit/pytorch/sac/policies.py"
]
| [
"import numpy as np\nimport torch\nfrom torch import nn as nn\n\nfrom rlkit.policies.base import ExplorationPolicy, Policy\nfrom rlkit.pytorch.distributions import TanhNormal\nfrom rlkit.pytorch.networks import Mlp\n\n\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\n\n\nclass TanhGaussianPolicy(Mlp, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n action, mean, log_std, _ = policy(obs)\n action, mean, log_std, _ = policy(obs, deterministic=True)\n action, mean, log_std, log_prob = policy(obs, return_log_prob=True)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n hidden_sizes,\n obs_dim,\n action_dim,\n std=None,\n init_w=1e-3,\n **kwargs\n ):\n self.save_init_params(locals())\n super().__init__(\n hidden_sizes,\n input_size=obs_dim,\n output_size=action_dim,\n init_w=init_w,\n **kwargs\n )\n self.log_std = None\n self.std = std\n if std is None:\n last_hidden_size = obs_dim\n if len(hidden_sizes) > 0:\n last_hidden_size = hidden_sizes[-1]\n self.last_fc_log_std = nn.Linear(last_hidden_size, action_dim)\n self.last_fc_log_std.weight.data.uniform_(-init_w, init_w)\n self.last_fc_log_std.bias.data.uniform_(-init_w, init_w)\n else:\n self.log_std = np.log(std)\n assert LOG_SIG_MIN <= self.log_std <= LOG_SIG_MAX\n\n def get_action(self, obs_np, deterministic=False):\n actions = self.get_actions(obs_np[None], deterministic=deterministic)\n return actions[0, :], {}\n\n def get_actions(self, obs_np, deterministic=False):\n return self.eval_np(obs_np, deterministic=deterministic)[0]\n\n def forward(\n self,\n obs,\n reparameterize=True,\n deterministic=False,\n return_log_prob=False,\n ):\n \"\"\"\n :param obs: Observation\n :param deterministic: If True, do not sample\n :param return_log_prob: If True, return a sample and its log probability\n \"\"\"\n h = obs\n for i, fc in enumerate(self.fcs):\n h = self.hidden_activation(fc(h))\n mean = self.last_fc(h)\n if self.std is None:\n log_std = self.last_fc_log_std(h)\n log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n std = torch.exp(log_std)\n else:\n std = self.std\n log_std = self.log_std\n\n log_prob = None\n entropy = None\n mean_action_log_prob = None\n pre_tanh_value = None\n if deterministic:\n action = torch.tanh(mean)\n else:\n tanh_normal = TanhNormal(mean, std)\n if return_log_prob:\n if reparameterize is True:\n action, pre_tanh_value = tanh_normal.rsample(\n return_pretanh_value=True\n )\n else:\n action, pre_tanh_value = tanh_normal.sample(\n return_pretanh_value=True\n )\n log_prob = tanh_normal.log_prob(\n action,\n pre_tanh_value=pre_tanh_value\n )\n log_prob = log_prob.sum(dim=1, keepdim=True)\n else:\n if reparameterize is True:\n action = tanh_normal.rsample()\n else:\n action = tanh_normal.sample()\n\n return (\n action, mean, log_std, log_prob, entropy, std,\n mean_action_log_prob, pre_tanh_value,\n )\n\n\nclass MakeDeterministic(Policy):\n def __init__(self, stochastic_policy):\n self.stochastic_policy = stochastic_policy\n\n def get_action(self, observation):\n return self.stochastic_policy.get_action(observation,\n deterministic=True)\n\n def get_actions(self, observations):\n return self.stochastic_policy.get_actions(observations,\n deterministic=True)\n"
]
| [
[
"torch.nn.Linear",
"numpy.log",
"torch.clamp",
"torch.tanh",
"torch.exp"
]
]
|
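The TanhGaussianPolicy entry above clamps the predicted log standard deviation to [LOG_SIG_MIN, LOG_SIG_MAX], exponentiates it, and samples a tanh-squashed Gaussian action through the repository's TanhNormal distribution. The sketch below shows the same clamp, exponentiate, sample and squash sequence using only stock torch.distributions; it is a simplification for illustration, not the repository's TanhNormal class, and the 1e-6 term is a common numerical guard rather than something taken from the source.

```python
import torch
from torch.distributions import Normal

LOG_SIG_MIN, LOG_SIG_MAX = -20, 2

def sample_tanh_gaussian(mean, log_std):
    """Reparameterized sample from tanh(Normal(mean, std)) with its log-prob."""
    log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
    std = torch.exp(log_std)
    normal = Normal(mean, std)
    pre_tanh = normal.rsample()          # differentiable (reparameterized) sample
    action = torch.tanh(pre_tanh)
    # change-of-variables correction: log|d tanh(u)/du| = log(1 - tanh(u)^2)
    log_prob = normal.log_prob(pre_tanh) - torch.log(1 - action.pow(2) + 1e-6)
    return action, log_prob.sum(dim=-1, keepdim=True)

mean = torch.zeros(4, 2)
log_std = torch.full((4, 2), -1.0)
action, log_prob = sample_tanh_gaussian(mean, log_std)
print(action.shape, log_prob.shape)  # torch.Size([4, 2]) torch.Size([4, 1])
```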
DrLachie/pyclesperanto_prototype | [
"56843fac2543265c40f108fd40eac3ecf85c8458"
]
| [
"tests/test_voronoi_otsu_labeling.py"
]
| [
"import pyclesperanto_prototype as cle\nimport numpy as np\n\ndef test_voronoi_otsu_labeling():\n \n gpu_input = cle.push(np.asarray([\n\n [0, 0, 1, 1, 0, 0],\n [0, 1, 8, 9, 1, 0],\n [0, 1, 7, 6, 1, 0],\n [0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 0],\n [0, 0, 1, 8, 7, 1],\n [0, 0, 1, 1, 1, 0],\n\n ]))\n\n\n gpu_reference = cle.push(np.asarray([\n\n [0, 0, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 0],\n [0, 0, 1, 2, 2, 0],\n [0, 0, 0, 2, 2, 0],\n [0, 0, 2, 2, 2, 0],\n [0, 0, 0, 2, 2, 0],\n\n ]))\n\n gpu_output = cle.voronoi_otsu_labeling(gpu_input, spot_sigma=1, outline_sigma=1)\n\n a = cle.pull(gpu_output)\n b = cle.pull(gpu_reference)\n\n print(a)\n print(b)\n\n assert (np.array_equal(a, b))\n"
]
| [
[
"numpy.array_equal",
"numpy.asarray"
]
]
|
herman-nside/spektral | [
"58bb524ec783f187145c3afe53db491dbc1f0ba0"
]
| [
"examples/graph_prediction/tud_mincut.py"
]
| [
"\"\"\"\nThis example shows how to perform molecule regression with the\n[Open Graph Benchmark](https://ogb.stanford.edu) `mol-esol` dataset, using a\nsimple GCN with MinCutPool in batch mode.\nExpect unstable training due to the small-ish size of the dataset.\n\"\"\"\n\nimport numpy as np\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\n\nfrom spektral.data import BatchLoader\nfrom spektral.datasets import TUDataset\nfrom spektral.layers import GCNConv, GlobalSumPool, MinCutPool\n\n################################################################################\n# PARAMETERS\n################################################################################\nlearning_rate = 1e-3 # Learning rate\nepochs = 10 # Number of training epochs\nbatch_size = 32 # Batch size\n\n################################################################################\n# LOAD DATA\n################################################################################\ndataset = TUDataset(\"PROTEINS\", clean=True)\n\n# Parameters\nN = max(g.n_nodes for g in dataset)\nF = dataset.n_node_features # Dimension of node features\nS = dataset.n_edge_features # Dimension of edge features\nn_out = dataset.n_labels # Dimension of the target\n\n# Train/test split\nidxs = np.random.permutation(len(dataset))\nsplit_va, split_te = int(0.8 * len(dataset)), int(0.9 * len(dataset))\nidx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te])\ndataset_tr = dataset[idx_tr]\ndataset_va = dataset[idx_va]\ndataset_te = dataset[idx_te]\n\nloader_tr = BatchLoader(dataset_tr, batch_size=batch_size)\nloader_va = BatchLoader(dataset_va, batch_size=batch_size)\nloader_te = BatchLoader(dataset_te, batch_size=batch_size)\n\n################################################################################\n# BUILD MODEL\n################################################################################\nX_in = Input(shape=(None, F))\nA_in = Input(shape=(None, None))\n\nX_1 = GCNConv(32, activation=\"relu\")([X_in, A_in])\nX_1, A_1 = MinCutPool(N // 2)([X_1, A_in])\nX_2 = GCNConv(32, activation=\"relu\")([X_1, A_1])\nX_3 = GlobalSumPool()(X_2)\noutput = Dense(n_out)(X_3)\n\n# Build model\nmodel = Model(inputs=[X_in, A_in], outputs=output)\nopt = Adam(lr=learning_rate)\nmodel.compile(optimizer=opt, loss=\"categorical_crossentropy\", metrics=[\"acc\"])\nmodel.summary()\n\n################################################################################\n# FIT MODEL\n################################################################################\nmodel.fit(\n loader_tr.load(),\n steps_per_epoch=loader_tr.steps_per_epoch,\n epochs=epochs,\n validation_data=loader_va,\n validation_steps=loader_va.steps_per_epoch,\n callbacks=[EarlyStopping(patience=10, restore_best_weights=True)],\n)\n\n################################################################################\n# EVALUATE MODEL\n################################################################################\nprint(\"Testing model\")\nloss, acc = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)\nprint(\"Done. Test loss: {}. Test acc: {}\".format(loss, acc))\n"
]
| [
[
"tensorflow.keras.layers.Input",
"numpy.split",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.callbacks.EarlyStopping"
]
]
|
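The Spektral example above shuffles dataset indices and cuts them at the 80% and 90% marks with numpy.split to form train, validation and test subsets. Isolated from the graph library, the same index bookkeeping is just a few lines of numpy (the list of integers stands in for the dataset so the snippet runs on its own):

```python
import numpy as np

rng = np.random.default_rng(0)
dataset = list(range(1000))          # stand-in for the graph dataset

idxs = rng.permutation(len(dataset))
split_va, split_te = int(0.8 * len(dataset)), int(0.9 * len(dataset))
idx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te])

print(len(idx_tr), len(idx_va), len(idx_te))   # 800 100 100
assert set(idx_tr) | set(idx_va) | set(idx_te) == set(range(len(dataset)))
```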
hankyul2/Show_Attend_Tell | [
"1fc76af8f62e5ba84307f91622ba243fff49b943"
]
| [
"main.py"
]
| [
"import json\nimport os\n\nimport math\nimport warnings\n\nimport torch\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nwarnings.filterwarnings('ignore')\nos.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n\nfrom torch.optim import SGD, Adam, AdamW\nfrom torch.optim.lr_scheduler import OneCycleLR\nfrom pytorch_lightning import LightningModule\nfrom pytorch_lightning.utilities.cli import instantiate_class, LightningCLI\nfrom torchmetrics import MetricCollection, Accuracy, BLEUScore\n\nfrom utils import LabelSmoothing, EMA\nfrom data import BaseDataModule\nfrom model import get_model\n\n\nclass BaseImageCaptionSystem(LightningModule):\n def __init__(self, model_name: str, pretrained: bool, num_step: int, max_epochs: int,\n gpus: str, optimizer_init: dict, lr_scheduler_init: dict, processed_root: str,\n use_feat: bool = False, dropout: float = 0.0, save_folder='inference_results'):\n \"\"\" Define base vision classification system\n :arg\n model_name: model name string ex) efficientnet_v2_s\n pretrained: use pretrained weight or not\n num_classes: number of class of dataset\n num_step: number of step for 1 epoch\n max_epochs: number of epoch to train\n gpus: gpus id string ex) 1,2,\n optimizer_init: optimizer class path and init args\n lr_scheduler_init: learning rate scheduler class path and init args\n use_precise_bn: precise_bn is re-calculating batch statistic after each epoch end.\n augmentation: use mixup based augmentation ex) cutmix, cutout, mixup\n ema: use exponential moving average to increase model performance\n dropout: dropout rate for model\n \"\"\"\n super(BaseImageCaptionSystem, self).__init__()\n self.save_hyperparameters()\n\n # step 1. save data related info (not defined here)\n self.gpus = len(gpus.split(',')) - 1\n self.num_step = int(math.ceil(num_step / (self.gpus)))\n self.max_epochs = max_epochs\n\n # step 2. define model\n self.word_map = self.open_word_map(processed_root)\n self.idx_map = {v: k for k, v in self.word_map.items()}\n self.model = get_model(model_name, pretrained, use_feat, len(self.word_map), dropout)\n\n # step 3. define lr tools (optimizer, lr scheduler)\n self.optimizer_init_config = optimizer_init\n self.lr_scheduler_init_config = lr_scheduler_init\n self.criterion = LabelSmoothing()\n\n # step 4. 
define metric\n metrics = MetricCollection({'top@1': Accuracy(top_k=1), 'top@5': Accuracy(top_k=5)})\n self.train_metric = metrics.clone(prefix='train/')\n self.valid_metric = metrics.clone(prefix='valid/')\n self.test_metric = metrics.clone(prefix='test/')\n self.bleu_metric = BLEUScore()\n self.beam_bleu_metric = BLEUScore()\n self.save_folder = save_folder\n self.results = {'references': [], 'hypothesis': []}\n\n def forward(self, batch, batch_idx):\n x, y = batch\n loss, y_hat = self.compute_loss_eval(x, y)\n return loss\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n return self.shared_step(batch, self.train_metric, 'train')\n\n def validation_step(self, batch, batch_idx, dataloader_idx=None):\n return self.shared_step(batch[:-1], self.valid_metric, 'valid', batch[-1], self.bleu_metric)\n\n def test_step(self, batch, batch_idx, dataloader_idx=None):\n imgs, feats, _, _, references = batch\n references = self.get_reference_list(references, list(range(len(references))))\n hypothesis = [self.model.inference(img, feat, self.word_map, self.idx_map) for img, feat in zip(imgs, feats)]\n self.log_dict({f'test/BLEU@4': self.beam_bleu_metric(references, hypothesis)}, prog_bar=True)\n self.show_example(imgs[0], references[0][0], hypothesis[0], batch_idx)\n self.results['hypothesis'].extend(hypothesis)\n self.results['references'].extend(references)\n\n def on_test_end(self) -> None:\n import json\n with open(f'{self.save_folder}/inference_results.json', 'w') as f:\n json.dump(self.results, f, ensure_ascii=False, indent=4)\n\n def show_example(self, img, reference, hypothesis, batch_idx):\n import numpy as np\n import matplotlib.pyplot as plt\n from pathlib import Path\n Path(self.save_folder).mkdir(exist_ok=True)\n plt.figure(figsize=(16,20))\n mean, std = torch.tensor([[[0.485]], [[0.456]], [[0.406]]]), torch.tensor([[[0.229]], [[0.224]], [[0.225]]])\n img = np.transpose((img.clone().cpu() * std + mean).numpy(), (1, 2, 0))\n plt.imshow(img)\n plt.xticks([])\n plt.yticks([])\n plt.xlabel(f'{hypothesis}\\n{reference}', fontsize=18)\n plt.savefig(f'{self.save_folder}/{batch_idx}.png')\n\n def shared_step(self, batch, metric, mode, references=None, bleu_metric=None):\n preds, caps_sorted, decode_lengths, alphas, sort_ind = self.model(*batch)\n y_hat = pack_padded_sequence(preds, decode_lengths, batch_first=True).data\n y = pack_padded_sequence(caps_sorted[:, 1:], decode_lengths, batch_first=True).data\n loss = self.criterion(y_hat, y) + ((1. 
- alphas.sum(dim=1)) ** 2).mean()\n metric = metric(y_hat, y)\n self.log_dict({f'{mode}/loss': loss}, prog_bar=True)\n self.log_dict(metric, prog_bar=True)\n\n if bleu_metric:\n references = self.get_reference_list(references, sort_ind)\n hypothesis = [' '.join(self.idx_map[i] for i in pred[:decode_lengths[j]]) for j, pred in enumerate(torch.max(preds, dim=2)[1].tolist())]\n self.log_dict({f'{mode}/teaching_force_BLEU@4': bleu_metric(references, hypothesis)}, prog_bar=True)\n\n return loss\n\n def get_reference_list(self, references, sort_ind):\n reference_list = []\n references = references[sort_ind]\n for idx in range(references.size(0)):\n reference = references[idx].tolist()\n reference = list(\n map(lambda c: ' '.join([self.idx_map[w] for w in c if w not in {self.word_map['<start>'], self.word_map['<end>'], self.word_map['<pad>']}]), reference))\n reference_list.append(reference)\n return reference_list\n\n def configure_optimizers(self):\n optimizer = instantiate_class(self.model.parameters(), self.optimizer_init_config)\n\n lr_scheduler = {'scheduler': instantiate_class(optimizer, self.update_and_get_lr_scheduler_config()),\n 'interval': 'step'}\n return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n\n def update_and_get_lr_scheduler_config(self):\n if 'num_step' in self.lr_scheduler_init_config['init_args']:\n self.lr_scheduler_init_config['init_args']['num_step'] = self.num_step\n if 'max_epochs' in self.lr_scheduler_init_config['init_args']:\n self.lr_scheduler_init_config['init_args']['max_epochs'] = self.max_epochs\n if 'max_lr' in self.lr_scheduler_init_config['init_args']:\n self.lr_scheduler_init_config['init_args']['max_lr'] = self.optimizer_init_config['init_args']['lr']\n if 'total_steps' in self.lr_scheduler_init_config['init_args']:\n self.lr_scheduler_init_config['init_args']['total_steps'] = self.num_step * self.max_epochs\n return self.lr_scheduler_init_config\n\n def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):\n \"\"\"Faster optimization step\"\"\"\n optimizer.zero_grad(set_to_none=True)\n\n def open_word_map(self, processed_root):\n with open(os.path.join(processed_root, 'WORDMAP.json'), 'r') as f:\n word_map = json.load(f)\n return word_map\n\n\nclass MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n # 1. link argument\n parser.link_arguments('data.use_feat', 'model.use_feat', apply_on='instantiate')\n parser.link_arguments('data.processed_root', 'model.processed_root', apply_on='instantiate')\n parser.link_arguments('data.num_step', 'model.num_step', apply_on='instantiate')\n parser.link_arguments('trainer.max_epochs', 'model.max_epochs', apply_on='parse')\n parser.link_arguments('trainer.gpus', 'model.gpus', apply_on='parse')\n\n # 2. add optimizer & scheduler argument\n parser.add_optimizer_args((SGD, Adam, AdamW), link_to='model.optimizer_init')\n parser.add_lr_scheduler_args((OneCycleLR,), link_to='model.lr_scheduler_init')\n\n\nif __name__ == '__main__':\n cli = MyLightningCLI(BaseImageCaptionSystem, BaseDataModule, save_config_overwrite=True)\n # cli.trainer.test(ckpt_path='best', dataloaders=cli.datamodule.test_dataloader())"
]
| [
[
"torch.max",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"torch.tensor",
"torch.nn.utils.rnn.pack_padded_sequence",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.imshow"
]
]
|
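The image-captioning entry above flattens variable-length decoder outputs with pack_padded_sequence before computing the loss, so padded time steps never enter the cross-entropy. Below is that step on its own; the batch shapes, vocabulary size and lengths are invented, and the lengths are sorted longest-first because that is what pack_padded_sequence expects by default.

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence

batch, max_len, vocab = 3, 5, 11
preds = torch.randn(batch, max_len, vocab)            # decoder scores per step
caps = torch.randint(0, vocab, (batch, max_len + 1))  # <start> token + targets
decode_lengths = [5, 3, 2]                            # true lengths, descending

# keep only the real (unpadded) time steps of each sequence
y_hat = pack_padded_sequence(preds, decode_lengths, batch_first=True).data
y = pack_padded_sequence(caps[:, 1:], decode_lengths, batch_first=True).data

print(y_hat.shape, y.shape)   # torch.Size([10, 11]) torch.Size([10])
loss = torch.nn.functional.cross_entropy(y_hat, y)
print(loss.item())
```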
XuehaiPan/Soft-Actor-Critic | [
"9018199f28351f4106dab73a9dc3631c52b72260"
]
| [
"common/network.py"
]
| [
"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\n__all__ = [\n 'build_encoder',\n 'Container', 'NetworkBase',\n 'VanillaNeuralNetwork', 'VanillaNN',\n 'MultilayerPerceptron', 'MLP',\n 'GRUHidden', 'cat_hidden',\n 'RecurrentNeuralNetwork', 'RNN',\n 'ConvolutionalNeuralNetwork', 'CNN'\n]\n\n\ndef build_encoder(config):\n state_dim = (config.state_dim or config.observation_dim)\n state_encoder = nn.Identity()\n if config.FC_encoder:\n if config.state_dim is not None or len(config.encoder_hidden_dims) > 0:\n state_encoder = VanillaNeuralNetwork(n_dims=[config.observation_dim,\n *config.encoder_hidden_dims,\n state_dim],\n activation=config.encoder_activation,\n output_activation=None)\n elif config.RNN_encoder:\n state_encoder = RecurrentNeuralNetwork(n_dims_before_rnn=[config.observation_dim,\n *config.encoder_hidden_dims_before_rnn],\n n_dims_rnn_hidden=config.encoder_hidden_dims_rnn,\n n_dims_after_rnn=[*config.encoder_hidden_dims_after_rnn,\n state_dim],\n skip_connection=config.skip_connection,\n trainable_initial_hidden=config.trainable_hidden,\n activation=config.encoder_activation,\n output_activation=None)\n elif config.CNN_encoder:\n state_encoder = ConvolutionalNeuralNetwork(image_size=(config.image_size, config.image_size),\n input_channels=config.observation_dim,\n output_dim=state_dim,\n n_hidden_channels=config.encoder_hidden_channels,\n activation=config.encoder_activation,\n output_activation=None,\n **config.build_from_keys(['kernel_sizes',\n 'strides',\n 'paddings',\n 'poolings',\n 'batch_normalization']))\n\n config.state_encoder = state_encoder\n config.state_dim = state_dim\n\n return state_encoder\n\n\nclass Container(nn.Module):\n def __init__(self):\n super().__init__()\n self.device = None\n\n def to(self, *args, **kwargs):\n device, *_ = torch._C._nn._parse_to(*args, **kwargs)\n if device is not None:\n device = torch.device(device)\n for module in self.children():\n if isinstance(module, Container):\n module.to(device)\n self.device = device\n return super().to(*args, **kwargs)\n\n def save_model(self, path, key_filter=None):\n state_dict = self.state_dict()\n keys = list(state_dict.keys())\n for key in keys:\n if key_filter is not None and not key_filter(key):\n state_dict.pop(key)\n else:\n state_dict[key] = state_dict[key].cpu()\n\n torch.save(state_dict, path)\n return state_dict\n\n def load_model(self, path, strict=True):\n return self.load_state_dict(torch.load(path, map_location=self.device), strict=strict)\n\n\nNetworkBase = Container\n\n\nclass VanillaNeuralNetwork(NetworkBase):\n def __init__(self, n_dims, activation=nn.ReLU(inplace=True), output_activation=None, device=None):\n super().__init__()\n\n self.activation = activation\n self.output_activation = output_activation\n\n self.linear_layers = nn.ModuleList()\n for i in range(len(n_dims) - 1):\n self.linear_layers.append(module=nn.Linear(in_features=n_dims[i],\n out_features=n_dims[i + 1],\n bias=True))\n\n self.in_features = n_dims[0]\n self.out_features = n_dims[-1]\n\n self.to(device)\n\n def forward(self, x):\n n_layers = len(self.linear_layers)\n for i, layer in enumerate(self.linear_layers):\n x = layer(x)\n if i < n_layers - 1:\n x = self.activation(x)\n if self.output_activation is not None:\n x = self.output_activation(x)\n return x\n\n\nclass GRUHidden(object):\n def __init__(self, hidden):\n self.hidden = hidden\n\n def __str__(self):\n return str(self.hidden)\n\n def __repr__(self):\n return repr(self.hidden)\n\n def __getitem__(self, item):\n new_hidden = []\n for h in 
self.hidden:\n new_hidden.append(h[item])\n return GRUHidden(hidden=new_hidden)\n\n def __getattr__(self, item):\n attr = getattr(torch.Tensor, item)\n\n if callable(attr):\n self_hidden = self.hidden\n\n def func(*args, **kwargs):\n new_hidden = []\n for h in self_hidden:\n new_hidden.append(attr(h, *args, **kwargs))\n return GRUHidden(hidden=new_hidden)\n\n return func\n else:\n new_hidden = []\n for h in self.hidden:\n new_hidden.append(getattr(h, item))\n return GRUHidden(hidden=new_hidden)\n\n def float(self):\n new_hidden = []\n for h in self.hidden:\n new_hidden.append(torch.FloatTensor(h))\n return GRUHidden(hidden=new_hidden)\n\n @staticmethod\n def cat(hiddens, dim=0):\n hiddens = [hidden.hidden for hidden in hiddens]\n new_hidden = []\n for ith_layer_hiddens in zip(*hiddens):\n ith_layer_hiddens = torch.cat(ith_layer_hiddens, dim=dim)\n new_hidden.append(ith_layer_hiddens)\n return GRUHidden(hidden=new_hidden)\n\n\ncat_hidden = GRUHidden.cat\n\n\nclass RecurrentNeuralNetwork(NetworkBase):\n def __init__(self, n_dims_before_rnn, n_dims_rnn_hidden, n_dims_after_rnn,\n skip_connection=False, trainable_initial_hidden=False,\n activation=nn.ReLU(inplace=True), output_activation=None, device=None):\n assert len(n_dims_rnn_hidden) > 0\n\n super().__init__()\n\n n_dims_rnn_hidden = [n_dims_before_rnn[-1], *n_dims_rnn_hidden]\n n_dims_after_rnn = [n_dims_rnn_hidden[-1], *n_dims_after_rnn]\n\n self.skip_connection = skip_connection\n if skip_connection:\n n_dims_after_rnn[0] += n_dims_before_rnn[-1]\n\n self.activation = activation\n self.output_activation = output_activation\n\n self.linear_layers_before_rnn = VanillaNeuralNetwork(n_dims=n_dims_before_rnn,\n activation=activation,\n output_activation=None)\n\n self.gru_layers = nn.ModuleList()\n for i in range(len(n_dims_rnn_hidden) - 1):\n self.gru_layers.append(module=nn.GRU(input_size=n_dims_rnn_hidden[i],\n hidden_size=n_dims_rnn_hidden[i + 1],\n num_layers=1, bias=True,\n batch_first=False, bidirectional=False))\n\n if trainable_initial_hidden:\n self.init_hiddens = nn.ParameterList()\n for i in range(len(n_dims_rnn_hidden) - 1):\n bound = 1 / np.sqrt(n_dims_rnn_hidden[i])\n hidden = nn.Parameter(torch.Tensor(1, 1, n_dims_rnn_hidden[i + 1]), requires_grad=True)\n nn.init.uniform_(hidden, -bound, bound)\n self.init_hiddens.append(hidden)\n else:\n self.init_hiddens = []\n for i in range(len(n_dims_rnn_hidden) - 1):\n self.init_hiddens.append(torch.zeros(1, 1, n_dims_rnn_hidden[i + 1],\n device=torch.device('cpu'),\n requires_grad=False))\n\n self.linear_layers_after_rnn = VanillaNeuralNetwork(n_dims=n_dims_after_rnn,\n activation=activation,\n output_activation=output_activation)\n\n self.in_features = self.linear_layers_before_rnn.in_features\n self.out_features = self.linear_layers_after_rnn.out_features\n\n self.to(device)\n\n def forward(self, x, hx=None):\n if hx is None:\n hx = self.initial_hiddens(batch_size=x.size(1))\n assert isinstance(hx, GRUHidden)\n\n identity = x = self.linear_layers_before_rnn(x)\n\n ha = []\n hn = []\n for i, gru_layer in enumerate(self.gru_layers):\n hn.append(None)\n x, hn[i] = gru_layer(x, hx.hidden[i])\n ha.append(x)\n\n if self.skip_connection:\n x = torch.cat([x, identity], dim=-1)\n x = self.linear_layers_after_rnn(x)\n ha = GRUHidden(hidden=ha)\n hn = GRUHidden(hidden=hn)\n return x, hn, ha\n\n def initial_hiddens(self, batch_size=1):\n init_hidden = GRUHidden(hidden=list(self.init_hiddens))\n init_hidden = init_hidden.to(self.device)\n return init_hidden.repeat(1, batch_size, 1)\n\n\nclass 
ConvolutionalNeuralNetwork(NetworkBase):\n def __init__(self, image_size, input_channels, n_hidden_channels,\n kernel_sizes, strides, paddings, poolings,\n output_dim=None, headless=False, batch_normalization=False,\n activation=nn.ReLU(inplace=True), output_activation=None, device=None):\n assert len(n_hidden_channels) == len(kernel_sizes)\n assert len(n_hidden_channels) == len(strides)\n assert len(n_hidden_channels) == len(paddings)\n assert len(n_hidden_channels) == len(poolings)\n\n assert bool(output_dim) != bool(headless)\n\n super().__init__()\n\n n_hidden_channels = [input_channels, *n_hidden_channels]\n\n self.activation = activation\n self.output_activation = output_activation\n\n self.conv_layers = nn.ModuleList()\n for i in range(len(n_hidden_channels) - 1):\n conv_layer = nn.Conv2d(n_hidden_channels[i],\n n_hidden_channels[i + 1],\n kernel_size=kernel_sizes[i],\n stride=strides[i],\n padding=paddings[i],\n bias=True)\n\n self.conv_layers.append(module=conv_layer)\n\n self.batch_normalization = batch_normalization\n if batch_normalization:\n self.batch_norm_layers = nn.ModuleList()\n for i in range(1, len(n_hidden_channels)):\n self.batch_norm_layers.append(module=nn.BatchNorm2d(n_hidden_channels[i],\n affine=True))\n\n self.max_pooling_layers = nn.ModuleList(list(map(nn.MaxPool2d, poolings)))\n\n dummy = torch.zeros(1, input_channels, *image_size)\n with torch.no_grad():\n dummy = self(dummy)\n conv_output_dim = int(np.prod(dummy.size()))\n\n if output_dim is not None:\n assert not headless\n self.linear_layer = nn.Linear(in_features=conv_output_dim,\n out_features=output_dim,\n bias=True)\n self.out_features = output_dim\n else:\n assert headless\n self.out_features = conv_output_dim\n self.in_features = (input_channels, *image_size)\n\n self.to(device)\n\n def forward(self, x):\n input_size = x.size()\n x = x.view(-1, *input_size[-3:])\n\n for i, (conv_layer, max_pooling_layer) in enumerate(zip(self.conv_layers,\n self.max_pooling_layers)):\n x = conv_layer(x)\n if self.batch_normalization:\n x = self.batch_norm_layers[i](x)\n x = self.activation(x)\n x = max_pooling_layer(x)\n\n x = x.view(*input_size[:-3], -1)\n if hasattr(self, 'linear_layer'):\n x = self.linear_layer(x)\n if self.output_activation is not None:\n x = self.output_activation(x)\n\n return x\n\n\nMLP = MultilayerPerceptron = VanillaNN = VanillaNeuralNetwork\nRNN = RecurrentNeuralNetwork\nCNN = ConvolutionalNeuralNetwork\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.ParameterList",
"torch.nn.GRU",
"torch.nn.ModuleList",
"torch.nn.BatchNorm2d",
"torch.load",
"torch.FloatTensor",
"numpy.sqrt",
"torch.Tensor",
"torch.zeros",
"torch.device",
"torch.nn.Identity",
"torch.save",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.init.uniform_",
"torch.no_grad",
"torch._C._nn._parse_to"
]
]
|
CK-er/mmdet | [
"9bea4068efbcf7bf739dbe41917a68d525c29868"
]
| [
"mmdet/core/bbox/transforms.py"
]
| [
"import numpy as np\nimport torch\n\ndef bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):\n assert proposals.size() == gt.size()\n\n proposals = proposals.float()\n gt = gt.float()\n px = (proposals[..., 0] + proposals[..., 2]) * 0.5\n py = (proposals[..., 1] + proposals[..., 3]) * 0.5\n pw = proposals[..., 2] - proposals[..., 0] + 1.0\n ph = proposals[..., 3] - proposals[..., 1] + 1.0\n\n gx = (gt[..., 0] + gt[..., 2]) * 0.5\n gy = (gt[..., 1] + gt[..., 3]) * 0.5\n gw = gt[..., 2] - gt[..., 0] + 1.0\n gh = gt[..., 3] - gt[..., 1] + 1.0\n\n dx = (gx - px) / pw\n dy = (gy - py) / ph\n dw = torch.log(gw / pw)\n dh = torch.log(gh / ph)\n deltas = torch.stack([dx, dy, dw, dh], dim=-1)\n\n means = deltas.new_tensor(means).unsqueeze(0)\n stds = deltas.new_tensor(stds).unsqueeze(0)\n deltas = deltas.sub_(means).div_(stds)\n\n return deltas\n\n\ndef delta2bbox(rois,\n deltas,\n means=[0, 0, 0, 0],\n stds=[1, 1, 1, 1],\n max_shape=None,\n wh_ratio_clip=16 / 1000):\n means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)\n stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)\n denorm_deltas = deltas * stds + means\n dx = denorm_deltas[:, 0::4]\n dy = denorm_deltas[:, 1::4]\n dw = denorm_deltas[:, 2::4]\n dh = denorm_deltas[:, 3::4]\n max_ratio = np.abs(np.log(wh_ratio_clip))\n dw = dw.clamp(min=-max_ratio, max=max_ratio)\n dh = dh.clamp(min=-max_ratio, max=max_ratio)\n px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)\n py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)\n pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)\n ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)\n gw = pw * dw.exp()\n gh = ph * dh.exp()\n gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx\n gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy\n x1 = gx - gw * 0.5 + 0.5\n y1 = gy - gh * 0.5 + 0.5\n x2 = gx + gw * 0.5 - 0.5\n y2 = gy + gh * 0.5 - 0.5\n if max_shape is not None:\n x1 = x1.clamp(min=0, max=max_shape[1] - 1)\n y1 = y1.clamp(min=0, max=max_shape[0] - 1)\n x2 = x2.clamp(min=0, max=max_shape[1] - 1)\n y2 = y2.clamp(min=0, max=max_shape[0] - 1)\n bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)\n return bboxes\n\n\n\ndef bbox_flip(bboxes, img_shape, direction='horizontal'):\n \"\"\"Flip bboxes horizontally or vertically.\n\n Args:\n bboxes (Tensor): Shape (..., 4*k)\n img_shape (tuple): Image shape.\n direction (str): Flip direction, options are \"horizontal\" and\n \"vertical\". 
Default: \"horizontal\"\n\n\n Returns:\n Tensor: Flipped bboxes.\n \"\"\"\n assert bboxes.shape[-1] % 4 == 0\n assert direction in ['horizontal', 'vertical']\n flipped = bboxes.clone()\n if direction == 'vertical':\n flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]\n flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]\n else:\n flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4]\n flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4]\n return flipped\n\n\ndef bbox_mapping(bboxes,\n img_shape,\n scale_factor,\n flip,\n flip_direction='horizontal'):\n \"\"\"Map bboxes from the original image scale to testing scale\"\"\"\n new_bboxes = bboxes * bboxes.new_tensor(scale_factor)\n if flip:\n new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)\n return new_bboxes\n\n\ndef bbox_mapping_back(bboxes,\n img_shape,\n scale_factor,\n flip,\n flip_direction='horizontal'):\n \"\"\"Map bboxes from testing scale to original image scale\"\"\"\n new_bboxes = bbox_flip(bboxes, img_shape,\n flip_direction) if flip else bboxes\n new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)\n return new_bboxes.view(bboxes.shape)\n\n\ndef bbox2roi(bbox_list):\n \"\"\"Convert a list of bboxes to roi format.\n\n Args:\n bbox_list (list[Tensor]): a list of bboxes corresponding to a batch\n of images.\n\n Returns:\n Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]\n \"\"\"\n rois_list = []\n for img_id, bboxes in enumerate(bbox_list):\n if bboxes.size(0) > 0:\n img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)\n rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)\n else:\n rois = bboxes.new_zeros((0, 5))\n rois_list.append(rois)\n rois = torch.cat(rois_list, 0)\n return rois\n\n\ndef roi2bbox(rois):\n bbox_list = []\n img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)\n for img_id in img_ids:\n inds = (rois[:, 0] == img_id.item())\n bbox = rois[inds, 1:]\n bbox_list.append(bbox)\n return bbox_list\n\n\ndef bbox2result(bboxes, labels, num_classes):\n \"\"\"Convert detection results to a list of numpy arrays.\n\n Args:\n bboxes (Tensor): shape (n, 5)\n labels (Tensor): shape (n, )\n num_classes (int): class number, including background class\n\n Returns:\n list(ndarray): bbox results of each class\n \"\"\"\n if bboxes.shape[0] == 0:\n return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]\n else:\n bboxes = bboxes.cpu().numpy()\n labels = labels.cpu().numpy()\n return [bboxes[labels == i, :] for i in range(num_classes)]\n\n\ndef distance2bbox(points, distance, max_shape=None):\n \"\"\"Decode distance prediction to bounding box.\n\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n distance (Tensor): Distance from the given point to 4\n boundaries (left, top, right, bottom).\n max_shape (tuple): Shape of the image.\n\n Returns:\n Tensor: Decoded bboxes.\n \"\"\"\n x1 = points[:, 0] - distance[:, 0]\n y1 = points[:, 1] - distance[:, 1]\n x2 = points[:, 0] + distance[:, 2]\n y2 = points[:, 1] + distance[:, 3]\n if max_shape is not None:\n x1 = x1.clamp(min=0, max=max_shape[1])\n y1 = y1.clamp(min=0, max=max_shape[0])\n x2 = x2.clamp(min=0, max=max_shape[1])\n y2 = y2.clamp(min=0, max=max_shape[0])\n return torch.stack([x1, y1, x2, y2], -1)\n"
]
| [
[
"torch.cat",
"torch.stack",
"numpy.log",
"numpy.zeros",
"torch.log",
"torch.addcmul"
]
]
|
kaist-dmlab/Ada-Boundary | [
"9514a2a005aaf79db7eac84c55cbcefbb72a4011"
]
| [
"src/reader/batch_patcher.py"
]
| [
"import numpy as np\nimport time, os, math, operator, statistics, sys\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom random import Random\n\nclass Sample(object):\n def __init__(self, id, image, label):\n self.id = id\n self.image = image\n self.label = label\n\nclass MiniBatch(object):\n def __init__(self):\n self.ids = []\n self.images = []\n self.labels = []\n\n def append(self, id, image, label):\n self.ids.append(id)\n self.images.append(image)\n self.labels.append(label)\n\n def get_size(self):\n return len(self.ids)\n\nclass Quantizer(object):\n def __init__(self, size_of_data, min, max):\n\n # size of data must be even number\n self.size_of_data = size_of_data\n self.half_size_of_data = int(size_of_data/2)\n self.max = max\n self.min = min\n self.step_size = self.max / float(size_of_data)\n self.doubled_step_size = 2.0*self.step_size\n self.quantization_indexes = {}\n\n def quantizer_func_for_boudnary(self, distance):\n if distance >= 0:\n # Positive sample\n index = int(math.ceil(distance / self.step_size))\n else:\n # Negative sample\n index = -int(math.floor(distance / self.step_size))\n return index\n\n def quantizer_func_for_easy(self, distance):\n if distance >= 0:\n # Positive sample\n index = -int(math.ceil(distance / self.doubled_step_size)) + self.half_size_of_data + 1\n else:\n # Negative sample\n index = -int(math.floor(distance / self.doubled_step_size)) + self.half_size_of_data\n return index\n\n def quantizer_func_for_hard(self, distance):\n if distance >= 0:\n # Positive sample\n index = int(math.ceil(distance / self.doubled_step_size)) + self.half_size_of_data\n else:\n # Negative sample\n index = int(math.floor(distance / self.doubled_step_size)) + self.half_size_of_data + 1\n return index\n\n# For computing emphirical distribution F(x), we adopt binning approach based on bins\nclass Binning(object):\n def __init__(self, size_of_data, min, max, num_of_bins):\n self.size_of_data = size_of_data\n self.num_of_bins = num_of_bins\n self.max = max\n self.min = min\n self.step_size = (self.max-self.min) / float(self.num_of_bins)\n self.bins = {}\n\n if num_of_bins % 2 != 0:\n print(\"num_of_bins must be even value.\")\n self.half_num_bins = int(self.num_of_bins/2)\n\n # For only Ada-Uniform method\n # Inverted bin_id index for fast asynch update\n self.inverted_index = np.zeros(self.size_of_data, dtype=float)\n\n # Collect possible bin ids\n self.bin_ids = []\n for i in range(1, self.half_num_bins+1):\n self.bins[i] = []\n self.bin_ids.append(i)\n self.bins[-i] = []\n self.bin_ids.append(-i)\n\n # Random bin initialization\n rand_bin_ids = np.random.choice(self.bin_ids, self.size_of_data)\n for i in range(len(rand_bin_ids)):\n self.bins[rand_bin_ids[i]].append(i)\n self.inverted_index[i] = rand_bin_ids[i]\n\n def get_bin_id(self, distance):\n if distance >= 0:\n # Positive sample\n bin_id = int(math.ceil(distance / self.step_size))\n else:\n # Negative sample\n bin_id = int(math.floor(distance / self.step_size))\n\n if bin_id > self.half_num_bins:\n bin_id = self.half_num_bins\n elif bin_id < -self.half_num_bins:\n bin_id = -self.half_num_bins\n\n return bin_id\n\n def asynch_update_bins(self, ids, distances):\n # Update only partial information\n for i in range(len(ids)):\n prev_bin_id = self.inverted_index[ids[i]]\n # Remove previous info\n self.bins[prev_bin_id].remove(ids[i])\n # Update current info\n cur_bin_id = self.get_bin_id(distances[i])\n self.bins[cur_bin_id].append(ids[i])\n self.inverted_index[ids[i]] = cur_bin_id\n\n\n# update_method in 
[random, boundary, hard, easy, distance]\nclass ProbTable(object):\n def __init__(self, size_of_data, num_of_classes, s_e, update_method):\n self.size_of_data = size_of_data\n self.num_of_classes = num_of_classes\n self.s_e = s_e\n self.update_method = update_method\n self.max_distance = np.sqrt((float(num_of_classes) - 1.0) / (float(num_of_classes) * float(num_of_classes)))\n self.min_distance = - self.max_distance\n self.fixed_term = math.exp(math.log(s_e) / size_of_data)\n self.table = np.ones(self.size_of_data, dtype=float)\n\n self.quantizer = Quantizer(self.size_of_data, self.min_distance, self.max_distance)\n if update_method == \"boundary\":\n self.quantizer_func = self.quantizer.quantizer_func_for_boudnary\n elif update_method == \"easy\":\n self.quantizer_func = self.quantizer.quantizer_func_for_easy\n elif update_method == \"hard\":\n self.quantizer_func = self.quantizer.quantizer_func_for_hard\n elif update_method == \"uniform\":\n self.binning = Binning(self.size_of_data, self.min_distance, self.max_distance, num_of_bins=40)\n\n # For Ada-Boundary/Easy/Hard\n # Initialize table : Set all importance to max importance, then all samples are chosen properly at 1 iteration\n for i in range(self.size_of_data):\n self.table[i] = math.pow(self.fixed_term, 1)\n\n ################################ Update Method ####################################\n\n # For Ada-Boundary/Easy/Hard methods\n def get_sampling_probability(self, quantization_index):\n return 1.0 / math.pow(self.fixed_term, quantization_index)\n\n def async_update_prob_table(self, ids, distances):\n for i in range(len(ids)):\n self.table[ids[i]] = self.get_sampling_probability(self.quantizer_func(distances[i]))\n\n # For Ada-Uniform: in this cast, a few bin changes give global effect of importance table\n def bulk_update_prob_table(self, ids, distances):\n # Update bins at first\n self.binning.asynch_update_bins(ids, distances)\n\n # Calculate emphirical distibution & use it's inverse the value as the sample importance\n for value in self.binning.bins.values():\n if len(value) == 0:\n importance = 0.0\n else:\n # Assign F^{-1}(x)\n importance = float(self.size_of_data) / float(len(value))\n for id in value:\n self.table[id] = importance\n ###################################################################################\n\n\n# Batch Patcher\nclass BatchPatcher(object):\n def __init__(self, size_of_data, batch_size, num_of_classes, s_e=100.0, update_method=\"random\"):\n # meta info\n self.size_of_data = size_of_data\n self.batch_size = batch_size\n self.num_of_classes = num_of_classes\n self.update_method = update_method\n self.num_iters_per_epoch = int(math.ceil(float(size_of_data) / float(batch_size)))\n\n # importance table\n self.prob_table = ProbTable(size_of_data, num_of_classes, s_e, self.update_method)\n\n # For in-memory mini-batch generation\n self.loaded_data = []\n\n # Replacement in mini-batch for random batch selection\n self.replacement = True\n\n def bulk_load_in_memory(self, sess, ids, images, labels):\n # initialization\n self.loaded_data = []\n for i in range(self.size_of_data):\n self.loaded_data.append(None)\n\n # load data set in memory\n set_test = set()\n\n # while len(self.loaded_data) < self.size_of_data:\n for i in range(self.num_iters_per_epoch * 2):\n mini_ids, mini_images, mini_labels = sess.run([ids, images, labels])\n\n for j in range(self.batch_size):\n id = bytes_to_int(mini_ids[j])\n if not id in set_test:\n self.loaded_data[id] = Sample(bytes_to_int(mini_ids[j]), mini_images[j], 
bytes_to_int(mini_labels[j]))\n set_test.add(bytes_to_int(mini_ids[j]))\n\n print(\"# of disjoint samples: \", len(self.loaded_data))\n\n def update_prob_table(self, ids, distances):\n if self.update_method == \"uniform\":\n self.prob_table.bulk_update_prob_table(ids, distances)\n else:\n self.prob_table.async_update_prob_table(ids, distances)\n\n def get_next_mini_batch(self, num_of_sample, is_warm_up=True, p_table = None):\n\n if self.update_method == \"random\" or is_warm_up:\n selected_sample_ids = np.random.choice(self.size_of_data, num_of_sample, self.replacement)\n else:\n if p_table is None:\n total_sum = np.sum(self.prob_table.table)\n p_table = self.prob_table.table / total_sum\n selected_sample_ids = np.random.choice(self.size_of_data, num_of_sample, self.replacement, p=p_table)\n\n # Fetch mini-batch samples from loaded_data (in memory)\n mini_batch = MiniBatch()\n for id in selected_sample_ids:\n sample = self.loaded_data[id]\n mini_batch.append(sample.id, sample.image, sample.label)\n\n return mini_batch.ids, mini_batch.images, mini_batch.labels\n\n def get_init_mini_batch(self, init_id):\n # init_id from 0~self.num_iters_per_epoch\n selected_sample_ids = list(range(init_id*self.batch_size, init_id*self.batch_size+self.batch_size))\n\n # Fetch mini-batch samples from loaded_data (in memory)\n mini_batch = MiniBatch()\n for id in selected_sample_ids:\n if id >= self.size_of_data:\n sample = self.loaded_data[0]\n mini_batch.append(sample.id, sample.image, sample.label)\n else:\n sample = self.loaded_data[id]\n mini_batch.append(sample.id, sample.image, sample.label)\n\n return mini_batch.ids, mini_batch.images, mini_batch.labels\n\n def get_normalized_table(self):\n total_sum = np.sum(self.prob_table.table)\n return self.prob_table.table / total_sum\n\ndef bytes_to_int(bytes_array):\n result = 0\n for b in bytes_array:\n result = result * 256 + int(b)\n return result\n"
]
| [
[
"numpy.random.choice",
"numpy.zeros",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.sum",
"numpy.ones"
]
]
|
ThisIsIsaac/high-res-stereo | [
"55341fd4b3205162a01ad6aebff0086e49e1a909"
]
| [
"unlabeled_util/argoverse_make_pseudo_gt.py"
]
| [
"import argparse\nimport cv2\nfrom models import hsm\nimport numpy as np\nimport os\nimport pdb\nimport skimage.io\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport time\nfrom models.submodule import *\nfrom utils.eval import mkdir_p, save_pfm\nfrom utils.preprocess import get_transform\nfrom dataloader import KITTIloader2015 as lk15\nimport subprocess\nfrom datetime import datetime\n# cudnn.benchmark = True\ncudnn.benchmark = False\nimport math\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='HSM')\n parser.add_argument(\"--name\", required=True)\n parser.add_argument('--datapath', default='./data-mbtest/',\n help='test data path')\n parser.add_argument('--loadmodel', default=None,\n help='model path')\n parser.add_argument('--clean', type=float, default=-1,\n help='clean up output using entropy estimation')\n parser.add_argument('--testres', type=float, default=1.8, # Too low for images. Sometimes turn it to 2 ~ 3\n # for ETH3D we need to use different resolution\n # 1 - nothibg, 0,5 halves the image, 2 doubles the size of the iamge. We need to\n # middleburry 1 (3000, 3000)\n # ETH (3~4) since (1000, 1000)\n help='test time resolution ratio 0-x')\n parser.add_argument('--max_disp', type=int, default=2056,\n help='maximum disparity to search for')\n parser.add_argument('--level', type=int, default=1,\n help='output level of output, default is level 1 (stage 3),\\\n can also use level 2 (stage 2) or level 3 (stage 1)')\n args = parser.parse_args()\n args.max_disp = int(args.max_disp) # max_disp = 2056 * testres\n args.max_disp = 16 * math.floor(args.max_disp/16)\n\n args.name = args.name + \"_\"+ datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n # dataloader\n from dataloader import listfiles as DA\n\n # test_left_img, test_right_img, _, _ = DA.dataloader(args.datapath)\n # print(\"total test images: \" + str(len(test_left_img)))\n # print(\"output path: \" + args.outdir)\n\n # construct model\n model = hsm(args.max_disp, args.clean, level=args.level)\n model = nn.DataParallel(model, device_ids=[0])\n model.cuda()\n\n if args.loadmodel is not None:\n pretrained_dict = torch.load(args.loadmodel)\n pretrained_dict['state_dict'] = {k: v for k, v in pretrained_dict['state_dict'].items() if 'disp' not in k}\n model.load_state_dict(pretrained_dict['state_dict'], strict=False)\n else:\n print('run with random init')\n print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))\n\n # dry run\n multip = 48\n imgL = np.zeros((1, 3, 24 * multip, 32 * multip))\n imgR = np.zeros((1, 3, 24 * multip, 32 * multip))\n imgL = Variable(torch.FloatTensor(imgL).cuda())\n imgR = Variable(torch.FloatTensor(imgR).cuda())\n with torch.no_grad():\n model.eval()\n pred_disp, entropy = model(imgL, imgR)\n\n left_img_dir = os.path.join(args.datapath, \"stereo_front_left\")\n right_img_dir = os.path.join(args.datapath, \"stereo_front_right\")\n\n left_img_path_list = os.listdir(left_img_dir)\n left_img_path_list.sort()\n right_img_path_list = os.listdir(right_img_dir)\n right_img_path_list.sort()\n\n processed = get_transform()\n model.eval()\n\n # save predictions\n out_path = os.path.join(args.datapath, args.name)\n if not os.path.exists(out_path):\n os.mkdir(out_path)\n\n disp_path = os.path.join(out_path, \"disp\")\n entp_path = os.path.join(out_path, \"entropy\")\n\n if not os.path.exists(disp_path):\n os.mkdir(disp_path)\n\n if not os.path.exists(entp_path):\n os.mkdir(entp_path)\n \n\n 
\n for (left_img_name, right_img_name) in zip(left_img_path_list, right_img_path_list):\n\n left_img_path = os.path.join(left_img_dir, left_img_name)\n right_img_path = os.path.join(right_img_dir, right_img_name)\n\n print(left_img_path)\n\n imgL_o = (skimage.io.imread(left_img_path).astype('float32'))[:, :, :3]\n imgR_o = (skimage.io.imread(right_img_path).astype('float32'))[:, :, :3]\n imgsize = imgL_o.shape[:2]\n\n max_disp = int(args.max_disp)\n\n ## change max disp\n tmpdisp = int(max_disp * args.testres // 64 * 64)\n if (max_disp * args.testres / 64 * 64) > tmpdisp:\n model.module.maxdisp = tmpdisp + 64\n else:\n model.module.maxdisp = tmpdisp\n if model.module.maxdisp == 64: model.module.maxdisp = 128\n model.module.disp_reg8 = disparityregression(model.module.maxdisp, 16).cuda()\n model.module.disp_reg16 = disparityregression(model.module.maxdisp, 16).cuda()\n model.module.disp_reg32 = disparityregression(model.module.maxdisp, 32).cuda()\n model.module.disp_reg64 = disparityregression(model.module.maxdisp, 64).cuda()\n\n # resize\n imgL_o = cv2.resize(imgL_o, None, fx=args.testres, fy=args.testres, interpolation=cv2.INTER_CUBIC)\n imgR_o = cv2.resize(imgR_o, None, fx=args.testres, fy=args.testres, interpolation=cv2.INTER_CUBIC)\n\n imgL = processed(imgL_o).numpy()\n imgR = processed(imgR_o).numpy()\n\n imgL = np.reshape(imgL, [1, 3, imgL.shape[1], imgL.shape[2]])\n imgR = np.reshape(imgR, [1, 3, imgR.shape[1], imgR.shape[2]])\n\n ##fast pad\n max_h = int(imgL.shape[2] // 64 * 64)\n max_w = int(imgL.shape[3] // 64 * 64)\n if max_h < imgL.shape[2]: max_h += 64\n if max_w < imgL.shape[3]: max_w += 64\n\n top_pad = max_h - imgL.shape[2]\n left_pad = max_w - imgL.shape[3]\n imgL = np.lib.pad(imgL, ((0, 0), (0, 0), (top_pad, 0), (0, left_pad)), mode='constant', constant_values=0)\n imgR = np.lib.pad(imgR, ((0, 0), (0, 0), (top_pad, 0), (0, left_pad)), mode='constant', constant_values=0)\n\n # test\n imgL = torch.FloatTensor(imgL)\n imgR = torch.FloatTensor(imgR)\n\n imgL = imgL.cuda()\n imgR = imgR.cuda()\n\n with torch.no_grad():\n torch.cuda.synchronize()\n\n pred_disp, entropy = model(imgL, imgR)\n torch.cuda.synchronize()\n\n pred_disp = torch.squeeze(pred_disp).data.cpu().numpy()\n\n top_pad = max_h - imgL_o.shape[0]\n left_pad = max_w - imgL_o.shape[1]\n entropy = entropy[top_pad:, :pred_disp.shape[1] - left_pad].cpu().numpy()\n pred_disp = pred_disp[top_pad:, :pred_disp.shape[1] - left_pad]\n\n # resize to highres\n pred_disp = cv2.resize(pred_disp / args.testres, (imgsize[1], imgsize[0]), interpolation=cv2.INTER_LINEAR)\n\n # clip while keep inf\n invalid = np.logical_or(pred_disp == np.inf, pred_disp != pred_disp)\n pred_disp[invalid] = np.inf\n\n out_file_name = left_img_path[len(left_img_path) - len(\"315970554564438888.jpg\"): len(left_img_path) - len(\"jpg\")]\n out_file_name = os.path.join(out_file_name + \"png\", )\n\n pred_disp_png = (pred_disp * 256).astype('uint16')\n cv2.imwrite(os.path.join(disp_path, out_file_name), pred_disp_png)\n entropy_png = (entropy* 256).astype('uint16')\n\n cv2.imwrite(os.path.join(entp_path, out_file_name), entropy_png)\n\n torch.cuda.empty_cache()\n\nif __name__ == '__main__':\n main()\n\n"
]
| [
[
"numpy.logical_or",
"numpy.reshape",
"numpy.zeros",
"torch.cuda.synchronize",
"numpy.lib.pad",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.empty_cache",
"torch.squeeze",
"torch.load",
"torch.nn.DataParallel"
]
]
|
LitianD/ObjDetect | [
"849f63467ce9e25c8ba0c24ca7bfdea9d836b0dd"
]
| [
"gif.py"
]
| [
"from PIL import Image\nimport image2gif\nimport numpy as np\nimport os\n\noutfilename = \"D:\\PyCharmProject\\objDetect\\keras-yolo3\\gif\\\\1\\\\1.gif\" # 转化的GIF图片名称\nl = os.listdir(\"D:\\PyCharmProject\\objDetect\\keras-yolo3\\gif\\\\1\")\nframes = []\nfor image_name in l: # 索引各自目录\n im = Image.open(\"D:\\PyCharmProject\\objDetect\\keras-yolo3\\gif\\\\1\\\\\"+image_name) # 将图片打开,本文图片读取的结果是RGBA格式,如果直接读取的RGB则不需要下面那一步\n im = im.convert(\"RGB\") # 通过convert将RGBA格式转化为RGB格式,以便后续处理\n im = np.array(im) # im还不是数组格式,通过此方法将im转化为数组\n frames.append(im) # 批量化\nimage2gif.writeGif(outfilename, frames, duration=0.1, subRectangles=False)"
]
| [
[
"numpy.array"
]
]
|
Saravji/pmdarima | [
"7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a"
]
| [
"examples/arima/example_persisting_a_model.py"
]
| [
"\"\"\"\n=========================\nPersisting an ARIMA model\n=========================\n\n\nThis example demonstrates how we can persist an ARIMA model to disk after\nfitting it. It can then be loaded back up and used to generate forecasts.\n\n.. raw:: html\n\n <br/>\n\"\"\"\nprint(__doc__)\n\n# Author: Taylor Smith <[email protected]>\n\nimport pmdarima as pm\nfrom sklearn.externals import joblib # for persistence\nimport os\n\n# #############################################################################\n# Load the data and split it into separate pieces\ny = pm.datasets.load_wineind()\ntrain, test = y[:125], y[125:]\n\n# Fit an ARIMA\narima = pm.ARIMA(order=(1, 1, 2), seasonal_order=(0, 1, 1, 12))\narima.fit(y)\n\n# #############################################################################\n# Persist a model and create predictions after re-loading it\npickle_tgt = \"arima.pkl\"\ntry:\n # Pickle it\n joblib.dump(arima, pickle_tgt, compress=3)\n\n # Load the model up, create predictions\n arima_loaded = joblib.load(pickle_tgt)\n preds = arima_loaded.predict(n_periods=test.shape[0])\n print(\"Predictions: %r\" % preds)\n\nfinally:\n # Remove the pickle file at the end of this example\n try:\n os.unlink(pickle_tgt)\n except OSError:\n pass\n"
]
| [
[
"sklearn.externals.joblib.load",
"sklearn.externals.joblib.dump"
]
]
|
facebookresearch/uimnet | [
"d7544cf5fb4c65cb262dca203afb0db4ba6c569d"
]
| [
"uimnet/algorithms/mixup.py"
]
| [
"#!/usr/bin/env python3\n#\n# # Copyright (c) 2021 Facebook, inc. and its affiliates. All Rights Reserved\n#\n#\nimport torch\nfrom uimnet import utils\nfrom uimnet.algorithms.erm import ERM\nimport numpy as np\n\n\nclass Mixup(ERM):\n HPARAMS = dict(ERM.HPARAMS)\n HPARAMS.update({\n \"alpha\": (0.3, lambda: float(np.random.choice([0.1, 0.2, 0.3, 1, 2])))\n })\n\n def __init__(\n self,\n num_classes,\n arch,\n device=\"cuda\",\n seed=0,\n use_mixed_precision=False, sn=False, sn_coef=1, sn_bn=False):\n super(Mixup, self).__init__(\n num_classes,\n arch,\n device,\n seed,\n use_mixed_precision=use_mixed_precision, sn=sn, sn_coef=sn_coef, sn_bn=sn_bn)\n\n self.loss = utils.SoftCrossEntropyLoss()\n self.alpha = self.hparams[\"alpha\"]\n self.beta_distr = torch.distributions.Beta(self.alpha, self.alpha)\n self.to(self.device)\n\n self.register_buffer('reference_x', torch.zeros(num_classes, 3, 224, 224))\n self.register_buffer('reference_y', torch.arange(num_classes).long())\n self.reference_done = set()\n\n self.has_native_measure = True\n\n def mixup_(self, x, y):\n perm = torch.randperm(len(x)).to(x.device)\n return self.mixup_pair_(x, x[perm], y, y[perm])\n\n def mixup_pair_(self, x1, x2, y1, y2):\n lamb = self.beta_distr.sample().item()\n mix_x = lamb * x1 + (1 - lamb) * x2\n mix_y = lamb * y1 + (1 - lamb) * y2\n return mix_x, mix_y\n\n def process_minibatch(self, x, y):\n if self.reference_x.is_cuda:\n self.reference_x = self.reference_x.cpu()\n self.reference_y = self.reference_y.cpu()\n\n t = torch.nn.functional.one_hot(y, self.num_classes).float()\n\n if len(self.reference_done) < self.num_classes:\n y_long = y.long().cpu()\n self.reference_x[y_long] = x.cpu()\n self.reference_done |= set(y_long.tolist())\n\n mix_x, mix_y = self.mixup_(x, t)\n mix_x = mix_x.to(self.device, non_blocking=True)\n mix_y = mix_y.to(self.device, non_blocking=True)\n\n return mix_x, mix_y\n\n def uncertainty(self, x, iterations=5):\n x = x.to(self.device)\n t = self.forward(x).softmax(1)\n\n scores = 0\n for iteration in range(iterations):\n ref_p = torch.randperm(len(x))\n ref_x = self.reference_x[ref_p].to(self.device)\n ref_t = torch.nn.functional.one_hot(\n self.reference_y[ref_p].to(self.device), self.num_classes).to(\n self.device)\n\n mix_x, mix_t = self.mixup_pair_(x, ref_x, t, ref_t)\n mix_t_hat = self.forward(mix_x).softmax(1)\n scores += (mix_t_hat - mix_t).norm(2, 1).pow(2)\n\n return scores / iterations\n\n\nif __name__ == '__main__':\n pass\n"
]
| [
[
"torch.zeros",
"torch.nn.functional.one_hot",
"numpy.random.choice",
"torch.arange",
"torch.distributions.Beta"
]
]
|
lmsac/GproDIA | [
"3fc1cdee535c9743806b7be423aba29daca24406"
]
| [
"src/filter_assays.py"
]
| [
"import argparse\n\nparser = argparse.ArgumentParser(\n description='Filter assays.'\n)\nparser.add_argument(\n '--in', nargs='+',\n help='input assay files'\n)\nparser.add_argument(\n '--out',\n help='output assay file'\n)\n\nparser.add_argument(\n '--swath_windows',\n help='SWATH isolation window file'\n)\n\nassay_filter_group = parser.add_argument_group('assay filters') \nassay_filter_group.add_argument(\n '--min_precursor_mz', type=float,\n help='lower m/z limit of precursor ions'\n) \nassay_filter_group.add_argument(\n '--max_precursor_mz', type=float,\n help='upper m/z limit of precursor ions'\n) \nassay_filter_group.add_argument(\n '--min_fragment_number', default=6, type=int,\n help='remove assays with < N fragments (default: %(default)s)'\n)\nassay_filter_group.add_argument(\n '--min_peptide_fragment_number', default=3, type=int,\n help='remove assays with < N peptide fragments (default: %(default)s)'\n)\nassay_filter_group.add_argument(\n '--min_glycan_fragment_number', default=3, type=int,\n help='remove assays with < N glycan fragments (default: %(default)s)'\n)\n\ndef add_fragment_filter_args(parser):\n for quantify_group in ['main', 'quantify']:\n if quantify_group == 'quantify':\n arg_quantify_group = 'quantify_'\n help_quantify_group = 'quantifying '\n else:\n arg_quantify_group = ''\n help_quantify_group = ''\n \n for prior_group in ['main', 'prior_peptide', 'prior_glycan']:\n if prior_group == 'prior_peptide':\n arg_group = arg_quantify_group + 'prior_peptide_'\n help_group = help_quantify_group + 'prior peptide '\n elif prior_group == 'prior_glycan':\n arg_group = arg_quantify_group + 'prior_glycan_'\n help_group = help_quantify_group + 'prior glycan '\n else:\n arg_group = arg_quantify_group\n help_group = help_quantify_group\n \n frag_filter_group = parser.add_argument_group(help_group + 'fragment filters') \n if prior_group == 'prior_peptide' or prior_group == 'prior_glycan':\n if quantify_group == 'quantify':\n default = 6\n else:\n default = 10\n frag_filter_group.add_argument(\n '--%sfragment_number' % arg_group, default=default, type=int,\n help='try to select top N %sfragments' % help_group + ' (default: %(default)s)'\n )\n else:\n if quantify_group == 'quantify':\n default = 12\n else:\n default = 20\n frag_filter_group.add_argument(\n '--%smax_fragment_number' % arg_group, default=default, type=int,\n help='maximal number of fragments (default: %(default)s)'\n )\n frag_filter_group.add_argument(\n '--%sfragment_type' % arg_group, type=str, nargs='+',\n help='list of %sfragment types' % help_group\n )\n if prior_group == 'main' or prior_group == 'prior_peptide':\n frag_filter_group.add_argument(\n '--%smin_fragment_amino_acid_number' % arg_group, type=int,\n help='lower limit of amino acid number of %sfragment ions' % help_group\n )\n frag_filter_group.add_argument(\n '--%sfragment_charge' % arg_group, type=int, nargs='+',\n help='list of allowed charge states of %sfragment ions' % help_group\n )\n frag_filter_group.add_argument(\n '--%sfragment_loss_type' % arg_group, type=str, nargs='+',\n help='list of neutral loss types of %sfragment ions' % help_group\n ) \n frag_filter_group.add_argument(\n '--%smin_fragment_mz' % arg_group, type=float,\n help='lower m/z limit of %sfragment ions' % help_group\n )\n frag_filter_group.add_argument(\n '--%smax_fragment_mz' % arg_group, type=float,\n help='upper m/z limit of %sfragment ions' % help_group\n ) \n if prior_group == 'main' or prior_group == 'prior_glycan': \n frag_filter_group.add_argument(\n 
'--%smin_fragment_monosaccharide_number' % arg_group, type=eval, default=1,\n help='lower limit of monosaccharide number of %sfragment ions' % help_group + ' (default: %(default)s)'\n )\n if prior_group == 'main': \n frag_filter_group.add_argument(\n '--%smin_relative_fragment_intensity' % arg_group, type=float,\n help='lower relative intensity limit of %sfragment ions' % help_group \n )\n\nadd_fragment_filter_args(parser)\n\nargs = parser.parse_args()\nassay_files = getattr(args, 'in')\nout_file = args.out\nswath_window_file = args.swath_windows\n\nfilter_args = vars(args)\nfilter_args.pop('in')\nfilter_args.pop('out')\nfilter_args.pop('swath_windows')\n\ndef arrange_filter_args(filter_args):\n main_args = {\n 'prior_peptide_fragment_criteria': {},\n 'prior_glycan_fragment_criteria': {},\n 'quantifying_transition_criteria': {\n 'prior_peptide_fragment_criteria': {},\n 'prior_glycan_fragment_criteria': {}\n }\n }\n for k, v in filter_args.items(): \n if k.startswith('quantify_'):\n target = main_args['quantifying_transition_criteria']\n k = k[len('quantify_'):]\n else:\n target = main_args\n if k.startswith('prior_peptide_') and k != 'prior_peptide_fragment_number': \n k = k[len('prior_peptide_'):]\n target = target['prior_peptide_fragment_criteria']\n elif k.startswith('prior_glycan_') and k != 'prior_glycan_fragment_number':\n k = k[len('prior_glycan_'):]\n target = target['prior_glycan_fragment_criteria']\n \n target[k] = v \n \n main_args['min_peptide_fragment_criteria'] = main_args['prior_peptide_fragment_criteria']\n main_args['min_glycan_fragment_criteria'] = main_args['prior_glycan_fragment_criteria']\n return main_args\n \nfilter_criteria = arrange_filter_args(filter_args) \n \n# %%\nimport logging\n\nlogging.basicConfig(\n level=logging.INFO, \n format='%(asctime)s %(filename)s: [%(levelname)s] %(message)s'\n)\n\n# %%\nfrom util import list_files\n\nif globals().get('assay_files', None) is None:\n assay_files = list_files(\n path='.', \n pattern='\\\\.assay\\\\.pickle$'\n )\n \nif len(assay_files) == 0:\n raise ValueError('no assay files')\n \n# %%\nimport os\n\nif globals().get('out_file', None) is None:\n out_file = os.path.splitext(assay_files[0])[0]\n if out_file.endswith('.assay'):\n out_file = out_file[:-len('.assay')]\n if len(assay_files) > 1:\n out_file += '_' + str(len(assay_files))\n out_file += '_filtered.assay.pickle'\n\n# %%\nfrom util import save_pickle, load_pickle\nfrom assay import GlycoAssayBuilder\nimport pandas as pd\n\n# %%\nassays = []\nfor assay_file in assay_files:\n logging.info('loading assays: ' + assay_file) \n \n assay_data = load_pickle(assay_file)\n assays.extend(assay_data)\n \n logging.info('assays loaded: {0}, {1} spectra' \\\n .format(assay_file, len(assay_data)))\n\nlogging.info('assays loaded: {0} spectra totally' \\\n .format(len(assays))) \n\n# %% \nif swath_window_file is not None:\n logging.info('loading SWATH windows: ' + swath_window_file) \n \n swath_windows = pd.read_csv(swath_window_file, sep='\\t')\n \n logging.info('SWATH windows loaded: {0} windows' \\\n .format(len(swath_windows)))\nelse:\n swath_windows = None\n \n# %%\nlogging.info(\n 'filtering assays using the following parameters: \\n' + \\\n '\\n'.join((\n k + '=' + str(v) \n for k, v in filter_args.items()\n if v is not None\n ))\n)\n\nassay_builder = GlycoAssayBuilder()\n\nassays = assay_builder.filter_assays(\n assays,\n swath_windows=swath_windows,\n **filter_criteria\n)\n\nlogging.info('assays filtered: {0} spectra remaining' \\\n .format(len(assays)))\n\n# 
%%\nlogging.info('saving assays: {0}' \\\n .format(out_file))\n\nsave_pickle(assays, out_file)\n \nlogging.info('assays saved: {0}, {1} spectra' \\\n .format(out_file, len(assays)))\n\n"
]
| [
[
"pandas.read_csv"
]
]
|
idchlife/tf2-mobile-pose-estimation | [
"a1f1f52eecbb841fa878bff4d3c311b79864835d"
]
| [
"models/simplepose_coco.py"
]
| [
"\"\"\"\n SimplePose for COCO Keypoint, implemented in TensorFlow.\n Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.\n\"\"\"\n\n__all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco',\n 'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco',\n 'simplepose_resneta152b_coco']\n\nimport os\nimport tensorflow as tf\nimport tensorflow.keras.layers as nn\nfrom .common import get_activation_layer, BatchNorm, conv1x1, HeatmapMaxDetBlock, is_channels_first\nfrom .resnet import resnet18, resnet50b, resnet101b, resnet152b\nfrom .resneta import resneta50b, resneta101b, resneta152b\n\n\nclass Deconv2d(nn.Layer):\n \"\"\"\n Standard deconvolution layer.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 0\n Padding value for convolution layer.\n out_padding : int or tuple/list of 2 int, default 0\n Output padding value for deconvolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n use_bias : bool, default True\n Whether the layer uses a bias vector.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides=1,\n padding=0,\n out_padding=0,\n dilation=1,\n groups=1,\n use_bias=True,\n data_format=\"channels_last\",\n **kwargs):\n super(Deconv2d, self).__init__(**kwargs)\n assert (dilation == 1)\n assert (groups == 1)\n assert (in_channels is not None)\n\n if isinstance(padding, int):\n padding = (padding, padding)\n\n self.use_crop = (padding[0] > 0) or (padding[1] > 0)\n if self.use_crop:\n self.crop = nn.Cropping2D(\n cropping=padding,\n data_format=data_format,\n name=\"crop\")\n\n self.conv = nn.Conv2DTranspose(\n filters=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"valid\",\n output_padding=out_padding,\n data_format=data_format,\n dilation_rate=dilation,\n use_bias=use_bias,\n name=\"conv\")\n\n def call(self, x):\n x = self.conv(x)\n if self.use_crop:\n x = self.crop(x)\n return x\n\n\nclass DeconvBlock(nn.Layer):\n \"\"\"\n Deconvolution block with batch normalization and activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the deconvolution.\n padding : int or tuple/list of 2 int\n Padding value for deconvolution layer.\n out_padding : int or tuple/list of 2 int, default 0\n Output padding value for deconvolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for deconvolution layer.\n groups : int, default 1\n Number of groups.\n use_bias : bool, default False\n Whether the layer uses a bias vector.\n use_bn : bool, default True\n Whether to use BatchNorm layer.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default 'relu'\n Activation function or name of activation function.\n data_format : str, default 'channels_last'\n The ordering 
of the dimensions in tensors.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n out_padding=0,\n dilation=1,\n groups=1,\n use_bias=False,\n use_bn=True,\n bn_eps=1e-5,\n activation=\"relu\",\n data_format=\"channels_last\",\n **kwargs):\n super(DeconvBlock, self).__init__(**kwargs)\n assert (in_channels is not None)\n self.activate = (activation is not None)\n self.use_bn = use_bn\n\n self.conv = Deconv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n out_padding=out_padding,\n dilation=dilation,\n groups=groups,\n use_bias=use_bias,\n data_format=data_format,\n name=\"conv\")\n if self.use_bn:\n self.bn = BatchNorm(\n epsilon=bn_eps,\n data_format=data_format,\n name=\"bn\")\n if self.activate:\n self.activ = get_activation_layer(activation)\n\n def call(self, x, training=None):\n x = self.conv(x)\n if self.use_bn:\n x = self.bn(x, training=training)\n if self.activate:\n x = self.activ(x)\n return x\n\n\nclass SimplePose(tf.keras.Model):\n \"\"\"\n SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n backbone : nn.Sequential\n Feature extractor.\n backbone_out_channels : int\n Number of output channels for the backbone.\n channels : list of int\n Number of output channels for each decoder unit.\n return_heatmap : bool, default False\n Whether to return only heatmap.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (256, 192)\n Spatial size of the expected input image.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n \"\"\"\n def __init__(self,\n backbone,\n backbone_out_channels,\n channels,\n return_heatmap=False,\n in_channels=3,\n in_size=(256, 192),\n keypoints=17,\n data_format=\"channels_last\",\n **kwargs):\n super(SimplePose, self).__init__(**kwargs)\n assert (in_channels == 3)\n self.in_size = in_size\n self.keypoints = keypoints\n self.return_heatmap = return_heatmap\n self.data_format = data_format\n\n self.backbone = backbone\n self.backbone._name = \"backbone\"\n\n self.decoder = tf.keras.Sequential(name=\"decoder\")\n in_channels = backbone_out_channels\n for i, out_channels in enumerate(channels):\n self.decoder.add(DeconvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=4,\n strides=2,\n padding=1,\n data_format=data_format,\n name=\"unit{}\".format(i + 1)))\n in_channels = out_channels\n self.decoder.add(conv1x1(\n in_channels=in_channels,\n out_channels=keypoints,\n use_bias=True,\n data_format=data_format,\n name=\"final_block\"))\n\n self.heatmap_max_det = HeatmapMaxDetBlock(\n data_format=data_format,\n name=\"heatmap_max_det\")\n\n def call(self, x, training=None):\n x = self.backbone(x, training=training)\n heatmap = self.decoder(x, training=training)\n if self.return_heatmap or not tf.executing_eagerly():\n return heatmap\n else:\n keypoints = self.heatmap_max_det(heatmap)\n return keypoints\n\n\ndef get_simplepose(backbone,\n backbone_out_channels,\n keypoints,\n model_name=None,\n data_format=\"channels_last\",\n pretrained=False,\n root=os.path.join(\"~\", \".tensorflow\", \"models\"),\n **kwargs):\n \"\"\"\n Create SimplePose model with specific parameters.\n\n Parameters:\n ----------\n backbone : nn.Sequential\n Feature extractor.\n backbone_out_channels : int\n Number of 
output channels for the backbone.\n keypoints : int\n Number of keypoints.\n model_name : str or None, default None\n Model name for loading pretrained model.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n channels = [256, 256, 256]\n\n net = SimplePose(\n backbone=backbone,\n backbone_out_channels=backbone_out_channels,\n channels=channels,\n keypoints=keypoints,\n data_format=data_format,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n in_channels = kwargs[\"in_channels\"] if (\"in_channels\" in kwargs) else 3\n input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == \"channels_first\" else\\\n (1,) + net.in_size + (in_channels,)\n net.build(input_shape=input_shape)\n net.load_weights(\n filepath=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root))\n\n return net\n\n\ndef simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and\n Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet18(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=512, keypoints=keypoints,\n model_name=\"simplepose_resnet18_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and\n Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet50b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resnet50b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and 
Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet101b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resnet101b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet152b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resnet152b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet(A)-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resneta50b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resneta50b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet(A)-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n 
root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resneta101b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resneta101b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet(A)-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resneta152b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resneta152b_coco\", data_format=data_format, **kwargs)\n\n\ndef _test():\n import numpy as np\n import tensorflow.keras.backend as K\n\n data_format = \"channels_last\"\n # data_format = \"channels_first\"\n in_size = (256, 192)\n keypoints = 17\n return_heatmap = False\n pretrained = False\n\n models = [\n simplepose_resnet18_coco,\n simplepose_resnet50b_coco,\n simplepose_resnet101b_coco,\n simplepose_resnet152b_coco,\n simplepose_resneta50b_coco,\n simplepose_resneta101b_coco,\n simplepose_resneta152b_coco,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format)\n\n batch = 14\n x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else\n (batch, in_size[0], in_size[1], 3))\n y = net(x)\n assert (y.shape[0] == batch)\n if return_heatmap:\n if is_channels_first(data_format):\n assert ((y.shape[1] == keypoints) and (y.shape[2] == x.shape[2] // 4) and\n (y.shape[3] == x.shape[3] // 4))\n else:\n assert ((y.shape[3] == keypoints) and (y.shape[1] == x.shape[1] // 4) and\n (y.shape[2] == x.shape[2] // 4))\n else:\n assert ((y.shape[1] == keypoints) and (y.shape[2] == 3))\n\n weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != simplepose_resnet18_coco or weight_count == 15376721)\n assert (model != simplepose_resnet50b_coco or weight_count == 33999697)\n assert (model != simplepose_resnet101b_coco or weight_count == 52991825)\n assert (model != simplepose_resnet152b_coco or weight_count == 68635473)\n assert (model != simplepose_resneta50b_coco or weight_count == 34018929)\n assert (model != simplepose_resneta101b_coco or weight_count == 53011057)\n assert (model != simplepose_resneta152b_coco or weight_count == 68654705)\n\n\nif __name__ == \"__main__\":\n _test()\n"
]
| [
[
"tensorflow.keras.Sequential",
"tensorflow.executing_eagerly",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.backend.get_value",
"tensorflow.keras.layers.Cropping2D"
]
]
|
samuelyu2002/PACS | [
"5010b2f0d20933b0647e3d6230d673e1830249ec"
]
| [
"experiments/CLIP/predict.py"
]
| [
"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom utils import load_model\nfrom torchvision import transforms\nfrom PIL import Image\nimport json\nimport clip\nfrom collections import defaultdict\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(description='Extracting frames and audio')\nparser.add_argument(\n '-model_path',\n dest='model_path',\n type=str,\n help='Model path'\n )\nparser.add_argument(\n '-save_dir',\n dest='save_dir',\n default=\"results/\",\n type=str,\n help='Directory containing PACS data'\n )\n\nparser.add_argument(\n '-split',\n dest='split',\n default=\"test\",\n type=str,\n help='which split to predict'\n )\n\nargs = parser.parse_args()\n\nMODEL_PATH = args.model_path\nPRE_LOAD = \"ViT-B-16.pt\"\nDATA_DIR = args.data_dir\nSPLIT = args.split\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = load_model(PRE_LOAD, device)\ncheckpoint = torch.load(MODEL_PATH)\nmodel.load_state_dict(checkpoint)\n\n\nimg_transform = transforms.Compose([\n transforms.Resize(256, interpolation=Image.BICUBIC),\n transforms.FiveCrop(224),\n transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n ])\n\nimg_transform2 = transforms.Compose([\n transforms.Resize(224, interpolation=Image.BICUBIC),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n ])\n\nsimilarities = defaultdict(dict)\n\n\ntest_data = json.load(open(f\"{DATA_DIR}/json/{SPLIT}.json\", 'r'))\n\nwith torch.no_grad():\n model.eval()\n correct = 0\n total = 0\n correct0 = 0\n total0 = 0\n correct1 = 0\n total1 = 0\n for pair in test_data:\n obj1, obj2 = pair.split(\"\\\\\\\\\")\n img1 = Image.open(f\"{DATA_DIR}/square_crop/{obj1}.png\")\n img2 = Image.open(f\"{DATA_DIR}/square_crop/{obj2}.png\")\n\n img1_1 = Image.open(f\"{DATA_DIR}/square_crop/{obj1}.png\")\n img2_1 = Image.open(f\"{DATA_DIR}/square_crop/{obj2}.png\")\n\n img1 = img_transform(img1)\n img1_2 = img_transform2(img1_1).reshape(1,3,224,224)\n\n img1 = torch.cat([img1, img1_2], dim=0).to(device)\n\n img2 = img_transform(img2)\n img2_2 = img_transform2(img2_1).reshape(1, 3, 224, 224)\n img2 = torch.cat([img2, img2_2], dim=0).to(device)\n\n for q in test_data[pair]:\n text = test_data[pair][q][\"text\"]\n tokens = clip.tokenize(text).to(device)\n\n imgf1, imgf2, textf1 = model(img1, img2, tokens)\n cs1 = nn.CosineSimilarity()(textf1, imgf1)\n cs2 = nn.CosineSimilarity()(textf1, imgf2)\n\n cs1 = cs1.detach().cpu().numpy()\n cs2 = cs2.detach().cpu().numpy()\n\n similarities[pair][q] = [cs1.tolist(), cs2.tolist()]\n\n cs1 = np.sum(cs1) + 2*cs1[-1]\n cs2 = np.sum(cs2) + 2*cs2[-1]\n\n if test_data[pair][q][\"label\"] == 0:\n if cs1 > cs2:\n correct0 += 1\n correct += 1\n total0 += 1\n \n elif test_data[pair][q][\"label\"] == 1:\n if cs1 < cs2:\n correct1 += 1\n correct += 1\n total1 += 1\n total += 1\n\nprint(correct, total, correct/total)\nprint(correct0, total0, correct0/total0)\nprint(correct1, total1, correct1/total1)\n \n\njson.dump(dict(similarities), open(os.path.join(args.save_dir, f\"results_{SPLIT}.json\"), 'w'))"
]
| [
[
"torch.cat",
"numpy.sum",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load",
"torch.nn.CosineSimilarity"
]
]
|
guanyuliu0818/ITU-Rpy | [
"524f30f73a6fed9be4416637d8d83ad7717f0a00"
]
| [
"itur/models/itu676.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport warnings\n\nimport numpy as np\nfrom astropy import units as u\n\nfrom itur import utils\nfrom itur.models.itu453 import radio_refractive_index\nfrom itur.models.itu835 import (standard_pressure, standard_temperature,\n standard_water_vapour_density)\nfrom itur.models.itu836 import total_water_vapour_content\nfrom itur.models.itu1510 import surface_mean_temperature\nfrom itur.models.itu1511 import topographic_altitude\nfrom itur.utils import (prepare_quantity, prepare_output_array,\n prepare_input_array, load_data, dataset_dir, memory)\n\n\ndef __gamma0_exact__(self, f, p, rho, T):\n # T in Kelvin\n # e : water vapour partial pressure in hPa (total barometric pressure\n # ptot = p + e)\n theta = 300 / T\n e = rho * T / 216.7\n\n f_ox = self.f_ox\n\n D_f_ox = self.a3 * 1e-4 * (p * (theta ** (0.8 - self.a4)) +\n 1.1 * e * theta)\n\n D_f_ox = np.sqrt(D_f_ox**2 + 2.25e-6)\n\n delta_ox = (self.a5 + self.a6 * theta) * 1e-4 * (p + e) * theta**0.8\n\n F_i_ox = f / f_ox * ((D_f_ox - delta_ox * (f_ox - f)) /\n ((f_ox - f) ** 2 + D_f_ox ** 2) +\n (D_f_ox - delta_ox * (f_ox + f)) /\n ((f_ox + f) ** 2 + D_f_ox ** 2))\n\n Si_ox = self.a1 * 1e-7 * p * theta**3 * np.exp(self.a2 * (1 - theta))\n\n N_pp_ox = Si_ox * F_i_ox\n\n d = 5.6e-4 * (p + e) * theta**0.8\n\n N_d_pp = f * p * theta**2 * \\\n (6.14e-5 / (d * (1 + (f / d)**2)) +\n 1.4e-12 * p * theta**1.5 / (1 + 1.9e-5 * f**1.5))\n\n N_pp = N_pp_ox.sum() + N_d_pp\n\n gamma = 0.1820 * f * N_pp # Eq. 1 [dB/km]\n return gamma\n\n\ndef __gammaw_exact__(self, f, p, rho, T):\n # T in Kelvin\n # e : water vapour partial pressure in hPa (total barometric pressure\n # ptot = p + e)\n theta = 300 / T\n e = rho * T / 216.7\n\n f_wv = self.f_wv\n\n D_f_wv = self.b3 * 1e-4 * (p * theta ** self.b4 +\n self.b5 * e * theta ** self.b6)\n\n D_f_wv = 0.535 * D_f_wv + \\\n np.sqrt(0.217 * D_f_wv**2 + 2.1316e-12 * f_wv**2 / theta)\n\n F_i_wv = f / f_wv * ((D_f_wv) / ((f_wv - f)**2 + D_f_wv**2) +\n (D_f_wv) / ((f_wv + f)**2 + D_f_wv**2))\n\n Si_wv = self.b1 * 1e-1 * e * theta**3.5 * np.exp(self.b2 * (1 - theta))\n\n N_pp_wv = Si_wv * F_i_wv\n\n N_pp = N_pp_wv.sum()\n\n gamma = 0.1820 * f * N_pp # Eq. 
1 [dB/km]\n return gamma\n\n\nclass __ITU676__():\n \"\"\"Attenuation by atmospheric gases.\n\n Available versions include:\n * P.676-9 (02/12) (Superseded)\n * P.676-10 (09/13) (Superseded)\n * P.676-11 (09/16) (Superseded)\n * P.676-11 (08/19) (Current version)\n Not available versions:\n * P.676-1 (03/92) (Superseded)\n * P.676-2 (10/95) (Superseded)\n * P.676-3 (08/97) (Superseded)\n * P.676-4 (10/99) (Superseded)\n * P.676-5 (02/01) (Superseded)\n * P.676-6 (03/05) (Superseded)\n * P.676-7 (02/07) (Superseded)\n * P.676-8 (10/09) (Superseded)\n \"\"\"\n # This is an abstract class that contains an instance to a version of the\n # ITU-R P.676 recommendation.\n\n def __init__(self, version=12):\n if version == 12:\n self.instance = _ITU676_12_()\n elif version == 11:\n self.instance = _ITU676_11_()\n elif version == 10:\n self.instance = _ITU676_10_()\n elif version == 9:\n self.instance = _ITU676_9_()\n# elif version == 8:\n# self.instance = _ITU676_8()\n# elif version == 7:\n# self.instance = _ITU676_7()\n# elif version == 6:\n# self.instance = _ITU676_6()\n# elif version == 5:\n# self.instance = _ITU676_5()\n# elif version == 4:\n# self.instance = _ITU676_4()\n# elif version == 3:\n# self.instance = _ITU676_3()\n# elif version == 2:\n# self.instance = _ITU676_2()\n# elif version == 1:\n# self.instance = _ITU676_1()\n else:\n raise ValueError(\n 'Version {0} is not implemented for the ITU-R P.676 model.'\n .format(version))\n\n @property\n def __version__(self):\n return self.instance.__version__\n\n def gaseous_attenuation_terrestrial_path(self, r, f, el, rho, P, T, mode):\n # Abstract method to compute the gaseous attenuation over a slant path\n fcn = np.vectorize(self.instance.gaseous_attenuation_terrestrial_path)\n return fcn(r, f, el, rho, P, T, mode)\n\n def gaseous_attenuation_inclined_path(self, f, el, rho, P, T, h1, h2, mode):\n # Abstract method to compute the gaseous attenuation over an inclined path\n fcn = np.vectorize(self.instance.gaseous_attenuation_inclined_path)\n return fcn(f, el, rho, P, T, h1, h2, mode)\n\n def gaseous_attenuation_slant_path(self, f, el, rho, P, T, V_t, h, mode):\n # Abstract method to compute the gaseous attenuation over a slant path\n fcn = np.vectorize(self.instance.gaseous_attenuation_slant_path)\n return fcn(f, el, rho, P, T, V_t, h, mode)\n\n def slant_inclined_path_equivalent_height(self, f, p):\n fcn = np.vectorize(self.instance.slant_inclined_path_equivalent_height,\n excluded=[0], otypes=[np.ndarray])\n return np.array(fcn(f, p).tolist())\n\n def zenit_water_vapour_attenuation(\n self, lat, lon, p, f, V_t=None, h=None):\n # Abstract method to compute the water vapour attenuation over the\n # slant path\n fcn = np.vectorize(self.instance.zenit_water_vapour_attenuation,\n excluded=[0, 1, 4, 5], otypes=[np.ndarray])\n return np.array(fcn(lat, lon, p, f, V_t, h).tolist())\n\n def gamma_exact(self, f, p, rho, t):\n # Abstract method to compute the specific attenuation using the\n # line-by-line method\n fcn = np.vectorize(self.instance.gamma_exact)\n return fcn(f, p, rho, t)\n\n def gammaw_exact(self, f, p, rho, t):\n # Abstract method to compute the specific attenuation due to water\n # vapour\n fcn = np.vectorize(self.instance.gammaw_exact)\n return fcn(f, p, rho, t)\n\n def gamma0_exact(self, f, p, rho, t):\n # Abstract method to compute the specific attenuation due to dry\n # atmoshere\n fcn = np.vectorize(self.instance.gamma0_exact)\n return fcn(f, p, rho, t)\n\n def gammaw_approx(self, f, p, rho, t):\n # Abstract method to compute the 
specific attenuation due to water\n # vapour\n fcn = np.vectorize(self.instance.gammaw_approx)\n return fcn(f, p, rho, t)\n\n def gamma0_approx(self, f, p, rho, t):\n # Abstract method to compute the specific attenuation due to dry\n # atmoshere\n fcn = np.vectorize(self.instance.gamma0_approx)\n return fcn(f, p, rho, t)\n\n\nclass _ITU676_12_():\n\n tmp = load_data(os.path.join(dataset_dir, '676/v12_lines_oxygen.txt'),\n skip_header=1)\n f_ox = tmp[:, 0]\n a1 = tmp[:, 1]\n a2 = tmp[:, 2]\n a3 = tmp[:, 3]\n a4 = tmp[:, 4]\n a5 = tmp[:, 5]\n a6 = tmp[:, 6]\n\n tmp = load_data(os.path.join(dataset_dir,\n '676//v12_lines_water_vapour.txt'),\n skip_header=1)\n f_wv = tmp[:, 0]\n b1 = tmp[:, 1]\n b2 = tmp[:, 2]\n b3 = tmp[:, 3]\n b4 = tmp[:, 4]\n b5 = tmp[:, 5]\n b6 = tmp[:, 6]\n\n # Coefficients in table 3\n t2_coeffs = [(0.1597, 118.750334),\n (0.1066, 368.498246),\n (0.1325, 424.763020),\n (0.1242, 487.249273),\n (0.0938, 715.392902),\n (0.1448, 773.839490),\n (0.1374, 834.145546)]\n\n # Coefficients in table 4\n hw_coeffs = [(22.23508, 1.52, 2.56),\n (183.310087, 7.62, 10.2),\n (325.152888, 1.56, 2.7),\n (380.197353, 4.15, 5.7),\n (439.150807, 0.2, 0.91),\n (448.001085, 1.63, 2.46),\n (474.689092, 0.76, 2.22),\n (488.490108, 0.26, 2.49),\n (556.935985, 7.81, 10),\n (620.70087, 1.25, 2.35),\n (752.033113, 16.2, 20),\n (916.171582, 1.47, 2.58),\n (970.315022, 1.36, 2.44),\n (987.926764, 1.6, 1.86)]\n\n def __init__(self):\n self.__version__ = 12\n self.year = 2019\n self.month = 8\n self.link = 'https://www.itu.int/rec/R-REC-P.676-11-201712-S/en'\n\n def gammaw_approx(self, f, p, rho, T):\n warnings.warn(\n RuntimeWarning(\n 'Recommendation ITU-R P.676-12 does not have an explicit '\n 'method to approximate gamma_w. The exact method shall be '\n 'used instead.'))\n return self.gamma_exact(f, p, rho, T)\n\n def gamma0_approx(self, f, p, rho, T):\n warnings.warn(\n RuntimeWarning(\n 'Recommendation ITU-R P.676-12 does not have an explicit '\n 'method to approximate gamma_w. The exact method shall be '\n 'used instead.'))\n return self.gamma_exact(f, p, rho, T)\n\n @classmethod\n def gamma0_exact(self, f, p, rho, T):\n return __gamma0_exact__(self, f, p, rho, T)\n\n @classmethod\n def gammaw_exact(self, f, p, rho, T):\n return __gammaw_exact__(self, f, p, rho, T)\n\n @classmethod\n def gamma_exact(self, f, p, rho, T):\n return (self.gamma0_exact(f, p, rho, T) +\n self.gammaw_exact(f, p, rho, T))\n\n @classmethod\n def gaseous_attenuation_approximation(self, f, el, rho, P, T):\n \"\"\"\n T goes in Kelvin\n \"\"\"\n if np.any(f > 350):\n warnings.warn(\n RuntimeWarning(\n 'The approximated method to computes '\n 'the gaseous attenuation in recommendation ITU-P 676-11 '\n 'is only recommended for frequencies below 350GHz'))\n\n if np.any(5 > el) or np.any(np.mod(el, 90) < 5):\n warnings.warn(\n RuntimeWarning(\n 'The approximated method to compute '\n 'the gaseous attenuation in recommendation ITU-P 676-11 '\n 'is only recommended for elevation angles between'\n '5 and 90 degrees'))\n\n # Water vapour attenuation (gammaw) computation as in Section 1 of\n # Annex 2 of [1]\n gamma0 = self.gamma0_exact(f, P, rho, T)\n gammaw = self.gammaw_exact(f, P, rho, T)\n\n return gamma0, gammaw\n\n @classmethod\n def slant_inclined_path_equivalent_height(self, f, P, rho, T):\n \"\"\"\n \"\"\"\n e = rho * T / 216.7\n rp = (P + e) / 1013.25\n\n # Eq. 
31 - 34\n t1 = 5.1040 / (1 + 0.066 * rp**-2.3) * \\\n np.exp(-((f - 59.7) / (2.87 + 12.4 * np.exp(-7.9 * rp)))**2)\n\n t2 = sum([(ci * np.exp(2.12 * rp)) /\n ((f - fi)**2 + 0.025 * np.exp(2.2 * rp))\n for ci, fi in self.t2_coeffs])\n\n t3 = 0.0114 * f / (1 + 0.14 * rp**-2.6) * \\\n (15.02 * f**2 - 1353 * f + 5.333e4) / \\\n (f**3 - 151.3 * f**2 + 9629 * f - 6803)\n\n A = 0.7832 + 0.00709 * (T - 273.15)\n\n # Eq. 30\n h0 = 6.1 * A / (1 + 0.17 * rp**-1.1) * (1 + t1 + t2 + t3)\n\n h0 = np.where(f < 70,\n np.minimum(h0, 10.7 * rp**0.3),\n h0)\n\n # Eq. 36 - 38\n A = 1.9298 - 0.04166 * (T - 273.15) + 0.0517 * rho\n B = 1.1674 - 0.00622 * (T - 273.15) + 0.0063 * rho\n sigmaw = 1.013 / (1 + np.exp(-8.6 * (rp - 0.57)))\n\n # Eq. 35 b\n hw = A + B * sum([(ai * sigmaw) / ((f - fi)**2 + bi * sigmaw)\n for fi, ai, bi in self.hw_coeffs])\n return h0, hw\n\n @classmethod\n def gaseous_attenuation_terrestrial_path(\n self, r, f, el, rho, P, T, mode='approx'):\n \"\"\"\n \"\"\"\n if mode == 'approx':\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n return (gamma0 + gammaw) * r\n else:\n gamma = self.gamma_exact(f, P, rho, T)\n return gamma * r\n\n @classmethod\n def gaseous_attenuation_slant_path(self, f, el, rho, P, T, V_t=None,\n h=None, mode='approx'):\n \"\"\"\n \"\"\"\n if mode == 'approx':\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n\n h0, hw = self.slant_inclined_path_equivalent_height(f, P, rho, T)\n\n # Use the zenit water-vapour method if the values of V_t\n # and h are provided\n if V_t is not None and h is not None:\n Aw = self.zenit_water_vapour_attenuation(None, None, None,\n f, V_t, h)\n else:\n Aw = gammaw * hw\n\n A0 = gamma0 * h0\n return (A0 + Aw) / np.sin(np.deg2rad(el))\n\n else:\n delta_h = 0.0001 * np.exp((np.arange(0, 923)) / 100)\n h_n = np.cumsum(delta_h)\n T_n = standard_temperature(h_n).to(u.K).value\n press_n = standard_pressure(h_n).value\n rho_n = standard_water_vapour_density(h_n, rho_0=rho).value\n\n e = rho * T / 216.7\n n_n = radio_refractive_index(press_n, e, T).value\n n_ratio = np.pad(n_n[1:], (0, 1), mode='edge') / n_n\n r_n = 6371 + h_n\n\n b = np.pi / 2 - np.deg2rad(el)\n Agas = 0\n for t, press, rho, r, delta, n_r in zip(\n T_n, press_n, rho_n, r_n, delta_h, n_ratio):\n a = - r * np.cos(b) + 0.5 * np.sqrt(\n 4 * r**2 * np.cos(b)**2 + 8 * r * delta + 4 * delta**2)\n a_cos_arg = np.clip((-a**2 - 2 * r * delta - delta**2) /\n (2 * a * r + 2 * a * delta), -1, 1)\n alpha = np.pi - np.arccos(a_cos_arg)\n gamma = self.gamma_exact(f, press, rho, t)\n Agas += a * gamma\n b = np.arcsin(n_r * np.sin(alpha))\n\n return Agas\n\n @classmethod\n def gaseous_attenuation_inclined_path(\n self, f, el, rho, P, T, h1, h2, mode='approx'):\n \"\"\"\n \"\"\"\n if h1 > 10 or h2 > 10:\n raise ValueError(\n 'Both the transmitter and the receiver must be at'\n 'altitude of less than 10 km above the sea level.'\n 'Current altitude Tx: %.2f km, Rx: %.2f km' % (h1, h2))\n\n if mode == 'approx':\n rho = rho * np.exp(h1 / 2)\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n else:\n gamma0 = self.gamma0_exact(f, P, rho, T)\n gammaw = 0\n\n e = rho * T / 216.7\n h0, hw = self.slant_inclined_path_equivalent_height(f, P + e)\n\n if 5 < el and el < 90:\n h0_p = h0 * (np.exp(-h1 / h0) - np.exp(-h2 / h0))\n hw_p = hw * (np.exp(-h1 / hw) - np.exp(-h2 / hw))\n return (gamma0 * h0_p + gammaw * hw_p) / np.sin(np.deg2rad(el))\n else:\n def F(x):\n return 1 / (0.661 * x + 0.339 * np.sqrt(x**2 + 5.51))\n\n el1 = 
el\n Re = 8500 # TODO: change to ITU-R P 834\n el2 = np.rad2deg(\n np.arccos(((Re + h1)/(Re + h2))*np.cos(np.deg2rad(el1))))\n\n def xi(eli, hi):\n return np.tan(np.deg2rad(eli)) * np.sqrt((Re + hi) / h0)\n\n def xi_p(eli, hi):\n return np.tan(np.deg2rad(eli)) * np.sqrt((Re + hi) / hw)\n\n def eq_33(h_num, h_den, el, x):\n return np.sqrt(Re + h_num) * F(x) * \\\n np.exp(-h_num / h_den) / np.cos(np.deg2rad(el))\n\n A = gamma0 * np.sqrt(h0) * (eq_33(h1, h0, el1, xi(el1, h1)) -\n eq_33(h2, h0, el2, xi(el2, h2))) +\\\n gammaw * np.sqrt(hw) * (eq_33(h1, hw, el1, xi_p(el1, h1)) -\n eq_33(h2, hw, el2, xi_p(el2, h2)))\n return A\n\n @classmethod\n def zenit_water_vapour_attenuation(\n self, lat, lon, p, f, V_t=None, h=None):\n f_ref = 20.6 # [GHz]\n p_ref = 845 # [hPa]\n\n if h is None:\n h = topographic_altitude(lat, lon).value\n\n if V_t is None:\n V_t = total_water_vapour_content(lat, lon, p, h).value\n\n rho_ref = V_t / 2.38\n t_ref = 14 * np.log(0.22 * V_t / 2.38) + 3 # [Celsius]\n\n a = (0.2048 * np.exp(- ((f - 22.43)/3.097)**2) +\n 0.2326 * np.exp(- ((f - 183.5)/4.096)**2) +\n 0.2073 * np.exp(- ((f - 325)/3.651)**2) - 0.1113)\n\n b = 8.741e4 * np.exp(-0.587 * f) + 312.2 * f**(-2.38) + 0.723\n h = np.clip(h, 0, 4)\n\n gammaw_approx_vect = np.vectorize(self.gammaw_exact)\n\n Aw_term1 = (0.0176 * V_t *\n gammaw_approx_vect(f, p_ref, rho_ref, t_ref + 273.15) /\n gammaw_approx_vect(f_ref, p_ref, rho_ref, t_ref + 273.15))\n\n return np.where(f < 20, Aw_term1, Aw_term1 * (a * h ** b + 1))\n\n\nclass _ITU676_11_():\n\n tmp = load_data(os.path.join(dataset_dir, '676/v11_lines_oxygen.txt'),\n skip_header=1)\n f_ox = tmp[:, 0]\n a1 = tmp[:, 1]\n a2 = tmp[:, 2]\n a3 = tmp[:, 3]\n a4 = tmp[:, 4]\n a5 = tmp[:, 5]\n a6 = tmp[:, 6]\n\n tmp = load_data(os.path.join(dataset_dir,\n '676//v11_lines_water_vapour.txt'),\n skip_header=1)\n f_wv = tmp[:, 0]\n b1 = tmp[:, 1]\n b2 = tmp[:, 2]\n b3 = tmp[:, 3]\n b4 = tmp[:, 4]\n b5 = tmp[:, 5]\n b6 = tmp[:, 6]\n\n idx_approx = np.zeros_like(b1, dtype=bool).squeeze()\n asterisk_rows = [0, 3, 4, 5, 7, 12, 20, 24, 34]\n idx_approx[np.array(asterisk_rows)] = True\n\n def __init__(self):\n self.__version__ = 11\n self.year = 2017\n self.month = 12\n self.link = 'https://www.itu.int/rec/R-REC-P.676-11-201712-S/en'\n\n @classmethod\n def gammaw_approx(self, f, p, rho, T):\n # T in Kelvin\n # e : water vapour partial pressure in hPa (total barometric pressure\n # ptot = p + e)\n theta = 300 / T\n e = rho * T / 216.7\n\n f_wv = self.f_wv[self.idx_approx]\n b1 = self.b1[self.idx_approx]\n b2 = self.b2[self.idx_approx]\n b3 = self.b3[self.idx_approx]\n b4 = self.b4[self.idx_approx]\n b5 = self.b5[self.idx_approx]\n b6 = self.b6[self.idx_approx]\n\n D_f_wv = b3 * 1e-4 * (p * theta ** b4 +\n b5 * e * theta ** b6)\n\n F_i_wv = f / f_wv * ((D_f_wv) / ((f_wv - f)**2 + D_f_wv**2) +\n (D_f_wv) / ((f_wv + f)**2 + D_f_wv**2))\n\n Si_wv = b1 * 1e-1 * e * theta**3.5 * np.exp(b2 * (1 - theta))\n\n N_pp_wv = Si_wv * F_i_wv\n\n N_pp = N_pp_wv.sum()\n\n gamma = 0.1820 * f * N_pp # Eq. 
1 [dB/km]\n return gamma\n\n @classmethod\n def gamma0_approx(self, f, p, rho, T):\n # T in Kelvin\n # e : water vapour partial pressure in hPa (total barometric pressure\n # ptot = p + e)\n theta = 300 / T\n e = rho * T / 216.7\n\n f_ox = self.f_ox\n\n D_f_ox = self.a3 * 1e-4 * (p * (theta ** (0.8 - self.a4)) +\n 1.1 * e * theta)\n\n delta_ox = (self.a5 + self.a6 * theta) * 1e-4 * (p + e) * theta**0.8\n\n F_i_ox = f / f_ox * ((D_f_ox - delta_ox * (f_ox - f)) /\n ((f_ox - f) ** 2 + D_f_ox ** 2) +\n (D_f_ox - delta_ox * (f_ox + f)) /\n ((f_ox + f) ** 2 + D_f_ox ** 2))\n\n Si_ox = self.a1 * 1e-7 * p * theta**3 * np.exp(self.a2 * (1 - theta))\n\n N_pp_ox = Si_ox * F_i_ox\n\n d = 5.6e-4 * (p + e) * theta**0.8\n\n N_d_pp = f * p * theta**2 * \\\n (6.14e-5 / (d * (1 + (f / d)**2)) +\n 1.4e-12 * p * theta**1.5 / (1 + 1.9e-5 * f**1.5))\n\n N_pp = N_pp_ox.sum() + N_d_pp\n\n gamma = 0.1820 * f * N_pp # Eq. 1 [dB/km]\n return gamma\n\n @classmethod\n def gamma0_exact(self, f, p, rho, T):\n return __gamma0_exact__(self, f, p, rho, T)\n\n @classmethod\n def gammaw_exact(self, f, p, rho, T):\n return __gammaw_exact__(self, f, p, rho, T)\n\n @classmethod\n def gamma_exact(self, f, p, rho, T):\n return (self.gamma0_exact(f, p, rho, T) +\n self.gammaw_exact(f, p, rho, T))\n\n @classmethod\n def gaseous_attenuation_approximation(self, f, el, rho, P, T):\n \"\"\"\n T goes in Kelvin\n \"\"\"\n if np.any(f > 350):\n warnings.warn(\n RuntimeWarning(\n 'The approximated method to computes '\n 'the gaseous attenuation in recommendation ITU-P 676-11 '\n 'is only recommended for frequencies below 350GHz'))\n\n if np.any(5 > el) or np.any(np.mod(el, 90) < 5):\n warnings.warn(\n RuntimeWarning(\n 'The approximated method to compute '\n 'the gaseous attenuation in recommendation ITU-P 676-11 '\n 'is only recommended for elevation angles between'\n '5 and 90 degrees'))\n\n # Water vapour attenuation (gammaw) computation as in Section 1 of\n # Annex 2 of [1]\n gamma0 = self.gamma0_approx(f, P, rho, T)\n gammaw = self.gammaw_approx(f, P, rho, T)\n\n return gamma0, gammaw\n\n @classmethod\n def slant_inclined_path_equivalent_height(self, f, p):\n \"\"\"\n \"\"\"\n rp = p / 1013.25\n t1 = 4.64 / (1 + 0.066 * rp**-2.3) * \\\n np.exp(- ((f - 59.7) / (2.87 + 12.4 * np.exp(-7.9 * rp)))**2)\n t2 = (0.14 * np.exp(2.12 * rp)) / \\\n ((f - 118.75)**2 + 0.031 * np.exp(2.2 * rp))\n t3 = 0.0114 / (1 + 0.14 * rp**-2.6) * f * \\\n (-0.0247 + 0.0001 * f + 1.61e-6 * f**2) / \\\n (1 - 0.0169 * f + 4.1e-5 * f**2 + 3.2e-7 * f**3)\n\n h0 = 6.1 / (1 + 0.17 * rp**-1.1) * (1 + t1 + t2 + t3)\n\n h0 = np.where(f < 70,\n np.minimum(h0, 10.7 * rp**0.3),\n h0)\n\n sigmaw = 1.013 / (1 + np.exp(-8.6 * (rp - 0.57)))\n hw = 1.66 * (1 + (1.39 * sigmaw) / ((f - 22.235)**2 + 2.56 * sigmaw) +\n (3.37 * sigmaw) / ((f - 183.31)**2 + 4.69 * sigmaw) +\n (1.58 * sigmaw) / ((f - 325.1)**2 + 2.89 * sigmaw))\n\n return h0, hw\n\n @classmethod\n def gaseous_attenuation_terrestrial_path(\n self, r, f, el, rho, P, T, mode='approx'):\n \"\"\"\n \"\"\"\n if mode == 'approx':\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n return (gamma0 + gammaw) * r\n else:\n gamma = self.gamma_exact(f, P, rho, T)\n return gamma * r\n\n @classmethod\n def gaseous_attenuation_slant_path(self, f, el, rho, P, T, V_t=None,\n h=None, mode='approx'):\n \"\"\"\n \"\"\"\n if mode == 'approx':\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n\n e = rho * T / 216.7\n h0, hw = self.slant_inclined_path_equivalent_height(f, P + e)\n\n 
# Use the zenit water-vapour method if the values of V_t\n # and h are provided\n if V_t is not None and h is not None:\n Aw = self.zenit_water_vapour_attenuation(None, None, None,\n f, V_t, h)\n else:\n Aw = gammaw * hw\n\n A0 = gamma0 * h0\n return (A0 + Aw) / np.sin(np.deg2rad(el))\n\n else:\n delta_h = 0.0001 * np.exp((np.arange(0, 923)) / 100)\n h_n = np.cumsum(delta_h)\n T_n = standard_temperature(h_n).to(u.K).value\n press_n = standard_pressure(h_n).value\n rho_n = standard_water_vapour_density(h_n, rho_0=rho).value\n\n e = rho * T / 216.7\n n_n = radio_refractive_index(press_n, e, T).value\n n_ratio = np.pad(n_n[1:], (0, 1), mode='edge') / n_n\n r_n = 6371 + h_n\n\n b = np.pi / 2 - np.deg2rad(el)\n Agas = 0\n for t, press, rho, r, delta, n_r in zip(\n T_n, press_n, rho_n, r_n, delta_h, n_ratio):\n a = - r * np.cos(b) + 0.5 * np.sqrt(\n 4 * r**2 * np.cos(b)**2 + 8 * r * delta + 4 * delta**2)\n a_cos_arg = np.clip((-a**2 - 2 * r * delta - delta**2) /\n (2 * a * r + 2 * a * delta), -1, 1)\n alpha = np.pi - np.arccos(a_cos_arg)\n gamma = self.gamma_exact(f, press, rho, t)\n Agas += a * gamma\n b = np.arcsin(n_r * np.sin(alpha))\n\n return Agas\n\n @classmethod\n def gaseous_attenuation_inclined_path(\n self, f, el, rho, P, T, h1, h2, mode='approx'):\n \"\"\"\n \"\"\"\n if h1 > 10 or h2 > 10:\n raise ValueError(\n 'Both the transmitter and the receiver must be at'\n 'altitude of less than 10 km above the sea level.'\n 'Current altitude Tx: %.2f km, Rx: %.2f km' % (h1, h2))\n\n if mode == 'approx':\n rho = rho * np.exp(h1 / 2)\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n else:\n gamma0 = self.gamma0_exact(f, P, rho, T)\n gammaw = 0\n\n e = rho * T / 216.7\n h0, hw = self.slant_inclined_path_equivalent_height(f, P + e)\n\n if 5 < el and el < 90:\n h0_p = h0 * (np.exp(-h1 / h0) - np.exp(-h2 / h0))\n hw_p = hw * (np.exp(-h1 / hw) - np.exp(-h2 / hw))\n return (gamma0 * h0_p + gammaw * hw_p) / np.sin(np.deg2rad(el))\n else:\n def F(x):\n return 1 / (0.661 * x + 0.339 * np.sqrt(x**2 + 5.51))\n\n el1 = el\n Re = 8500 # TODO: change to ITU-R P 834\n el2 = np.rad2deg(\n np.arccos(((Re + h1)/(Re + h2))*np.cos(np.deg2rad(el1))))\n\n def xi(eli, hi):\n return np.tan(np.deg2rad(eli)) * np.sqrt((Re + hi) / h0)\n\n def xi_p(eli, hi):\n return np.tan(np.deg2rad(eli)) * np.sqrt((Re + hi) / hw)\n\n def eq_33(h_num, h_den, el, x):\n return np.sqrt(Re + h_num) * F(x) * \\\n np.exp(-h_num / h_den) / np.cos(np.deg2rad(el))\n\n A = gamma0 * np.sqrt(h0) * (eq_33(h1, h0, el1, xi(el1, h1)) -\n eq_33(h2, h0, el2, xi(el2, h2))) +\\\n gammaw * np.sqrt(hw) * (eq_33(h1, hw, el1, xi_p(el1, h1)) -\n eq_33(h2, hw, el2, xi_p(el2, h2)))\n return A\n\n @classmethod\n def zenit_water_vapour_attenuation(\n self, lat, lon, p, f, V_t=None, h=None):\n f_ref = 20.6 # [GHz]\n p_ref = 815 # [hPa]\n\n if h is None:\n h = topographic_altitude(lat, lon).value\n\n if V_t is None:\n V_t = total_water_vapour_content(lat, lon, p, h).value\n\n rho_ref = V_t / 3.67\n t_ref = 14 * np.log(0.22 * V_t / 3.67) + 3 # [Celsius]\n\n a = (0.2048 * np.exp(- ((f - 22.43)/3.097)**2) +\n 0.2326 * np.exp(- ((f-183.5)/4.096)**2) +\n 0.2073 * np.exp(- ((f-325)/3.651)**2) - 0.113)\n\n b = 8.741e4 * np.exp(-0.587 * f) + 312.2 * f**(-2.38) + 0.723\n h = np.minimum(h, 4)\n\n gammaw_approx_vect = np.vectorize(self.gammaw_approx)\n\n Aw_term1 = (0.0176 * V_t *\n gammaw_approx_vect(f, p_ref, rho_ref, t_ref + 273.15) /\n gammaw_approx_vect(f_ref, p_ref, rho_ref, t_ref + 273.15))\n\n return np.where(f < 20, Aw_term1, Aw_term1 * 
(a * h ** b + 1))\n\n\nclass _ITU676_10_():\n\n tmp = load_data(os.path.join(dataset_dir, '676/v10_lines_oxygen.txt'),\n skip_header=1)\n f_ox = tmp[:, 0]\n a1 = tmp[:, 1]\n a2 = tmp[:, 2]\n a3 = tmp[:, 3]\n a4 = tmp[:, 4]\n a5 = tmp[:, 5]\n a6 = tmp[:, 6]\n\n tmp = load_data(os.path.join(dataset_dir,\n '676//v10_lines_water_vapour.txt'),\n skip_header=1)\n f_wv = tmp[:, 0]\n b1 = tmp[:, 1]\n b2 = tmp[:, 2]\n b3 = tmp[:, 3]\n b4 = tmp[:, 4]\n b5 = tmp[:, 5]\n b6 = tmp[:, 6]\n\n def __init__(self):\n self.__version__ = 10\n self.year = 2013\n self.month = 9\n self.link = 'https://www.itu.int/rec/R-REC-P.676-10-201309-S/en'\n\n @classmethod\n def gammaw_approx(self, f, P, rho, T):\n rp = P / 1013\n rt = 288 / (T)\n eta1 = 0.955 * rp * rt**0.68 + 0.006 * rho\n eta2 = 0.735 * rp * rt**0.50 + 0.0353 * rt**4 * rho\n\n def g(f, fi): return 1 + ((f - fi) / (f + fi))**2\n gammaw = (\n (3.98 * eta1 * np.exp(2.23 * (1 - rt))) /\n ((f - 22.235) ** 2 + 9.42 * eta1 ** 2) * g(f, 22.0) +\n (11.96 * eta1 * np.exp(0.70 * (1 - rt))) /\n ((f - 183.310) ** 2 + 11.14 * eta1 ** 2) +\n (0.081 * eta1 * np.exp(6.44 * (1 - rt))) /\n ((f - 321.226) ** 2 + 6.29 * eta1 ** 2) +\n (3.660 * eta1 * np.exp(1.60 * (1 - rt))) /\n ((f - 325.153) ** 2 + 9.22 * eta1 ** 2) +\n (25.37 * eta1 * np.exp(1.09 * (1 - rt))) / ((f - 380.000) ** 2) +\n (17.40 * eta1 * np.exp(1.46 * (1 - rt))) / ((f - 448.000) ** 2) +\n (844.6 * eta1 * np.exp(0.17 * (1 - rt))) / ((f - 557.000) ** 2) *\n g(f, 557.0) + (290.0 * eta1 * np.exp(0.41 * (1 - rt))) /\n ((f - 752.000) ** 2) * g(f, 752.0) +\n (8.3328e4 * eta2 * np.exp(0.99 * (1 - rt))) /\n ((f - 1780.00) ** 2) *\n g(f, 1780.0)) * f ** 2 * rt ** 2.5 * rho * 1e-4\n return gammaw\n\n @classmethod\n def gamma0_approx(self, f, P, rho, T):\n rp = P / 1013.0\n rt = 288.0 / (T)\n\n def phi(rp, rt, a, b, c, d): return np.power(\n rp, a) * np.power(rt, b) * np.exp(c * (1 - rp) + d * (1 - rt))\n\n # Dry air attenuation (gamma0) computation as in Section 1 of Annex 2\n # of [1]\n delta = -0.00306 * phi(rp, rt, 3.211, -14.94, 1.583, -16.37)\n\n xi1 = phi(rp, rt, 0.0717, -1.8132, 0.0156, -1.6515)\n xi2 = phi(rp, rt, 0.5146, -4.6368, -0.1921, -5.7416)\n xi3 = phi(rp, rt, 0.3414, -6.5851, 0.2130, -8.5854)\n xi4 = phi(rp, rt, -0.0112, 0.0092, -0.1033, -0.0009)\n xi5 = phi(rp, rt, 0.2705, -2.7192, -0.3016, -4.1033)\n xi6 = phi(rp, rt, 0.2445, -5.9191, 0.0422, -8.0719)\n xi7 = phi(rp, rt, -0.1833, 6.5589, -0.2402, 6.131)\n\n gamma54 = 2.192 * phi(rp, rt, 1.8286, -1.9487, 0.4051, -2.8509)\n gamma58 = 12.59 * phi(rp, rt, 1.0045, 3.5610, 0.1588, 1.2834)\n gamma60 = 15.00 * phi(rp, rt, 0.9003, 4.1335, 0.0427, 1.6088)\n gamma62 = 14.28 * phi(rp, rt, 0.9886, 3.4176, 0.1827, 1.3429)\n gamma64 = 6.819 * phi(rp, rt, 1.4320, 0.6258, 0.3177, -0.5914)\n gamma66 = 1.908 * phi(rp, rt, 2.0717, -4.1404, 0.4910, -4.8718)\n\n def fcn_le_54():\n return (((7.2 * rt**2.8) / (f**2 + 0.34 * rp**2 * rt**1.6) +\n (0.62 * xi3) / ((54 - f)**(1.16 * xi1) + 0.83 * xi2)) *\n f**2 * rp**2 * 1e-3)\n\n def fcn_le_60():\n return (np.exp(np.log(gamma54) / 24.0 * (f - 58) * (f - 60) -\n np.log(gamma58) / 8.0 * (f - 54) * (f - 60) +\n np.log(gamma60) / 12.0 * (f - 54) * (f - 58)))\n\n def fcn_le_62():\n return (gamma60 + (gamma62 - gamma60) * (f - 60) / 2.0)\n\n def fcn_le_66():\n return (np.exp(np.log(gamma62) / 8.0 * (f - 64) * (f - 66) -\n np.log(gamma64) / 4.0 * (f - 62) * (f - 66) +\n np.log(gamma66) / 8.0 * (f - 62) * (f - 64)))\n\n def fcn_le_120():\n return ((3.02e-4 * rt**3.5 + (0.283 * rt**3.8) /\n ((f - 118.75)**2 + 2.91 * rp**2 * rt**1.6) +\n 
(0.502 * xi6 * (1 - 0.0163 * xi7 * (f - 66))) /\n ((f - 66)**(1.4346 * xi4) + 1.15 * xi5)) *\n f**2 * rp**2 * 1e-3)\n\n def fcn_rest():\n return (((3.02e-4) / (1 + 1.9e-5 * f**1.5) +\n (0.283 * rt**0.3) / ((f - 118.75)**2 +\n 2.91 * rp**2 * rt**1.6)) *\n f**2 * rp**2 * rt**3.5 * 1e-3 + delta)\n\n gamma0 = \\\n np.where(\n f <= 54, fcn_le_54(),\n np.where(\n np.logical_and(54 < f, f <= 60), fcn_le_60(),\n np.where(\n np.logical_and(60 < f, f <= 62), fcn_le_62(),\n np.where(\n np.logical_and(62 < f, f <= 66), fcn_le_66(),\n np.where(\n np.logical_and(66 < f, f <= 120),\n fcn_le_120(),\n fcn_rest())))))\n\n return gamma0\n\n @classmethod\n def gamma0_exact(self, f, p, rho, T):\n return __gamma0_exact__(self, f, p, rho, T)\n\n @classmethod\n def gammaw_exact(self, f, p, rho, T):\n return __gammaw_exact__(self, f, p, rho, T)\n\n @classmethod\n def gamma_exact(self, f, p, rho, T):\n return (self.gamma0_exact(f, p, rho, T) +\n self.gammaw_exact(f, p, rho, T))\n\n @classmethod\n def gaseous_attenuation_approximation(self, f, el, rho, P, T):\n \"\"\"\n T goes in Kelvin\n \"\"\"\n if np.any(f > 350):\n warnings.warn(\n RuntimeWarning(\n 'The approximated method to computes '\n 'the gaseous attenuation in recommendation ITU-P 676-11 '\n 'is only recommended for frequencies below 350GHz'))\n\n if np.any(5 > el) or np.any(np.mod(el, 90) < 5):\n warnings.warn(\n RuntimeWarning(\n 'The approximated method to compute '\n 'the gaseous attenuation in recommendation ITU-P 676-11 '\n 'is only recommended for elevation angles between'\n '5 and 90 degrees'))\n\n # Water vapour attenuation (gammaw) computation as in Section 1 of\n # Annex 2 of [1]\n gamma0 = self.gamma0_approx(f, P, rho, T)\n gammaw = self.gammaw_approx(f, P, rho, T)\n\n return gamma0, gammaw\n\n @classmethod\n def slant_inclined_path_equivalent_height(self, f, p):\n \"\"\"\n \"\"\"\n rp = p / 1013.0\n t1 = (4.64) / (1 + 0.066 * rp**-2.3) * \\\n np.exp(- ((f - 59.7) / (2.87 + 12.4 * np.exp(-7.9 * rp)))**2)\n t2 = (0.14 * np.exp(2.21 * rp)) / \\\n ((f - 118.75)**2 + 0.031 * np.exp(2.2 * rp))\n t3 = (0.0114) / (1 + 0.14 * rp**-2.6) * f * \\\n (-0.0247 + 0.0001 * f + 1.61e-6 * f**2) / \\\n (1 - 0.0169 * f + 4.1e-5 * f**2 + 3.2e-7 * f**3)\n\n h0 = (6.1) / (1 + 0.17 * rp**-1.1) * (1 + t1 + t2 + t3)\n\n h0 = np.where(f < 70,\n np.minimum(h0, 10.7 * rp**0.3),\n h0)\n\n sigmaw = (1.013) / (1 + np.exp(-8.6 * (rp - 0.57)))\n hw = 1.66 * (1 + (1.39 * sigmaw) / ((f - 22.235)**2 + 2.56 * sigmaw) +\n (3.37 * sigmaw) / ((f - 183.31)**2 + 4.69 * sigmaw) +\n (1.58 * sigmaw) / ((f - 325.1)**2 + 2.89 * sigmaw))\n\n return h0, hw\n\n @classmethod\n def gaseous_attenuation_terrestrial_path(\n self, r, f, el, rho, P, T, mode='approx'):\n \"\"\"\n \"\"\"\n if mode == 'approx':\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n return (gamma0 + gammaw) * r\n else:\n gamma = self.gamma_exact(f, P, rho, T)\n return gamma * r\n\n @classmethod\n def gaseous_attenuation_slant_path(self, f, el, rho, P, T, V_t=None,\n h=None, mode='approx'):\n \"\"\"\n \"\"\"\n if mode == 'approx':\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n e = rho * T / 216.7\n h0, hw = self.slant_inclined_path_equivalent_height(f, P + e)\n\n # Use the zenit water-vapour method if the values of V_t\n # and h are provided\n if V_t is not None and h is not None:\n Aw = self.zenit_water_vapour_attenuation(None, None, None,\n f, V_t, h)\n else:\n Aw = gammaw * hw\n\n A0 = gamma0 * h0\n return (A0 + Aw) / np.sin(np.deg2rad(el))\n\n else:\n 
delta_h = 0.0001 * np.exp((np.arange(1, 923) - 1) / 100)\n h_n = np.cumsum(delta_h)\n T_n = standard_temperature(h_n).to(u.K).value\n press_n = standard_pressure(h_n).value\n rho_n = standard_water_vapour_density(h_n, rho_0=rho).value\n\n e = rho * T / 216.7\n n_n = radio_refractive_index(press_n, e, T).value\n n_ratio = np.pad(n_n[1:], (0, 1), mode='edge') / n_n\n r_n = 6371 + h_n\n\n b = np.pi / 2 - np.deg2rad(el)\n Agas = 0\n for t, press, rho, r, delta, n_r in zip(\n T_n, press_n, rho_n, r_n, delta_h, n_ratio):\n a = - r * np.cos(b) + 0.5 * np.sqrt(\n 4 * r**2 * np.cos(b)**2 + 8 * r * delta + 4 * delta**2)\n a_cos_arg = np.clip((-a**2 - 2 * r * delta - delta**2) /\n (2 * a * r + 2 * a * delta), -1, 1)\n alpha = np.pi - np.arccos(a_cos_arg)\n gamma = self.gamma_exact(f, press, rho, t)\n Agas += a * gamma\n b = np.arcsin(n_r * np.sin(alpha))\n\n return Agas\n\n @classmethod\n def gaseous_attenuation_inclined_path(\n self, f, el, rho, P, T, h1, h2, mode='approx'):\n \"\"\"\n \"\"\"\n if h1 > 10 or h2 > 10:\n raise ValueError(\n 'Both the transmitter and the receiver must be at'\n 'altitude of less than 10 km above the sea level.'\n 'Current altitude Tx: %.2f km, Rx: %.2f km' % (h1, h2))\n\n if mode == 'approx':\n rho = rho * np.exp(h1 / 2)\n gamma0, gammaw = self.gaseous_attenuation_approximation(\n f, el, rho, P, T)\n else:\n gamma0 = self.gamma_exact(f, P, rho, T)\n gammaw = 0\n\n e = rho * T / 216.7\n h0, hw = self.slant_inclined_path_equivalent_height(f, P + e)\n\n if 5 < el and el < 90:\n h0_p = h0 * (np.exp(-h1 / h0) - np.exp(-h2 / h0))\n hw_p = hw * (np.exp(-h1 / hw) - np.exp(-h2 / hw))\n return (gamma0 * h0_p + gammaw * hw_p) / np.sin(np.deg2rad(el))\n else:\n def F(x):\n return 1 / (0.661 * x + 0.339 * np.sqrt(x**2 + 5.51))\n\n el1 = el\n Re = 8500 # TODO: change to ITU-R P 834\n el2 = np.rad2deg(\n np.arccos(((Re + h1)/(Re + h2))*np.cos(np.deg2rad(el1))))\n\n def xi(eli, hi):\n return np.tan(np.deg2rad(eli)) * np.sqrt((Re + hi) / h0)\n\n def xi_p(eli, hi):\n return np.tan(np.deg2rad(eli)) * np.sqrt((Re + hi) / hw)\n\n def eq_33(h_num, h_den, el, x):\n return np.sqrt(Re + h_num) * F(x) * \\\n np.exp(-h_num / h_den) / np.cos(np.deg2rad(el))\n\n A = gamma0 * np.sqrt(h0) * (eq_33(h1, h0, el1, xi(el1, h1)) -\n eq_33(h2, h0, el2, xi(el2, h2))) +\\\n gammaw * np.sqrt(hw) * (eq_33(h1, hw, el1, xi_p(el1, h1)) -\n eq_33(h2, hw, el2, xi_p(el2, h2)))\n return A\n\n @classmethod\n def zenit_water_vapour_attenuation(\n self, lat, lon, p, f, V_t=None, h=None):\n f_ref = 20.6 # [GHz]\n p_ref = 780 # [hPa]\n if V_t is None:\n V_t = total_water_vapour_content(lat, lon, p, h).value\n rho_ref = V_t / 4 # [g/m3]\n t_ref = 14 * np.log(0.22 * V_t / 4) + 3 # [Celsius]\n\n gammaw_approx_vect = np.vectorize(self.gammaw_approx)\n return (0.0173 * V_t *\n gammaw_approx_vect(f, p_ref, rho_ref, t_ref + 273) /\n gammaw_approx_vect(f_ref, p_ref, rho_ref, t_ref + 273))\n\n\nclass _ITU676_9_():\n\n tmp = load_data(os.path.join(dataset_dir, '676//v9_lines_oxygen.txt'),\n skip_header=1)\n f_ox = tmp[:, 0]\n a1 = tmp[:, 1]\n a2 = tmp[:, 2]\n a3 = tmp[:, 3]\n a4 = tmp[:, 4]\n a5 = tmp[:, 5]\n a6 = tmp[:, 6]\n\n tmp = load_data(os.path.join(dataset_dir,\n '676//v9_lines_water_vapour.txt'),\n skip_header=1)\n f_wv = tmp[:, 0]\n b1 = tmp[:, 1]\n b2 = tmp[:, 2]\n b3 = tmp[:, 3]\n b4 = tmp[:, 4]\n b5 = tmp[:, 5]\n b6 = tmp[:, 6]\n\n def __init__(self):\n self.__version__ = 9\n self.year = 2012\n self.month = 2\n self.link = 'https://www.itu.int/rec/R-REC-P.676-9-201202-S/en'\n\n # Recommendation ITU-P R.676-9 has most of 
the methods similar to those\n # in Recommendation ITU-P R.676-10.\n @staticmethod\n def gammaw_approx(*args, **kwargs):\n return _ITU676_10_.gammaw_approx(*args, **kwargs)\n\n @staticmethod\n def gamma0_approx(*args, **kwargs):\n return _ITU676_10_.gamma0_approx(*args, **kwargs)\n\n @staticmethod\n def gaseous_attenuation_inclined_path(*args, **kwargs):\n return _ITU676_10_.gaseous_attenuation_inclined_path(*args, **kwargs)\n\n @staticmethod\n def zenit_water_vapour_attenuation(*args, **kwargs):\n return _ITU676_10_.zenit_water_vapour_attenuation(*args, **kwargs)\n\n @staticmethod\n def gaseous_attenuation_approximation(*args, **kwargs):\n return _ITU676_10_.gaseous_attenuation_approximation(*args, **kwargs)\n\n @staticmethod\n def slant_inclined_path_equivalent_height(*args, **kwargs):\n return _ITU676_10_.slant_inclined_path_equivalent_height(*args,\n **kwargs)\n\n @staticmethod\n def gaseous_attenuation_terrestrial_path(*args, **kwargs):\n return _ITU676_10_.gaseous_attenuation_terrestrial_path(*args,\n **kwargs)\n\n @staticmethod\n def gaseous_attenuation_slant_path(*args, **kwargs):\n return _ITU676_10_.gaseous_attenuation_slant_path(*args, **kwargs)\n\n def gamma0_exact(self, f, p, rho, T):\n return __gamma0_exact__(self, f, p, rho, T)\n\n def gammaw_exact(self, f, p, rho, T):\n return __gammaw_exact__(self, f, p, rho, T)\n\n def gamma_exact(self, f, p, rho, T):\n return (self.gamma0_exact(f, p, rho, T) +\n self.gammaw_exact(f, p, rho, T))\n\n__model = __ITU676__()\n\n\ndef change_version(new_version):\n \"\"\"\n Change the version of the ITU-R P.676 recommendation currently being used.\n\n\n Parameters\n ----------\n new_version : int\n Number of the version to use.\n Valid values are:\n * P.676-1 (08/94) (Superseded)\n * P.676-2 (10/99) (Superseded)\n * P.676-3 (02/01) (Superseded)\n * P.676-4 (04/03) (Superseded)\n * P.676-5 (08/07) (Superseded)\n * P.676-6 (02/12) (Current version)\n \"\"\"\n global __model\n __model = __ITU676__(new_version)\n utils.memory.clear()\n\n\ndef get_version():\n \"\"\"\n Obtain the version of the ITU-R P.676 recommendation currently being used.\n \"\"\"\n global __model\n return __model.__version__\n\n\ndef gaseous_attenuation_terrestrial_path(r, f, el, rho, P, T, mode):\n \"\"\"\n Estimate the attenuation of atmospheric gases on terrestrial paths.\n This function operates in two modes, 'approx', and 'exact':\n\n * 'approx': a simplified approximate method to estimate gaseous attenuation\n that is applicable in the frequency range 1-350 GHz.\n * 'exact': an estimate of gaseous attenuation computed by summation of\n individual absorption lines that is valid for the frequency\n range 1-1,000 GHz\n\n\n Parameters\n ----------\n r : number or Quantity\n Path length (km)\n f : number or Quantity\n Frequency (GHz)\n el : sequence, number or Quantity\n Elevation angle (degrees)\n rho : number or Quantity\n Water vapor density (g/m**3)\n P : number or Quantity\n Atmospheric pressure (hPa)\n T : number or Quantity\n Absolute temperature (K)\n mode : string, optional\n Mode for the calculation. Valid values are 'approx', 'exact'. If\n 'approx' Uses the method in Annex 2 of the recommendation (if any),\n else uses the method described in Section 1. 
Default, 'approx'\n\n\n Returns\n -------\n attenuation: Quantity\n Terrestrial path attenuation (dB)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n type_output = type(el)\n r = prepare_quantity(r, u.km, 'Path Length')\n f = prepare_quantity(f, u.GHz, 'Frequency')\n el = prepare_quantity(prepare_input_array(el), u.deg, 'Elevation angle')\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapor density')\n P = prepare_quantity(P, u.hPa, 'Atospheric pressure')\n T = prepare_quantity(T, u.K, 'Temperature')\n val = __model.gaseous_attenuation_terrestrial_path(\n r, f, el, rho, P, T, mode)\n return prepare_output_array(val, type_output) * u.dB\n\n\ndef gaseous_attenuation_slant_path(f, el, rho, P, T, V_t=None, h=None,\n mode='approx'):\n \"\"\"\n Estimate the attenuation of atmospheric gases on slant paths. This function\n operates in two modes, 'approx', and 'exact':\n\n * 'approx': a simplified approximate method to estimate gaseous attenuation\n that is applicable in the frequency range 1-350 GHz.\n * 'exact': an estimate of gaseous attenuation computed by summation of\n individual absorption lines that is valid for the frequency\n range 1-1,000 GHz\n\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n el : sequence, number or Quantity\n Elevation angle (degrees)\n rho : number or Quantity\n Water vapor density (g/m3)\n P : number or Quantity\n Atmospheric pressure (hPa)\n T : number or Quantity\n Absolute temperature (K)\n V_t: number or Quantity (kg/m2)\n Integrated water vapour content from: a) local radiosonde or\n radiometric data or b) at the required percentage of time (kg/m2)\n obtained from the digital maps in Recommendation ITU-R P.836 (kg/m2).\n If None, use general method to compute the wet-component of the\n gaseous attenuation. If provided, 'h' must be also provided. Default\n is None.\n h : number, sequence, or numpy.ndarray\n Altitude of the receivers. If None, use the topographical altitude as\n described in recommendation ITU-R P.1511. If provided, 'V_t' needs to\n be also provided. Default is None.\n mode : string, optional\n Mode for the calculation. Valid values are 'approx', 'exact'. If\n 'approx' Uses the method in Annex 2 of the recommendation (if any),\n else uses the method described in Section 1. Default, 'approx'\n\n\n Returns\n -------\n attenuation: Quantity\n Slant path attenuation (dB)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n type_output = type(el)\n f = prepare_quantity(f, u.GHz, 'Frequency')\n el = prepare_quantity(prepare_input_array(el), u.deg, 'Elevation angle')\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapor density')\n P = prepare_quantity(P, u.hPa, 'Atospheric pressure')\n T = prepare_quantity(T, u.K, 'Temperature')\n V_t = prepare_quantity(V_t, u.kg / u.m**2,\n 'Integrated water vapour content')\n h = prepare_quantity(h, u.km, 'Altitude')\n val = __model.gaseous_attenuation_slant_path(\n f, el, rho, P, T, V_t, h, mode)\n return prepare_output_array(val, type_output) * u.dB\n\n\ndef gaseous_attenuation_inclined_path(f, el, rho, P, T, h1, h2, mode='approx'):\n \"\"\"\n Estimate the attenuation of atmospheric gases on inclined paths between two\n ground stations at heights h1 and h2. 
This function operates in two modes,\n 'approx', and 'exact':\n\n * 'approx': a simplified approximate method to estimate gaseous attenuation\n that is applicable in the frequency range 1-350 GHz.\n * 'exact': an estimate of gaseous attenuation computed by summation of\n individual absorption lines that is valid for the frequency\n range 1-1,000 GHz\n\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n el : sequence, number or Quantity\n Elevation angle (degrees)\n rho : number or Quantity\n Water vapor density (g/m3)\n P : number or Quantity\n Atmospheric pressure (hPa)\n T : number or Quantity\n Absolute temperature (K)\n h1 : number or Quantity\n Height of ground station 1 (km)\n h2 : number or Quantity\n Height of ground station 2 (km)\n mode : string, optional\n Mode for the calculation. Valid values are 'approx', 'exact'. If\n 'approx' Uses the method in Annex 2 of the recommendation (if any),\n else uses the method described in Section 1. Default, 'approx'\n\n\n Returns\n -------\n attenuation: Quantity\n Inclined path attenuation (dB)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n f = prepare_quantity(f, u.GHz, 'Frequency')\n el = prepare_quantity(el, u.deg, 'Elevation angle')\n type_output = type(el)\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapor density')\n P = prepare_quantity(P, u.hPa, 'Atospheric pressure')\n T = prepare_quantity(T, u.K, 'Temperature')\n h1 = prepare_quantity(h1, u.km, 'Height of Ground Station 1')\n h2 = prepare_quantity(h2, u.km, 'Height of Ground Station 2')\n val = __model.gaseous_attenuation_inclined_path(\n f, el, rho, P, T, h1, h2, mode=mode)\n return prepare_output_array(val, type_output) * u.dB\n\n\ndef slant_inclined_path_equivalent_height(f, p):\n \"\"\" Computes the equivalent height to be used for oxygen and water vapour\n gaseous attenuation computations.\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n p : number\n Percentage of the time the gaseous attenuation value is exceeded.\n\n Returns\n -------\n ho, hw : Quantity\n Equivalent height for oxygen and water vapour (m)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n\n \"\"\"\n type_output = type(f)\n f = prepare_quantity(f, u.GHz, 'Frequency')\n\n val = __model.slant_inclined_path_equivalent_height(f, p)\n return prepare_output_array(val, type_output) * u.m\n\n\[email protected]\ndef zenit_water_vapour_attenuation(lat, lon, p, f, V_t=None, h=None):\n \"\"\"\n An alternative method may be used to compute the slant path attenuation by\n water vapour, in cases where the integrated water vapour content along the\n path, ``V_t``, is known.\n\n\n Parameters\n ----------\n lat : number, sequence, or numpy.ndarray\n Latitudes of the receiver points\n lon : number, sequence, or numpy.ndarray\n Longitudes of the receiver points\n p : number\n Percentage of the time the zenit water vapour attenuation value is\n exceeded.\n f : number or Quantity\n Frequency (GHz)\n V_t : number or Quantity, optional\n Integrated water vapour content along the path (kg/m2 or mm).\n If not provided this value is estimated using Recommendation\n ITU-R P.836. Default value None\n h : number, sequence, or numpy.ndarray\n Altitude of the receivers. 
If None, use the topographical altitude as\n described in recommendation ITU-R P.1511\n\n\n Returns\n -------\n A_w : Quantity\n Water vapour attenuation along the slant path (dB)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n type_output = type(lat)\n lat = prepare_input_array(lat)\n lon = prepare_input_array(lon)\n lon = np.mod(lon, 360)\n f = prepare_quantity(f, u.GHz, 'Frequency')\n V_t = prepare_quantity(\n V_t,\n u.kg / u.m**2,\n 'Integrated water vapour content along the path')\n h = prepare_quantity(h, u.km, 'Altitude')\n val = __model.zenit_water_vapour_attenuation(\n lat, lon, p, f, V_t=V_t, h=h)\n return prepare_output_array(val, type_output) * u.dB\n\n\ndef gammaw_approx(f, P, rho, T):\n \"\"\"\n Method to estimate the specific attenuation due to water vapour using the\n approximate method descibed in Annex 2.\n\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n P : number or Quantity\n Atmospheric pressure (hPa)\n rho : number or Quantity\n Water vapor density (g/m3)\n T : number or Quantity\n Absolute temperature (K)\n\n\n Returns\n -------\n gamma_w : Quantity\n Water vapour specific attenuation (dB/km)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n global __model\n type_output = type(f)\n f = prepare_quantity(f, u.GHz, 'Frequency')\n P = prepare_quantity(P, u.hPa, 'Atmospheric pressure ')\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapour density')\n T = prepare_quantity(T, u.K, 'Temperature')\n val = __model.gammaw_approx(f, P, rho, T)\n return prepare_output_array(val, type_output) * u.dB / u.km\n\n\ndef gamma0_approx(f, P, rho, T):\n \"\"\"\n Method to estimate the specific attenuation due to dry atmosphere using the\n approximate method descibed in Annex 2.\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n P : number or Quantity\n Atmospheric pressure (hPa)\n rho : number or Quantity\n Water vapor density (g/m3)\n T : number or Quantity\n Absolute temperature (K)\n\n\n Returns\n -------\n gamma_w : Quantity\n Dry atmosphere specific attenuation (dB/km)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n global __model\n type_output = type(f)\n f = prepare_quantity(f, u.GHz, 'Frequency')\n P = prepare_quantity(P, u.hPa, 'Atmospheric pressure')\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapour density')\n T = prepare_quantity(T, u.K, 'Temperature')\n val = __model.gamma0_approx(f, P, rho, T)\n return prepare_output_array(val, type_output) * u.dB / u.km\n\n\ndef gammaw_exact(f, P, rho, T):\n \"\"\"\n Method to estimate the specific attenuation due to water vapour using\n the line-by-line method described in Annex 1 of the recommendation.\n\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n P : number or Quantity\n Atmospheric pressure (hPa)\n rho : number or Quantity\n Water vapor density (g/m3)\n T : number or Quantity\n Absolute temperature (K)\n\n\n Returns\n -------\n gamma_w : Quantity\n Water vapour specific attenuation (dB/km)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n global __model\n type_output = type(f)\n f = prepare_quantity(f, u.GHz, 'Frequency')\n P = prepare_quantity(P, u.hPa, 'Atmospheric pressure ')\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapour density')\n T = prepare_quantity(T, u.K, 
'Temperature')\n val = __model.gammaw_exact(f, P, rho, T)\n return prepare_output_array(val, type_output) * u.dB / u.km\n\n\ndef gamma0_exact(f, P, rho, T):\n \"\"\"\n Method to estimate the specific attenuation due to dry atmosphere using\n the line-by-line method described in Annex 1 of the recommendation.\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n P : number or Quantity\n Atmospheric pressure (hPa)\n rho : number or Quantity\n Water vapor density (g/m3)\n T : number or Quantity\n Absolute temperature (K)\n\n\n Returns\n -------\n gamma_w : Quantity\n Dry atmosphere specific attenuation (dB/km)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n global __model\n type_output = type(f)\n f = prepare_quantity(f, u.GHz, 'Frequency')\n P = prepare_quantity(P, u.hPa, 'Atmospheric pressure')\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapour density')\n T = prepare_quantity(T, u.K, 'Temperature')\n val = __model.gamma0_exact(f, P, rho, T)\n return prepare_output_array(val, type_output) * u.dB / u.km\n\n\ndef gamma_exact(f, P, rho, T):\n \"\"\"\n Method to estimate the specific attenuation using the line-by-line method\n described in Annex 1 of the recommendation.\n\n\n Parameters\n ----------\n f : number or Quantity\n Frequency (GHz)\n P : number or Quantity\n Atmospheric pressure (hPa)\n rho : number or Quantity\n Water vapor density (g/m3)\n T : number or Quantity\n Absolute temperature (K)\n\n Returns\n -------\n gamma : Quantity\n Specific attenuation (dB/km)\n\n References\n --------\n [1] Attenuation by atmospheric gases:\n https://www.itu.int/rec/R-REC-P.676/en\n \"\"\"\n global __model\n f = prepare_quantity(f, u.GHz, 'Frequency')\n type_output = type(f)\n\n P = prepare_quantity(P, u.hPa, 'Atmospheric pressure ')\n rho = prepare_quantity(rho, u.g / u.m**3, 'Water vapour density')\n T = prepare_quantity(T, u.K, 'Temperature')\n val = __model.gamma_exact(f, P, rho, T)\n return prepare_output_array(val, type_output) * u.dB / u.km\n"
]
| [
[
"numpy.arccos",
"numpy.minimum",
"numpy.exp",
"numpy.where",
"numpy.cos",
"numpy.cumsum",
"numpy.deg2rad",
"numpy.zeros_like",
"numpy.sin",
"numpy.vectorize",
"numpy.log",
"numpy.logical_and",
"numpy.arange",
"numpy.sqrt",
"numpy.mod",
"numpy.array",
"numpy.pad",
"numpy.power",
"numpy.clip",
"numpy.any"
]
]
|
TCherici/StRADRL | [
"528bf4fbd61f91a79dfc24fc6d6c9caa66a4a5a5"
]
| [
"settings/options3.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef get_options(option_type):\n \"\"\"\n option_type: string\n 'training' or 'diplay' or 'visualize'\n \"\"\" \n # name\n tf.app.flags.DEFINE_string(\"training_name\",\"tc_v1\",\"name of next training in log\")\n\n \n # Common\n tf.app.flags.DEFINE_string(\"env_type\", \"gym\", \"environment type (lab or gym or maze)\")\n tf.app.flags.DEFINE_string(\"env_name\", \"CartPole-v1\", \"environment name (for lab)\")\n tf.app.flags.DEFINE_integer(\"env_max_steps\", 400000, \"max number of steps in environment\")\n \n tf.app.flags.DEFINE_boolean(\"use_base\", False, \"whether to use base A3C for aux network\")\n tf.app.flags.DEFINE_boolean(\"use_pixel_change\", False, \"whether to use pixel change\")\n tf.app.flags.DEFINE_boolean(\"use_value_replay\", False, \"whether to use value function replay\")\n tf.app.flags.DEFINE_boolean(\"use_reward_prediction\", False, \"whether to use reward prediction\")\n tf.app.flags.DEFINE_boolean(\"use_temporal_coherence\", True, \"whether to use temporal coherence\")\n tf.app.flags.DEFINE_boolean(\"use_proportionality\", False, \"whether to use proportionality\")\n tf.app.flags.DEFINE_boolean(\"use_causality\", False, \"whether to use causality\")\n tf.app.flags.DEFINE_boolean(\"use_repeatability\", False, \"whether to use repeatability\")\n tf.app.flags.DEFINE_string(\"checkpoint_dir\", \"/tmp/StRADRL/checkpoints\", \"checkpoint directory\")\n\n # For training\n if option_type == 'training':\n tf.app.flags.DEFINE_string(\"temp_dir\", \"/tmp/StRADRL/tensorboard/\", \"base directory for tensorboard\")\n tf.app.flags.DEFINE_string(\"log_dir\", \"/tmp/StRADRL/log/\", \"base directory for logs\")\n tf.app.flags.DEFINE_integer(\"max_time_step\", 10**6, \"max time steps\")\n tf.app.flags.DEFINE_integer(\"save_interval_step\", 10**4, \"saving interval steps\")\n tf.app.flags.DEFINE_boolean(\"grad_norm_clip\", 40.0, \"gradient norm clipping\")\n\n #base\n tf.app.flags.DEFINE_float(\"initial_learning_rate\", 1e-3, \"learning rate\")\n tf.app.flags.DEFINE_float(\"gamma\", 0.99, \"discount factor for rewards\")\n tf.app.flags.DEFINE_float(\"entropy_beta\", 0.01, \"entropy regurarlization constant\")\n tf.app.flags.DEFINE_float(\"value_lambda\", 0.5, \"value ratio for base loss\")\n tf.app.flags.DEFINE_float(\"base_lambda\", 0.97, \"generalized adv. est. lamba for short-long sight\")\n \n \n # auxiliary\n tf.app.flags.DEFINE_integer(\"parallel_size\", 1, \"parallel thread size\")\n tf.app.flags.DEFINE_float(\"aux_initial_learning_rate\", 1e-3, \"learning rate\")\n tf.app.flags.DEFINE_float(\"aux_lambda\", 0.0, \"generalized adv. est. 
lamba for short-long sight (aux)\")\n tf.app.flags.DEFINE_float(\"gamma_pc\", 0.9, \"discount factor for pixel control\")\n tf.app.flags.DEFINE_float(\"pixel_change_lambda\", 0.0001, \"pixel change lambda\") # 0.05, 0.01 ~ 0.1 for lab, 0.0001 ~ 0.01 for gym\n tf.app.flags.DEFINE_float(\"temporal_coherence_lambda\", 1., \"temporal coherence lambda\")\n tf.app.flags.DEFINE_float(\"proportionality_lambda\", 100., \"proportionality lambda\")\n tf.app.flags.DEFINE_float(\"causality_lambda\", 1., \"causality lambda\")\n tf.app.flags.DEFINE_float(\"repeatability_lambda\", 100., \"repeatability lambda\")\n \n tf.app.flags.DEFINE_integer(\"experience_history_size\", 100000, \"experience replay buffer size\")\n \n # queuer\n tf.app.flags.DEFINE_integer(\"local_t_max\", 20, \"repeat step size\")\n tf.app.flags.DEFINE_integer(\"queue_length\", 5, \"max number of batches (of length local_t_max) in queue\")\n tf.app.flags.DEFINE_integer(\"env_runner_sync\", 1, \"number of env episodes before sync to global\")\n tf.app.flags.DEFINE_float(\"action_freq\", 0, \"number of actions per second in env\")\n \n\n # For display\n if option_type == 'display':\n tf.app.flags.DEFINE_string(\"frame_save_dir\", \"/tmp/StRADRL_frames\", \"frame save directory\")\n tf.app.flags.DEFINE_boolean(\"recording\", False, \"whether to record movie\")\n tf.app.flags.DEFINE_boolean(\"frame_saving\", False, \"whether to save frames\")\n\n return tf.app.flags.FLAGS\n"
]
| [
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.app.flags.DEFINE_string"
]
]
|
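A minimal, hedged sketch of how a flag module like the one above is usually consumed (TF1-era `tf.app.flags`); the module name `options` and the entry-point script are assumptions, not taken from this row:

```python
# Hedged usage sketch for the flag module above (TF1-style tf.app.flags).
# The import path `options` is an assumption; flags are parsed from sys.argv lazily.
from options import get_options  # hypothetical module name

FLAGS = get_options('training')
print(FLAGS.env_name, FLAGS.gamma, FLAGS.use_temporal_coherence)
# Defaults can be overridden on the command line, e.g.:
#   python train.py --env_name=CartPole-v0 --initial_learning_rate=5e-4
```

Since the flags are registered at call time, `get_options` should only be invoked once per process; a second call would attempt to re-register the same flag names.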
wjm41/soapgp | [
"ef57cebb7413abb96b54983141e188dff5166d03"
]
| [
"data/Malaria/generate_smiles.py"
]
| [
"import pandas as pd\nimport sys\n\ncsv_name = sys.argv[1]+'.csv'\nsmiles_name = sys.argv[1]+'.can'\n\nSMILES_df = pd.read_csv(csv_name,header=0, index_col=False)\nfile=open(smiles_name,'w')\nfor i,row in SMILES_df.iterrows():\n\tfile.write(row['SMILES']+'\\t'+str(row['Percentage_inhibition_3D7'])+'\\t'+str(row['Percentage_inhibition_DD2'])+'\\t'+str(row['Percentage_inhibition_3D7_PFLDH'])+'\\t'+str(row['XC50_3D7 (microM)'])+'\\t'+str(row['Percentage_inhibition_HEPG2'])+'\\n')\nfile.close()\n"
]
| [
[
"pandas.read_csv"
]
]
|
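The script above is driven by a single command-line argument (`python generate_smiles.py <basename>` reads `<basename>.csv` and writes a tab-separated `<basename>.can`). A hedged in-memory equivalent, with invented data values and the column names taken from the script:

```python
# Sketch of the same CSV -> .can conversion done directly with pandas (data is made up).
import pandas as pd

df = pd.DataFrame({
    'SMILES': ['CCO'],
    'Percentage_inhibition_3D7': [12.3],
    'Percentage_inhibition_DD2': [10.1],
    'Percentage_inhibition_3D7_PFLDH': [11.0],
    'XC50_3D7 (microM)': [1.5],
    'Percentage_inhibition_HEPG2': [3.2],
})
# Same tab-separated, headerless layout as the row-by-row loop in the script above.
df.to_csv('example.can', sep='\t', header=False, index=False)
```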
leying95/stereopy | [
"1580a88a091a2ebc0f177ea73409e2c4b4dd4c7e"
]
| [
"stereo/tools/cell_type_anno.py"
]
| [
"#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:cell_type_anno.py\n@time:2021/03/09\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\nfrom multiprocessing import Pool\nimport traceback\nfrom ..log_manager import logger\nfrom ..utils.correlation import spearmanr_corr, pearson_corr\nfrom ..preprocess.normalize import normalize_total\nfrom ..config import stereo_conf\nfrom ..utils import remove_file\nfrom ..core.tool_base import ToolBase\nfrom ..core.stereo_result import CellTypeResult\nfrom scipy.sparse import issparse\n\n\nclass CellTypeAnno(ToolBase):\n def __init__(self, adata, ref_dir=None, cores=1, keep_zeros=True, use_rf=True, sample_rate=0.8,\n n_estimators=20, strategy='1', method='spearmanr', split_num=1, out_dir=None, name='cell_type_anno'):\n super(CellTypeAnno, self).__init__(data=adata, method=method, name=name)\n self.param = locals()\n self.data = adata\n self.ref_dir = ref_dir if ref_dir else os.path.join(stereo_conf.data_dir, 'ref_db', 'FANTOM5')\n self.n_jobs = cores\n self.keep_zeros = keep_zeros\n self.use_rf = use_rf\n self.sample_rate = sample_rate\n self.n_estimators = n_estimators\n self.strategy = strategy\n self.method = method\n self.split_num = split_num\n self.output = out_dir if out_dir else stereo_conf.out_dir\n self.result = CellTypeResult(name=name, param=self.param)\n self.check_param()\n\n def split_dataframe(self, df):\n\n datas = []\n logger.info(f'input data: {df.shape[0]} genes, {df.shape[1]} cells.')\n if self.split_num > 1:\n logger.info(f'split the anndata.X to {self.split_num} matrixs')\n step_size = int(df.shape[1]/self.split_num) + 1\n for i in range(self.split_num):\n start = i * step_size\n end = start + step_size if start + step_size < df.shape[1] else df.shape[1]\n datas.append(df.iloc[:, start: end])\n else:\n datas.append(df)\n return datas\n\n @staticmethod\n def concat_top_corr_files(files, output_dir, prefix=None):\n df = pd.read_csv(files[0])\n for f in files[1:]:\n df1 = pd.read_csv(f)\n df = df.append(df1)\n file_name = f'{prefix}_top_annotation.csv' if prefix else 'top_annotation.csv'\n df.to_csv(os.path.join(output_dir, file_name), index=False)\n return df\n\n def merge_subsample_result(self, input_dir, prefix, output_dir):\n files = [os.path.join(input_dir, f) for f in os.listdir(input_dir) if prefix in f]\n df = pd.read_csv(files[0])\n for f in files[1:]:\n df1 = pd.read_csv(f)\n df = df.append(df1)\n type_cnt = df.groupby(['cell', 'cell type']).count()[['corr_score']]\n type_cnt['type_rate'] = type_cnt / self.n_estimators\n type_cnt.columns = ['type_cnt', 'type_rate']\n type_cnt.reset_index(inplace=True)\n score_mean = df.groupby(['cell', 'cell type']).mean()[['corr_score']]\n score_mean.columns = ['score_mean']\n score_mean.reset_index(inplace=True)\n df = score_mean.merge(type_cnt, on=['cell', 'cell type'])\n df.to_csv(os.path.join(output_dir, 'all_annotation.csv'), index=False)\n df = df[(df.groupby('cell')['type_cnt'].transform('max') == df['type_cnt']) & (\n df.groupby('cell')['score_mean'].transform('max') == df['score_mean'])]\n df.to_csv(os.path.join(output_dir, 'top_annotation.csv'), index=False)\n return df\n\n @staticmethod\n def merge_subsample_result_filter(input_dir, prefix, output_dir):\n files = [os.path.join(input_dir, f) for f in os.listdir(input_dir) if prefix in f]\n df = pd.read_csv(files[0])\n for f in files[1:]:\n df_1 = pd.read_csv(f)\n df = df.append(df_1)\n score_mean = df.groupby(['cell', 'cell 
type']).mean()[['corr_score']]\n score_mean.columns = ['score_mean']\n score_mean.reset_index(inplace=True)\n tmp = score_mean.merge(df, on=['cell', 'cell type'])\n tmp = tmp[tmp['score_mean'] <= tmp['corr_score']]\n type_cnt = tmp.groupby(['cell', 'cell type']).count()[['score_mean']].reset_index()\n type_sum = tmp.groupby(['cell']).count()[['corr_score']].reset_index()\n score_mean = tmp.groupby(['cell', 'cell type']).mean()[['corr_score']]\n score_mean.columns = ['score_mean']\n score_mean.reset_index(inplace=True)\n type_rate = type_cnt.merge(type_sum, on=['cell'])\n type_rate['type_rate'] = type_rate['score_mean'] / type_rate['corr_score']\n type_rate.columns = ['cell', 'cell type', 'type_cnt', 'type_cnt_sum', 'type_rate']\n df = score_mean.merge(type_rate, on=['cell', 'cell type'])\n df.to_csv(os.path.join(output_dir, 'all_annotation.csv'), index=False)\n df = df[df.groupby('cell')['type_cnt'].transform('max') == df['type_cnt']]\n df = df[df.groupby('cell')['score_mean'].transform('max') == df['score_mean']]\n df.to_csv(os.path.join(output_dir, 'top_annotation.csv'), index=False)\n return df\n\n def fit(self):\n exp_matrix = self.data.X.toarray().T if issparse(self.data.X) else self.data.X.T\n df = pd.DataFrame(exp_matrix, index=list(self.data.var_names),\n columns=list(self.data.obs_names))\n datas = self.split_dataframe(df) if self.split_num > 1 else [df]\n tmp_output = os.path.join(self.output, 'tmp')\n logger.info('start to run annotation.')\n if not os.path.exists(tmp_output):\n os.makedirs(tmp_output)\n if self.use_rf:\n pool = Pool(self.n_jobs)\n for i in range(self.n_estimators):\n for j in range(len(datas)):\n sub_index = f'subsample_{i}_{j}'\n pool.apply_async(run_annotation, (datas[j], self.ref_dir, self.method, self.keep_zeros, tmp_output,\n sub_index, self.use_rf, self.sample_rate),\n error_callback=subprocess_error)\n pool.close()\n pool.join()\n logger.info(f'start to merge top result ...')\n for i in range(self.n_estimators):\n files = [os.path.join(tmp_output, f'subsample_{i}_{j}.top_{self.method}_corr.csv')\n for j in range(self.split_num)]\n index = f'subsample_{i}'\n self.concat_top_corr_files(files, tmp_output, index)\n if self.strategy == 1:\n self.result.anno_data = self.merge_subsample_result(tmp_output, 'top_annotation.csv', self.output)\n else:\n self.result.anno_data = self.merge_subsample_result_filter(tmp_output, 'top_annotation.csv',\n self.output)\n else:\n pool = Pool(self.n_jobs)\n for i in range(len(datas)):\n sub_index = f'sub_{i}'\n pool.apply_async(run_annotation, (datas[i], self.ref_dir, self.method, self.keep_zeros, tmp_output,\n sub_index, self.use_rf, self.sample_rate),\n error_callback=subprocess_error)\n pool.close()\n pool.join()\n logger.info(f'start to merge top result ...')\n files = [os.path.join(tmp_output, f'sub_{i}.top_{self.method}_corr.csv') for i in range(len(datas))]\n self.result.anno_data = self.concat_top_corr_files(files, self.output)\n # clear tmp directory\n remove_file(tmp_output)\n self.add_result(result=self.result, key_added=self.name)\n\n\ndef parse_ref_data(ref_dir):\n logger.info(f'loading ref data')\n ref_sample_path = os.path.join(ref_dir, 'ref_sample_epx.csv')\n ref_db = pd.read_csv(ref_sample_path, index_col=0, header=0)\n ref_db = ref_db.fillna(0)\n # remove duplicate indices\n ref_db = ref_db[~ref_db.index.duplicated(keep='first')]\n logger.info('reference dataset shape: %s genes, %s samples' % ref_db.shape)\n return ref_db\n\n\ndef random_choose_genes(df, sample_rate):\n sample_cnt = 
pd.Series(np.int32(df.sum(axis=0) * sample_rate), index=df.columns)\n gene_rate = df / df.sum(axis=0)\n sample_df = gene_rate.apply(lambda x: choose_gene(x, sample_cnt), axis=0)\n sample_df.fillna(0, inplace=True)\n return sample_df\n\n\ndef choose_gene(x, num):\n gene_list = list(x.index)\n p = x.values\n res = np.random.choice(a=gene_list, size=num[x.name], p=p)\n res = np.unique(res, return_counts=True)\n res = pd.Series(data=res[1], index=res[0], name=x.name)\n return res\n\n\ndef annotation(df, ref_db, method, keep_zeros):\n # find common genes between test data and ref data\n test_genes = set(df.index)\n ref_genes = set(ref_db.index)\n if keep_zeros:\n common_genes = list(ref_genes)\n else:\n common_genes = list(ref_genes.intersection(test_genes))\n # only keep non-zero genes\n df = df.reindex(common_genes, fill_value=0.0)\n ref_db = ref_db.reindex(common_genes, fill_value=0.0)\n if method == 'pearson':\n corr_score = pearson_corr(ref_db, df)\n else:\n corr_score = spearmanr_corr(ref_db, df)\n corr_score = corr_score.fillna(0)\n return corr_score\n\n\ndef get_top_corr(score, cell_map, output):\n max_index = score.values.argmax(axis=1)\n max_value = score.values.max(axis=1)\n samples = score.columns[max_index]\n cell_type = cell_map.loc[samples, 'cell type']\n cell = score.index\n df = pd.DataFrame({'cell': cell, 'cell type': cell_type, 'corr_score': max_value, 'corr_sample': samples})\n df.to_csv(output, index=False)\n return df\n\n\ndef run_annotation(sub_df, ref_dir, method, keep_zero, output, sub_index, use_rf, sample_rate):\n ref_db = parse_ref_data(ref_dir)\n cell_map = pd.read_csv(os.path.join(ref_dir, 'cell_map.csv'), index_col=0, header=0, sep=',')\n if use_rf:\n logger.info('random choose')\n sub_df = random_choose_genes(sub_df, sample_rate)\n nor_x = normalize_total(sub_df.transpose().values, target_sum=10000) # TODO select some of normalize method\n nor_x = np.log1p(nor_x, out=nor_x)\n sub_df = pd.DataFrame(nor_x.T, columns=sub_df.columns, index=sub_df.index)\n logger.info('annotation')\n corr_df = annotation(sub_df, ref_db, method, keep_zero)\n all_out = os.path.join(output, f'{sub_index}.all_{method}_corr.csv')\n top_out = os.path.join(output, f'{sub_index}.top_{method}_corr.csv')\n corr_df.to_csv(all_out)\n get_top_corr(corr_df, cell_map, top_out)\n logger.info(f'subsample {sub_index} DONE!')\n\n\ndef subprocess_error(value):\n \"\"\"\n print error information from a subprocess\n :param value:\n :return:\n \"\"\"\n logger.error(f\"error: {value}\")\n logger.error(\"========\")\n logger.error(traceback.format_exc())\n raise\n"
]
| [
[
"scipy.sparse.issparse",
"numpy.random.choice",
"pandas.DataFrame",
"numpy.log1p",
"pandas.Series",
"pandas.read_csv",
"numpy.unique"
]
]
|
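A hedged driver sketch for the `CellTypeAnno` tool above; it assumes an AnnData object and a FANTOM5-style reference directory, and every path below is illustrative rather than taken from this file:

```python
# Usage sketch (assumptions: `adata` is an AnnData with .X/.obs_names/.var_names,
# and ref_dir follows the ref_sample_epx.csv / cell_map.csv layout read above).
import scanpy as sc
from stereo.tools.cell_type_anno import CellTypeAnno

adata = sc.read_h5ad('example.h5ad')   # hypothetical input file
anno = CellTypeAnno(
    adata,
    ref_dir='ref_db/FANTOM5',          # hypothetical reference directory
    cores=4,
    use_rf=True,                       # subsample genes and vote over n_estimators runs
    n_estimators=20,
    method='spearmanr',                # or 'pearson'
    split_num=2,                       # chunk cells to bound memory
    out_dir='./anno_out',              # hypothetical output directory
)
anno.fit()                             # writes all_annotation.csv / top_annotation.csv
```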
TylerYep/ml-toolkit | [
"095bdce961133acc720f90b6d1bbb0a7becbfc9f"
]
| [
"ai_toolkit/datasets/dataset.py"
]
| [
"from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Callable, Tuple\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset, random_split\nfrom torch.utils.data.dataloader import default_collate\n\nfrom ai_toolkit.args import Arguments\n\nTensorDataLoader = DataLoader[Tuple[torch.Tensor, ...]]\n\n\nclass DatasetLoader:\n def __init__(self) -> None:\n self.CLASS_LABELS: list[str] = []\n self.DATA_PATH = Path(\"/content/\" if \"google.colab\" in sys.modules else \"data/\")\n\n @staticmethod\n def get_collate_fn(device: torch.device) -> Callable[[list[Any]], Any]:\n \"\"\"\n for indices in batch_sampler:\n yield collate_fn([dataset[i] for i in indices])\n \"\"\"\n\n def to_device(b: torch.Tensor) -> Any:\n return (\n list(map(to_device, b))\n if isinstance(b, (list, tuple))\n else b.to(device)\n )\n\n return lambda x: map(to_device, default_collate(x))\n\n def split_data(\n self,\n orig_dataset: TensorDataset,\n args: Arguments,\n device: torch.device,\n val_split: float,\n ) -> tuple[TensorDataLoader, TensorDataLoader]:\n collate_fn = self.get_collate_fn(device)\n generator_seed = torch.Generator().manual_seed(0)\n orig_len = len(orig_dataset)\n if args.num_examples:\n n = args.num_examples\n data_split = [n, n, orig_len - 2 * n]\n train_set, val_set = random_split(orig_dataset, data_split, generator_seed)[\n :-1\n ]\n else:\n train_size = int((1 - val_split) * orig_len)\n data_split = [train_size, orig_len - train_size]\n train_set, val_set = random_split(orig_dataset, data_split, generator_seed)\n\n train_loader = DataLoader(\n train_set,\n batch_size=args.batch_size,\n shuffle=True,\n collate_fn=collate_fn,\n pin_memory=torch.cuda.is_available(),\n num_workers=args.num_workers,\n )\n val_loader = DataLoader(\n val_set,\n batch_size=args.batch_size,\n collate_fn=collate_fn,\n pin_memory=torch.cuda.is_available(),\n num_workers=args.num_workers,\n )\n return train_loader, val_loader\n\n def load_train_data(\n self, args: Arguments, device: torch.device, val_split: float = 0.2\n ) -> tuple[TensorDataLoader, TensorDataLoader, tuple[Any, ...]]:\n raise NotImplementedError\n\n def load_test_data(self, args: Arguments, device: torch.device) -> TensorDataLoader:\n raise NotImplementedError\n"
]
| [
[
"torch.cuda.is_available",
"torch.utils.data.random_split",
"torch.Generator",
"torch.utils.data.dataloader.default_collate"
]
]
|
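Since `load_train_data`/`load_test_data` are left abstract above, a minimal hypothetical subclass (random tensors, invented shapes) shows how `split_data` is meant to be reused:

```python
# Hypothetical DatasetLoader subclass; the data and the returned shape tuple are invented.
import torch
from torch.utils.data import TensorDataset

from ai_toolkit.datasets.dataset import DatasetLoader  # path as given in this row


class RandomLoader(DatasetLoader):
    def load_train_data(self, args, device, val_split=0.2):
        x = torch.randn(1000, 3, 32, 32)        # fake images
        y = torch.randint(0, 10, (1000,))       # fake integer labels
        train_loader, val_loader = self.split_data(
            TensorDataset(x, y), args, device, val_split
        )
        return train_loader, val_loader, ((3, 32, 32),)  # extra model-init info is a guess
```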
bearpelican/PSSR | [
"a90b7d208d4369946500a70a6f31c44e3367e4c7"
]
| [
"utils/resnet.py"
]
| [
"import torch.nn as nn\nimport torch,math,sys\nimport torch.utils.model_zoo as model_zoo\nfrom functools import partial\nimport fastai.vision.learner as fvl\n\n__all__ = ['WNResNet', 'wnresnet18', 'wnresnet34', 'wnresnet50', 'wnresnet101', 'wnresnet152']\n\n# or: ELU+init (a=0.54; gain=1.55)\nact_fn = nn.ReLU(inplace=True)\n\nclass Flatten(nn.Module):\n def forward(self, x): return x.view(x.size(0), -1)\n\ndef init_cnn(m):\n if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0)\n if isinstance(m, (nn.Conv2d,nn.Linear)): nn.init.kaiming_normal_(m.weight)\n for l in m.children(): init_cnn(l)\n\ndef conv(ni, nf, ks=3, stride=1, bias=False):\n return nn.Conv2d(ni, nf, kernel_size=ks, stride=stride, padding=ks//2, bias=bias)\n\ndef noop(x): return x\n\ndef conv_layer(ni, nf, ks=3, stride=1, act=True):\n layers = [nn.utils.weight_norm(conv(ni, nf, ks, stride=stride))]\n if act: layers.append(act_fn)\n return nn.Sequential(*layers)\n\nclass ResBlock(nn.Module):\n def __init__(self, expansion, ni, nh, stride=1):\n super().__init__()\n nf,ni = nh*expansion,ni*expansion\n layers = [conv_layer(ni, nh, 1)]\n layers += [\n conv_layer(nh, nf, 3, stride=stride,act=False)\n ] if expansion==1 else [\n conv_layer(nh, nh, 3, stride=stride),\n conv_layer(nh, nf, 1, act=False)\n ]\n self.convs = nn.Sequential(*layers)\n # TODO: check whether act=True works better\n self.idconv = noop if ni==nf else conv_layer(ni, nf, 1, act=False)\n self.pool = noop if stride==1 else nn.AvgPool2d(2, ceil_mode=True)\n\n def forward(self, x): return act_fn(self.convs(x) + self.idconv(self.pool(x)))\n\ndef filt_sz(recep): return min(64, 2**math.floor(math.log2(recep*0.75)))\n\nclass WNResNet(nn.Sequential):\n def __init__(self, expansion, layers, c_in=3, c_out=1000):\n stem = []\n sizes = [c_in,32,32,64]\n for i in range(3):\n stem.append(conv_layer(sizes[i], sizes[i+1], stride=2 if i==0 else 1))\n #nf = filt_sz(c_in*9)\n #stem.append(conv_layer(c_in, nf, stride=2 if i==1 else 1))\n #c_in = nf\n\n block_szs = [64//expansion,64,128,256,512]\n blocks = [self._make_layer(expansion, block_szs[i], block_szs[i+1], l, 1 if i==0 else 2)\n for i,l in enumerate(layers)]\n super().__init__(\n *stem,\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n *blocks,\n nn.AdaptiveAvgPool2d(1), Flatten(),\n nn.Linear(block_szs[-1]*expansion, c_out),\n )\n init_cnn(self)\n\n def _make_layer(self, expansion, ni, nf, blocks, stride):\n return nn.Sequential(\n *[ResBlock(expansion, ni if i==0 else nf, nf, stride if i==0 else 1)\n for i in range(blocks)])\n\ndef wnresnet(expansion, n_layers, name, pretrained=False, **kwargs):\n model = WNResNet(expansion, n_layers, **kwargs)\n if pretrained: model.load_state_dict(model_zoo.load_url(model_urls[name]))\n return model\n\ndef _wnresnet_split(m:nn.Module): return (m[0][6],m[1])\n_wnresnet_meta = {'cut':-2, 'split':_wnresnet_split }\n\nme = sys.modules[__name__]\nfor n,e,l in [\n [ 18 , 1, [2,2,2 ,2] ],\n [ 34 , 1, [3,4,6 ,3] ],\n [ 50 , 4, [3,4,6 ,3] ],\n [ 101, 4, [3,4,23,3] ],\n [ 152, 4, [3,8,36,3] ],\n]:\n name = f'wnresnet{n}'\n setattr(me, name, partial(wnresnet, expansion=e, n_layers=l, name=name))\n arch = getattr(me, name)\n fvl.model_meta[arch] = {**_wnresnet_meta}\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.MaxPool2d",
"torch.nn.init.kaiming_normal_",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
]
]
|
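The loop above generates torchvision-style factory functions (`wnresnet18` through `wnresnet152`). A small sanity-check sketch, assuming fastai is installed since the module registers metadata with `fvl.model_meta`:

```python
# Sanity check for the weight-normalised ResNet factories defined above.
import torch
from utils.resnet import wnresnet18  # module path as listed in this row; requires fastai

model = wnresnet18(c_in=3, c_out=10)   # pretrained=False, so no model_urls lookup is attempted
x = torch.randn(2, 3, 64, 64)          # input size is an arbitrary choice
print(model(x).shape)                  # torch.Size([2, 10])
```

Note that `pretrained=True` would fail as the file stands, because `model_urls` is never defined in it.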
thejus-vm/dopamine | [
"d2f4128f056f781e70ea926ab071a621f955a23c"
]
| [
"dopamine/jax/agents/quantile/quantile_agent.py"
]
| [
"# coding=utf-8\n# Copyright 2018 The Dopamine Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"An extension of Rainbow to perform quantile regression.\n\nThis loss is computed as in \"Distributional Reinforcement Learning with Quantile\nRegression\" - Dabney et. al, 2017\"\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport functools\n\nfrom dopamine.jax import networks\nfrom dopamine.jax.agents.dqn import dqn_agent\nfrom dopamine.replay_memory import prioritized_replay_buffer\nimport gin\nimport jax\nimport jax.numpy as jnp\nimport tensorflow as tf\n\n\[email protected](jax.vmap, in_axes=(None, 0, 0, 0, None))\ndef target_distribution(target_network, next_states, rewards, terminals,\n cumulative_gamma):\n \"\"\"Builds the Quantile target distribution as per Dabney et al. (2017).\n\n Args:\n target_network: Jax Module used for the target network.\n next_states: numpy array of batched next states.\n rewards: numpy array of batched rewards.\n terminals: numpy array of batched terminals.\n cumulative_gamma: float, cumulative gamma to use.\n\n Returns:\n The target distribution from the replay.\n \"\"\"\n is_terminal_multiplier = 1. - terminals.astype(jnp.float32)\n # Incorporate terminal state to discount factor.\n gamma_with_terminal = cumulative_gamma * is_terminal_multiplier\n next_state_target_outputs = target_network(next_states)\n q_values = jnp.squeeze(next_state_target_outputs.q_values)\n next_qt_argmax = jnp.argmax(q_values)\n logits = jnp.squeeze(next_state_target_outputs.logits)\n next_logits = logits[next_qt_argmax]\n return jax.lax.stop_gradient(rewards + gamma_with_terminal * next_logits)\n\n\[email protected](jax.jit, static_argnums=(0, 8, 9, 10))\ndef train(network_def, target_params, optimizer, states, actions,\n next_states, rewards, terminals, kappa, num_atoms, cumulative_gamma):\n \"\"\"Run a training step.\"\"\"\n online_params = optimizer.target\n def loss_fn(params, target):\n def q_online(state):\n return network_def.apply(params, state)\n\n logits = jax.vmap(q_online)(states).logits\n logits = jnp.squeeze(logits)\n # Fetch the logits for its selected action. We use vmap to perform this\n # indexing across the batch.\n chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)\n bellman_errors = (target[:, None, :] -\n chosen_action_logits[:, :, None]) # Input `u' of Eq. 9.\n # Eq. 9 of paper.\n huber_loss = (\n (jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *\n 0.5 * bellman_errors ** 2 +\n (jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *\n kappa * (jnp.abs(bellman_errors) - 0.5 * kappa))\n\n tau_hat = ((jnp.arange(num_atoms, dtype=jnp.float32) + 0.5) /\n num_atoms) # Quantile midpoints. See Lemma 2 of paper.\n # Eq. 
10 of paper.\n tau_bellman_diff = jnp.abs(\n tau_hat[None, :, None] - (bellman_errors < 0).astype(jnp.float32))\n quantile_huber_loss = tau_bellman_diff * huber_loss\n # Sum over tau dimension, average over target value dimension.\n loss = jnp.sum(jnp.mean(quantile_huber_loss, 2), 1)\n return jnp.mean(loss), loss\n\n def q_target(state):\n return network_def.apply(target_params, state)\n\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n target = target_distribution(q_target,\n next_states,\n rewards,\n terminals,\n cumulative_gamma)\n (mean_loss, loss), grad = grad_fn(online_params, target)\n optimizer = optimizer.apply_gradient(grad)\n return optimizer, loss, mean_loss\n\n\[email protected]\nclass JaxQuantileAgent(dqn_agent.JaxDQNAgent):\n \"\"\"An implementation of Quantile regression DQN agent.\"\"\"\n\n def __init__(self,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.QuantileNetwork,\n kappa=1.0,\n num_atoms=200,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=50000,\n update_period=4,\n target_update_period=10000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.1,\n epsilon_eval=0.05,\n epsilon_decay_period=1000000,\n replay_scheme='prioritized',\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n seed=None,\n allow_partial_reload=False):\n \"\"\"Initializes the agent and constructs the Graph.\n\n Args:\n num_actions: Int, number of actions the agent can take at any state.\n observation_shape: tuple of ints or an int. If single int, the observation\n is assumed to be a 2D square.\n observation_dtype: DType, specifies the type of the observations. Note\n that if your inputs are continuous, you should set this to jnp.float32.\n stack_size: int, number of frames to use in state stack.\n network: flax.linen Module, expects 3 parameters: num_actions, num_atoms,\n network_type.\n kappa: Float, Huber loss cutoff.\n num_atoms: Int, the number of buckets for the value function distribution.\n gamma: Float, exponential decay factor as commonly used in the RL\n literature.\n update_horizon: Int, horizon at which updates are performed, the 'n' in\n n-step update.\n min_replay_history: Int, number of stored transitions for training to\n start.\n update_period: Int, period between DQN updates.\n target_update_period: Int, ppdate period for the target network.\n epsilon_fn: Function expecting 4 parameters: (decay_period, step,\n warmup_steps, epsilon), and which returns the epsilon value used for\n exploration during training.\n epsilon_train: Float, final epsilon for training.\n epsilon_eval: Float, epsilon during evaluation.\n epsilon_decay_period: Int, number of steps for epsilon to decay.\n replay_scheme: String, replay memory scheme to be used. Choices are:\n uniform - Standard (DQN) replay buffer (Mnih et al., 2015)\n prioritized - Prioritized replay buffer (Schaul et al., 2015)\n optimizer: str, name of optimizer to use.\n summary_writer: SummaryWriter object for outputting training statistics.\n Summary writing disabled if set to None.\n summary_writing_frequency: int, frequency with which summaries will be\n written. Lower values will result in slower training.\n seed: int, a seed for DQN's internal RNG, used for initialization and\n sampling actions. 
If None, will use the current time in nanoseconds.\n allow_partial_reload: bool, whether we allow reloading a partial agent\n (for instance, only the network parameters).\n \"\"\"\n self._num_atoms = num_atoms\n self._kappa = kappa\n self._replay_scheme = replay_scheme\n\n super(JaxQuantileAgent, self).__init__(\n num_actions=num_actions,\n observation_shape=observation_shape,\n observation_dtype=observation_dtype,\n stack_size=stack_size,\n network=functools.partial(network, num_atoms=num_atoms),\n gamma=gamma,\n update_horizon=update_horizon,\n min_replay_history=min_replay_history,\n update_period=update_period,\n target_update_period=target_update_period,\n epsilon_fn=epsilon_fn,\n epsilon_train=epsilon_train,\n epsilon_eval=epsilon_eval,\n epsilon_decay_period=epsilon_decay_period,\n optimizer=optimizer,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency,\n seed=seed,\n allow_partial_reload=allow_partial_reload)\n\n def _build_networks_and_optimizer(self):\n self._rng, rng = jax.random.split(self._rng)\n online_network_params = self.network_def.init(rng, x=self.state)\n optimizer_def = dqn_agent.create_optimizer(self._optimizer_name)\n self.optimizer = optimizer_def.create(online_network_params)\n self.target_network_params = copy.deepcopy(online_network_params)\n\n def _build_replay_buffer(self):\n \"\"\"Creates the replay buffer used by the agent.\"\"\"\n if self._replay_scheme not in ['uniform', 'prioritized']:\n raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))\n # Both replay schemes use the same data structure, but the 'uniform' scheme\n # sets all priorities to the same value (which yields uniform sampling).\n return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype)\n\n def _train_step(self):\n \"\"\"Runs a single training step.\n\n Runs training if both:\n (1) A minimum number of frames have been added to the replay buffer.\n (2) `training_steps` is a multiple of `update_period`.\n\n Also, syncs weights from online_params to target_network_params if training\n steps is a multiple of target update period.\n \"\"\"\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n self.optimizer, loss, mean_loss = train(\n self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n self._kappa,\n self._num_atoms,\n self.cumulative_gamma)\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. 
Add a\n # small nonzero value to the loss to avoid 0 priority items. While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n\n # Weight the loss by the inverse priorities.\n loss = loss_weights * loss\n mean_loss = jnp.mean(loss)\n if self.summary_writer is not None:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='QuantileLoss',\n simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1\n"
]
| [
[
"tensorflow.compat.v1.Summary.Value"
]
]
|
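The core of `train()` above is the quantile Huber loss of Dabney et al. (Eq. 9-10); the toy recomputation below repeats that arithmetic on arbitrarily chosen shapes, outside the agent:

```python
# Standalone recomputation of the quantile Huber loss used in train() above.
# Batch size and atom count are arbitrary; this mirrors Eq. 9-10 only.
import jax.numpy as jnp

kappa, num_atoms = 1.0, 4
target = jnp.zeros((2, num_atoms))                      # [batch, num_target_atoms]
chosen_action_logits = 0.5 * jnp.ones((2, num_atoms))   # [batch, num_atoms]

bellman_errors = target[:, None, :] - chosen_action_logits[:, :, None]
huber_loss = ((jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *
              0.5 * bellman_errors ** 2 +
              (jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *
              kappa * (jnp.abs(bellman_errors) - 0.5 * kappa))
tau_hat = (jnp.arange(num_atoms, dtype=jnp.float32) + 0.5) / num_atoms  # quantile midpoints
tau_bellman_diff = jnp.abs(tau_hat[None, :, None] -
                           (bellman_errors < 0).astype(jnp.float32))
loss = jnp.sum(jnp.mean(tau_bellman_diff * huber_loss, 2), 1)  # sum over tau, mean over target
print(loss)  # per-example loss, shape (2,)
```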
janvi16/-HACKTOBERFEST2K20 | [
"aa9c8b6f7feb245793c0a003ba6fbea3fca9ca22"
]
| [
"Python/Read Excel File/excel_file.py"
]
| [
"import pandas\n\nexcel_data_df = pandas.read_excel('records.xlsx', sheet_name='Employees')\n\n# print whole sheet data\nprint(excel_data_df)"
]
| [
[
"pandas.read_excel"
]
]
|
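For reference, `sheet_name` also accepts `None` to load every sheet at once; a small hedged variant of the script above:

```python
import pandas

# sheet_name=None returns a dict mapping sheet names to DataFrames.
all_sheets = pandas.read_excel('records.xlsx', sheet_name=None)
for name, sheet_df in all_sheets.items():
    print(name, sheet_df.shape)
```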
hyabe/onnx-chainer | [
"339ff390957d9dd29843add015533290fdd051c0"
]
| [
"onnx_chainer/functions/normalization.py"
]
| [
"import sys\n\nimport chainer\nimport numpy as np\n\nfrom onnx_chainer.functions.opset_version import support\nfrom onnx_chainer import onnx_helper\n\n\n@support((1, 6, 7))\ndef convert_BatchNormalization(func, opset_version, input_names,\n num_outputs, context, parameters):\n if len(func.inputs) <= 3:\n # expect this `func` is F.batch_normalization\n x = func.inputs[0].get_variable().array\n mean = chainer.Parameter(x.mean(axis=func.axis))\n input_names.append(context.get_name(mean))\n var = chainer.Parameter(x.var(axis=func.axis))\n input_names.append(context.get_name(var))\n else:\n # expect this `func` is F.fixed_batch_normalization\n mean = chainer.Parameter(func.inputs[3].get_variable().array)\n input_names[3] = context.get_name(mean)\n var = chainer.Parameter(func.inputs[4].get_variable().array)\n input_names[4] = context.get_name(var)\n parameters.append(mean)\n parameters.append(var)\n\n momentum = getattr(func, 'decay', 0.)\n\n # if `use_beta=False`, passed None value to the functions\n if func.inputs[2].get_variable_or_none() is None:\n beta = chainer.Parameter(np.zeros_like(mean, dtype=mean.dtype))\n parameters.append(beta)\n input_names[2] = context.get_name(beta)\n # `use_gamma=False` is same\n if func.inputs[1].get_variable_or_none() is None:\n gamma = chainer.Parameter(np.ones_like(mean, dtype=mean.dtype))\n parameters.append(gamma)\n input_names[1] = context.get_name(gamma)\n\n # TODO(disktnk): On definition of ONNX's BatchNormalization operator,\n # outputs one required output and four optional outputs. This converter\n # must make 5 values for output and return them.\n\n if opset_version == 1:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, num_outputs,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n consumed_inputs=[False, False, False, True, True],\n ),\n elif opset_version == 6:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, num_outputs,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n ),\n elif opset_version == 7:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, num_outputs,\n epsilon=func.eps,\n momentum=momentum,\n ),\n\n\n@support((1, 6, 7))\ndef convert_FixedBatchNormalization(func, opset_version,\n input_names, num_outputs, context,\n parameters):\n return convert_BatchNormalization(\n func, opset_version, input_names, num_outputs, context, parameters)\n\n\ndef convert_LocalResponseNormalization(func, opset_version,\n input_names, num_outputs, context,\n parameters):\n size = int(func.n)\n return onnx_helper.make_node(\n 'LRN', input_names, num_outputs,\n alpha=float(func.alpha) * size,\n beta=float(func.beta),\n bias=float(func.k),\n size=size,\n ),\n\n\ndef convert_NormalizeL2(func, opset_version, input_names,\n num_outputs, context, parameters):\n if isinstance(func.axis, tuple) and len(func.axis) != 1:\n raise ValueError(\n 'Normalization along with multiple axes ({}) are not supported in '\n 'the ONNX\\'s LpNormalization operator.'.format(func.axis))\n if abs(func.eps - 1e-5) > sys.float_info.epsilon:\n # default value of F.normaize eps is 1e-5\n raise ValueError(\n '\\'eps\\' is not supported in the ONNX\\'s LpNormalization operator,'\n ' so that ONNX-Chainer does not accept custom values for \\'eps\\' '\n '({})'.format(func.eps))\n\n return onnx_helper.make_node(\n 'LpNormalization', input_names, num_outputs,\n axis=int(func.axis[0]),\n p=2,\n ),\n"
]
| [
[
"numpy.zeros_like",
"numpy.ones_like"
]
]
|
lucasalexsorensen/mlops | [
"2d8157eb493061775bdab9a8e176d2bdcc2c166e"
]
| [
"src/models/train_model.py"
]
| [
"from ..data import MaskDataset\nfrom torch.utils.data import DataLoader\nimport kornia as K\nimport torch.nn as nn\nimport torchvision\nimport torch\n\nimport wandb\nimport numpy as np\nimport pandas as pd\nfrom torchvision.transforms import ToTensor\nfrom .model import Net\nimport argparse\nimport torchmetrics\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Script for fitting model\")\n parser.add_argument(\"train_data\", type=str)\n parser.add_argument(\"val_data\", type=str)\n parser.add_argument(\"output_path\", type=str)\n parser.add_argument(\"--lr\", default=0.00015, type=float)\n parser.add_argument(\"--batch_size\", default=4, type=int)\n parser.add_argument(\"--embed_dim\", default=200, type=int)\n parser.add_argument(\"--patch_size\", default=4, type=int)\n parser.add_argument(\"--depth\", default=5, type=int)\n parser.add_argument(\"--num_heads\", default=16, type=int)\n parser.add_argument(\"--dropout_attn\", default=0.2356, type=float)\n parser.add_argument(\"--dropout_rate\", default=0.1056, type=float)\n args = parser.parse_args()\n wandb.init(project=\"mlops\", job_type=\"train_model\", config=args)\n config = wandb.config\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"USING DEVICE\", device)\n\n # num_epochs = 50\n train_set = MaskDataset(root_dir=args.train_data)\n val_set = MaskDataset(root_dir=args.val_data)\n train_loader = DataLoader(train_set, batch_size=config.batch_size, shuffle=True)\n val_loader = DataLoader(val_set, batch_size=config.batch_size, shuffle=True)\n\n augmentations = nn.Sequential(\n K.augmentation.RandomHorizontalFlip(p=0.75),\n K.augmentation.RandomVerticalFlip(p=0.75),\n K.augmentation.RandomErasing(),\n K.augmentation.RandomAffine(degrees=15.0),\n K.augmentation.PatchSequential(\n K.augmentation.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.8),\n grid_size=(\n 64 // config.patch_size,\n 64 // config.patch_size,\n ), # cifar-10 is 64x64 and vit is patch 16\n patchwise_apply=False,\n ),\n ).to(device)\n\n model = Net(vars(config)).to(device)\n criterion = nn.CrossEntropyLoss(reduction=\"sum\")\n optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr)\n\n mode = \"min\"\n scores = np.array([])\n patience = 3\n\n for epoch in range(3):\n print(\"========= EPOCH %d =========\" % epoch)\n model.train()\n train_loss = 0\n for i, data in enumerate(train_loader):\n x, y = data[0].to(device), data[1].to(device)\n x = augmentations(x)\n\n optimizer.zero_grad()\n y_hat = model(x)\n loss = criterion(y_hat, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n print(\"Train loss: %.3f\" % train_loss)\n wandb.log({\"train_loss\": train_loss})\n\n model.eval()\n val_accuracy = torchmetrics.Accuracy().to(device)\n val_loss = 0\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n x, y = data[0].to(device), data[1].to(device)\n y_hat = model(x)\n loss = criterion(y_hat, y)\n val_accuracy(y_hat, y.long())\n val_loss += loss.item()\n print(\"Validation loss: %.3f\" % val_loss)\n print(\"Validation acc: %.3f\" % val_accuracy.compute())\n wandb.log({\"val_acc\": val_accuracy.compute(), \"val_loss\": val_loss})\n\n scores = np.append(scores, [val_loss])\n\n # check for early stopping\n if np.sum(np.diff(scores[-(patience - 1) :]) > 0) == patience:\n break\n\n # TODO: save checkpoint instead of latest\n torch.save(model.state_dict(), args.output_path)\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.array",
"torch.no_grad",
"numpy.diff",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"numpy.append",
"torch.nn.CrossEntropyLoss"
]
]
|
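The patience logic in the training loop above stops once the validation loss has risen `patience` epochs in a row; a toy standalone check of that condition (the score values are invented):

```python
# Toy check of the patience-based early-stopping condition used in the training loop above.
import numpy as np

patience = 3
scores = np.array([1.0, 0.9, 0.8, 0.85, 0.9, 0.95])  # made-up validation losses
stop = (len(scores) > patience
        and np.sum(np.diff(scores[-(patience + 1):]) > 0) == patience)
print(stop)  # True: the last three epochs each got worse
```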
hg-zhang/deepst | [
"fc35cfd40785a3a0cc56a83c151c629e53eaf6bd"
]
| [
"deepst/datasets/TaxiBJ.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\n load BJ Data from multiple sources as follows:\n meteorologic data\n\"\"\"\nfrom __future__ import print_function\n\nimport os\n#import cPickle as pickle\nimport pickle\nfrom copy import copy\nimport numpy as np\nimport h5py\nfrom . import load_stdata, stat\nfrom ..preprocessing import MinMaxNormalization, remove_incomplete_days, timestamp2vec\nfrom ..config import Config\nfrom .STMatrix import STMatrix\n# np.random.seed(1337) # for reproducibility\n\n# parameters\nDATAPATH = Config().DATAPATH\n\n\ndef load_holiday(timeslots,\n fname=os.path.join(DATAPATH, 'TaxiBJ', 'BJ_Holiday.txt')):\n f = open(fname, 'r')\n holidays = f.readlines()\n holidays = set([h.strip() for h in holidays])\n H = np.zeros(len(timeslots))\n for i, slot in enumerate(timeslots):\n if slot[:8] in holidays:\n H[i] = 1\n print(H.sum())\n # print(timeslots[H==1])\n return H[:, None]\n\n\ndef load_meteorol(timeslots,\n fname=os.path.join(DATAPATH, 'TaxiBJ', 'BJ_Meteorology.h5')):\n '''\n timeslots: the predicted timeslots\n In real-world, we dont have the meteorol data in the predicted timeslot, instead, we use the meteoral at previous timeslots, i.e., slot = predicted_slot - timeslot (you can use predicted meteorol data as well)\n '''\n f = h5py.File(fname, 'r')\n Timeslot = f['date'].value\n WindSpeed = f['WindSpeed'].value\n Weather = f['Weather'].value\n Temperature = f['Temperature'].value\n f.close()\n\n M = dict() # map timeslot to index\n for i, slot in enumerate(Timeslot):\n M[slot] = i\n\n WS = [] # WindSpeed\n WR = [] # Weather\n TE = [] # Temperature\n for slot in timeslots:\n predicted_id = M[slot]\n cur_id = predicted_id - 1\n WS.append(WindSpeed[cur_id])\n WR.append(Weather[cur_id])\n TE.append(Temperature[cur_id])\n\n WS = np.asarray(WS)\n WR = np.asarray(WR)\n TE = np.asarray(TE)\n\n # 0-1 scale\n WS = 1. * (WS - WS.min()) / (WS.max() - WS.min())\n TE = 1. 
* (TE - TE.min()) / (TE.max() - TE.min())\n\n print(\"shape: \", WS.shape, WR.shape, TE.shape)\n\n # concatenate all these attributes\n merge_data = np.hstack([WR, WS[:, None], TE[:, None]])\n\n # print('meger shape:', merge_data.shape)\n return merge_data\n\n\ndef load_data(T=48,\n nb_flow=2,\n len_closeness=None,\n len_period=None,\n len_trend=None,\n len_test=None,\n preprocess_name='preprocessing.pkl',\n meta_data=True,\n meteorol_data=True,\n holiday_data=True):\n \"\"\"\n \"\"\"\n assert (len_closeness + len_period + len_trend > 0)\n # load data\n # 13 - 16\n data_all = []\n timestamps_all = list()\n # prepare data\n for year in range(13, 17):\n fname = os.path.join(DATAPATH, 'TaxiBJ',\n 'BJ{}_M32x32_T30_InOut.h5'.format(year))\n print(\"file name: \", fname)\n stat(fname)\n data, timestamps = load_stdata(fname)\n # print(timestamps)\n # remove a certain day which does not have 48 timestamps\n data, timestamps = remove_incomplete_days(data, timestamps, T)\n data = data[:, :nb_flow]\n data[data < 0] = 0.\n data_all.append(data)\n timestamps_all.append(timestamps)\n print(\"\\n\")\n\n # minmax_scale\n data_train = np.vstack(copy(data_all))[:-len_test]\n print('train_data shape: ', data_train.shape)\n mmn = MinMaxNormalization()\n mmn.fit(data_train)\n data_all_mmn = [mmn.transform(d) for d in data_all]\n\n fpkl = open(preprocess_name, 'wb')\n for obj in [mmn]:\n pickle.dump(obj, fpkl)\n fpkl.close()\n\n # Clossness, Periodic, Trend\n XC, XP, XT = [], [], []\n Y = []\n timestamps_Y = []\n for data, timestamps in zip(data_all_mmn, timestamps_all):\n # instance-based dataset --> sequences with format as (X, Y) where X is\n # a sequence of images and Y is an image.\n st = STMatrix(data, timestamps, T, CheckComplete=False)\n _XC, _XP, _XT, _Y, _timestamps_Y = st.create_dataset(\n len_closeness=len_closeness,\n len_period=len_period,\n len_trend=len_trend)\n XC.append(_XC)\n XP.append(_XP)\n XT.append(_XT)\n Y.append(_Y)\n timestamps_Y += _timestamps_Y\n\n meta_feature = []\n if meta_data:\n # load time feature\n time_feature = timestamp2vec(timestamps_Y)\n meta_feature.append(time_feature)\n if holiday_data:\n # load holiday\n holiday_feature = load_holiday(timestamps_Y)\n meta_feature.append(holiday_feature)\n if meteorol_data:\n # load meteorol data\n meteorol_feature = load_meteorol(timestamps_Y)\n meta_feature.append(meteorol_feature)\n\n meta_feature = np.hstack(\n meta_feature) if len(meta_feature) > 0 else np.asarray(meta_feature)\n metadata_dim = meta_feature.shape[1] if len(\n meta_feature.shape) > 1 else None\n if metadata_dim < 1:\n metadata_dim = None\n if meta_data and holiday_data and meteorol_data:\n print('time feature:', time_feature.shape, 'holiday feature:',\n holiday_feature.shape, 'meteorol feature: ',\n meteorol_feature.shape, 'mete feature: ', meta_feature.shape)\n\n XC = np.vstack(XC)\n XP = np.vstack(XP)\n XT = np.vstack(XT)\n Y = np.vstack(Y)\n print(\"XC shape: \", XC.shape, \"XP shape: \", XP.shape, \"XT shape: \",\n XT.shape, \"Y shape:\", Y.shape)\n\n XC_train, XP_train, XT_train, Y_train = XC[:\n -len_test], XP[:\n -len_test], XT[:\n -len_test], Y[:\n -len_test]\n XC_test, XP_test, XT_test, Y_test = XC[-len_test:], XP[-len_test:], XT[\n -len_test:], Y[-len_test:]\n timestamp_train, timestamp_test = timestamps_Y[:-len_test], timestamps_Y[\n -len_test:]\n\n X_train = []\n X_test = []\n for l, X_ in zip([len_closeness, len_period, len_trend],\n [XC_train, XP_train, XT_train]):\n if l > 0:\n X_train.append(X_)\n for l, X_ in zip([len_closeness, len_period, 
len_trend],\n [XC_test, XP_test, XT_test]):\n if l > 0:\n X_test.append(X_)\n print('train shape:', XC_train.shape, Y_train.shape, 'test shape: ',\n XC_test.shape, Y_test.shape)\n\n if metadata_dim is not None:\n meta_feature_train, meta_feature_test = meta_feature[:\n -len_test], meta_feature[\n -len_test:]\n X_train.append(meta_feature_train)\n X_test.append(meta_feature_test)\n for _X in X_train:\n print(_X.shape, )\n print()\n for _X in X_test:\n print(_X.shape, )\n print()\n return X_train, Y_train, X_test, Y_test, mmn, metadata_dim, timestamp_train, timestamp_test\n"
]
| [
[
"numpy.hstack",
"numpy.asarray",
"numpy.vstack"
]
]
|
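A hedged sketch of a typical call to `load_data` above; it assumes the BJ13-16 HDF5 files plus BJ_Holiday.txt and BJ_Meteorology.h5 live under DATAPATH/TaxiBJ, and the hold-out length below is an illustrative choice, not a value taken from this file:

```python
# Illustrative call; len_test = four weeks of half-hour slots is an assumption.
from deepst.datasets.TaxiBJ import load_data

T = 48  # half-hour slots per day
(X_train, Y_train, X_test, Y_test,
 mmn, metadata_dim, ts_train, ts_test) = load_data(
    T=T, nb_flow=2,
    len_closeness=3, len_period=1, len_trend=1,
    len_test=T * 28,
    meta_data=True, meteorol_data=True, holiday_data=True)
print(len(X_train), Y_train.shape, metadata_dim)
```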
marcorainone/TropPOLoRaTools | [
"d89e24f29325c50be050b79c2cdce75225fdfaa6"
]
| [
"src/graph-rsigra-interval.py"
]
| [
"#!/usr/bin/env python3\n# ===================================================================================\n# Project: TropPo \n# v. 1.0 2020-03-01, ICTP Wireless Lab\n# Programmer: Marco Rainone - ICTP Wireless Lab\n# Specifications, revisions and verifications: \n# Marco Zennaro, Ermanno Pietrosemoli, Marco Rainone - ICTP Wireless Lab\n# ===================================================================================\n#\n# The project is released with Mit License\n# https://opensource.org/licenses/MIT\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n# ===================================================================================\n#\n# Info\n# ----------------------------------------------------------------\n# Receives:\n# 1. log archive path of radiosonde\n# 2. time in format year month day hour min\n# 3. n. days of the radiosonde log to be analyzed\n# The program extracts the data of the radiosonde acquisitions closest to the date provided \n# and generates html graphs of N, M and slope as a function of the height H reached by the balloon.\n#\n# import required modules \nimport os\nimport os.path\nimport getopt, sys\nimport gc # garbage collector\nimport time\nfrom calendar import timegm\nimport datetime\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom staticmap import StaticMap, CircleMarker\nfrom collections import namedtuple\n\nimport csv, json, sys\nimport geopy.distance\nfrom array import *\n\nimport plotly\nimport plotly.graph_objs as go\nimport plotly.express as px\n\nimport ftplib\n\nfrom zipfile import ZipFile\n\n# ---------------------------------------------------------------\n# see documentation:\n# ftp://ftp.ncdc.noaa.gov/pub/data/igra/igra2-dataset-description.docx\n# ftp://ftp.ncdc.noaa.gov/pub/data/igra/derived/igra2-derived-format.txt\n# -------------------------------\n# Variable Columns Type \n# -------------------------------\n# PRESS 1- 7 Integer\n# REPGPH 9- 15 Integer reported geopotential height (meters).\n# CALCGPH 17- 23 Integer calculated geopotential height (meters)\n# TEMP 25- 31 Integer\n# TEMPGRAD 33- 39 Integer\n# PTEMP 41- 47 Integer\n# PTEMPGRAD 49- 55 Integer\n# VTEMP 57- 63 Integer\n# VPTEMP 65- 71 Integer\n# VAPPRESS 73- 79 Integer\n# SATVAP 81- 87 Integer\n# REPRH 89- 95 Integer\n# CALCRH 97-103 Integer\n# RHGRAD 105-111 Integer\n# UWND 113-119 Integer\n# UWDGRAD 121-127 Integer\n# VWND 129-135 Integer\n# VWNDGRAD 137-143 Integer\n# N 145-151 Integer the refractive index (unitless).\n# -------------------------------\n# \n# Notes:\n# REPGPH \treported geopotential height (meters). \n# This value is often not available at significant levels.\n# \t\t\n# CALCGPH calculated geopotential height (meters). 
\n# The geopotential height has been estimated by applying the hydrostatic balance to\n# \t\tthe atmospheric layer between the next lower level with a reported geopotential height and the current level.\n\n# ---------------------------------------------------------------\n# config\n#\nPathBaseDir = os.getcwd() # current working directory of the process\naccess_rights = 0o777 # define the access rights file/folder\ncsv_sep = ';' # char separator for csv\n\n# -------------------------------------------------------------------------\n#\n# get full path file or directory\ndef get_full_path(file_folder_name):\n return (os.path.abspath(file_folder_name))\n\n# check if the full path is file\ndef is_directory(full_path):\n # os.path.exists checks whether a file or directory exists:\n ris = os.path.exists(full_path)\n if ris:\n # os.path.isdir checks whether it's a directory\n return (os.path.isdir(full_path))\n return False # the path is not directory\n\n# check if the full path is file\ndef is_file_name(full_path):\n # os.path.exists checks whether a file or directory exists:\n ris = os.path.exists(full_path)\n if ris:\n # os.path.isdir checks whether it's a directory\n return (not os.path.isdir(full_path))\n return False # the path is not filename\n\ndef get_dir_name(full_path):\n dirname = os.path.dirname(full_path) # os independent\n return dirname\n\n# return the file name without path\ndef get_file_name(full_path):\n basename = os.path.basename(full_path) # os independent\n base = basename.split('.')[0]\n return base\n\n# return the extension of file\ndef get_file_ext(full_path):\n basename = os.path.basename(full_path) # os independent\n ext = '.'.join(basename.split('.')[1:])\n if ext == '.':\n return \"\"\n return ext\n\ndef igraDrvdExtract(dirIgraLog, stationID):\n # estrai dall'archivio zip il log igra\n # file name igra log archive\n fNameZipIgraLog = stationID + \"-drvd\" + \".txt.zip\"\n fpZipIgraLog = os.path.join(dirIgraLog, fNameZipIgraLog) # full path zip\n\n with ZipFile(fpZipIgraLog, 'r') as zipObj:\n # Extract all the contents of zip file in dirIgraLog\n zipObj.extractall(dirIgraLog)\n\n# Extract the record time string from line of derived record\n# return:\n# string formatted time\n# epoch time in seconds\n#\ndef strDrvdRecordTime(line, yearLimit):\n # get string fields\n year = line[13:17] # year\n month = line[18:20] # month\n day = line[21:23] # day\n hour = line[24:26] # hour\n # string date / time\n date_time_str = year # year\n date_time_str += '-' + month # month\n date_time_str += '-' + day # day\n date_time_str += ' ' + hour # hour\n date_time_str += ':00:00'\n # if int(year)<1970:\n if int(year)<yearLimit:\n return(date_time_str, 0)\n # date after or equal yearLimit (standard: 1970)\n # date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')\n utc_time = time.strptime(date_time_str, \"%Y-%m-%d %H:%M:%S\")\n epoch_time = timegm(utc_time)\n return(date_time_str, epoch_time)\n \n# from time string return time in \"%Y%m%d%H%M%S\", used to create filename\ndef time_compact(date_time_str):\n utc_time = time.strptime(date_time_str, \"%Y-%m-%d %H:%M:%S\")\n ris = time.strftime(\"%Y%m%d%H%M%S\", utc_time)\n return(ris)\n\ndef igraDrvdCreateIndex(dirIgraLog, stationID, yearLimit=2015):\n keySearch = \"#\" + stationID # esempio: #TSM00060760\n print(keySearch)\n \n # file name igra log archive, without extension\n fNameIgraLog = stationID + \"-drvd\"\n # file name radiosonda log index\n fNameIgraIndex = fNameIgraLog\n fNameIgraIndex += '.idx'\n # file 
name radiosonda log\n fNameIgraLog += '.txt'\n fpIgraLog = os.path.join(dirIgraLog, fNameIgraLog)\n print(fpIgraLog)\n fpIgraIndex = os.path.join(dirIgraLog, fNameIgraIndex)\n print(fpIgraIndex)\n\n fIdx = open(fpIgraIndex, 'w') # open file index to write\n\n # -------------------- write header\n lineIdx = \"date\" + csv_sep + \"tm_epoch\" + csv_sep + \"pos_header\" + csv_sep + \"pos_data\" + csv_sep + \"n_rec\" + '\\n'\n fIdx.write(lineIdx)\n\n # read file log Igra\n numHeader = -1 # n. row header con data/ora specificata\n numRecords = 0 # n. records for each section\n\n with open(fpIgraLog,'r') as rsLog:\n rsLog.seek(0, os.SEEK_END) # go to the end of the file.\n eof = rsLog.tell() # get the end of file location\n rsLog.seek(0, os.SEEK_SET) # go to the beginning of the file.\n while(rsLog.tell() != eof):\n pos = rsLog.tell() # posizione file prima della lettura riga\n line = rsLog.readline().strip()\n if line.startswith(keySearch)==False:\n numRecords +=1\n continue\n # -------------- new header\n # extract time acquisition\n date_acq, epoch_time = strDrvdRecordTime(line, yearLimit)\n # verify if time before yearLimit\n if epoch_time == 0:\n # record time before limit\n # print(\"!!! Time before {}:[{}]\".format(yearLimit, date_acq))\n numRecords = 0 # n. records for each section \n posStartData = rsLog.tell() # posizione file dopo della lettura riga\n continue\n # epoch time in limit\n \n # -------------- time new header is correct\n elif numHeader >= 0:\n # save numRecords and reinit value\n if numHeader == 0:\n numRecords = 0\n lineIdx = csv_sep + str(numRecords) + '\\n'\n fIdx.write(lineIdx)\n numRecords = 0 # n. records for each section \n numHeader += 1 # incrementa n. row header con data/ora specificata\n\n # -------------- init line to write in index file\n # lineIdx = str(numHeader)\n lineIdx = \"\"\n \n # create record to write csv\n # lineIdx += csv_sep + '\\\"' + date_acq + '\\\"'\n lineIdx += date_acq # date, time string\n lineIdx += csv_sep + str(epoch_time) # epoch time\n \n lineIdx += csv_sep + str(pos) # position header in file\n posStartData = rsLog.tell() # posizione file dopo della lettura riga\n lineIdx += csv_sep + str(posStartData)\n\n fIdx.write(lineIdx) # write record in csv\n\n # save last numRecords value\n lineIdx = csv_sep + str(numRecords) + '\\n'\n fIdx.write(lineIdx)\n rsLog.close()\n fIdx.close()\n # ------------------ end igraDrvdCreateIndex\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\ndef printHlpOptions():\n print('{} -i <zip Igra2 derived log file> -t <string time> -d <n. days>'.format(sys.argv[0]))\n\ndef printHlpFull():\n print('{} -i <zip Igra2 derived log file> -t <string time> -d <n. 
days>'.format(sys.argv[0]))\n print('Download the Igra2 derived data from:')\n print('ftp://ftp.ncdc.noaa.gov/pub/data/igra/derived/')\n print('Example:')\n print('{} -i radio/GMM00010393-drvd.txt.zip -t \\\"2020 02 16 00 00\\\" -d 7'.format(sys.argv[0]))\n print('Decompress radio/GMM00010393-drvd.txt.zip'.format(sys.argv[0]))\n print('Read measurements starting from \\\"2020-02-16 00:00\\\" for 7 days')\n\n\n# -------------------------------------------------------------------------\n# Get command-line arguments\n\n# initialize variables\ninpZipIgraLog = ''\noutDir = ''\nstrSearchTime = \"\"\ndateSearch = [] # [year, month, day, hour]\ndays = 0\n\ntry:\n opts, args = getopt.getopt(\n sys.argv[1:],\n 'i:t:d:',\n [\"inp=\",\"time=\",\"days=\"])\nexcept getopt.GetoptError:\n printHlpFull() # print full help\n sys.exit(2)\n\nnArg = 0\nfor opt, arg in opts:\n if opt == '-h':\n printHlpFull() # print full help\n sys.exit()\n elif opt in (\"-i\", \"--inp\"):\n inpZipIgraLog = arg\n nArg = nArg + 1\n elif opt in (\"-t\", \"--time\"):\n strSearchTime = arg.strip('\"')\n nArg = nArg + 1\n elif opt in (\"-d\", \"--days\"):\n days = int(arg)\n # print('n. days: {}'.format(days))\n nArg = nArg + 1\n\nif nArg < 3:\n printHlpFull() # print full help\n sys.exit()\n\n# -------------------------------------------------------------------------\n# fields of dateSearch\ndateSearch.append(strSearchTime[0:4]) # year\ndateSearch.append(strSearchTime[5:7]) # month\ndateSearch.append(strSearchTime[8:10]) # day\ndateSearch.append(strSearchTime[11:13]) # hour\ndateSearch.append(strSearchTime[14:16]) # minutes\n\nstrDateSearch = \"\"\nfor i in range(0, 5):\n strDateSearch += dateSearch[i]\nprint(\"dateSearch: [{}][{}]\".format(dateSearch, strDateSearch))\n\n# format Date in human format\nstrDateHuman = \"\"\nstrDateHuman += dateSearch[0]\nfor i in range(0, 3):\n strDateHuman += \"/\"\n strDateHuman += dateSearch[i]\nstrDateHuman += \" \" + dateSearch[3]\nstrDateHuman += \":\" + dateSearch[4]\n\n# -------------------------------------------------------------------------\n\nfpZipIgraLog = get_full_path(inpZipIgraLog) # full path zip\ndirZipIgraLog = get_dir_name(fpZipIgraLog) # directory zip \nnameZipIgraLog = get_file_name(fpZipIgraLog) # file name\nstationID = nameZipIgraLog[0:11]\n\n# get file name index igra \nfNameIdxIgraLog = nameZipIgraLog + '.idx'\nfpIdxIgraLog = os.path.join(dirZipIgraLog, fNameIdxIgraLog)\nprint(\"nameZipIgraLog[{}][{}]\".format(nameZipIgraLog, stationID))\nprint(\"fpIdxIgraLog[{}]\".format(fpIdxIgraLog))\n\noutDir = dirZipIgraLog\n\nwith ZipFile(fpZipIgraLog, 'r') as zipObj:\n # Extract all the contents of zip file in outDir\n zipObj.extractall(outDir)\n\n# -------------------------------------------------------------------------\nprint(\"create index ...\")\nigraDrvdCreateIndex(dirZipIgraLog, stationID)\n\n# get index file\nprint(\"... read file indice\")\nidxLog = pd.read_csv(fpIdxIgraLog, sep = csv_sep)\nprint(\"... 
end read indice\")\n\nprint(\"start search time in log ...\")\n\nsearch_time = dateSearch[0]\nsearch_time += '-' + dateSearch[1]\nsearch_time += '-' + dateSearch[2]\nsearch_time += ' ' + dateSearch[3]\nsearch_time += ':' + dateSearch[4] + ':00'\nprint(\"search_time: [{}]\".format(search_time))\n \n# -------------------------------------------------------------------------\n# calcola i limiti dei tempi\n# convert start time in epochtime\ntmUtc_search = time.strptime(search_time, \"%Y-%m-%d %H:%M:%S\")\nsearch_tmEpoch = timegm(tmUtc_search)\n# calculate end time in epochtime\n# sum days in seconds to search_tmEpoch\nendSrc_tmEpoch = search_tmEpoch + 86400 * days\n\n# get a column with time_epoch\n#### selected_columns = idxLog[[\"tm_epoch\"]]\n#### dfSearch[\"tm_epoch\"] = selected_columns.copy()\n#### dfSearch[\"tm_epoch\"] = dfSearch[\"tm_epoch\"] - search_tmEpoch\n\n# https://stackoverflow.com/questions/37761238/how-do-i-select-and-store-columns-greater-than-a-number-in-pandas\n# see example:\n# Minimum (for the b column) for the rows satisfying b > 10 condition\n# df.loc[df.b > 10, 'b'].min()\n# https://community.dataquest.io/t/pandas-return-row-with-the-maximum-value-of-a-column/258474\n# example:\n# df.iloc[df['column_name'].idxmax()]\n# idxmax() will return the index position of the row with the highest value.\n# Then you can use iloc to return the row with that index.\n# WARNING: If there are multiple max values this method will only return the first row with the max value.\n#\n\nmin_val = idxLog.loc[(search_tmEpoch >= idxLog.tm_epoch), 'tm_epoch'].max()\n#\nmax_val = idxLog.loc[(endSrc_tmEpoch >= idxLog.tm_epoch), 'tm_epoch'].max()\n\ndelta_sec = search_tmEpoch - min_val\n\n# get all rows from min_val and max_val\nrisIdx = idxLog[(idxLog['tm_epoch']>=min_val ) & (idxLog['tm_epoch']<=max_val)]\n\nprint(risIdx)\nprint(\"... end search in log\")\n\n# reset index and get data\nrisIdx.reset_index(drop=True, inplace=True)\n\n# -------------------------------------------------------------------------\n# https://stackoverflow.com/questions/15943769/how-do-i-get-the-row-count-of-a-pandas-dataframe\nnRows_risIdx = risIdx.shape[0] # gives number of row count\nprint(\"n. 
righe risIdx: {}\".format(nRows_risIdx))\n\nn_rec = risIdx.at[0, 'n_rec']\npos_data = risIdx.at[0, 'pos_data'] # start position in file\n\n# ---------------------------------------\ndelta_hour = int(delta_sec / 3600)\ndelta_day = delta_hour / 24\nprint(\"Differenza di tempo in ore: {}\".format(delta_hour))\nif delta_day >= 1:\n printf(\"Warning: il record radiosonda e' stato acquisito da {} giorni rispetto l'orario fornito in ingresso\".format(delta_day)) \n\n# ---------------------------------------\n\nif n_rec == 0:\n print(\"No record to analyze\")\n sys.exit()\n\nfNameIgraLog = get_file_name(fpZipIgraLog)\nfNameIgraLog += '.txt'\nfpIgraLog = os.path.join(dirZipIgraLog, fNameIgraLog)\nprint(fpIgraLog)\n\n# --------------------------------------------------------------------------------------\n# get inputstation, create report file names\n\ninputstation = stationID.upper() # esempio: TSM00060760\n\n# html report file from station\nstr_days = str(days).zfill(3)\nHtmlRep_N_Height = \"reNH-{}-{}-{}days.html\".format(inputstation, strDateSearch, str_days)\nHtmlRep_M_Height = \"reMH-{}-{}-{}days.html\".format(inputstation, strDateSearch, str_days)\nHtmlRep_N_slope = \"slNH-{}-{}-{}days.html\".format(inputstation, strDateSearch, str_days)\nHtmlRep_M_slope = \"slMH-{}-{}-{}days.html\".format(inputstation, strDateSearch, str_days)\n\n# full path file name html report\nfpHtmlRep_N_Height = os.path.join(dirZipIgraLog, HtmlRep_N_Height)\nfpHtmlRep_M_Height = os.path.join(dirZipIgraLog, HtmlRep_M_Height)\nfpHtmlRep_N_slope = os.path.join(dirZipIgraLog, HtmlRep_N_slope)\nfpHtmlRep_M_slope = os.path.join(dirZipIgraLog, HtmlRep_M_slope)\n\nkeySearch = \"#\" + inputstation # search string\nidxKey = 0\nkeySearch += \" \" + dateSearch[idxKey] # add time field\nprint(keySearch)\n\n# ---------------------------------------------------------------\n# see documentation:\n# ftp://ftp.ncdc.noaa.gov/pub/data/igra/igra2-dataset-description.docx\n# ftp://ftp.ncdc.noaa.gov/pub/data/igra/derived/igra2-derived-format.txt\n# -------------------------------\n# Variable Columns Type \n# -------------------------------\n# PRESS 1- 7 Integer\n# REPGPH 9- 15 Integer reported geopotential height (meters).\n# CALCGPH 17- 23 Integer calculated geopotential height (meters)\n# TEMP 25- 31 Integer\n# TEMPGRAD 33- 39 Integer\n# PTEMP 41- 47 Integer\n# PTEMPGRAD 49- 55 Integer\n# VTEMP 57- 63 Integer\n# VPTEMP 65- 71 Integer\n# VAPPRESS 73- 79 Integer\n# SATVAP 81- 87 Integer\n# REPRH 89- 95 Integer\n# CALCRH 97-103 Integer\n# RHGRAD 105-111 Integer\n# UWND 113-119 Integer\n# UWDGRAD 121-127 Integer\n# VWND 129-135 Integer\n# VWNDGRAD 137-143 Integer\n# N 145-151 Integer the refractive index (unitless).\n# -------------------------------\n# \n# Notes:\n# REPGPH \treported geopotential height (meters). \n# This value is often not available at significant levels.\n# \t\t\n# CALCGPH calculated geopotential height (meters). 
\n# The geopotential height has been estimated by applying the hydrostatic balance to\n# \t\tthe atmospheric layer between the next lower level with a reported geopotential height and the current level.\n\n# https://stackoverflow.com/questions/41386443/create-pandas-dataframe-from-txt-file-with-specific-pattern\n\n# CalcGph : calculated geopotential height (meters)\n# RefIndex : the refractive index (unitless).\n\nheight_limit = 4000 # max height for analysis\n\n# list of graph traces\ngrTrace_N_HGHT = [] # traces x:'N', y:'HGHT'\ngrTrace_M_HGHT = [] # traces x:'M', y:'HGHT'\ngrTrace_HGHT_SLOPE_N_H = [] # traces x:'HGHT', y:'slopeN_H'\ngrTrace_HGHT_SLOPE_M_H = [] # traces x:'HGHT', y:'slopeM_H'\n\n# ---------------------------------------------------------------\n\n# per ogni riga del dataframe risIdx, genera i file csv dei report\nItem = namedtuple('CalcGph', 'RefIndex')\nfor rowPos in range(nRows_risIdx):\n items = [] # launch acquisition data\n date_launch = risIdx.at[rowPos, 'date'] # date/time launch radiosonda\n pos_data = risIdx.at[rowPos, 'pos_data'] # start position in file log\n\n # form filename of output csv file\n fNameOutCsv = stationID + \"-\"\n fNameOutCsv += time_compact(date_launch)\n fNameOutCsv += '.csv'\n fpOutCsv = os.path.join(dirZipIgraLog, fNameOutCsv)\n print(fpOutCsv)\n\n with open(fpIgraLog,'r') as rsLog:\n rsLog.seek(pos_data, os.SEEK_SET) # go to the beginning of the file displacement pos_data.\n while True:\n line = rsLog.readline().rstrip()\n if not line:\n # eof\n break\n if line.startswith('#'):\n # new record\n break\n # insert data in dataframe\n CalcGph = int(line[16:23]) # calculated geopotential height (meters)\n RefIndex = int(line[144:151]) # N, the refractive index (unitless)\n items.append((CalcGph, RefIndex))\n # check if CalcGph is greater than height_limit\n if CalcGph > height_limit:\n # acquisition limit, based on height\n break\n rsLog.close()\n\n # sys.exit()\n\n # --------------------------------------------------------------------------------------\n\n # create dataframe \n dtAcq02 = pd.DataFrame.from_records(items, columns=['HGHT', 'N'])\n # dataframe columns:\n # HGHT : altezza (m) (used: CALCGPH calculated geopotential height (m))\n # N : the refractive index (unitless)\n\n # --------------------------------------------------------------------------------------\n # calculate dtAcq02['M'] = dtAcq02['N'] + 0.157 * dtAcq02['HGHT']\n dtAcq02['M'] = dtAcq02.N + 0.157 * dtAcq02.HGHT\n\n # --------------------------------------------------------------------------------------\n # calculate difference of 'N' and 'HGHT' columns\n\n # Calculates the difference of a DataFrame element compared with another element in the DataFrame \n # (default is the element in the same column of the previous row).\n dtAcq02['deltaN'] = dtAcq02['N'].diff()\n dtAcq02['deltaM'] = dtAcq02['M'].diff()\n\n dtAcq02['deltaH'] = dtAcq02['HGHT'].diff()\n\n # calc dtAcq02['deltaH']/dtAcq02['deltaN']\n dtAcq02['slopeN_H'] = dtAcq02['deltaN'].div(dtAcq02['deltaH'])\n # calc dtAcq02['deltaH']/dtAcq02['deltaM']\n dtAcq02['slopeM_H'] = dtAcq02['deltaM'].div(dtAcq02['deltaH'])\n\n # Access a group of rows and columns by label(s) or a boolean array.\n dtAcq02.loc[~np.isfinite(dtAcq02['slopeN_H']), 'slopeN_H'] = np.nan\n dtAcq02.loc[~np.isfinite(dtAcq02['slopeM_H']), 'slopeM_H'] = np.nan\n\n # note: slopeN_H must be divided by 1000. 
For this parameter the height is in km\n dtAcq02['slopeN_H'] = dtAcq02.slopeN_H * 1000\n dtAcq02['slopeM_H'] = dtAcq02.slopeM_H * 1000\n\n # Drop the rows even with single NaN or single missing values.\n dtAcq02 = dtAcq02.dropna()\n\n print(dtAcq02)\n dtAcq02.to_csv(fpOutCsv, header=True, index=True, sep=csv_sep)\n\n # ---------------------------------------------------------\n # save traces in lists\n\n # traces x:'N', y:'HGHT'\n grTrace_N_HGHT.append(\n go.Scatter(\n x=dtAcq02['N'], y=dtAcq02['HGHT'],\n # line=dict(width=3,color='green'),\n line=dict(width=3),\n name=date_launch\n )\n )\n # traces x:'M', y:'HGHT'\n grTrace_M_HGHT.append(\n go.Scatter(\n x=dtAcq02['M'], y=dtAcq02['HGHT'],\n # line=dict(width=3,color='green'),\n line=dict(width=3),\n name=date_launch\n )\n )\n # dtAcq02['HGHT'] in km\n dtAcq02.HGHT = dtAcq02['HGHT'] / 1000\n # traces x:'HGHT', y:'slopeN_H'\n grTrace_HGHT_SLOPE_N_H.append( \n go.Scatter(\n x=dtAcq02['HGHT'], y=dtAcq02['slopeN_H'],\n # line=dict(width=3,color='green'),\n line=dict(width=3), \n name=date_launch\n )\n )\n # traces x:'HGHT', y:'slopeM_H'\n grTrace_HGHT_SLOPE_M_H.append( \n go.Scatter(\n x=dtAcq02['HGHT'], y=dtAcq02['slopeM_H'],\n # line=dict(width=3,color='green'),\n line=dict(width=3), \n name=date_launch\n )\n )\n \n # ---------------------------------------------------------\n # free memory\n # delete dataframes\n # example:\n # This will delete the dataframe and will release the RAM/memory\n # del [[df_1,df_2]]\n # gc.collect()\n # df_1=pd.DataFrame()\n # df_2=pd.DataFrame()\n \n del items\n del [[dtAcq02]]\n gc.collect() # garbage collection\n ## ------------------------- end for rowPos in range(nRows_risIdx)\n\n# --------------------------------------------------------------------------------------\n# GRAPHS OUTPUT\n# --------------------------------------------------------------------------------------\n# https://plotly.com/python/v3/figure-labels/\n# for symbols, see:\n# https://www.w3schools.com/html/html_symbols.asp\n\nfig = go.Figure()\n\n# -------------- COMMON DATA\n\n# name_graph = \"Station: \" + inputstation + \" Date: \" + str(inputyear) + \"/\" + str(inputmonth) + \"/\" + str(inputday) + \" \" + str(inputhour) + \":00\"\nname_graph = \"Station: \" + inputstation + \"<br>Start date: \" + strDateHuman\n\n# default font parameters\ndef_font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n)\n\ndef_tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n), \n\n# mod 19/05\nEnableGraph = False\n\nif EnableGraph:\n # --------------------------------------------------[ grafici da non generare]\n\n # -------------- create graph HEIGHT / N\n GraphLayout = go.Layout(\n title=go.layout.Title(\n text='<b>' + name_graph + '</b>', # bold\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n ),\n xref='paper',\n x=0\n ),\n xaxis=go.layout.XAxis( \n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.xaxis.Title(\n text='N',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n ),\n yaxis=go.layout.YAxis(\n range=[0, height_limit],\n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.yaxis.Title(\n text='Height (m)',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n )\n )\n fig = go.Figure(data=grTrace_N_HGHT, layout=GraphLayout)\n fig.write_html(fpHtmlRep_N_Height, auto_open=False)\n\n # -------------- create graph HEIGHT / M\n GraphLayout = 
go.Layout(\n title=go.layout.Title(\n text='<b>' + name_graph + '</b>', # bold\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n ),\n xref='paper',\n x=0\n ),\n xaxis=go.layout.XAxis(\n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.xaxis.Title(\n text='M',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n ),\n yaxis=go.layout.YAxis(\n range=[0, height_limit],\n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.yaxis.Title(\n text='Height (m)',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n )\n )\n fig = go.Figure(data=grTrace_M_HGHT, layout=GraphLayout)\n fig.write_html(fpHtmlRep_M_Height, auto_open=False)\n # --------------------------------------------------[ end grafici da non generare]\n\n# -------------- create graph slope N - H\nGraphLayout = go.Layout(\n title=go.layout.Title(\n text='<b>' + name_graph + '</b>', # bold\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n ),\n xref='paper',\n x=0\n ),\n xaxis=go.layout.XAxis(\n range=[0, 4],\n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.xaxis.Title(\n text='<b>Height (km)</b>',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n ),\n yaxis=go.layout.YAxis(\n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.yaxis.Title(\n text='<b>ΔN/ΔH, km<sup>-1</sup></b>',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n )\n)\n\nfig = go.Figure(data=grTrace_HGHT_SLOPE_N_H, layout=GraphLayout)\n\nfig.update_layout(plot_bgcolor='rgb(255,255,255)')\nfig.update_xaxes(showgrid=True,gridwidth=1, gridcolor='LightPink',showline=True, linewidth=1, linecolor='black', mirror=True)\nfig.update_yaxes(showgrid=True,gridwidth=1, gridcolor='LightPink',showline=True, linewidth=1, linecolor='black', mirror=True)\nfig.update_layout()\n\nfig.write_html(fpHtmlRep_N_slope, auto_open=False)\n\n# -------------- create graph slope M - H\nGraphLayout = go.Layout(\n title=go.layout.Title(\n text='<b>' + name_graph + '</b>', # bold\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n ),\n xref='paper',\n x=0\n ),\n xaxis=go.layout.XAxis(\n range=[0, 4],\n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.xaxis.Title(\n text='<b>Height (km)</b>',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n ),\n yaxis=go.layout.YAxis(\n tickfont = dict(\n size = 16,\n color = \"#000000\" # black\n ), \n title=go.layout.yaxis.Title(\n text='<b>ΔM/ΔH, km<sup>-1</sup></b>',\n font=dict(\n family='Arial, monospace',\n size=16,\n color=\"#000000\" # black\n )\n )\n )\n)\n\nfig = go.Figure(data=grTrace_HGHT_SLOPE_M_H, layout=GraphLayout)\n\nfig.update_layout(plot_bgcolor='rgb(255,255,255)')\nfig.update_xaxes(showgrid=True,gridwidth=1, gridcolor='LightPink',showline=True, linewidth=1, linecolor='black', mirror=True)\nfig.update_yaxes(showgrid=True,gridwidth=1, gridcolor='LightPink',showline=True, linewidth=1, linecolor='black', mirror=True)\nfig.update_layout()\n\nfig.write_html(fpHtmlRep_M_slope, auto_open=False)\n\n\n\n"
]
| [
[
"pandas.DataFrame.from_records",
"pandas.read_csv",
"numpy.isfinite"
]
]
|
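The refractivity script in the row above reduces each IGRA2 sounding to three derived columns: the modified refractivity M = N + 0.157 * HGHT and the per-kilometre gradients dN/dH and dM/dH obtained from row-wise differences. A minimal sketch of that calculation on a made-up profile, assuming only pandas and numpy (the column names mirror the script; the sample values are illustrative, not IGRA data):

import numpy as np
import pandas as pd

# toy profile: geopotential height in metres and refractive index N (unitless)
df = pd.DataFrame({'HGHT': [0, 500, 1000, 1500],
                   'N':    [320, 300, 285, 272]})

# modified refractivity, same formula as in the script
df['M'] = df['N'] + 0.157 * df['HGHT']

# differences between consecutive levels
dN = df['N'].diff()
dM = df['M'].diff()
dH = df['HGHT'].diff()

# gradients expressed per kilometre (hence the factor 1000)
df['slopeN_H'] = dN.div(dH) * 1000
df['slopeM_H'] = dM.div(dH) * 1000

# discard the NaN row produced by diff() and any division by a zero height step
df = df.replace([np.inf, -np.inf], np.nan).dropna()
print(df)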
hui2000ji/masif | [
"70a76c5f4639f70c546d5603612c7cc9f47a35b8"
]
| [
"source/data_preparation/01-pdb_extract_and_triangulate.py"
]
| [
"#!/usr/bin/python\nimport numpy as np\nimport os\nimport Bio\nimport shutil\nfrom Bio.PDB import * \nimport sys\nimport importlib\nfrom IPython.core.debugger import set_trace\n\n# Local includes\nfrom default_config.masif_opts import masif_opts\nfrom triangulation.computeMSMS import computeMSMS\nfrom triangulation.fixmesh import fix_mesh\nimport pymesh\nfrom input_output.extractPDB import extractPDB\nfrom input_output.save_ply import save_ply\nfrom input_output.read_ply import read_ply\nfrom input_output.protonate import protonate\nfrom triangulation.computeHydrophobicity import computeHydrophobicity\nfrom triangulation.computeCharges import computeCharges, assignChargesToNewMesh\nfrom triangulation.computeAPBS import computeAPBS\nfrom triangulation.compute_normal import compute_normal\nfrom sklearn.neighbors import KDTree\n\nif len(sys.argv) <= 1: \n print(\"Usage: {config} \"+sys.argv[0]+\" PDBID_A\")\n print(\"A or AB are the chains to include in this surface.\")\n sys.exit(1)\n\n\n# Save the chains as separate files. \nin_fields = sys.argv[1].split(\"_\")\npdb_id = in_fields[0]\n\npdb_filename = os.path.join(masif_opts[\"ligand\"][\"assembly_dir\"], pdb_id, pdb_id + \"_protein.pdb\")\ntmp_dir= masif_opts['tmp_dir']\n# protonated_file = tmp_dir+\"/\"+pdb_id+\".pdb\"\n# protonate(pdb_filename, protonated_file)\n# pdb_filename = protonated_file\n\n# Extract chains of interest.\nout_filename1 = tmp_dir+\"/\"+pdb_id\nextractPDB(pdb_filename, out_filename1+\".pdb\")\n\n# Compute MSMS of surface w/hydrogens, \ntry:\n vertices1, faces1, normals1, names1, areas1 = computeMSMS(out_filename1+\".pdb\",\\\n protonate=True)\nexcept:\n set_trace()\n\n# Compute \"charged\" vertices\nif masif_opts['use_hbond']:\n vertex_hbond = computeCharges(out_filename1, vertices1, names1)\n\n# For each surface residue, assign the hydrophobicity of its amino acid. \nif masif_opts['use_hphob']:\n vertex_hphobicity = computeHydrophobicity(names1)\n\n# If protonate = false, recompute MSMS of surface, but without hydrogens (set radius of hydrogens to 0).\nvertices2 = vertices1\nfaces2 = faces1\n\n# Fix the mesh.\nmesh = pymesh.form_mesh(vertices2, faces2)\nregular_mesh = fix_mesh(mesh, masif_opts['mesh_res'])\n\n# Compute the normals\nvertex_normal = compute_normal(regular_mesh.vertices, regular_mesh.faces)\n# Assign charges on new vertices based on charges of old vertices (nearest\n# neighbor)\n\nif masif_opts['use_hbond']:\n vertex_hbond = assignChargesToNewMesh(regular_mesh.vertices, vertices1,\\\n vertex_hbond, masif_opts)\n\nif masif_opts['use_hphob']:\n vertex_hphobicity = assignChargesToNewMesh(regular_mesh.vertices, vertices1,\\\n vertex_hphobicity, masif_opts)\n\nif masif_opts['use_apbs']:\n vertex_charges = computeAPBS(regular_mesh.vertices, out_filename1+\".pdb\", out_filename1)\n\niface = np.zeros(len(regular_mesh.vertices))\nif 'compute_iface' in masif_opts and masif_opts['compute_iface']:\n # Compute the surface of the entire complex and from that compute the interface.\n v3, f3, _, _, _ = computeMSMS(pdb_filename,\\\n protonate=True)\n # Regularize the mesh\n mesh = pymesh.form_mesh(v3, f3)\n # I believe It is not necessary to regularize the full mesh. 
This can speed up things by a lot.\n full_regular_mesh = mesh\n # Find the vertices that are in the iface.\n v3 = full_regular_mesh.vertices\n # Find the distance between every vertex in regular_mesh.vertices and those in the full complex.\n kdt = KDTree(v3)\n d, r = kdt.query(regular_mesh.vertices)\n d = np.square(d) # Square d, because this is how it was in the pyflann version.\n assert(len(d) == len(regular_mesh.vertices))\n iface_v = np.where(d >= 2.0)[0]\n iface[iface_v] = 1.0\n # Convert to ply and save.\n save_ply(out_filename1+\".ply\", regular_mesh.vertices,\\\n regular_mesh.faces, normals=vertex_normal, charges=vertex_charges,\\\n normalize_charges=True, hbond=vertex_hbond, hphob=vertex_hphobicity,\\\n iface=iface)\n\nelse:\n # Convert to ply and save.\n save_ply(out_filename1+\".ply\", regular_mesh.vertices,\\\n regular_mesh.faces, normals=vertex_normal, charges=vertex_charges,\\\n normalize_charges=True, hbond=vertex_hbond, hphob=vertex_hphobicity)\nif not os.path.exists(masif_opts['ply_chain_dir']):\n os.makedirs(masif_opts['ply_chain_dir'])\nif not os.path.exists(masif_opts['pdb_chain_dir']):\n os.makedirs(masif_opts['pdb_chain_dir'])\nshutil.copy(out_filename1+'.ply', masif_opts['ply_chain_dir']) \nshutil.copy(out_filename1+'.pdb', masif_opts['pdb_chain_dir']) \n"
]
| [
[
"sklearn.neighbors.KDTree",
"numpy.where",
"numpy.square"
]
]
|
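The interface step in the MaSIF preparation script above builds a KDTree on the vertices of the full-complex surface and flags chain-surface vertices whose squared nearest-neighbour distance is at least 2.0 as interface points. A minimal sketch of that distance test on synthetic point clouds, assuming only numpy and scikit-learn (the 2.0 threshold is taken from the script; the vertex arrays here are random stand-ins, not real mesh output):

import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.default_rng(0)
chain_verts = rng.random((100, 3)) * 10.0    # surface vertices of the isolated chain
complex_verts = rng.random((500, 3)) * 10.0  # surface vertices of the full complex

# nearest vertex of the complex surface for every chain vertex
kdt = KDTree(complex_verts)
d, _ = kdt.query(chain_verts)   # d has shape (n_chain_verts, 1)
d = np.square(d)                # squared distances, as in the script

# chain vertices far from the complex surface are buried in the complex,
# so they are labelled as interface
iface = np.zeros(len(chain_verts))
iface[np.where(d >= 2.0)[0]] = 1.0
print(int(iface.sum()), 'of', len(chain_verts), 'vertices flagged as interface')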
UpSea/ZipLineMid | [
"1e0cdcfa7974f412dbee32809cffdaf2de6b4971"
]
| [
"xpower/Strategies/testTemp/testParams01.py"
]
| [
"import numpy as np\n\nt = np.arange(0.0, 10.0, 0.01)\ns = np.sin(2*np.pi*t)\nparams={}\nparams['s']=s\nparams['t']=t"
]
| [
[
"numpy.sin",
"numpy.arange"
]
]
|
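The parameter module above only prepares a sampled 1 Hz sine, presumably consumed by the strategy code under xpower/Strategies. A brief usage sketch, assuming matplotlib for the visual check (the plotting part is not in the original snippet):

import numpy as np
import matplotlib.pyplot as plt

t = np.arange(0.0, 10.0, 0.01)   # 0..10 s in 10 ms steps
s = np.sin(2 * np.pi * t)        # 1 Hz sine sampled on t
params = {'s': s, 't': t}

# consume the params dict the same way a strategy module would
plt.plot(params['t'], params['s'])
plt.xlabel('t (s)')
plt.ylabel("params['s']")
plt.show()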
IEM-Computer-Vision/pytorch3d | [
"e3819a49dfa855de1a7c99c0583fb69f9bdad75b"
]
| [
"pytorch3d/structures/meshes.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nfrom typing import List\n\nimport torch\n\nfrom . import utils as struct_utils\nfrom .textures import Textures\n\n\nclass Meshes(object):\n \"\"\"\n This class provides functions for working with batches of triangulated\n meshes with varying numbers of faces and vertices, and converting between\n representations.\n\n Within Meshes, there are three different representations of the faces and\n verts data:\n\n List\n - only used for input as a starting point to convert to other representations.\n Padded\n - has specific batch dimension.\n Packed\n - no batch dimension.\n - has auxillary variables used to index into the padded representation.\n\n Example:\n\n Input list of verts V_n = [[V_1], [V_2], ... , [V_N]]\n where V_1, ... , V_N are the number of verts in each mesh and N is the\n numer of meshes.\n\n Input list of faces F_n = [[F_1], [F_2], ... , [F_N]]\n where F_1, ... , F_N are the number of faces in each mesh.\n\n # SPHINX IGNORE\n List | Padded | Packed\n ---------------------------|-------------------------|------------------------\n [[V_1], ... , [V_N]] | size = (N, max(V_n), 3) | size = (sum(V_n), 3)\n | |\n Example for verts: | |\n | |\n V_1 = 3, V_2 = 4, V_3 = 5 | size = (3, 5, 3) | size = (12, 3)\n | |\n List([ | tensor([ | tensor([\n [ | [ | [0.1, 0.3, 0.5],\n [0.1, 0.3, 0.5], | [0.1, 0.3, 0.5], | [0.5, 0.2, 0.1],\n [0.5, 0.2, 0.1], | [0.5, 0.2, 0.1], | [0.6, 0.8, 0.7],\n [0.6, 0.8, 0.7], | [0.6, 0.8, 0.7], | [0.1, 0.3, 0.3],\n ], | [0, 0, 0], | [0.6, 0.7, 0.8],\n [ | [0, 0, 0], | [0.2, 0.3, 0.4],\n [0.1, 0.3, 0.3], | ], | [0.1, 0.5, 0.3],\n [0.6, 0.7, 0.8], | [ | [0.7, 0.3, 0.6],\n [0.2, 0.3, 0.4], | [0.1, 0.3, 0.3], | [0.2, 0.4, 0.8],\n [0.1, 0.5, 0.3], | [0.6, 0.7, 0.8], | [0.9, 0.5, 0.2],\n ], | [0.2, 0.3, 0.4], | [0.2, 0.3, 0.4],\n [ | [0.1, 0.5, 0.3], | [0.9, 0.3, 0.8],\n [0.7, 0.3, 0.6], | [0, 0, 0], | ])\n [0.2, 0.4, 0.8], | ], |\n [0.9, 0.5, 0.2], | [ |\n [0.2, 0.3, 0.4], | [0.7, 0.3, 0.6], |\n [0.9, 0.3, 0.8], | [0.2, 0.4, 0.8], |\n ] | [0.9, 0.5, 0.2], |\n ]) | [0.2, 0.3, 0.4], |\n | [0.9, 0.3, 0.8], |\n | ] |\n | ]) |\n Example for faces: | |\n | |\n F_1 = 1, F_2 = 2, F_3 = 7 | size = (3, 7, 3) | size = (10, 3)\n | |\n List([ | tensor([ | tensor([\n [ | [ | [ 0, 1, 2],\n [0, 1, 2], | [0, 1, 2], | [ 3, 4, 5],\n ], | [-1, -1, -1], | [ 4, 5, 6],\n [ | [-1, -1, -1] | [ 8, 9, 7],\n [0, 1, 2], | [-1, -1, -1] | [ 7, 8, 10],\n [1, 2, 3], | [-1, -1, -1] | [ 9, 10, 8],\n ], | [-1, -1, -1], | [11, 10, 9],\n [ | [-1, -1, -1], | [11, 7, 8],\n [1, 2, 0], | ], | [11, 10, 8],\n [0, 1, 3], | [ | [11, 9, 8],\n [2, 3, 1], | [0, 1, 2], | ])\n [4, 3, 2], | [1, 2, 3], |\n [4, 0, 1], | [-1, -1, -1], |\n [4, 3, 1], | [-1, -1, -1], |\n [4, 2, 1], | [-1, -1, -1], |\n ], | [-1, -1, -1], |\n ]) | [-1, -1, -1], |\n | ], |\n | [ |\n | [1, 2, 0], |\n | [0, 1, 3], |\n | [2, 3, 1], |\n | [4, 3, 2], |\n | [4, 0, 1], |\n | [4, 3, 1], |\n | [4, 2, 1], |\n | ] |\n | ]) |\n -----------------------------------------------------------------------------\n\n Auxillary variables for packed representation\n\n Name | Size | Example from above\n -------------------------------|---------------------|-----------------------\n | |\n verts_packed_to_mesh_idx | size = (sum(V_n)) | tensor([\n | | 0, 0, 0, 1, 1, 1,\n | | 1, 2, 2, 2, 2, 2\n | | )]\n | | size = (12)\n | |\n mesh_to_verts_packed_first_idx | size = (N) | tensor([0, 3, 7])\n | | size = (3)\n | |\n num_verts_per_mesh | size = (N) | tensor([3, 4, 5])\n | | size = (3)\n | |\n 
faces_packed_to_mesh_idx | size = (sum(F_n)) | tensor([\n | | 0, 1, 1, 2, 2, 2,\n | | 2, 2, 2, 2\n | | )]\n | | size = (10)\n | |\n mesh_to_faces_packed_first_idx | size = (N) | tensor([0, 1, 3])\n | | size = (3)\n | |\n num_faces_per_mesh | size = (N) | tensor([1, 2, 7])\n | | size = (3)\n | |\n verts_padded_to_packed_idx | size = (sum(V_n)) | tensor([\n | | 0, 1, 2, 5, 6, 7,\n | | 8, 10, 11, 12, 13,\n | | 14\n | | )]\n | | size = (12)\n -----------------------------------------------------------------------------\n # SPHINX IGNORE\n\n From the faces, edges are computed and have packed and padded\n representations with auxillary variables.\n\n E_n = [[E_1], ... , [E_N]]\n where E_1, ... , E_N are the number of unique edges in each mesh.\n Total number of unique edges = sum(E_n)\n\n # SPHINX IGNORE\n Name | Size | Example from above\n -------------------------------|-------------------------|----------------------\n | |\n edges_packed | size = (sum(E_n), 2) | tensor([\n | | [0, 1],\n | | [0, 2],\n | | [1, 2],\n | | ...\n | | [10, 11],\n | | )]\n | | size = (18, 2)\n | |\n num_edges_per_mesh | size = (N) | tensor([3, 5, 10])\n | | size = (3)\n | |\n edges_packed_to_mesh_idx | size = (sum(E_n)) | tensor([\n | | 0, 0, 0,\n | | . . .\n | | 2, 2, 2\n | | ])\n | | size = (18)\n | |\n faces_packed_to_edges_packed | size = (sum(F_n), 3) | tensor([\n | | [2, 1, 0],\n | | [5, 4, 3],\n | | . . .\n | | [12, 14, 16],\n | | ])\n | | size = (10, 3)\n | |\n mesh_to_edges_packed_first_idx | size = (N) | tensor([0, 3, 8])\n | | size = (3)\n ----------------------------------------------------------------------------\n # SPHINX IGNORE\n \"\"\"\n\n _INTERNAL_TENSORS = [\n \"_verts_packed\",\n \"_verts_packed_to_mesh_idx\",\n \"_mesh_to_verts_packed_first_idx\",\n \"_verts_padded\",\n \"_num_verts_per_mesh\",\n \"_faces_packed\",\n \"_faces_packed_to_mesh_idx\",\n \"_mesh_to_faces_packed_first_idx\",\n \"_faces_padded\",\n \"_faces_areas_packed\",\n \"_verts_normals_packed\",\n \"_faces_normals_packed\",\n \"_num_faces_per_mesh\",\n \"_edges_packed\",\n \"_edges_packed_to_mesh_idx\",\n \"_mesh_to_edges_packed_first_idx\",\n \"_faces_packed_to_edges_packed\",\n \"_num_edges_per_mesh\",\n \"_verts_padded_to_packed_idx\",\n \"_laplacian_packed\",\n \"valid\",\n \"equisized\",\n ]\n\n def __init__(self, verts=None, faces=None, textures=None):\n \"\"\"\n Args:\n verts:\n Can be either\n\n - List where each element is a tensor of shape (num_verts, 3)\n containing the (x, y, z) coordinates of each vertex.\n - Padded float tensor with shape (num_meshes, max_num_verts, 3).\n Meshes should be padded with fill value of 0 so they all have\n the same number of vertices.\n faces:\n Can be either\n\n - List where each element is a tensor of shape (num_faces, 3)\n containing the indices of the 3 vertices in the corresponding\n mesh in verts which form the triangular face.\n - Padded long tensor of shape (num_meshes, max_num_faces, 3).\n Meshes should be padded with fill value of -1 so they have\n the same number of faces.\n textures: Optional instance of the Textures class with mesh\n texture properties.\n\n Refer to comments above for descriptions of List and Padded representations.\n \"\"\"\n self.device = None\n if textures is not None and not isinstance(textures, Textures):\n msg = \"Expected textures to be of type Textures; got %r\"\n raise ValueError(msg % type(textures))\n self.textures = textures\n\n # Indicates whether the meshes in the list/batch have the same number\n # of faces and vertices.\n self.equisized = 
False\n\n # Boolean indicator for each mesh in the batch\n # True if mesh has non zero number of verts and face, False otherwise.\n self.valid = None\n\n self._N = 0 # batch size (number of meshes)\n self._V = 0 # (max) number of vertices per mesh\n self._F = 0 # (max) number of faces per mesh\n\n # List of Tensors of verts and faces.\n self._verts_list = None\n self._faces_list = None\n\n # Packed representation for verts.\n self._verts_packed = None # (sum(V_n), 3)\n self._verts_packed_to_mesh_idx = None # sum(V_n)\n\n # Index to convert verts from flattened padded to packed\n self._verts_padded_to_packed_idx = None # N * max_V\n\n # Index of each mesh's first vert in the packed verts.\n # Assumes packing is sequential.\n self._mesh_to_verts_packed_first_idx = None # N\n\n # Packed representation for faces.\n self._faces_packed = None # (sum(F_n), 3)\n self._faces_packed_to_mesh_idx = None # sum(F_n)\n\n # Index of each mesh's first face in packed faces.\n # Assumes packing is sequential.\n self._mesh_to_faces_packed_first_idx = None # N\n\n # Packed representation of edges sorted by index of the first vertex\n # in the edge. Edges can be shared between faces in a mesh.\n self._edges_packed = None # (sum(E_n), 2)\n\n # Map from packed edges to corresponding mesh index.\n self._edges_packed_to_mesh_idx = None # sum(E_n)\n self._num_edges_per_mesh = None # N\n self._mesh_to_edges_packed_first_idx = None # N\n\n # Map from packed faces to packed edges. This represents the index of\n # the edge opposite the vertex for each vertex in the face. E.g.\n #\n # v0\n # /\\\n # / \\\n # e1 / \\ e2\n # / \\\n # /________\\\n # v2 e0 v1\n #\n # Face (v0, v1, v2) => Edges (e0, e1, e2)\n self._faces_packed_to_edges_packed = None # (sum(F_n), 3)\n\n # Padded representation of verts.\n self._verts_padded = None # (N, max(V_n), 3)\n self._num_verts_per_mesh = None # N\n\n # Padded representation of faces.\n self._faces_padded = None # (N, max(F_n), 3)\n self._num_faces_per_mesh = None # N\n\n # Face areas\n self._faces_areas_packed = None\n\n # Normals\n self._verts_normals_packed = None\n self._faces_normals_packed = None\n\n # Packed representation of Laplacian Matrix\n self._laplacian_packed = None\n\n # Identify type of verts and faces.\n if isinstance(verts, list) and isinstance(faces, list):\n self._verts_list = verts\n self._faces_list = [\n f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f for f in faces\n ]\n self._N = len(self._verts_list)\n self.device = torch.device(\"cpu\")\n self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)\n if self._N > 0:\n self.device = self._verts_list[0].device\n self._num_verts_per_mesh = torch.tensor(\n [len(v) for v in self._verts_list], device=self.device\n )\n self._V = self._num_verts_per_mesh.max()\n self._num_faces_per_mesh = torch.tensor(\n [len(f) for f in self._faces_list], device=self.device\n )\n self._F = self._num_faces_per_mesh.max()\n self.valid = torch.tensor(\n [\n len(v) > 0 and len(f) > 0\n for (v, f) in zip(self._verts_list, self._faces_list)\n ],\n dtype=torch.bool,\n device=self.device,\n )\n\n if (len(self._num_verts_per_mesh.unique()) == 1) and (\n len(self._num_faces_per_mesh.unique()) == 1\n ):\n self.equisized = True\n\n elif torch.is_tensor(verts) and torch.is_tensor(faces):\n if verts.size(2) != 3 and faces.size(2) != 3:\n raise ValueError(\"Verts and Faces tensors have incorrect dimensions.\")\n self._verts_padded = verts\n self._faces_padded = faces.to(torch.int64)\n self._N = self._verts_padded.shape[0]\n 
self._V = self._verts_padded.shape[1]\n\n self.device = self._verts_padded.device\n self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)\n if self._N > 0:\n # Check that padded faces - which have value -1 - are at the\n # end of the tensors\n faces_not_padded = self._faces_padded.gt(-1).all(2)\n self._num_faces_per_mesh = faces_not_padded.sum(1)\n if (faces_not_padded[:, :-1] < faces_not_padded[:, 1:]).any():\n raise ValueError(\"Padding of faces must be at the end\")\n\n # NOTE that we don't check for the ordering of padded verts\n # as long as the faces index correspond to the right vertices.\n\n self.valid = self._num_faces_per_mesh > 0\n self._F = self._num_faces_per_mesh.max()\n if len(self._num_faces_per_mesh.unique()) == 1:\n self.equisized = True\n\n self._num_verts_per_mesh = torch.full(\n size=(self._N,),\n fill_value=self._V,\n dtype=torch.int64,\n device=self.device,\n )\n\n else:\n raise ValueError(\n \"Verts and Faces must be either a list or a tensor with \\\n shape (batch_size, N, 3) where N is either the maximum \\\n number of verts or faces respectively.\"\n )\n\n if self.isempty():\n self._num_verts_per_mesh = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n self._num_faces_per_mesh = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n\n # Set the num verts/faces on the textures if present.\n if self.textures is not None:\n self.textures._num_faces_per_mesh = self._num_faces_per_mesh.tolist()\n self.textures._num_verts_per_mesh = self._num_verts_per_mesh.tolist()\n\n def __len__(self):\n return self._N\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index: Specifying the index of the mesh to retrieve.\n Can be an int, slice, list of ints or a boolean tensor.\n\n Returns:\n Meshes object with selected meshes. 
The mesh tensors are not cloned.\n \"\"\"\n if isinstance(index, (int, slice)):\n verts = self.verts_list()[index]\n faces = self.faces_list()[index]\n elif isinstance(index, list):\n verts = [self.verts_list()[i] for i in index]\n faces = [self.faces_list()[i] for i in index]\n elif isinstance(index, torch.Tensor):\n if index.dim() != 1 or index.dtype.is_floating_point:\n raise IndexError(index)\n # NOTE consider converting index to cpu for efficiency\n if index.dtype == torch.bool:\n # advanced indexing on a single dimension\n index = index.nonzero()\n index = index.squeeze(1) if index.numel() > 0 else index\n index = index.tolist()\n verts = [self.verts_list()[i] for i in index]\n faces = [self.faces_list()[i] for i in index]\n else:\n raise IndexError(index)\n\n textures = None if self.textures is None else self.textures[index]\n\n if torch.is_tensor(verts) and torch.is_tensor(faces):\n return self.__class__(verts=[verts], faces=[faces], textures=textures)\n elif isinstance(verts, list) and isinstance(faces, list):\n return self.__class__(verts=verts, faces=faces, textures=textures)\n else:\n raise ValueError(\"(verts, faces) not defined correctly\")\n\n def isempty(self) -> bool:\n \"\"\"\n Checks whether any mesh is valid.\n\n Returns:\n bool indicating whether there is any data.\n \"\"\"\n return self._N == 0 or self.valid.eq(False).all()\n\n def verts_list(self):\n \"\"\"\n Get the list representation of the vertices.\n\n Returns:\n list of tensors of vertices of shape (V_n, 3).\n \"\"\"\n if self._verts_list is None:\n assert (\n self._verts_padded is not None\n ), \"verts_padded is required to compute verts_list.\"\n self._verts_list = struct_utils.padded_to_list(\n self._verts_padded, self.num_verts_per_mesh().tolist()\n )\n return self._verts_list\n\n def faces_list(self):\n \"\"\"\n Get the list representation of the faces.\n\n Returns:\n list of tensors of faces of shape (F_n, 3).\n \"\"\"\n if self._faces_list is None:\n assert (\n self._faces_padded is not None\n ), \"faces_padded is required to compute faces_list.\"\n self._faces_list = struct_utils.padded_to_list(\n self._faces_padded, self.num_faces_per_mesh().tolist()\n )\n return self._faces_list\n\n def verts_packed(self):\n \"\"\"\n Get the packed representation of the vertices.\n\n Returns:\n tensor of vertices of shape (sum(V_n), 3).\n \"\"\"\n self._compute_packed()\n return self._verts_packed\n\n def verts_packed_to_mesh_idx(self):\n \"\"\"\n Return a 1D tensor with the same first dimension as verts_packed.\n verts_packed_to_mesh_idx[i] gives the index of the mesh which contains\n verts_packed[i].\n\n Returns:\n 1D tensor of indices.\n \"\"\"\n self._compute_packed()\n return self._verts_packed_to_mesh_idx\n\n def mesh_to_verts_packed_first_idx(self):\n \"\"\"\n Return a 1D tensor x with length equal to the number of meshes such that\n the first vertex of the ith mesh is verts_packed[x[i]].\n\n Returns:\n 1D tensor of indices of first items.\n \"\"\"\n self._compute_packed()\n return self._mesh_to_verts_packed_first_idx\n\n def num_verts_per_mesh(self):\n \"\"\"\n Return a 1D tensor x with length equal to the number of meshes giving\n the number of vertices in each mesh.\n\n Returns:\n 1D tensor of sizes.\n \"\"\"\n return self._num_verts_per_mesh\n\n def faces_packed(self):\n \"\"\"\n Get the packed representation of the faces.\n Faces are given by the indices of the three vertices in verts_packed.\n\n Returns:\n tensor of faces of shape (sum(F_n), 3).\n \"\"\"\n self._compute_packed()\n return 
self._faces_packed\n\n def faces_packed_to_mesh_idx(self):\n \"\"\"\n Return a 1D tensor with the same first dimension as faces_packed.\n faces_packed_to_mesh_idx[i] gives the index of the mesh which contains\n faces_packed[i].\n\n Returns:\n 1D tensor of indices.\n \"\"\"\n self._compute_packed()\n return self._faces_packed_to_mesh_idx\n\n def mesh_to_faces_packed_first_idx(self):\n \"\"\"\n Return a 1D tensor x with length equal to the number of meshes such that\n the first face of the ith mesh is faces_packed[x[i]].\n\n Returns:\n 1D tensor of indices of first items.\n \"\"\"\n self._compute_packed()\n return self._mesh_to_faces_packed_first_idx\n\n def verts_padded(self):\n \"\"\"\n Get the padded representation of the vertices.\n\n Returns:\n tensor of vertices of shape (N, max(V_n), 3).\n \"\"\"\n self._compute_padded()\n return self._verts_padded\n\n def faces_padded(self):\n \"\"\"\n Get the padded representation of the faces.\n\n Returns:\n tensor of faces of shape (N, max(F_n), 3).\n \"\"\"\n self._compute_padded()\n return self._faces_padded\n\n def num_faces_per_mesh(self):\n \"\"\"\n Return a 1D tensor x with length equal to the number of meshes giving\n the number of faces in each mesh.\n\n Returns:\n 1D tensor of sizes.\n \"\"\"\n return self._num_faces_per_mesh\n\n def edges_packed(self):\n \"\"\"\n Get the packed representation of the edges.\n\n Returns:\n tensor of edges of shape (sum(E_n), 2).\n \"\"\"\n self._compute_edges_packed()\n return self._edges_packed\n\n def edges_packed_to_mesh_idx(self):\n \"\"\"\n Return a 1D tensor with the same first dimension as edges_packed.\n edges_packed_to_mesh_idx[i] gives the index of the mesh which contains\n edges_packed[i].\n\n Returns:\n 1D tensor of indices.\n \"\"\"\n self._compute_edges_packed()\n return self._edges_packed_to_mesh_idx\n\n def mesh_to_edges_packed_first_idx(self):\n \"\"\"\n Return a 1D tensor x with length equal to the number of meshes such that\n the first edge of the ith mesh is edges_packed[x[i]].\n\n Returns:\n 1D tensor of indices of first items.\n \"\"\"\n self._compute_edges_packed()\n return self._mesh_to_edges_packed_first_idx\n\n def faces_packed_to_edges_packed(self):\n \"\"\"\n Get the packed representation of the faces in terms of edges.\n Faces are given by the indices of the three edges in\n the packed representation of the edges.\n\n Returns:\n tensor of faces of shape (sum(F_n), 3).\n \"\"\"\n self._compute_edges_packed()\n return self._faces_packed_to_edges_packed\n\n def num_edges_per_mesh(self):\n \"\"\"\n Return a 1D tensor x with length equal to the number of meshes giving\n the number of edges in each mesh.\n\n Returns:\n 1D tensor of sizes.\n \"\"\"\n self._compute_edges_packed()\n return self._num_edges_per_mesh\n\n def verts_padded_to_packed_idx(self):\n \"\"\"\n Return a 1D tensor x with length equal to the total number of vertices\n such that verts_packed()[i] is element x[i] of the flattened padded\n representation.\n The packed representation can be calculated as follows.\n\n .. 
code-block:: python\n\n p = verts_padded().reshape(-1, 3)\n verts_packed = p[x]\n\n Returns:\n 1D tensor of indices.\n \"\"\"\n if self._verts_padded_to_packed_idx is not None:\n return self._verts_padded_to_packed_idx\n\n self._verts_padded_to_packed_idx = torch.cat(\n [\n torch.arange(v, dtype=torch.int64, device=self.device) + i * self._V\n for (i, v) in enumerate(self.num_verts_per_mesh())\n ],\n dim=0,\n )\n return self._verts_padded_to_packed_idx\n\n def verts_normals_packed(self):\n \"\"\"\n Get the packed representation of the vertex normals.\n\n Returns:\n tensor of normals of shape (sum(V_n), 3).\n \"\"\"\n self._compute_vertex_normals()\n return self._verts_normals_packed\n\n def verts_normals_list(self):\n \"\"\"\n Get the list representation of the vertex normals.\n\n Returns:\n list of tensors of normals of shape (V_n, 3).\n \"\"\"\n if self.isempty():\n return [\n torch.empty((0, 3), dtype=torch.float32, device=self.device)\n ] * self._N\n verts_normals_packed = self.verts_normals_packed()\n split_size = self.num_verts_per_mesh().tolist()\n return struct_utils.packed_to_list(verts_normals_packed, split_size)\n\n def verts_normals_padded(self):\n \"\"\"\n Get the padded representation of the vertex normals.\n\n Returns:\n tensor of normals of shape (N, max(V_n), 3).\n \"\"\"\n if self.isempty():\n return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device)\n verts_normals_list = self.verts_normals_list()\n return struct_utils.list_to_padded(\n verts_normals_list, (self._V, 3), pad_value=0.0, equisized=self.equisized\n )\n\n def faces_normals_packed(self):\n \"\"\"\n Get the packed representation of the face normals.\n\n Returns:\n tensor of normals of shape (sum(F_n), 3).\n \"\"\"\n self._compute_face_areas_normals()\n return self._faces_normals_packed\n\n def faces_normals_list(self):\n \"\"\"\n Get the list representation of the face normals.\n\n Returns:\n list of tensors of normals of shape (F_n, 3).\n \"\"\"\n if self.isempty():\n return [\n torch.empty((0, 3), dtype=torch.float32, device=self.device)\n ] * self._N\n faces_normals_packed = self.faces_normals_packed()\n split_size = self.num_faces_per_mesh().tolist()\n return struct_utils.packed_to_list(faces_normals_packed, split_size)\n\n def faces_normals_padded(self):\n \"\"\"\n Get the padded representation of the face normals.\n\n Returns:\n tensor of normals of shape (N, max(F_n), 3).\n \"\"\"\n if self.isempty():\n return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device)\n faces_normals_list = self.faces_normals_list()\n return struct_utils.list_to_padded(\n faces_normals_list, (self._F, 3), pad_value=0.0, equisized=self.equisized\n )\n\n def faces_areas_packed(self):\n \"\"\"\n Get the packed representation of the face areas.\n\n Returns:\n tensor of areas of shape (sum(F_n),).\n \"\"\"\n self._compute_face_areas_normals()\n return self._faces_areas_packed\n\n def laplacian_packed(self):\n self._compute_laplacian_packed()\n return self._laplacian_packed\n\n def _compute_face_areas_normals(self, refresh: bool = False):\n \"\"\"\n Compute the area and normal of each face in faces_packed.\n The convention of a normal for a face consisting of verts [v0, v1, v2]\n is normal = (v1 - v0) x (v2 - v0)\n\n Args:\n refresh: Set to True to force recomputation of face areas.\n Default: False.\n \"\"\"\n from ..ops.mesh_face_areas_normals import mesh_face_areas_normals\n\n if not (\n refresh\n or any(\n v is None\n for v in [self._faces_areas_packed, self._faces_normals_packed]\n )\n ):\n 
return\n faces_packed = self.faces_packed()\n verts_packed = self.verts_packed()\n face_areas, face_normals = mesh_face_areas_normals(verts_packed, faces_packed)\n self._faces_areas_packed = face_areas\n self._faces_normals_packed = face_normals\n\n def _compute_vertex_normals(self, refresh: bool = False):\n \"\"\"Computes the packed version of vertex normals from the packed verts\n and faces. This assumes verts are shared between faces. The normal for\n a vertex is computed as the sum of the normals of all the faces it is\n part of weighed by the face areas.\n\n Args:\n refresh: Set to True to force recomputation of vertex normals.\n Default: False.\n \"\"\"\n if not (refresh or any(v is None for v in [self._verts_normals_packed])):\n return\n\n if self.isempty():\n self._verts_normals_packed = torch.zeros(\n (self._N, 3), dtype=torch.int64, device=self.device\n )\n else:\n faces_packed = self.faces_packed()\n verts_packed = self.verts_packed()\n verts_normals = torch.zeros_like(verts_packed)\n vertices_faces = verts_packed[faces_packed]\n\n # NOTE: this is already applying the area weighting as the magnitude\n # of the cross product is 2 x area of the triangle.\n # pyre-fixme[16]: `Tensor` has no attribute `index_add`.\n verts_normals = verts_normals.index_add(\n 0,\n faces_packed[:, 1],\n torch.cross(\n vertices_faces[:, 2] - vertices_faces[:, 1],\n vertices_faces[:, 0] - vertices_faces[:, 1],\n dim=1,\n ),\n )\n verts_normals = verts_normals.index_add(\n 0,\n faces_packed[:, 2],\n torch.cross(\n vertices_faces[:, 0] - vertices_faces[:, 2],\n vertices_faces[:, 1] - vertices_faces[:, 2],\n dim=1,\n ),\n )\n verts_normals = verts_normals.index_add(\n 0,\n faces_packed[:, 0],\n torch.cross(\n vertices_faces[:, 1] - vertices_faces[:, 0],\n vertices_faces[:, 2] - vertices_faces[:, 0],\n dim=1,\n ),\n )\n\n self._verts_normals_packed = torch.nn.functional.normalize(\n verts_normals, eps=1e-6, dim=1\n )\n\n def _compute_padded(self, refresh: bool = False):\n \"\"\"\n Computes the padded version of meshes from verts_list and faces_list.\n \"\"\"\n if not (\n refresh or any(v is None for v in [self._verts_padded, self._faces_padded])\n ):\n return\n\n verts_list = self.verts_list()\n faces_list = self.faces_list()\n assert (\n faces_list is not None and verts_list is not None\n ), \"faces_list and verts_list arguments are required\"\n\n if self.isempty():\n self._faces_padded = torch.zeros(\n (self._N, 0, 3), dtype=torch.int64, device=self.device\n )\n self._verts_padded = torch.zeros(\n (self._N, 0, 3), dtype=torch.float32, device=self.device\n )\n else:\n self._faces_padded = struct_utils.list_to_padded(\n faces_list, (self._F, 3), pad_value=-1.0, equisized=self.equisized\n )\n self._verts_padded = struct_utils.list_to_padded(\n verts_list, (self._V, 3), pad_value=0.0, equisized=self.equisized\n )\n\n # TODO(nikhilar) Improve performance of _compute_packed.\n def _compute_packed(self, refresh: bool = False):\n \"\"\"\n Computes the packed version of the meshes from verts_list and faces_list\n and sets the values of auxillary tensors.\n\n Args:\n refresh: Set to True to force recomputation of packed representations.\n Default: False.\n \"\"\"\n\n if not (\n refresh\n or any(\n v is None\n for v in [\n self._verts_packed,\n self._verts_packed_to_mesh_idx,\n self._mesh_to_verts_packed_first_idx,\n self._faces_packed,\n self._faces_packed_to_mesh_idx,\n self._mesh_to_faces_packed_first_idx,\n ]\n )\n ):\n return\n\n # Packed can be calculated from padded or list, so can call the\n # accessor 
function for verts_list and faces_list.\n verts_list = self.verts_list()\n faces_list = self.faces_list()\n if self.isempty():\n self._verts_packed = torch.zeros(\n (0, 3), dtype=torch.float32, device=self.device\n )\n self._verts_packed_to_mesh_idx = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n self._mesh_to_verts_packed_first_idx = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n self._num_verts_per_mesh = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n self._faces_packed = -(\n torch.ones((0, 3), dtype=torch.int64, device=self.device)\n )\n self._faces_packed_to_mesh_idx = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n self._mesh_to_faces_packed_first_idx = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n self._num_faces_per_mesh = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n return\n\n verts_list_to_packed = struct_utils.list_to_packed(verts_list)\n self._verts_packed = verts_list_to_packed[0]\n if not torch.allclose(self.num_verts_per_mesh(), verts_list_to_packed[1]):\n raise ValueError(\"The number of verts per mesh should be consistent.\")\n self._mesh_to_verts_packed_first_idx = verts_list_to_packed[2]\n self._verts_packed_to_mesh_idx = verts_list_to_packed[3]\n\n faces_list_to_packed = struct_utils.list_to_packed(faces_list)\n faces_packed = faces_list_to_packed[0]\n if not torch.allclose(self.num_faces_per_mesh(), faces_list_to_packed[1]):\n raise ValueError(\"The number of faces per mesh should be consistent.\")\n self._mesh_to_faces_packed_first_idx = faces_list_to_packed[2]\n self._faces_packed_to_mesh_idx = faces_list_to_packed[3]\n\n faces_packed_offset = self._mesh_to_verts_packed_first_idx[\n self._faces_packed_to_mesh_idx\n ]\n self._faces_packed = faces_packed + faces_packed_offset.view(-1, 1)\n\n def _compute_edges_packed(self, refresh: bool = False):\n \"\"\"\n Computes edges in packed form from the packed version of faces and verts.\n \"\"\"\n if not (\n refresh\n or any(\n v is None\n for v in [\n self._edges_packed,\n self._faces_packed_to_mesh_idx,\n self._edges_packed_to_mesh_idx,\n self._num_edges_per_mesh,\n self._mesh_to_edges_packed_first_idx,\n ]\n )\n ):\n return\n\n if self.isempty():\n self._edges_packed = torch.full(\n (0, 2), fill_value=-1, dtype=torch.int64, device=self.device\n )\n self._edges_packed_to_mesh_idx = torch.zeros(\n (0,), dtype=torch.int64, device=self.device\n )\n return\n\n faces = self.faces_packed()\n F = faces.shape[0]\n v0, v1, v2 = faces.chunk(3, dim=1)\n e01 = torch.cat([v0, v1], dim=1) # (sum(F_n), 2)\n e12 = torch.cat([v1, v2], dim=1) # (sum(F_n), 2)\n e20 = torch.cat([v2, v0], dim=1) # (sum(F_n), 2)\n\n # All edges including duplicates.\n edges = torch.cat([e12, e20, e01], dim=0) # (sum(F_n)*3, 2)\n edge_to_mesh = torch.cat(\n [\n self._faces_packed_to_mesh_idx,\n self._faces_packed_to_mesh_idx,\n self._faces_packed_to_mesh_idx,\n ],\n dim=0,\n ) # sum(F_n)*3\n\n # Sort the edges in increasing vertex order to remove duplicates as\n # the same edge may appear in different orientations in different faces.\n # i.e. 
rows in edges after sorting will be of the form (v0, v1) where v1 > v0.\n # This sorting does not change the order in dim=0.\n edges, _ = edges.sort(dim=1)\n\n # Remove duplicate edges: convert each edge (v0, v1) into an\n # integer hash = V * v0 + v1; this allows us to use the scalar version of\n # unique which is much faster than edges.unique(dim=1) which is very slow.\n # After finding the unique elements reconstruct the vertex indicies as:\n # (v0, v1) = (hash / V, hash % V)\n # The inverse maps from unique_edges back to edges:\n # unique_edges[inverse_idxs] == edges\n # i.e. inverse_idxs[i] == j means that edges[i] == unique_edges[j]\n\n V = self._verts_packed.shape[0]\n edges_hash = V * edges[:, 0] + edges[:, 1]\n u, inverse_idxs = torch.unique(edges_hash, return_inverse=True)\n\n # Find indices of unique elements.\n # TODO (nikhilar) remove following 4 lines when torch.unique has support\n # for returning unique indices\n sorted_hash, sort_idx = torch.sort(edges_hash, dim=0)\n unique_mask = torch.ones(\n edges_hash.shape[0], dtype=torch.bool, device=self.device\n )\n unique_mask[1:] = sorted_hash[1:] != sorted_hash[:-1]\n unique_idx = sort_idx[unique_mask]\n\n self._edges_packed = torch.stack([u / V, u % V], dim=1)\n self._edges_packed_to_mesh_idx = edge_to_mesh[unique_idx]\n\n face_to_edge = torch.arange(3 * F).view(3, F).t()\n face_to_edge = inverse_idxs[face_to_edge]\n self._faces_packed_to_edges_packed = face_to_edge\n\n # Compute number of edges per mesh\n num_edges_per_mesh = torch.zeros(self._N, dtype=torch.int32, device=self.device)\n ones = torch.ones(1, dtype=torch.int32, device=self.device).expand(\n self._edges_packed_to_mesh_idx.shape\n )\n num_edges_per_mesh = num_edges_per_mesh.scatter_add_(\n 0, self._edges_packed_to_mesh_idx, ones\n )\n self._num_edges_per_mesh = num_edges_per_mesh\n\n # Compute first idx for each mesh in edges_packed\n mesh_to_edges_packed_first_idx = torch.zeros(\n self._N, dtype=torch.int64, device=self.device\n )\n num_edges_cumsum = num_edges_per_mesh.cumsum(dim=0)\n mesh_to_edges_packed_first_idx[1:] = num_edges_cumsum[:-1].clone()\n\n self._mesh_to_edges_packed_first_idx = mesh_to_edges_packed_first_idx\n\n def _compute_laplacian_packed(self, refresh: bool = False):\n \"\"\"\n Computes the laplacian in packed form.\n The definition of the laplacian is\n L[i, j] = -1 , if i == j\n L[i, j] = 1 / deg(i) , if (i, j) is an edge\n L[i, j] = 0 , otherwise\n where deg(i) is the degree of the i-th vertex in the graph\n\n Returns:\n Sparse FloatTensor of shape (V, V) where V = sum(V_n)\n\n \"\"\"\n if not (refresh or self._laplacian_packed is None):\n return\n\n if self.isempty():\n self._laplacian_packed = torch.zeros(\n (0, 0), dtype=torch.float32, device=self.device\n ).to_sparse()\n return\n\n verts_packed = self.verts_packed() # (sum(V_n), 3)\n edges_packed = self.edges_packed() # (sum(E_n), 3)\n V = verts_packed.shape[0] # sum(V_n)\n\n e0, e1 = edges_packed.unbind(1)\n\n idx01 = torch.stack([e0, e1], dim=1) # (sum(E_n), 2)\n idx10 = torch.stack([e1, e0], dim=1) # (sum(E_n), 2)\n idx = torch.cat([idx01, idx10], dim=0).t() # (2, 2*sum(E_n))\n\n # First, we construct the adjacency matrix,\n # i.e. 
A[i, j] = 1 if (i,j) is an edge, or\n # A[e0, e1] = 1 & A[e1, e0] = 1\n ones = torch.ones(idx.shape[1], dtype=torch.float32, device=self.device)\n A = torch.sparse.FloatTensor(idx, ones, (V, V))\n\n # the sum of i-th row of A gives the degree of the i-th vertex\n deg = torch.sparse.sum(A, dim=1).to_dense()\n\n # We construct the Laplacian matrix by adding the non diagonal values\n # i.e. L[i, j] = 1 ./ deg(i) if (i, j) is an edge\n deg0 = deg[e0]\n deg0 = torch.where(deg0 > 0.0, 1.0 / deg0, deg0)\n deg1 = deg[e1]\n deg1 = torch.where(deg1 > 0.0, 1.0 / deg1, deg1)\n val = torch.cat([deg0, deg1])\n L = torch.sparse.FloatTensor(idx, val, (V, V))\n\n # Then we add the diagonal values L[i, i] = -1.\n idx = torch.arange(V, device=self.device)\n idx = torch.stack([idx, idx], dim=0)\n ones = torch.ones(idx.shape[1], dtype=torch.float32, device=self.device)\n L -= torch.sparse.FloatTensor(idx, ones, (V, V))\n\n self._laplacian_packed = L\n\n def clone(self):\n \"\"\"\n Deep copy of Meshes object. All internal tensors are cloned individually.\n\n Returns:\n new Meshes object.\n \"\"\"\n verts_list = self.verts_list()\n faces_list = self.faces_list()\n new_verts_list = [v.clone() for v in verts_list]\n new_faces_list = [f.clone() for f in faces_list]\n other = self.__class__(verts=new_verts_list, faces=new_faces_list)\n for k in self._INTERNAL_TENSORS:\n v = getattr(self, k)\n if torch.is_tensor(v):\n setattr(other, k, v.clone())\n\n # Textures is not a tensor but has a clone method\n if self.textures is not None:\n other.textures = self.textures.clone()\n return other\n\n def to(self, device, copy: bool = False):\n \"\"\"\n Match functionality of torch.Tensor.to()\n If copy = True or the self Tensor is on a different device, the\n returned tensor is a copy of self with the desired torch.device.\n If copy = False and the self Tensor already has the correct torch.device,\n then self is returned.\n\n Args:\n device: Device id for the new tensor.\n copy: Boolean indicator whether or not to clone self. 
Default False.\n\n Returns:\n Meshes object.\n \"\"\"\n if not copy and self.device == device:\n return self\n\n other = self.clone()\n if self.device != device:\n other.device = device\n if other._N > 0:\n other._verts_list = [v.to(device) for v in other._verts_list]\n other._faces_list = [f.to(device) for f in other._faces_list]\n for k in self._INTERNAL_TENSORS:\n v = getattr(self, k)\n if torch.is_tensor(v):\n setattr(other, k, v.to(device))\n if self.textures is not None:\n other.textures = self.textures.to(device)\n return other\n\n def cpu(self):\n return self.to(torch.device(\"cpu\"))\n\n def cuda(self):\n return self.to(torch.device(\"cuda\"))\n\n def get_mesh_verts_faces(self, index: int):\n \"\"\"\n Get tensors for a single mesh from the list representation.\n\n Args:\n index: Integer in the range [0, N).\n\n Returns:\n verts: Tensor of shape (V, 3).\n faces: LongTensor of shape (F, 3).\n \"\"\"\n if not isinstance(index, int):\n raise ValueError(\"Mesh index must be an integer.\")\n if index < 0 or index > self._N:\n raise ValueError(\n \"Mesh index must be in the range [0, N) where \\\n N is the number of meshes in the batch.\"\n )\n verts = self.verts_list()\n faces = self.faces_list()\n return verts[index], faces[index]\n\n # TODO(nikhilar) Move function to a utils file.\n def split(self, split_sizes: list):\n \"\"\"\n Splits Meshes object of size N into a list of Meshes objects of\n size len(split_sizes), where the i-th Meshes object is of size split_sizes[i].\n Similar to torch.split().\n\n Args:\n split_sizes: List of integer sizes of Meshes objects to be returned.\n\n Returns:\n list[Meshes].\n \"\"\"\n if not all(isinstance(x, int) for x in split_sizes):\n raise ValueError(\"Value of split_sizes must be a list of integers.\")\n meshlist = []\n curi = 0\n for i in split_sizes:\n meshlist.append(self[curi : curi + i])\n curi += i\n return meshlist\n\n def offset_verts_(self, vert_offsets_packed):\n \"\"\"\n Add an offset to the vertices of this Meshes. 
In place operation.\n\n Args:\n vert_offsets_packed: A Tensor of the same shape as self.verts_packed\n giving offsets to be added to all vertices.\n Returns:\n self.\n \"\"\"\n verts_packed = self.verts_packed()\n if vert_offsets_packed.shape != verts_packed.shape:\n raise ValueError(\"Verts offsets must have dimension (all_v, 2).\")\n # update verts packed\n self._verts_packed = verts_packed + vert_offsets_packed\n new_verts_list = list(\n self._verts_packed.split(self.num_verts_per_mesh().tolist(), 0)\n )\n # update verts list\n # Note that since _compute_packed() has been executed, verts_list\n # cannot be None even if not provided during construction.\n self._verts_list = new_verts_list\n\n # update verts padded\n if self._verts_padded is not None:\n for i, verts in enumerate(new_verts_list):\n if len(verts) > 0:\n self._verts_padded[i, : verts.shape[0], :] = verts\n\n # update face areas and normals and vertex normals\n # only if the original attributes are computed\n if any(\n v is not None\n for v in [self._faces_areas_packed, self._faces_normals_packed]\n ):\n self._compute_face_areas_normals(refresh=True)\n if self._verts_normals_packed is not None:\n self._compute_vertex_normals(refresh=True)\n\n return self\n\n # TODO(nikhilar) Move out of place operator to a utils file.\n def offset_verts(self, vert_offsets_packed):\n \"\"\"\n Out of place offset_verts.\n\n Args:\n vert_offsets_packed: A Tensor of the same shape as self.verts_packed\n giving offsets to be added to all vertices.\n Returns:\n new Meshes object.\n \"\"\"\n new_mesh = self.clone()\n return new_mesh.offset_verts_(vert_offsets_packed)\n\n def scale_verts_(self, scale):\n \"\"\"\n Multiply the vertices of this Meshes object by a scalar value.\n In place operation.\n\n Args:\n scale: A scalar, or a Tensor of shape (N,).\n\n Returns:\n self.\n \"\"\"\n if not torch.is_tensor(scale):\n scale = torch.full((len(self),), scale, device=self.device)\n new_verts_list = []\n verts_list = self.verts_list()\n for i, old_verts in enumerate(verts_list):\n new_verts_list.append(scale[i] * old_verts)\n # update list\n self._verts_list = new_verts_list\n # update packed\n if self._verts_packed is not None:\n self._verts_packed = torch.cat(new_verts_list, dim=0)\n # update padded\n if self._verts_padded is not None:\n for i, verts in enumerate(self._verts_list):\n if len(verts) > 0:\n self._verts_padded[i, : verts.shape[0], :] = verts\n\n # update face areas and normals and vertex normals\n # only if the original attributes are computed\n if any(\n v is not None\n for v in [self._faces_areas_packed, self._faces_normals_packed]\n ):\n self._compute_face_areas_normals(refresh=True)\n if self._verts_normals_packed is not None:\n self._compute_vertex_normals(refresh=True)\n return self\n\n def scale_verts(self, scale):\n \"\"\"\n Out of place scale_verts.\n\n Args:\n scale: A scalar, or a Tensor of shape (N,).\n\n Returns:\n new Meshes object.\n \"\"\"\n new_mesh = self.clone()\n return new_mesh.scale_verts_(scale)\n\n def update_padded(self, new_verts_padded):\n \"\"\"\n This function allows for an pdate of verts_padded without having to\n explicitly convert it to the list representation for heterogeneous batches.\n Returns a Meshes structure with updated padded tensors and copies of the\n auxiliary tensors at construction time.\n It updates self._verts_padded with new_verts_padded, and does a\n shallow copy of (faces_padded, faces_list, num_verts_per_mesh, num_faces_per_mesh).\n If packed representations are computed in self, they are 
updated as well.\n\n Args:\n new_points_padded: FloatTensor of shape (N, V, 3)\n\n Returns:\n Meshes with updated padded representations\n \"\"\"\n\n def check_shapes(x, size):\n if x.shape[0] != size[0]:\n raise ValueError(\"new values must have the same batch dimension.\")\n if x.shape[1] != size[1]:\n raise ValueError(\"new values must have the same number of points.\")\n if x.shape[2] != size[2]:\n raise ValueError(\"new values must have the same dimension.\")\n\n check_shapes(new_verts_padded, [self._N, self._V, 3])\n\n new = self.__class__(verts=new_verts_padded, faces=self.faces_padded())\n\n if new._N != self._N or new._V != self._V or new._F != self._F:\n raise ValueError(\"Inconsistent sizes after construction.\")\n\n # overwrite the equisized flag\n new.equisized = self.equisized\n\n # overwrite textures if any\n new.textures = self.textures\n\n # copy auxiliary tensors\n copy_tensors = [\"_num_verts_per_mesh\", \"_num_faces_per_mesh\", \"valid\"]\n\n for k in copy_tensors:\n v = getattr(self, k)\n if torch.is_tensor(v):\n setattr(new, k, v) # shallow copy\n\n # shallow copy of faces_list if any, st new.faces_list()\n # does not re-compute from _faces_padded\n new._faces_list = self._faces_list\n\n # update verts/faces packed if they are computed in self\n if self._verts_packed is not None:\n copy_tensors = [\n \"_faces_packed\",\n \"_verts_packed_to_mesh_idx\",\n \"_faces_packed_to_mesh_idx\",\n \"_mesh_to_verts_packed_first_idx\",\n \"_mesh_to_faces_packed_first_idx\",\n ]\n for k in copy_tensors:\n v = getattr(self, k)\n assert torch.is_tensor(v)\n setattr(new, k, v) # shallow copy\n # update verts_packed\n pad_to_packed = self.verts_padded_to_packed_idx()\n new_verts_packed = new_verts_padded.reshape(-1, 3)[pad_to_packed, :]\n new._verts_packed = new_verts_packed\n new._verts_padded_to_packed_idx = pad_to_packed\n\n # update edges packed if they are computed in self\n if self._edges_packed is not None:\n copy_tensors = [\n \"_edges_packed\",\n \"_edges_packed_to_mesh_idx\",\n \"_mesh_to_edges_packed_first_idx\",\n \"_faces_packed_to_edges_packed\",\n \"_num_edges_per_mesh\",\n ]\n for k in copy_tensors:\n v = getattr(self, k)\n assert torch.is_tensor(v)\n setattr(new, k, v) # shallow copy\n\n # update laplacian if it is compute in self\n if self._laplacian_packed is not None:\n new._laplacian_packed = self._laplacian_packed\n\n assert new._verts_list is None\n assert new._verts_normals_packed is None\n assert new._faces_normals_packed is None\n assert new._faces_areas_packed is None\n\n return new\n\n # TODO(nikhilar) Move function to utils file.\n def get_bounding_boxes(self):\n \"\"\"\n Compute an axis-aligned bounding box for each mesh in this Meshes object.\n\n Returns:\n bboxes: Tensor of shape (N, 3, 2) where bbox[i, j] gives the\n min and max values of mesh i along the jth coordinate axis.\n \"\"\"\n all_mins, all_maxes = [], []\n for verts in self.verts_list():\n cur_mins = verts.min(dim=0)[0] # (3,)\n cur_maxes = verts.max(dim=0)[0] # (3,)\n all_mins.append(cur_mins)\n all_maxes.append(cur_maxes)\n all_mins = torch.stack(all_mins, dim=0) # (N, 3)\n all_maxes = torch.stack(all_maxes, dim=0) # (N, 3)\n bboxes = torch.stack([all_mins, all_maxes], dim=2)\n return bboxes\n\n def extend(self, N: int):\n \"\"\"\n Create new Meshes class which contains each input mesh N times\n\n Args:\n N: number of new copies of each mesh.\n\n Returns:\n new Meshes object.\n \"\"\"\n if not isinstance(N, int):\n raise ValueError(\"N must be an integer.\")\n if N <= 0:\n raise 
ValueError(\"N must be > 0.\")\n new_verts_list, new_faces_list = [], []\n for verts, faces in zip(self.verts_list(), self.faces_list()):\n new_verts_list.extend(verts.clone() for _ in range(N))\n new_faces_list.extend(faces.clone() for _ in range(N))\n\n tex = None\n if self.textures is not None:\n tex = self.textures.extend(N)\n\n return self.__class__(verts=new_verts_list, faces=new_faces_list, textures=tex)\n\n\ndef join_meshes_as_batch(meshes: List[Meshes], include_textures: bool = True):\n \"\"\"\n Merge multiple Meshes objects, i.e. concatenate the meshes objects. They\n must all be on the same device. If include_textures is true, they must all\n be compatible, either all or none having textures, and all the Textures\n objects having the same members. If include_textures is False, textures are\n ignored.\n\n Args:\n meshes: list of meshes.\n include_textures: (bool) whether to try to join the textures.\n\n Returns:\n new Meshes object containing all the meshes from all the inputs.\n \"\"\"\n if isinstance(meshes, Meshes):\n # Meshes objects can be iterated and produce single Meshes. We avoid\n # letting join_meshes_as_batch(mesh1, mesh2) silently do the wrong thing.\n raise ValueError(\"Wrong first argument to join_meshes_as_batch.\")\n # pyre-fixme[10]: Name `mesh` is used but not defined.\n verts = [v for mesh in meshes for v in mesh.verts_list()]\n faces = [f for mesh in meshes for f in mesh.faces_list()]\n if len(meshes) == 0 or not include_textures:\n return Meshes(verts=verts, faces=faces)\n\n if meshes[0].textures is None:\n if any(mesh.textures is not None for mesh in meshes):\n raise ValueError(\"Inconsistent textures in join_meshes_as_batch.\")\n return Meshes(verts=verts, faces=faces)\n\n if any(mesh.textures is None for mesh in meshes):\n raise ValueError(\"Inconsistent textures in join_meshes_as_batch.\")\n\n # Now we know there are multiple meshes and they have textures to merge.\n first = meshes[0].textures\n kwargs = {}\n if first.maps_padded() is not None:\n if any(mesh.textures.maps_padded() is None for mesh in meshes):\n raise ValueError(\"Inconsistent maps_padded in join_meshes_as_batch.\")\n maps = [m for mesh in meshes for m in mesh.textures.maps_padded()]\n kwargs[\"maps\"] = maps\n elif any(mesh.textures.maps_padded() is not None for mesh in meshes):\n raise ValueError(\"Inconsistent maps_padded in join_meshes_as_batch.\")\n\n if first.verts_uvs_padded() is not None:\n if any(mesh.textures.verts_uvs_padded() is None for mesh in meshes):\n raise ValueError(\"Inconsistent verts_uvs_padded in join_meshes_as_batch.\")\n uvs = [uv for mesh in meshes for uv in mesh.textures.verts_uvs_list()]\n V = max(uv.shape[0] for uv in uvs)\n kwargs[\"verts_uvs\"] = struct_utils.list_to_padded(uvs, (V, 2), -1)\n elif any(mesh.textures.verts_uvs_padded() is not None for mesh in meshes):\n raise ValueError(\"Inconsistent verts_uvs_padded in join_meshes_as_batch.\")\n\n if first.faces_uvs_padded() is not None:\n if any(mesh.textures.faces_uvs_padded() is None for mesh in meshes):\n raise ValueError(\"Inconsistent faces_uvs_padded in join_meshes_as_batch.\")\n uvs = [uv for mesh in meshes for uv in mesh.textures.faces_uvs_list()]\n F = max(uv.shape[0] for uv in uvs)\n kwargs[\"faces_uvs\"] = struct_utils.list_to_padded(uvs, (F, 3), -1)\n elif any(mesh.textures.faces_uvs_padded() is not None for mesh in meshes):\n raise ValueError(\"Inconsistent faces_uvs_padded in join_meshes_as_batch.\")\n\n if first.verts_rgb_padded() is not None:\n if any(mesh.textures.verts_rgb_padded() is None 
for mesh in meshes):\n raise ValueError(\"Inconsistent verts_rgb_padded in join_meshes_as_batch.\")\n rgb = [i for mesh in meshes for i in mesh.textures.verts_rgb_list()]\n V = max(i.shape[0] for i in rgb)\n kwargs[\"verts_rgb\"] = struct_utils.list_to_padded(rgb, (V, 3))\n elif any(mesh.textures.verts_rgb_padded() is not None for mesh in meshes):\n raise ValueError(\"Inconsistent verts_rgb_padded in join_meshes_as_batch.\")\n\n tex = Textures(**kwargs)\n return Meshes(verts=verts, faces=faces, textures=tex)\n"
]
| [
[
"torch.zeros",
"torch.device",
"torch.cat",
"torch.nn.functional.normalize",
"torch.stack",
"torch.unique",
"torch.arange",
"torch.is_tensor",
"torch.sparse.FloatTensor",
"torch.ones",
"torch.full",
"torch.empty",
"torch.cross",
"torch.zeros_like",
"torch.sparse.sum",
"torch.sort",
"torch.where"
]
]
|
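The Meshes code in the row above keeps a packed (all-meshes-concatenated) vertex tensor in sync with a per-mesh list when offsetting vertices. A minimal plain-PyTorch sketch of that split/offset bookkeeping, using made-up mesh sizes and no PyTorch3D dependency:

```python
import torch

# Hypothetical batch: two meshes with 3 and 4 vertices, stored "packed" as one (7, 3) tensor.
num_verts_per_mesh = [3, 4]
verts_packed = torch.arange(21, dtype=torch.float32).reshape(7, 3)
offsets = torch.full_like(verts_packed, 0.5)

# In-place style update: add offsets to the packed tensor, then rebuild the per-mesh list.
verts_packed = verts_packed + offsets
verts_list = list(verts_packed.split(num_verts_per_mesh, dim=0))

for i, v in enumerate(verts_list):
    print(f"mesh {i}: {tuple(v.shape)}")  # (3, 3) then (4, 3)
```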
nderako/NeuroFeedback-api | [
"07be188a751c8e68a97f788d74c666dc158cd0f4"
]
| [
"pythonModules/parser.py"
]
| [
"import requests, json, sys, getopt, time, random\nfrom scipy.io import loadmat\nimport mne\nimport numpy as np\n\ndef main(argv):\n inputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:\",[\"ifile=\"])\n except getopt.GetoptError:\n print('transmissorClient.py -i <inputfile>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('transmissorClient.py -i <inputfile>')\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n\n print('Input file is ', inputfile)\n\n raw_data = get_raw_data(inputfile)\n\n with open('memory3.txt', 'w+') as f:\n for i in range(len(raw_data[0])):\n string_data = str(raw_data[4][i] * 10)\n f.write(string_data + '\\n')\n\ndef get_raw_data(inputfile):\n\n data = loadmat(inputfile)\n\n S = data['SIGNAL'][:, 1:15]\n stim_close = data['SIGNAL'][:, 14]\n stim_open = data['SIGNAL'][:, 15]\n stim = 1 * stim_close + 2 * stim_open\n\n chnames = [\n 'Fp1',\n 'Fp2',\n 'Fz',\n 'T7',\n 'Cz',\n 'T8',\n 'P7',\n 'P3',\n 'Pz',\n 'P4',\n 'P8',\n 'O1',\n 'Oz',\n 'O2',\n 'stim']\n chtypes = ['eeg'] * 14 + ['stim']\n X = np.concatenate([S, stim[:, None]], axis=1).T\n\n info = mne.create_info(ch_names=chnames, sfreq=512,\n ch_types=chtypes, montage='standard_1020',\n verbose=False)\n raw = mne.io.RawArray(data=X, info=info, verbose=False)\n\n fmin = 3\n fmax = 40\n raw.filter(fmin, fmax, verbose=False)\n raw.resample(sfreq=128, verbose=False)\n\n print(len(raw['data'][0][2]))\n\n return raw['data'][0]\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])"
]
| [
[
"numpy.concatenate",
"scipy.io.loadmat"
]
]
|
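For the EEG parser row above, the core array handling is appending a combined stim channel to the signal matrix and transposing to channels-first before handing it to MNE. A NumPy-only sketch with random data standing in for the `loadmat` output:

```python
import numpy as np

# Stand-in for data loaded via scipy.io.loadmat: 1000 samples x 14 EEG channels.
rng = np.random.default_rng(0)
signal = rng.standard_normal((1000, 14))
stim_close = rng.integers(0, 2, size=1000)
stim_open = rng.integers(0, 2, size=1000)

# Encode the two event columns into one stim channel, append it, and go channels-first.
stim = 1 * stim_close + 2 * stim_open
X = np.concatenate([signal, stim[:, None]], axis=1).T  # shape (15, 1000)
print(X.shape)
```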
xieliaing/scikit-learn | [
"9b210ae8ffdc40e210f30f24656779ac690b899a",
"9b210ae8ffdc40e210f30f24656779ac690b899a"
]
| [
"examples/compose/plot_compare_reduction.py",
"examples/compose/plot_transformed_target.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\n=================================================================\nSelecting dimensionality reduction with Pipeline and GridSearchCV\n=================================================================\n\nThis example constructs a pipeline that does dimensionality\nreduction followed by prediction with a support vector\nclassifier. It demonstrates the use of ``GridSearchCV`` and\n``Pipeline`` to optimize over different classes of estimators in a\nsingle CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality\nreductions are compared to univariate feature selection during\nthe grid search.\n\nAdditionally, ``Pipeline`` can be instantiated with the ``memory``\nargument to memoize the transformers within the pipeline, avoiding to fit\nagain the same transformers over and over.\n\nNote that the use of ``memory`` to enable caching becomes interesting when the\nfitting of a transformer is costly.\n\"\"\"\n\n# %%\n# Illustration of ``Pipeline`` and ``GridSearchCV``\n###############################################################################\n\n# Authors: Robert McGibbon, Joel Nothman, Guillaume Lemaitre\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.decomposition import PCA, NMF\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nprint(__doc__)\n\npipe = Pipeline(\n [\n # the reduce_dim stage is populated by the param_grid\n (\"reduce_dim\", \"passthrough\"),\n (\"classify\", LinearSVC(dual=False, max_iter=10000)),\n ]\n)\n\nN_FEATURES_OPTIONS = [2, 4, 8]\nC_OPTIONS = [1, 10, 100, 1000]\nparam_grid = [\n {\n \"reduce_dim\": [PCA(iterated_power=7), NMF()],\n \"reduce_dim__n_components\": N_FEATURES_OPTIONS,\n \"classify__C\": C_OPTIONS,\n },\n {\n \"reduce_dim\": [SelectKBest(chi2)],\n \"reduce_dim__k\": N_FEATURES_OPTIONS,\n \"classify__C\": C_OPTIONS,\n },\n]\nreducer_labels = [\"PCA\", \"NMF\", \"KBest(chi2)\"]\n\ngrid = GridSearchCV(pipe, n_jobs=1, param_grid=param_grid)\nX, y = load_digits(return_X_y=True)\ngrid.fit(X, y)\n\nmean_scores = np.array(grid.cv_results_[\"mean_test_score\"])\n# scores are in the order of param_grid iteration, which is alphabetical\nmean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))\n# select score for best C\nmean_scores = mean_scores.max(axis=0)\nbar_offsets = np.arange(len(N_FEATURES_OPTIONS)) * (len(reducer_labels) + 1) + 0.5\n\nplt.figure()\nCOLORS = \"bgrcmyk\"\nfor i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):\n plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])\n\nplt.title(\"Comparing feature reduction techniques\")\nplt.xlabel(\"Reduced number of features\")\nplt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)\nplt.ylabel(\"Digit classification accuracy\")\nplt.ylim((0, 1))\nplt.legend(loc=\"upper left\")\n\nplt.show()\n\n# %%\n# Caching transformers within a ``Pipeline``\n###############################################################################\n# It is sometimes worthwhile storing the state of a specific transformer\n# since it could be used again. Using a pipeline in ``GridSearchCV`` triggers\n# such situations. Therefore, we use the argument ``memory`` to enable caching.\n#\n# .. 
warning::\n# Note that this example is, however, only an illustration since for this\n# specific case fitting PCA is not necessarily slower than loading the\n# cache. Hence, use the ``memory`` constructor parameter when the fitting\n# of a transformer is costly.\n\nfrom joblib import Memory\nfrom shutil import rmtree\n\n# Create a temporary folder to store the transformers of the pipeline\nlocation = \"cachedir\"\nmemory = Memory(location=location, verbose=10)\ncached_pipe = Pipeline(\n [(\"reduce_dim\", PCA()), (\"classify\", LinearSVC(dual=False, max_iter=10000))],\n memory=memory,\n)\n\n# This time, a cached pipeline will be used within the grid search\n\n\n# Delete the temporary cache before exiting\nmemory.clear(warn=False)\nrmtree(location)\n\n# %%\n# The ``PCA`` fitting is only computed at the evaluation of the first\n# configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The\n# other configurations of ``C`` will trigger the loading of the cached ``PCA``\n# estimator data, leading to save processing time. Therefore, the use of\n# caching the pipeline using ``memory`` is highly beneficial when fitting\n# a transformer is costly.\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n======================================================\nEffect of transforming the targets in regression model\n======================================================\n\nIn this example, we give an overview of\n:class:`~sklearn.compose.TransformedTargetRegressor`. We use two examples\nto illustrate the benefit of transforming the targets before learning a linear\nregression model. The first example uses synthetic data while the second\nexample is based on the Ames housing data set.\n\"\"\"\n\n# Author: Guillaume Lemaitre <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.metrics import median_absolute_error, r2_score\nfrom sklearn.utils.fixes import parse_version\n\n# %%\n# Synthetic example\n##############################################################################\n\n# `normed` is being deprecated in favor of `density` in histograms\nif parse_version(matplotlib.__version__) >= parse_version(\"2.1\"):\n density_param = {\"density\": True}\nelse:\n density_param = {\"normed\": True}\n\n# %%\n# A synthetic random regression dataset is generated. The targets ``y`` are\n# modified by:\n#\n# 1. translating all targets such that all entries are\n# non-negative (by adding the absolute value of the lowest ``y``) and\n# 2. applying an exponential function to obtain non-linear\n# targets which cannot be fitted using a simple linear model.\n#\n# Therefore, a logarithmic (`np.log1p`) and an exponential function\n# (`np.expm1`) will be used to transform the targets before training a linear\n# regression model and using it for prediction.\n\nX, y = make_regression(n_samples=10000, noise=100, random_state=0)\ny = np.expm1((y + abs(y.min())) / 200)\ny_trans = np.log1p(y)\n\n# %%\n# Below we plot the probability density functions of the target\n# before and after applying the logarithmic functions.\n\nf, (ax0, ax1) = plt.subplots(1, 2)\n\nax0.hist(y, bins=100, **density_param)\nax0.set_xlim([0, 2000])\nax0.set_ylabel(\"Probability\")\nax0.set_xlabel(\"Target\")\nax0.set_title(\"Target distribution\")\n\nax1.hist(y_trans, bins=100, **density_param)\nax1.set_ylabel(\"Probability\")\nax1.set_xlabel(\"Target\")\nax1.set_title(\"Transformed target distribution\")\n\nf.suptitle(\"Synthetic data\", y=0.06, x=0.53)\nf.tight_layout(rect=[0.05, 0.05, 0.95, 0.95])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n# %%\n# At first, a linear model will be applied on the original targets. Due to the\n# non-linearity, the model trained will not be precise during\n# prediction. 
Subsequently, a logarithmic function is used to linearize the\n# targets, allowing better prediction even with a similar linear model as\n# reported by the median absolute error (MAE).\n\nf, (ax0, ax1) = plt.subplots(1, 2, sharey=True)\n# Use linear model\nregr = RidgeCV()\nregr.fit(X_train, y_train)\ny_pred = regr.predict(X_test)\n# Plot results\nax0.scatter(y_test, y_pred)\nax0.plot([0, 2000], [0, 2000], \"--k\")\nax0.set_ylabel(\"Target predicted\")\nax0.set_xlabel(\"True Target\")\nax0.set_title(\"Ridge regression \\n without target transformation\")\nax0.text(\n 100,\n 1750,\n r\"$R^2$=%.2f, MAE=%.2f\"\n % (r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)),\n)\nax0.set_xlim([0, 2000])\nax0.set_ylim([0, 2000])\n# Transform targets and use same linear model\nregr_trans = TransformedTargetRegressor(\n regressor=RidgeCV(), func=np.log1p, inverse_func=np.expm1\n)\nregr_trans.fit(X_train, y_train)\ny_pred = regr_trans.predict(X_test)\n\nax1.scatter(y_test, y_pred)\nax1.plot([0, 2000], [0, 2000], \"--k\")\nax1.set_ylabel(\"Target predicted\")\nax1.set_xlabel(\"True Target\")\nax1.set_title(\"Ridge regression \\n with target transformation\")\nax1.text(\n 100,\n 1750,\n r\"$R^2$=%.2f, MAE=%.2f\"\n % (r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)),\n)\nax1.set_xlim([0, 2000])\nax1.set_ylim([0, 2000])\n\nf.suptitle(\"Synthetic data\", y=0.035)\nf.tight_layout(rect=[0.05, 0.05, 0.95, 0.95])\n\n# %%\n# Real-world data set\n###############################################################################\n#\n# In a similar manner, the Ames housing data set is used to show the impact\n# of transforming the targets before learning a model. In this example, the\n# target to be predicted is the selling price of each house.\n\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import QuantileTransformer, quantile_transform\n\names = fetch_openml(name=\"house_prices\", as_frame=True)\n# Keep only numeric columns\nX = ames.data.select_dtypes(np.number)\n# Remove columns with NaN or Inf values\nX = X.drop(columns=[\"LotFrontage\", \"GarageYrBlt\", \"MasVnrArea\"])\ny = ames.target\ny_trans = quantile_transform(\n y.to_frame(), n_quantiles=900, output_distribution=\"normal\", copy=True\n).squeeze()\n# %%\n# A :class:`~sklearn.preprocessing.QuantileTransformer` is used to normalize\n# the target distribution before applying a\n# :class:`~sklearn.linear_model.RidgeCV` model.\n\nf, (ax0, ax1) = plt.subplots(1, 2)\n\nax0.hist(y, bins=100, **density_param)\nax0.set_ylabel(\"Probability\")\nax0.set_xlabel(\"Target\")\nax0.text(s=\"Target distribution\", x=1.2e5, y=9.8e-6, fontsize=12)\nax0.ticklabel_format(axis=\"both\", style=\"sci\", scilimits=(0, 0))\n\nax1.hist(y_trans, bins=100, **density_param)\nax1.set_ylabel(\"Probability\")\nax1.set_xlabel(\"Target\")\nax1.text(s=\"Transformed target distribution\", x=-6.8, y=0.479, fontsize=12)\n\nf.suptitle(\"Ames housing data: selling price\", y=0.04)\nf.tight_layout(rect=[0.05, 0.05, 0.95, 0.95])\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n\n# %%\n# The effect of the transformer is weaker than on the synthetic data. However,\n# the transformation results in an increase in :math:`R^2` and large decrease\n# of the MAE. The residual plot (predicted target - true target vs predicted\n# target) without target transformation takes on a curved, 'reverse smile'\n# shape due to residual values that vary depending on the value of predicted\n# target. 
With target transformation, the shape is more linear indicating\n# better model fit.\n\nf, (ax0, ax1) = plt.subplots(2, 2, sharey=\"row\", figsize=(6.5, 8))\n\nregr = RidgeCV()\nregr.fit(X_train, y_train)\ny_pred = regr.predict(X_test)\n\nax0[0].scatter(y_pred, y_test, s=8)\nax0[0].plot([0, 7e5], [0, 7e5], \"--k\")\nax0[0].set_ylabel(\"True target\")\nax0[0].set_xlabel(\"Predicted target\")\nax0[0].text(\n s=\"Ridge regression \\n without target transformation\",\n x=-5e4,\n y=8e5,\n fontsize=12,\n multialignment=\"center\",\n)\nax0[0].text(\n 3e4,\n 64e4,\n r\"$R^2$=%.2f, MAE=%.2f\"\n % (r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)),\n)\nax0[0].set_xlim([0, 7e5])\nax0[0].set_ylim([0, 7e5])\nax0[0].ticklabel_format(axis=\"both\", style=\"sci\", scilimits=(0, 0))\n\nax1[0].scatter(y_pred, (y_pred - y_test), s=8)\nax1[0].set_ylabel(\"Residual\")\nax1[0].set_xlabel(\"Predicted target\")\nax1[0].ticklabel_format(axis=\"both\", style=\"sci\", scilimits=(0, 0))\n\nregr_trans = TransformedTargetRegressor(\n regressor=RidgeCV(),\n transformer=QuantileTransformer(n_quantiles=900, output_distribution=\"normal\"),\n)\nregr_trans.fit(X_train, y_train)\ny_pred = regr_trans.predict(X_test)\n\nax0[1].scatter(y_pred, y_test, s=8)\nax0[1].plot([0, 7e5], [0, 7e5], \"--k\")\nax0[1].set_ylabel(\"True target\")\nax0[1].set_xlabel(\"Predicted target\")\nax0[1].text(\n s=\"Ridge regression \\n with target transformation\",\n x=-5e4,\n y=8e5,\n fontsize=12,\n multialignment=\"center\",\n)\nax0[1].text(\n 3e4,\n 64e4,\n r\"$R^2$=%.2f, MAE=%.2f\"\n % (r2_score(y_test, y_pred), median_absolute_error(y_test, y_pred)),\n)\nax0[1].set_xlim([0, 7e5])\nax0[1].set_ylim([0, 7e5])\nax0[1].ticklabel_format(axis=\"both\", style=\"sci\", scilimits=(0, 0))\n\nax1[1].scatter(y_pred, (y_pred - y_test), s=8)\nax1[1].set_ylabel(\"Residual\")\nax1[1].set_xlabel(\"Predicted target\")\nax1[1].ticklabel_format(axis=\"both\", style=\"sci\", scilimits=(0, 0))\n\nf.suptitle(\"Ames housing data: selling price\", y=0.035)\n\nplt.show()\n"
]
| [
[
"numpy.array",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"sklearn.decomposition.NMF",
"matplotlib.pyplot.ylabel",
"sklearn.feature_selection.SelectKBest",
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"sklearn.decomposition.PCA",
"sklearn.svm.LinearSVC"
],
[
"sklearn.linear_model.RidgeCV",
"sklearn.datasets.fetch_openml",
"sklearn.utils.fixes.parse_version",
"sklearn.metrics.r2_score",
"sklearn.preprocessing.QuantileTransformer",
"matplotlib.pyplot.subplots",
"numpy.log1p",
"sklearn.metrics.median_absolute_error",
"sklearn.datasets.make_regression",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show"
]
]
|
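The second scikit-learn example in the row above boils down to wrapping a regressor in `TransformedTargetRegressor` with `log1p`/`expm1`. A condensed, runnable sketch of that pattern (smaller sample count, otherwise the same public API):

```python
import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.datasets import make_regression
from sklearn.linear_model import RidgeCV
from sklearn.metrics import median_absolute_error
from sklearn.model_selection import train_test_split

# Synthetic targets made non-negative and exponentiated, as in the example above.
X, y = make_regression(n_samples=2000, noise=100, random_state=0)
y = np.expm1((y + abs(y.min())) / 200)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Ridge fitted on log-transformed targets; predictions are mapped back with expm1.
model = TransformedTargetRegressor(
    regressor=RidgeCV(), func=np.log1p, inverse_func=np.expm1
)
model.fit(X_train, y_train)
print("MAE:", median_absolute_error(y_test, model.predict(X_test)))
```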
GitHubChuanYu/T3Project4_SystemIntegration | [
"1cd2224c5b94292927441e46df137749a0520f09"
]
| [
"ros/src/tl_detector/tl_detector.py"
]
| [
"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane, Waypoint\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nfrom scipy.spatial import KDTree\nimport numpy as np\n\nSTATE_COUNT_THRESHOLD = 3\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.waypoint_tree = None\n self.waypoints_2d = None\n self.camera_image = None\n self.lights = []\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def get_closest_waypoint(self, x, y):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n\n Returns:\n int: index of the closest waypoint in self.waypoints\n\n \"\"\"\n #TODO implement\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n # Check if closest is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n \n val = np.dot(cl_vect-prev_vect, pos_vect-cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #For testing, just return the light state\n return light.state\n # if(not self.has_image):\n # self.prev_light_loc = None\n # return False\n\n # cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n # #Get classification\n # return self.light_classifier.get_classification(cv_image)\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n closest_light = None\n line_wp_idx = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)\n\n #TODO find the closest visible traffic light (if one exists)\n diff = len(self.waypoints.waypoints)\n for i, light in enumerate(self.lights):\n # Get stop line waypoint index\n line = stop_line_positions[i]\n temp_wp_idx = self.get_closest_waypoint(line[0], line[1])\n # Find closest stop line waypoint index\n d = temp_wp_idx - car_wp_idx\n if d >= 0 and d < diff:\n diff = d\n closest_light = light\n line_wp_idx = temp_wp_idx\n \n if closest_light:\n state = self.get_light_state(closest_light)\n return line_wp_idx, state\n\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n"
]
| [
[
"scipy.spatial.KDTree",
"numpy.array",
"numpy.dot"
]
]
|
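The `get_closest_waypoint` logic in the row above (a KDTree query plus a dot-product check for whether the nearest waypoint lies behind the vehicle) can be exercised standalone. A sketch with a made-up straight-line track:

```python
import numpy as np
from scipy.spatial import KDTree

# Toy waypoint track (a straight line along x); values are made up.
waypoints_2d = [[float(i), 0.0] for i in range(20)]
tree = KDTree(waypoints_2d)

def closest_waypoint_ahead(x, y):
    """Index of the nearest waypoint that lies ahead of (x, y) along the track."""
    closest_idx = tree.query([x, y], 1)[1]
    cl_vect = np.array(waypoints_2d[closest_idx])
    prev_vect = np.array(waypoints_2d[closest_idx - 1])
    pos_vect = np.array([x, y])
    # If the pose is past the hyperplane through the closest waypoint, step forward.
    if np.dot(cl_vect - prev_vect, pos_vect - cl_vect) > 0:
        closest_idx = (closest_idx + 1) % len(waypoints_2d)
    return closest_idx

print(closest_waypoint_ahead(5.4, 0.2))  # -> 6
```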
SenseTime-Knowledge-Mining/BridgeDPI | [
"0dbbecb73d7ffe982ff4fbbf05a58b65591343ba"
]
| [
"nnLayer.py"
]
| [
"from torch import nn as nn\nfrom torch.nn import functional as F\nimport torch,time,os,random\nimport numpy as np\nfrom collections import OrderedDict\n\nclass TextSPP(nn.Module):\n def __init__(self, size=128, name='textSpp'):\n super(TextSPP, self).__init__()\n self.name = name\n self.spp = nn.AdaptiveAvgPool1d(size)\n def forward(self, x):\n return self.spp(x)\n\nclass TextSPP2(nn.Module):\n def __init__(self, size=128, name='textSpp2'):\n super(TextSPP2, self).__init__()\n self.name = name\n self.spp1 = nn.AdaptiveMaxPool1d(size)\n self.spp2 = nn.AdaptiveAvgPool1d(size)\n def forward(self, x):\n x1 = self.spp1(x).unsqueeze(dim=3) # => batchSize × feaSize × size × 1\n x2 = self.spp2(x).unsqueeze(dim=3) # => batchSize × feaSize × size × 1\n x3 = -self.spp1(-x).unsqueeze(dim=3) # => batchSize × feaSize × size × 1\n return torch.cat([x1,x2,x3], dim=3) # => batchSize × feaSize × size × 3\n\nclass TextEmbedding(nn.Module):\n def __init__(self, embedding, dropout=0.3, freeze=False, name='textEmbedding'):\n super(TextEmbedding, self).__init__()\n self.name = name\n self.embedding = nn.Embedding.from_pretrained(torch.tensor(embedding,dtype=torch.float32), freeze=freeze)\n self.dropout1 = nn.Dropout2d(p=dropout/2)\n self.dropout2 = nn.Dropout(p=dropout/2)\n self.p = dropout\n def forward(self, x):\n # x: batchSize × seqLen\n if self.p>0:\n x = self.dropout2(self.dropout1(self.embedding(x)))\n else:\n x = self.embedding(x)\n return x\n\nclass ResDilaCNNBlock(nn.Module):\n def __init__(self, dilaSize, filterSize=64, dropout=0.15, name='ResDilaCNNBlock'):\n super(ResDilaCNNBlock, self).__init__()\n self.layers = nn.Sequential(\n nn.ELU(),\n nn.Conv1d(filterSize,filterSize,kernel_size=3,padding=dilaSize,dilation=dilaSize),\n nn.InstanceNorm1d(filterSize),\n nn.ELU(),\n nn.Dropout(dropout),\n nn.Conv1d(filterSize,filterSize,kernel_size=3,padding=dilaSize,dilation=dilaSize),\n nn.InstanceNorm1d(filterSize),\n )\n self.name = name\n def forward(self, x):\n # x: batchSize × filterSize × seqLen\n return x + self.layers(x)\n\nclass ResDilaCNNBlocks(nn.Module):\n def __init__(self, feaSize, filterSize, blockNum=10, dilaSizeList=[1,2,4,8,16], dropout=0.15, name='ResDilaCNNBlocks'):\n super(ResDilaCNNBlocks, self).__init__()\n self.blockLayers = nn.Sequential()\n self.linear = nn.Linear(feaSize,filterSize)\n for i in range(blockNum):\n self.blockLayers.add_module(f\"ResDilaCNNBlock{i}\", ResDilaCNNBlock(dilaSizeList[i%len(dilaSizeList)],filterSize,dropout=dropout))\n self.name = name\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n x = self.linear(x) # => batchSize × seqLen × filterSize\n x = self.blockLayers(x.transpose(1,2)).transpose(1,2) # => batchSize × seqLen × filterSize\n return F.elu(x) # => batchSize × seqLen × filterSize\n\nclass BatchNorm1d(nn.Module):\n def __init__(self, inSize, name='batchNorm1d'):\n super(BatchNorm1d, self).__init__()\n self.bn = nn.BatchNorm1d(inSize)\n self.name = name\n def forward(self, x):\n return self.bn(x)\n\nclass TextCNN(nn.Module):\n def __init__(self, featureSize, filterSize, contextSizeList, reduction='pool', actFunc=nn.ReLU, bn=False, ln=False, name='textCNN'):\n super(TextCNN, self).__init__()\n moduleList = []\n bns,lns = [],[]\n for i in range(len(contextSizeList)):\n moduleList.append(\n nn.Conv1d(in_channels=featureSize, out_channels=filterSize, kernel_size=contextSizeList[i], padding=contextSizeList[i]//2),\n )\n bns.append(nn.BatchNorm1d(filterSize))\n lns.append(nn.LayerNorm(filterSize))\n if bn:\n self.bns = nn.ModuleList(bns)\n if ln:\n 
self.lns = nn.ModuleList(lns)\n self.actFunc = actFunc()\n self.conv1dList = nn.ModuleList(moduleList)\n self.reduction = reduction\n self.batcnNorm = nn.BatchNorm1d(filterSize)\n self.bn = bn\n self.ln = ln\n self.name = name\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n x = x.transpose(1,2) # => batchSize × feaSize × seqLen\n x = [conv(x).transpose(1,2) for conv in self.conv1dList] # => scaleNum * (batchSize × seqLen × filterSize)\n\n if self.bn:\n x = [b(i.transpose(1,2)).transpose(1,2) for b,i in zip(self.bns,x)]\n elif self.ln:\n x = [l(i) for l,i in zip(self.lns,x)]\n x = [self.actFunc(i) for i in x]\n\n if self.reduction=='pool':\n x = [F.adaptive_max_pool1d(i.transpose(1,2), 1).squeeze(dim=2) for i in x]\n return torch.cat(x, dim=1) # => batchSize × scaleNum*filterSize\n elif self.reduction=='None':\n return x # => scaleNum * (batchSize × seqLen × filterSize)\n\nclass TextLSTM(nn.Module):\n def __init__(self, feaSize, hiddenSize, num_layers=1, dropout=0.0, bidirectional=True, name='textBiLSTM'):\n super(TextLSTM, self).__init__()\n self.name = name\n self.biLSTM = nn.LSTM(feaSize, hiddenSize, bidirectional=bidirectional, batch_first=True, num_layers=num_layers, dropout=dropout)\n\n def forward(self, x, xlen=None):\n # x: batchSizeh × seqLen × feaSize\n if xlen is not None:\n xlen, indices = torch.sort(xlen, descending=True)\n _, desortedIndices = torch.sort(indices, descending=False)\n\n x = nn.utils.rnn.pack_padded_sequence(x[indices], xlen, batch_first=True)\n output, hn = self.biLSTM(x) # output: batchSize × seqLen × hiddenSize*2; hn: numLayers*2 × batchSize × hiddenSize\n if xlen is not None:\n output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)\n return output[desortedIndices]\n return output # output: batchSize × seqLen × hiddenSize*2\n def orthogonalize_gate(self):\n nn.init.orthogonal_(self.biLSTM.weight_ih_l0)\n nn.init.orthogonal_(self.biLSTM.weight_hh_l0)\n nn.init.ones_(self.biLSTM.bias_ih_l0)\n nn.init.ones_(self.biLSTM.bias_hh_l0)\n\nclass TextGRU(nn.Module):\n def __init__(self, feaSize, hiddenSize, num_layers=1, dropout=0.0, bidirectional=True, name='textBiGRU'):\n super(TextGRU, self).__init__()\n self.name = name\n self.biGRU = nn.GRU(feaSize, hiddenSize, bidirectional=bidirectional, batch_first=True, num_layers=num_layers, dropout=dropout)\n\n def forward(self, x, xlen=None):\n # x: batchSizeh × seqLen × feaSize\n if xlen is not None:\n xlen, indices = torch.sort(xlen, descending=True)\n _, desortedIndices = torch.sort(indices, descending=False)\n\n x = nn.utils.rnn.pack_padded_sequence(x[indices], xlen, batch_first=True)\n output, hn = self.biGRU(x) # output: batchSize × seqLen × hiddenSize*2; hn: numLayers*2 × batchSize × hiddenSize\n if xlen is not None:\n output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)\n return output[desortedIndices]\n\n return output # output: batchSize × seqLen × hiddenSize*2\n\nclass FastText(nn.Module):\n def __init__(self, feaSize, name='fastText'):\n super(FastText, self).__init__()\n self.name = name\n def forward(self, x, xLen):\n # x: batchSize × seqLen × feaSize; xLen: batchSize\n x = torch.sum(x, dim=1) / xLen.float().view(-1,1)\n return x\n\nclass MLP(nn.Module):\n def __init__(self, inSize, outSize, hiddenList=[], dropout=0.0, bnEveryLayer=False, dpEveryLayer=False, outBn=False, outAct=False, outDp=False, name='MLP', actFunc=nn.ReLU):\n super(MLP, self).__init__()\n self.name = name\n hiddens,bns = [],[]\n for i,os in enumerate(hiddenList):\n hiddens.append( 
nn.Sequential(\n nn.Linear(inSize, os),\n ) )\n bns.append(nn.BatchNorm1d(os))\n inSize = os\n bns.append(nn.BatchNorm1d(outSize))\n self.actFunc = actFunc()\n self.dropout = nn.Dropout(p=dropout)\n self.hiddens = nn.ModuleList(hiddens)\n self.bns = nn.ModuleList(bns)\n self.out = nn.Linear(inSize, outSize)\n self.bnEveryLayer = bnEveryLayer\n self.dpEveryLayer = dpEveryLayer\n self.outBn = outBn\n self.outAct = outAct\n self.outDp = outDp\n def forward(self, x):\n for h,bn in zip(self.hiddens,self.bns):\n x = h(x)\n if self.bnEveryLayer:\n x = bn(x) if len(x.shape)==2 else bn(x.transpose(-1,-2)).transpose(-1,-2)\n x = self.actFunc(x)\n if self.dpEveryLayer:\n x = self.dropout(x)\n x = self.out(x)\n if self.outBn: x = self.bns[-1](x) if len(x.shape)==2 else self.bns[-1](x.transpose(-1,-2)).transpose(-1,-2)\n if self.outAct: x = self.actFunc(x)\n if self.outDp: x = self.dropout(x)\n return x\n\nclass GCN(nn.Module):\n def __init__(self, inSize, outSize, hiddenSizeList=[], dropout=0.0, bnEveryLayer=False, dpEveryLayer=False, outBn=False, outAct=False, outDp=False, resnet=False, name='GCN', actFunc=nn.ReLU):\n super(GCN, self).__init__()\n self.name = name\n hiddens,bns = [],[]\n for i,os in enumerate(hiddenSizeList):\n hiddens.append(nn.Sequential(\n nn.Linear(inSize, os),\n ) )\n bns.append(nn.BatchNorm1d(os))\n inSize = os\n bns.append(nn.BatchNorm1d(outSize))\n self.actFunc = actFunc()\n self.dropout = nn.Dropout(p=dropout)\n self.hiddens = nn.ModuleList(hiddens)\n self.bns = nn.ModuleList(bns)\n self.out = nn.Linear(inSize, outSize)\n self.bnEveryLayer = bnEveryLayer\n self.dpEveryLayer = dpEveryLayer\n self.outBn = outBn\n self.outAct = outAct\n self.outDp = outDp\n self.resnet = resnet\n def forward(self, x, L):\n # x: nodeNum × feaSize; L: batchSize × nodeNum × nodeNum\n for h,bn in zip(self.hiddens,self.bns):\n a = h(torch.matmul(L,x)) # => batchSize × nodeNum × os\n if self.bnEveryLayer:\n if len(L.shape)==3:\n a = bn(a.transpose(1,2)).transpose(1,2)\n else:\n a = bn(a)\n a = self.actFunc(a)\n if self.dpEveryLayer:\n a = self.dropout(a)\n if self.resnet and a.shape==x.shape:\n a += x\n x = a\n a = self.out(torch.matmul(L, x)) # => batchSize × nodeNum × outSize\n if self.outBn:\n if len(L.shape)==3:\n a = self.bns[-1](a.transpose(1,2)).transpose(1,2)\n else:\n a = self.bns[-1](a)\n if self.outAct: a = self.actFunc(a)\n if self.outDp: a = self.dropout(a)\n if self.resnet and a.shape==x.shape:\n a += x\n x = a\n return x\n\nclass TextAttention(nn.Module):\n def __init__(self, method, name='textAttention'):\n super(TextAttention, self).__init__()\n self.attn = LuongAttention(method)\n self.name = name\n def forward(self, sequence, reference):\n # sequence: batchSize × seqLen × feaSize; reference: batchSize × classNum × feaSize\n alpha = self.attn(reference, sequence) # => batchSize × classNum × seqLen\n return torch.matmul(alpha, sequence) # => batchSize × classNum × feaSize\n\nclass LuongAttention(nn.Module):\n def __init__(self, method):\n super(LuongAttention, self).__init__()\n self.method = method\n def dot_score(self, hidden, encoderOutput):\n # hidden: batchSize × classNum × hiddenSize; encoderOutput: batchSize × seq_len × hiddenSize\n return torch.matmul(encoderOutput, hidden.transpose(-1,-2)) # => batchSize × seq_len × classNum\n def forward(self, hidden, encoderOutput):\n attentionScore = self.dot_score(hidden, encoderOutput).transpose(-1,-2)\n # attentionScore: batchSize × classNum × seq_len\n return F.softmax(attentionScore, dim=-1) # => batchSize × classNum × 
seq_len\n\nclass SimpleAttention(nn.Module):\n def __init__(self, inSize, actFunc=nn.Tanh(), name='SimpleAttention'):\n super(SimpleAttention, self).__init__()\n self.name = name\n self.W = nn.Linear(inSize, int(inSize//2))\n self.U = nn.Linear(int(inSize//2), 1)\n self.actFunc = actFunc\n def forward(self, input):\n # input: batchSize × seqLen × inSize\n x = self.W(input) # => batchSize × seqLen × inSize//2\n H = self.actFunc(x) # => batchSize × seqLen × inSize//2\n alpha = F.softmax(self.U(H), dim=1) # => batchSize × seqLen × 1\n return self.actFunc( torch.matmul(input.transpose(1,2), alpha).squeeze(2) ) # => batchSize × inSize\n\nclass InterationAttention(nn.Module):\n def __init__(self, feaSize1, feaSize2, dropout=0.0, attnType='poolAttn', name='interAttn'):\n super(InterationAttention, self).__init__()\n self.attnFunc = {'poolAttn':self.pooling_attention,\n 'poolAttn_s':self.pooling_attention_s,\n 'catSimAttn':self.concat_simple_attention,\n 'plaAttn':self.plane_attention,\n 'plaAttn_s':self.plane_attention_s}\n assert attnType in self.attnFunc.keys()\n self.name = name\n self.U = nn.Linear(feaSize1, feaSize2)\n self.W = nn.Linear(feaSize2, 1)\n self.simpleAttn1 = SimpleAttention(feaSize1+feaSize2)\n self.simpleAttn2 = SimpleAttention(feaSize1+feaSize2)\n self.feaSize1,self.feaSize2 = feaSize1,feaSize2\n self.attnType = attnType\n self.dropout = nn.Dropout(dropout)\n\n def pooling_attention_s(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n alpha = torch.sum(u*v,dim=3) # => batchSize × seqLen1 × seqLen2\n xAlpha,_ = torch.max(alpha, dim=2, keepdim=True) # => batchSize × seqLen1 × 1\n x = torch.matmul(x.transpose(1,2), F.softmax(xAlpha,dim=1)).squeeze(dim=2) # => batchSize × feaSize1\n yAlpha,_ = torch.max(alpha, dim=1, keepdim=True) # => batchSize × 1 × seqLen2\n y = torch.matmul(F.softmax(yAlpha,dim=2), y).squeeze(dim=1) # => batchSize × feaSize2\n return torch.cat([x,y], dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def pooling_attention(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n alpha = F.tanh(u*v) # => batchSize × seqLen1 × seqLen2 × feaSize2\n alpha = self.W(alpha).squeeze(dim=3) # => batchSize × seqLen1 × seqLen2\n xAlpha,_ = torch.max(alpha, dim=2, keepdim=True) # => batchSize × seqLen1 × 1\n x = torch.matmul(x.transpose(1,2), F.softmax(xAlpha,dim=1)).squeeze(dim=2) # => batchSize × feaSize1\n yAlpha,_ = torch.max(alpha, dim=1, keepdim=True) # => batchSize × 1 × seqLen2\n y = torch.matmul(F.softmax(yAlpha,dim=2), y).squeeze(dim=1) # => batchSize × feaSize2\n return torch.cat([x,y], dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def concat_simple_attention(self, x, y):\n x_pooled,_ = torch.max(x, dim=1) # => batchSize × feaSize1\n y_pooled,_ = torch.max(y, dim=1) # => batchSize × feaSize2\n u = torch.cat([x, y_pooled.unsqueeze(dim=1).expand(-1,x.shape[1],-1)], dim=2) # => batchSize × seqLen1 × (feaSize1+feaSize2)\n v = torch.cat([y, x_pooled.unsqueeze(dim=1).expand(-1,y.shape[1],-1)], dim=2) # => batchSize × seqLen2 × (feaSize1+feaSize2)\n x,y = self.simpleAttn1(u)[:,:self.feaSize1],self.simpleAttn2(v)[:,:self.feaSize2]\n return torch.cat([x,y], dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def plane_attention_s(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n 
alpha = torch.sum(u*v,dim=3) # => batchSize × seqLen1 × seqLen2\n alpha = F.softmax(alpha.flatten(1,2),dim=1).unsqueeze(dim=1) # => batchSize × 1 × seqLen1*seqLen2\n\n x,y = x.unsqueeze(dim=2).expand(-1,-1,y.shape[1],-1),y.unsqueeze(dim=1).expand(-1,x.shape[1],-1,-1) # => batchSize × seqLen1 × seqLen2 × feaSize\n xy = torch.cat([x,y], dim=3).flatten(1,2) # => batchSize × seqLen1*seqLen2 × (feaSize1+feaSize2)\n return torch.matmul(alpha, xy).squeeze(dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def plane_attention(self, x, y):\n u = self.U(x).unsqueeze(dim=2) # => batchSize × seqLen1 × 1 × feaSize2\n v = y.unsqueeze(dim=1) # => batchSize × 1 × seqLen2 × feaSize2\n alpha = F.tanh(u*v) # => batchSize × seqLen1 × seqLen2 × feaSize2\n alpha = self.W(alpha).squeeze(dim=3) # => batchSize × seqLen1 × seqLen2\n alpha = F.softmax(alpha.flatten(1,2),dim=1).unsqueeze(dim=1) # => batchSize × 1 × seqLen1*seqLen2\n\n x,y = x.unsqueeze(dim=2).expand(-1,-1,y.shape[1],-1),y.unsqueeze(dim=1).expand(-1,x.shape[1],-1,-1) # => batchSize × seqLen1 × seqLen2 × feaSize\n xy = torch.cat([x,y], dim=3).flatten(1,2) # => batchSize × seqLen1*seqLen2 × (feaSize1+feaSize2)\n return torch.matmul(alpha, xy).squeeze(dim=1) # => batchSize × (feaSize1+feaSize2)\n\n def forward(self, x, y):\n # x: batchSize × seqLen1 × feaSize1; y: batchSize × seqLen2 × feaSize2\n return self.dropout(self.attnFunc[self.attnType](x,y)) # => batchSize × (feaSize1+feaSize2)\n\nclass SelfAttention(nn.Module):\n def __init__(self, featureSize, dk, multiNum, name='selfAttn'):\n super(SelfAttention, self).__init__()\n self.dk = dk\n self.multiNum = multiNum\n self.WQ = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WK = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WV = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WO = nn.Linear(self.dk*multiNum, featureSize)\n self.name = name\n def forward(self, x, xlen=None):\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n queries = [self.WQ[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n keys = [self.WK[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n values = [self.WV[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n scores = [torch.bmm(queries[i], keys[i].transpose(1,2))/np.sqrt(self.dk) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × seqLen)\n # mask <EOS> padding\n if xlen is not None:\n for i in range(len(scores)):\n mask = torch.zeros(scores[0].shape, dtype=torch.float32, device=scores[i].device) # => batchSize × seqLen × seqLen\n for j,k in enumerate(xlen):\n mask[j,:,k-1:] -= 999999\n scores[i] = scores[i] + mask\n z = [torch.bmm(F.softmax(scores[i], dim=2), values[i]) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n z = self.WO(torch.cat(z, dim=2)) # => batchSize × seqLen × feaSize\n return z\n\nclass LayerNormAndDropout(nn.Module):\n def __init__(self, feaSize, dropout=0.1, name='layerNormAndDropout'):\n super(LayerNormAndDropout, self).__init__()\n self.layerNorm = nn.LayerNorm(feaSize)\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x):\n return self.dropout(self.layerNorm(x))\n\nclass SimpleSelfAttention(nn.Module):\n def __init__(self, feaSize, name='simpleSelfAttn'):\n super(SimpleSelfAttention, self).__init__()\n self.feaSize = feaSize\n self.WO = nn.Linear(feaSize, feaSize)\n self.name = name\n def forward(self, x, 
xlen=None):\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n querie = x # => batchSize × seqLen × feaSize\n key = x # => batchSize × seqLen × feaSize\n value = x # => batchSize × seqLen × feaSize\n score = torch.bmm(querie, key.transpose(1,2))/np.sqrt(self.feaSize) # => batchSize × seqLen × seqLen\n # mask <EOS> padding\n if xlen is not None:\n mask = torch.zeros(score.shape, dtype=torch.float32, device=score.device) # => batchSize × seqLen × seqLen\n for j,k in enumerate(xlen):\n mask[j,:,k-1:] -= 999999\n score = score + mask\n z = torch.bmm(F.softmax(score, dim=2), value) # => batchSize × seqLen × feaSize\n z = self.WO(z) # => batchSize × seqLen × feaSize\n return z\n\nclass FFN(nn.Module):\n def __init__(self, featureSize, seqMaxLen, dropout=0.1, name='FFN'):\n super(FFN, self).__init__()\n self.layerNorm1 = nn.LayerNorm([seqMaxLen, featureSize])\n self.layerNorm2 = nn.LayerNorm([seqMaxLen, featureSize])\n self.Wffn = nn.Sequential(\n nn.Linear(featureSize, featureSize*4), \n nn.ReLU(),\n nn.Linear(featureSize*4, featureSize)\n )\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x, z):\n z = x + self.dropout(self.layerNorm1(z)) # => batchSize × seqLen × feaSize\n ffnx = self.Wffn(z) # => batchSize × seqLen × feaSize\n return z+self.dropout(self.layerNorm2(ffnx)) # => batchSize × seqLen × feaSize\n\nclass Transformer(nn.Module):\n def __init__(self, featureSize, dk, multiNum, seqMaxLen, dropout=0.1):\n super(Transformer, self).__init__()\n self.selfAttn = SelfAttention(featureSize, dk, multiNum)\n self.ffn = FFN(featureSize, seqMaxLen, dropout)\n\n def forward(self, input):\n x, xlen = input\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n z = self.selfAttn(x, xlen) # => batchSize × seqLen × feaSize\n return (self.ffn(x, z),xlen) # => batchSize × seqLen × feaSize\n \nclass TextTransformer(nn.Module):\n def __init__(self, layersNum, featureSize, dk, multiNum, seqMaxLen, dropout=0.1, name='textTransformer'):\n super(TextTransformer, self).__init__()\n posEmb = [[np.sin(pos/10000**(2*i/featureSize)) if i%2==0 else np.cos(pos/10000**(2*i/featureSize)) for i in range(featureSize)] for pos in range(seqMaxLen)]\n self.posEmb = nn.Parameter(torch.tensor(posEmb, dtype=torch.float32), requires_grad=False) # seqLen × feaSize\n self.transformerLayers = nn.Sequential(\n OrderedDict(\n [('transformer%d'%i, Transformer(featureSize, dk, multiNum, seqMaxLen, dropout)) for i in range(layersNum)]\n )\n )\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x, xlen=None):\n # x: batchSize × seqLen × feaSize; xlen: batchSize\n x = self.dropout(x+self.posEmb) # => batchSize × seqLen × feaSize\n return self.transformerLayers((x, xlen)) # => batchSize × seqLen × feaSize\n\nclass Transformer_Wcnn(nn.Module):\n def __init__(self, featureSize, dk, multiNum, dropout=0.1):\n super(Transformer_Wcnn, self).__init__()\n self.dk = dk\n self.multiNum = multiNum\n self.WQ = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WK = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WV = nn.ModuleList([nn.Linear(featureSize, self.dk) for i in range(multiNum)])\n self.WO = nn.Linear(self.dk*multiNum, featureSize)\n self.layerNorm1 = nn.LayerNorm(featureSize)\n self.layerNorm2 = nn.LayerNorm(featureSize)\n self.Wcnn = TextCNN(featureSize, featureSize, [1,3,5], reduction='None', actFunc=nn.ReLU(), name='Wffn_CNN')\n self.Wffn = nn.Sequential(\n nn.Linear(featureSize*3, featureSize), \n )\n self.dropout = 
nn.Dropout(p=dropout)\n\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n queries = [self.WQ[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n keys = [self.WK[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n values = [self.WQ[i](x) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n score = [torch.bmm(queries[i], keys[i].transpose(1,2))/np.sqrt(self.dk) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × seqLen)\n z = [torch.bmm(F.softmax(score[i], dim=2), values[i]) for i in range(self.multiNum)] # => multiNum*(batchSize × seqLen × dk)\n z = self.WO(torch.cat(z, dim=2)) # => batchSize × seqLen × feaSize\n z = x + self.dropout(self.layerNorm1(z)) # => batchSize × seqLen × feaSize\n ffnx = torch.cat(self.Wcnn(z), dim=2) # => batchSize × seqLen × feaSize*3\n ffnx = self.Wffn(ffnx) # => batchSize × seqLen × feaSize\n return z+self.dropout(self.layerNorm2(ffnx)) # => batchSize × seqLen × feaSize\n\nclass TextTransformer_Wcnn(nn.Module):\n def __init__(self, layersNum, featureSize, dk, multiNum, dropout=0.1, name='textTransformer'):\n super(TextTransformer_Wcnn, self).__init__()\n #posEmb = [[np.sin(pos/10000**(2*i/featureSize)) if i%2==0 else np.cos(pos/10000**(2*i/featureSize)) for i in range(featureSize)] for pos in range(seqMaxLen)]\n #self.posEmb = nn.Parameter(torch.tensor(posEmb, dtype=torch.float32), requires_grad=False) # seqLen × feaSize\n self.transformerLayers = nn.Sequential(\n OrderedDict(\n [('transformer%d'%i, Transformer_Wcnn(featureSize, dk, multiNum, dropout)) for i in range(layersNum)]\n )\n )\n self.dropout = nn.Dropout(p=dropout)\n self.name = name\n def forward(self, x):\n # x: batchSize × seqLen × feaSize\n x = self.dropout(x) # => batchSize × seqLen × feaSize\n return self.transformerLayers(x) # => batchSize × seqLen × feaSize\n\nclass HierarchicalSoftmax(nn.Module):\n def __init__(self, inSize, hierarchicalStructure, lab2id, hiddenList1=[], hiddenList2=[], dropout=0.1, name='HierarchicalSoftmax'):\n super(HierarchicalSoftmax, self).__init__()\n self.name = name\n self.dropout = nn.Dropout(p=dropout)\n layers = nn.Sequential()\n for i,os in enumerate(hiddenList1):\n layers.add_module(str(i*2), nn.Linear(inSize, os))\n layers.add_module(str(i*2+1), nn.ReLU())\n inSize = os\n self.hiddenLayers1 = layers\n moduleList = [nn.Linear(inSize, len(hierarchicalStructure))]\n\n layers = nn.Sequential()\n for i,os in enumerate(hiddenList2):\n layers.add_module(str(i*2), nn.Linear(inSize, os))\n layers.add_module(str(i*2+1), nn.ReLU())\n inSize = os\n self.hiddenLayers2 = layers\n\n for i in hierarchicalStructure:\n moduleList.append( nn.Linear(inSize, len(i)) )\n for j in range(len(i)):\n i[j] = lab2id[i[j]]\n self.hierarchicalNum = [len(i) for i in hierarchicalStructure]\n self.restoreIndex = np.argsort(sum(hierarchicalStructure,[]))\n self.linearList = nn.ModuleList(moduleList)\n def forward(self, x):\n # x: batchSize × feaSize\n x = self.hiddenLayers1(x)\n x = self.dropout(x)\n y = [F.softmax(linear(x), dim=1) for linear in self.linearList[:1]]\n x = self.hiddenLayers2(x)\n y += [F.softmax(linear(x), dim=1) for linear in self.linearList[1:]]\n y = torch.cat([y[0][:,i-1].unsqueeze(1)*y[i] for i in range(1,len(y))], dim=1) # => batchSize × classNum\n return y[:,self.restoreIndex]\n\nclass FocalCrossEntropyLoss(nn.Module):\n def __init__(self, gama=2, weight=-1, logit=True):\n super(FocalCrossEntropyLoss, self).__init__()\n self.weight = torch.nn.Parameter(torch.tensor(weight, 
dtype=torch.float32), requires_grad=False)\n self.gama = gama\n self.logit = logit\n def forward(self, Y_pre, Y):\n if self.logit:\n Y_pre = F.softmax(Y_pre, dim=1)\n P = Y_pre[list(range(len(Y))), Y]\n if self.weight.shape!=torch.Size([]):\n w = self.weight[Y]\n else:\n w = torch.tensor([1.0 for i in range(len(Y))], device=self.weight.device)\n w = (w/w.sum()).reshape(-1)\n return (-w*((1-P)**self.gama * torch.log(P))).sum()\n\nclass ContinusCrossEntropyLoss(nn.Module):\n def __init__(self, gama=2):\n super(ContinusCrossEntropyLoss, self).__init__()\n self.gama = gama\n def forward(self, Y_logit, Y):\n Y_pre = F.softmax(Y_logit, dim=1)\n lab_pre = Y_pre.argmax(dim=1)\n P = Y_pre[list(range(len(Y))), Y]\n w = ((1+(lab_pre-Y).abs())**self.gama).float()\n w = (w/w.sum()).reshape(-1)\n return (-w*torch.log(P)).sum()\n\nclass PairWiseRankingLoss(nn.Module):\n def __init__(self, gama=1):\n super(PairWiseRankingLoss, self).__init__()\n self.gama = gama\n def forward(self, Y_logit, Y):\n # Y_logit, Y: batchSize1 × batchSize2;\n Y_pre = F.sigmoid(Y_logit)\n loss,cnt = 0,0\n for y_pre,y in zip(Y_pre,Y):\n # batchSize2\n neg = y_pre[y==0].unsqueeze(dim=1) # negNum × 1\n pos = y_pre[y==1].unsqueeze(dim=0) # 1 × posNum\n tmp = self.gama+(neg-pos) # => negNum × posNum\n tmp[tmp<0] = 0\n loss += tmp.sum()\n cnt += tmp.shape[0]*tmp.shape[1]\n return loss\n\n'''\nimport torch\nfrom nnLayer import *\nY = torch.tensor([0,2], dtype=torch.long)\nY_logit = torch.tensor([[0.1,0.9,1],[0.6,2,0.4]], dtype=torch.float32)\nCCEL = ContinusCrossEntropyLoss()\nCCEL(Y_logit, Y)\n'''\n\nclass MultiTaskCEL(nn.Module):\n def __init__(self, lossBalanced=True, ageW=1, genderW=1, name='MTCEL'):\n super(MultiTaskCEL, self).__init__()\n self.genderCriterion,self.ageCriterion = nn.CrossEntropyLoss(),nn.CrossEntropyLoss()#ContinusCrossEntropyLoss()#\n self.genderS,self.ageS = nn.Parameter(torch.zeros(1,dtype=torch.float), requires_grad=lossBalanced),nn.Parameter(torch.zeros(1,dtype=torch.float), requires_grad=lossBalanced)\n self.lossBalanced = lossBalanced\n self.name = name\n self.ageW,self.genderW = ageW,genderW\n def forward(self, genderY_logit, genderY, ageY_logit, ageY):\n if self.lossBalanced:\n return self.genderW * torch.exp(-self.genderS) * self.genderCriterion(genderY_logit,genderY) + self.ageW * torch.exp(-self.ageS) * self.ageCriterion(ageY_logit,ageY) + (self.genderS+self.ageS)/2\n else:\n return self.genderW * self.genderCriterion(genderY_logit,genderY) + self.ageW * self.ageCriterion(ageY_logit,ageY)"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.GRU",
"torch.nn.ModuleList",
"torch.nn.functional.elu",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.utils.rnn.pack_padded_sequence",
"numpy.cos",
"torch.exp",
"torch.nn.CrossEntropyLoss",
"torch.sum",
"torch.Size",
"numpy.sin",
"torch.nn.LayerNorm",
"torch.nn.Conv1d",
"torch.tensor",
"numpy.sqrt",
"torch.nn.init.orthogonal_",
"torch.nn.functional.tanh",
"torch.nn.AdaptiveMaxPool1d",
"torch.zeros",
"torch.nn.functional.sigmoid",
"torch.max",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"torch.log",
"torch.matmul",
"torch.sort",
"torch.nn.InstanceNorm1d",
"torch.nn.Dropout",
"torch.nn.init.ones_",
"torch.nn.BatchNorm1d",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Dropout2d",
"torch.nn.ELU"
]
]
|
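The `SimpleSelfAttention` layer in the row above is scaled dot-product attention with the input serving as queries, keys, and values. A minimal sketch of that core, omitting the padding mask:

```python
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

class MiniSelfAttention(nn.Module):
    """Single-head scaled dot-product self-attention over a batch of sequences."""
    def __init__(self, fea_size):
        super().__init__()
        self.fea_size = fea_size
        self.out = nn.Linear(fea_size, fea_size)

    def forward(self, x):
        # x: (batch, seq_len, fea_size); queries, keys and values are all x here.
        score = torch.bmm(x, x.transpose(1, 2)) / np.sqrt(self.fea_size)
        z = torch.bmm(F.softmax(score, dim=2), x)
        return self.out(z)

attn = MiniSelfAttention(8)
print(attn(torch.randn(2, 5, 8)).shape)  # torch.Size([2, 5, 8])
```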
DrewAlexander98/Couch-Mk2 | [
"d9dc7768d0e6714c2eb58dcd901988617303ce95"
]
| [
"FormulaTest.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef main():\n angleList = np.arange(1, 90, 10)\n numVals = angleList.size\n xValues = np.array(numVals)\n yValues = np.array(numVals)\n turnWheelVal1 = np.array(numVals)\n turnWheelVal2 = np.array(numVals)\n lvr = np.array(numVals)\n xvy = np.array(numVals)\n xValues = np.round(np.cos(np.radians(angleList)), 4)\n xValues *= 100\n xValues.fill(50)\n yValues = np.round(np.sin(np.radians(angleList)), 4)\n yValues *= 100\n xvy = xValues/yValues\n turnWheelVal1 = yValues - (yValues*((xValues)/100))\n turnWheelVal2 = yValues - (np.power(xValues, 2)*0.01) \n\n lvr1 = turnWheelVal1/yValues\n lvr2 = turnWheelVal2/yValues\n \n for i in range(numVals):\n print(\"- - - - - - - - - - - - - - - - \\n\")\n print(\"X: \" + str(round(xValues[i], 5)) + \" Y: \" + str(round(yValues[i], 5)) + \"\\n\")\n print(\"Formula 1 = LW: \" + str(round(yValues[i], 5)) + \" RW: \" + str(round(turnWheelVal1[i], 5)) + \"\\n\")\n print(\"Formula 2 = LW: \" + str(round(yValues[i], 5)) + \" RW: \" + str(round(turnWheelVal2[i], 5)) + \"\\n\")\n print(\"Left vs Right Ratio 1 = \" + str(round(lvr1[i], 5)))\n print(\"Left vs Right Ratio 2 = \" + str(round(lvr2[i], 5)))\n print(\"X vs Y Ratio = \" + str(round(xvy[i], 5)))\n plt.plot(xvy, lvr1)\n plt.plot(xvy, lvr2)\n \n plt.xlabel('X vs Y values')\n plt.ylabel('Left vs Right')\n\n plt.show()\n\nmain()"
]
| [
[
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.radians",
"numpy.arange",
"numpy.power",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
]
|
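The turning-formula script above pre-allocates with `np.array(numVals)`, which actually creates 0-d scalars that are immediately overwritten; the computation itself reduces to a few vectorised lines. A sketch comparing the two right-wheel formulas:

```python
import numpy as np

# Joystick angles in degrees; x is held at 50 as in the script above.
angles = np.arange(1, 90, 10)
x = np.full(angles.size, 50.0)
y = 100 * np.round(np.sin(np.radians(angles)), 4)

# Two candidate right-wheel formulas compared against the left wheel (y).
right_1 = y - y * (x / 100)          # proportional reduction
right_2 = y - 0.01 * np.power(x, 2)  # quadratic reduction

print(np.round(right_1 / y, 3))
print(np.round(right_2 / y, 3))
```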
cbim-medical-group/pytorch-template | [
"7f76ee095420e23efe22df560d8e0a314fbc16dd",
"7f76ee095420e23efe22df560d8e0a314fbc16dd"
]
| [
"data_loader/my_transforms/to_tensor.py",
"loss/bce_loss.py"
]
| [
"import numpy as np\nimport torch\nfrom skimage.transform import resize\n\n\nclass ToTensor:\n def __init__(self, training=True):\n \"\"\"\n Convert numpy array to Torch.Tensor\n \"\"\"\n self.training = training\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n\n image = torch.tensor(image).float()\n mask = torch.tensor(mask).long()\n\n return {'image': image, 'mask': mask}\n",
"import torch\nfrom torch.nn import functional as F\n\n\ndef bce_loss(output, target, weight=None):\n if weight is None:\n return F.binary_cross_entropy(output, target)\n else:\n assert len(weight) == len(\n torch.unique(target)), \"The weight array should as the same size as target label type number\"\n weight = torch.Tensor(weight)\n return F.binary_cross_entropy(output, target, weight=weight)\n"
]
| [
[
"torch.tensor"
],
[
"torch.unique",
"torch.Tensor",
"torch.nn.functional.binary_cross_entropy"
]
]
|
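For reference, a self-contained sketch of calling torch.nn.functional.binary_cross_entropy with a weight tensor, the call used by the bce_loss helper above; all tensors below are made-up examples, and the weight argument is applied element-wise, so it must broadcast against the input.

import torch
from torch.nn import functional as F

output = torch.sigmoid(torch.randn(4))       # predicted probabilities in (0, 1)
target = torch.tensor([1.0, 0.0, 1.0, 0.0])
weight = torch.tensor([1.0, 2.0, 1.0, 2.0])  # per-element rescaling weights
loss = F.binary_cross_entropy(output, target, weight=weight)
print(loss.item())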
chaitanya2334/stain-normalization-tool | [
"8f7aab84466bf96c344f93f46f2103a8e906fe51"
]
| [
"mat_estimation/scd.py"
]
| [
"import cv2\nimport numpy as np\n\nfrom training.scd_trainer import SCDTrainer\n\n\ndef est_using_scd(img, trainer):\n    assert isinstance(trainer, SCDTrainer)\n    prob_maps, _ = trainer.classify_stain_regions(img)\n\n    double_img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n\n    col_img = double_img.reshape(-1, 3)\n\n    stain_lbls = trainer.labels\n\n    prob_maps = prob_maps.reshape(-1, len(stain_lbls))\n\n    # background probability threshold\n    tbg = 0.75\n\n    # stain probability threshold\n    tfg = 0.75\n\n    # label used by classifier for background pixels should always be zero\n    bg_lbl = 0\n    bg_idx = -1\n\n    # one label per pixel, initialised to -1 (unassigned)\n    labels = -np.ones(col_img.shape[0])\n\n    for i in range(len(stain_lbls)):\n        if stain_lbls[i] == bg_lbl:\n            bg_idx = i\n        else:\n            # Set the label to the current stain's label for all pixels with a\n            # classification probability above the stain threshold\n            labels[prob_maps[:, i] > tfg] = stain_lbls[i]\n\n    stain_lbls = stain_lbls[stain_lbls != bg_lbl]\n\n    if bg_idx != -1:\n        labels[prob_maps[:, bg_idx] > tbg] = bg_lbl\n\n    # Generate stain separation matrix: one row per stain vector\n    m = np.zeros((3, 3))\n\n    # Mean optical density per colour channel of the pixels assigned to each stain\n    m[0, :] = -np.log(np.mean(col_img[labels == stain_lbls[1], :], axis=0) + (1 / 256))\n    m[1, :] = -np.log(np.mean(col_img[labels == stain_lbls[0], :], axis=0) + (1 / 256))\n\n    # Third stain vector is computed as a cross product of the first two\n    m[2, :] = np.cross(m[0, :], m[1, :])\n\n    # Normalise each stain vector to unit length\n    m = m / np.tile(np.sqrt(np.sum(m ** 2, axis=1, keepdims=True)), (1, 3))\n\n    labels = labels.reshape(img.shape[0], img.shape[1])\n\n    return m, labels\n"
]
| [
[
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"numpy.mean",
"numpy.cross"
]
]
|
astepanian1/gpt-2-simple | [
"6fd93398dc4f2df2910cebc7cf09d22c8704d5c3"
]
| [
"gpt_2_simple/gpt_2.py"
]
| [
"import tarfile\nimport os\nimport json\nimport requests\nimport sys\nimport shutil\nimport re\nfrom tqdm import tqdm, trange\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.client import device_lib\nimport time\nfrom datetime import datetime\nimport csv\nimport argparse\n\n# if in Google Colaboratory\ntry:\n from google.colab import drive\nexcept:\n pass\n\nfrom gpt_2_simple.src import model, sample, encoder, memory_saving_gradients\nfrom gpt_2_simple.src.load_dataset import load_dataset, Sampler\nfrom gpt_2_simple.src.accumulate import AccumulatingOptimizer\n\n\ndef download_file_with_progress(url_base, sub_dir, model_name, file_name):\n \"\"\"General utility for incrementally downloading files from the internet\n with progress bar\n from url_base / sub_dir / filename\n to local file system sub_dir / filename\n\n Parameters\n ----------\n file_name : str\n name of file to get e.g. \"hparams.json\"\n sub_dir: str\n subdirectory inside which to get and copy locally eg. \"models/124M\" \n no trailing slash\n url_base : str\n Start of URL location specifying server and any base directories no \n trailing slash\n e.g. \"https://storage.googleapis.com/gpt-2\"\n \"\"\"\n\n # set to download 1MB at a time. This could be much larger with no issue\n DOWNLOAD_CHUNK_SIZE = 1024 * 1024\n r = requests.get(url_base + \"/models/\" + model_name + \"/\" + file_name, stream=True)\n with open(os.path.join(sub_dir, file_name), 'wb') as f:\n file_size = int(r.headers[\"content-length\"])\n with tqdm(ncols=100, desc=\"Fetching \" + file_name,\n total=file_size, unit_scale=True) as pbar:\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n f.write(chunk)\n pbar.update(DOWNLOAD_CHUNK_SIZE)\n \n\ndef download_gpt2(model_dir='models', model_name='124M'):\n \"\"\"Downloads the GPT-2 model into the current directory\n from Google Cloud Storage.\n\n Parameters\n ----------\n model_dir : str\n parent directory of model to download\n\n model_name : str\n name of the GPT-2 model to download. 
\n As of 22 May 2019 one of \"124M\" or \"355M\" but may later include other \n model sizes\n\n Adapted from https://github.com/openai/gpt-2/blob/master/download_model.py\n \"\"\"\n\n # create the <model_dir>/<model_name> subdirectory if not present\n sub_dir = os.path.join(model_dir, model_name)\n if not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n sub_dir = sub_dir.replace('\\\\', '/') # needed for Windows\n\n for file_name in ['checkpoint', 'encoder.json', 'hparams.json',\n 'model.ckpt.data-00000-of-00001', 'model.ckpt.index',\n 'model.ckpt.meta', 'vocab.bpe']:\n download_file_with_progress(url_base=\"https://storage.googleapis.com/gpt-2\",\n sub_dir=sub_dir,\n model_name=model_name,\n file_name=file_name)\n\n\ndef start_tf_sess(threads=-1, server=None):\n \"\"\"\n Returns a tf.Session w/ config\n \"\"\"\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.graph_options.rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.OFF\n if threads > 0:\n config.intra_op_parallelism_threads = threads\n config.inter_op_parallelism_threads = threads\n\n if server is not None:\n return tf.compat.v1.Session(target=server.target, config=config)\n \n return tf.compat.v1.Session(config=config)\n\n\ndef reset_session(sess, threads=-1, server=None):\n \"\"\"Resets the current TensorFlow session, to clear memory\n or load another model.\n \"\"\"\n\n tf.compat.v1.reset_default_graph()\n sess.close()\n sess = start_tf_sess(threads, server)\n return sess\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\ndef finetune(sess,\n dataset,\n steps=-1,\n model_name='124M',\n model_dir='models',\n combine=50000,\n batch_size=1,\n learning_rate=0.0001,\n accumulate_gradients=5,\n restore_from='latest',\n run_name='run1',\n checkpoint_dir='checkpoint',\n sample_every=100,\n sample_length=1023,\n sample_num=1,\n multi_gpu=False,\n save_every=1000,\n print_every=1,\n max_checkpoints=1,\n use_memory_saving_gradients=False,\n only_train_transformer_layers=False,\n optimizer='adam',\n overwrite=False):\n \"\"\"Finetunes the model on the given dataset.\n\n Adapted from https://github.com/nshepperd/gpt-2/blob/finetuning/train.py.\n See that file for parameter definitions.\n \"\"\"\n\n assert model_name not in ['774M', '1558M'] or multi_gpu, \"Currently, a modern single GPU cannot finetune the 774M GPT-2 model or larger.\"\n\n SAMPLE_DIR = 'samples'\n\n checkpoint_path = os.path.join(checkpoint_dir, run_name)\n\n def maketree(path):\n try:\n os.makedirs(path)\n except:\n pass\n\n maketree(checkpoint_path)\n files = [f for f in os.listdir(checkpoint_path)]\n for file in ['hparams.json', 'encoder.json', 'vocab.bpe']:\n try:\n shutil.copyfile(os.path.join(model_dir, model_name, file),\n os.path.join(checkpoint_path, file))\n except FileNotFoundError as fnf_error:\n print(\"You need to download the GPT-2 model first via download_gpt2()\")\n raise(fnf_error)\n\n enc = encoder.get_encoder(checkpoint_path)\n hparams = model.default_hparams()\n with open(os.path.join(checkpoint_path, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if sample_length > hparams.n_ctx:\n raise ValueError(\n \"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n if model_name not in ['117M', '124M']:\n use_memory_saving_gradients = True\n only_train_transformer_layers = True\n accumulate_gradients = 1\n\n context = tf.compat.v1.placeholder(tf.int32, 
[batch_size, None])\n gpus = []\n\n if multi_gpu:\n gpus = get_available_gpus()\n\n output = model.model(hparams=hparams, X=context, gpus=gpus)\n loss = tf.reduce_mean(\n input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=context[:, 1:], logits=output['logits'][:, :-1]))\n\n tf_sample = sample.sample_sequence(\n hparams=hparams,\n length=sample_length,\n context=context,\n batch_size=batch_size,\n temperature=1.0,\n top_k=40)\n\n all_vars = [v for v in tf.compat.v1.trainable_variables() if 'model' in v.name]\n train_vars = [v for v in all_vars if '/h' in v.name] if only_train_transformer_layers else all_vars\n\n if optimizer == 'adam':\n opt = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n elif optimizer == 'sgd':\n opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=learning_rate)\n\n if accumulate_gradients > 1:\n if use_memory_saving_gradients:\n exit(\"Memory saving gradients are not implemented for gradient accumulation yet.\")\n opt = AccumulatingOptimizer(\n opt=opt,\n var_list=train_vars)\n opt_reset = opt.reset()\n opt_compute = opt.compute_gradients(loss)\n opt_apply = opt.apply_gradients()\n summary_loss = tf.compat.v1.summary.scalar('loss', opt_apply)\n else:\n if use_memory_saving_gradients:\n opt_grads = memory_saving_gradients.gradients(loss, train_vars)\n else:\n opt_grads = tf.gradients(ys=loss, xs=train_vars)\n opt_grads = list(zip(opt_grads, train_vars))\n opt_apply = opt.apply_gradients(opt_grads)\n summary_loss = tf.compat.v1.summary.scalar('loss', loss)\n\n summary_log = tf.compat.v1.summary.FileWriter(checkpoint_path)\n\n saver = tf.compat.v1.train.Saver(\n var_list=all_vars,\n max_to_keep=max_checkpoints)\n sess.run(tf.compat.v1.global_variables_initializer())\n\n if restore_from == 'latest':\n ckpt = tf.train.latest_checkpoint(checkpoint_path)\n if ckpt is None:\n # Get fresh GPT weights if new run.\n ckpt = tf.train.latest_checkpoint(\n os.path.join(model_dir, model_name))\n elif restore_from == 'fresh':\n ckpt = tf.train.latest_checkpoint(\n os.path.join(model_dir, model_name))\n else:\n ckpt = tf.train.latest_checkpoint(restore_from)\n print('Loading checkpoint', ckpt)\n saver.restore(sess, ckpt)\n\n print('Loading dataset...')\n chunks = load_dataset(enc, dataset, combine)\n data_sampler = Sampler(chunks)\n print('dataset has', data_sampler.total_size, 'tokens')\n print('Training...')\n\n counter = 1\n counter_path = os.path.join(checkpoint_path, 'counter')\n if os.path.exists(counter_path) and restore_from == 'latest':\n # Load the step number if we're resuming a run\n # Add 1 so we don't immediately try to save again\n with open(counter_path, 'r') as fp:\n counter = int(fp.read()) + 1\n counter_base = counter\n\n def save():\n maketree(checkpoint_path)\n print(\n 'Saving',\n os.path.join(checkpoint_path,\n 'model-{}').format(counter-1))\n saver.save(\n sess,\n os.path.join(checkpoint_path, 'model'),\n global_step=counter-1)\n with open(counter_path, 'w') as fp:\n fp.write(str(counter-1) + '\\n')\n\n def generate_samples():\n context_tokens = data_sampler.sample(1)\n all_text = []\n index = 0\n while index < sample_num:\n out = sess.run(\n tf_sample,\n feed_dict={context: batch_size * [context_tokens]})\n for i in range(min(sample_num - index, batch_size)):\n text = enc.decode(out[i])\n text = '======== SAMPLE {} ========\\n{}\\n'.format(\n index + 1, text)\n all_text.append(text)\n index += 1\n print(text)\n maketree(os.path.join(SAMPLE_DIR, run_name))\n with open(\n os.path.join(SAMPLE_DIR, run_name,\n 
'samples-{}').format(counter), 'w') as fp:\n fp.write('\\n'.join(all_text))\n\n def sample_batch():\n return [data_sampler.sample(1024) for _ in range(batch_size)]\n\n if overwrite and restore_from == 'latest':\n for file in files:\n if file.startswith('model') or file.startswith('events'):\n os.remove(os.path.join(checkpoint_path, file))\n save()\n\n avg_loss = (0.0, 0.0)\n start_time = time.time()\n\n if steps:\n steps = int(steps)\n \n try:\n while True:\n if steps > 0 and counter == (counter_base + steps):\n save()\n return\n if (counter - 1) % save_every == 0 and counter > 1:\n save()\n if (counter - 1) % sample_every == 0 and counter > 1:\n generate_samples()\n\n if accumulate_gradients > 1:\n sess.run(opt_reset)\n for _ in range(accumulate_gradients):\n sess.run(\n opt_compute, feed_dict={context: sample_batch()})\n (v_loss, v_summary) = sess.run((opt_apply, summary_loss))\n else:\n (_, v_loss, v_summary) = sess.run(\n (opt_apply, loss, summary_loss),\n feed_dict={context: sample_batch()})\n\n summary_log.add_summary(v_summary, counter)\n\n if counter % print_every == 0:\n avg_loss = (avg_loss[0] * 0.99 + v_loss,\n avg_loss[1] * 0.99 + 1.0)\n\n print(\n '[{counter} | {time:2.2f}] loss={loss:2.2f} avg={avg:2.2f}'\n .format(\n counter=counter,\n time=time.time() - start_time,\n loss=v_loss,\n avg=avg_loss[0] / avg_loss[1]))\n\n counter += 1\n except KeyboardInterrupt:\n print('interrupted')\n save()\n\n\ndef load_gpt2(sess,\n run_name=\"run1\",\n checkpoint_dir=\"checkpoint\",\n model_name=None,\n model_dir='models',\n multi_gpu=False):\n \"\"\"Loads the model checkpoint or existing model into a TensorFlow session\n for repeated predictions.\n \"\"\"\n\n if model_name:\n checkpoint_path = os.path.join(model_dir, model_name)\n else:\n checkpoint_path = os.path.join(checkpoint_dir, run_name)\n\n hparams = model.default_hparams()\n with open(os.path.join(checkpoint_path, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n context = tf.compat.v1.placeholder(tf.int32, [1, None])\n\n gpus = []\n if multi_gpu:\n gpus = get_available_gpus()\n\n output = model.model(hparams=hparams, X=context, gpus=gpus)\n\n ckpt = tf.train.latest_checkpoint(checkpoint_path)\n saver = tf.compat.v1.train.Saver(allow_empty=True)\n sess.run(tf.compat.v1.global_variables_initializer())\n\n if model_name:\n print('Loading pretrained model', ckpt)\n else:\n print('Loading checkpoint', ckpt)\n saver.restore(sess, ckpt)\n\n\ndef generate(sess,\n run_name='run1',\n checkpoint_dir='checkpoint',\n model_name=None,\n model_dir='models',\n sample_dir='samples',\n return_as_list=False,\n truncate=None,\n destination_path=None,\n sample_delim='=' * 20 + '\\n',\n prefix=None,\n seed=None,\n nsamples=1,\n batch_size=1,\n length=1023,\n temperature=0.7,\n top_k=0,\n top_p=0.0,\n include_prefix=True):\n \"\"\"Generates text from a model loaded into memory.\n\n Adapted from https://github.com/openai/gpt-2/blob/master/src/interactive_conditional_samples.py\n \"\"\"\n\n if batch_size is None:\n batch_size = 1\n assert nsamples % batch_size == 0\n\n if nsamples == 1:\n sample_delim = ''\n\n if prefix == '':\n prefix = None\n\n if model_name:\n checkpoint_path = os.path.join(model_dir, model_name)\n else:\n checkpoint_path = os.path.join(checkpoint_dir, run_name)\n\n enc = encoder.get_encoder(checkpoint_path)\n hparams = model.default_hparams()\n with open(os.path.join(checkpoint_path, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if prefix:\n context = tf.compat.v1.placeholder(tf.int32, 
[batch_size, None])\n context_tokens = enc.encode(prefix)\n\n np.random.seed(seed)\n tf.compat.v1.set_random_seed(seed)\n\n output = sample.sample_sequence(\n hparams=hparams,\n length=min(length, 1023 - (len(context_tokens) if prefix else 0)),\n start_token=enc.encoder['<|endoftext|>'] if not prefix else None,\n context=context if prefix else None,\n batch_size=batch_size,\n temperature=temperature, top_k=top_k, top_p=top_p\n )[:, 1:]\n\n if destination_path:\n f = open(destination_path, 'w')\n generated = 0\n gen_texts = []\n while generated < nsamples:\n if not prefix:\n out = sess.run(output)\n else:\n out = sess.run(output, feed_dict={\n context: batch_size * [context_tokens]\n })\n for i in range(batch_size):\n generated += 1\n gen_text = enc.decode(out[i])\n if prefix:\n gen_text = enc.decode(context_tokens[:1]) + gen_text\n if truncate:\n truncate_esc = re.escape(truncate)\n if prefix and not include_prefix:\n prefix_esc = re.escape(prefix)\n pattern = '(?:{})(.*?)(?:{})'.format(prefix_esc,\n truncate_esc)\n else:\n pattern = '(.*?)(?:{})'.format(truncate_esc)\n\n trunc_text = re.search(pattern, gen_text, re.S)\n if trunc_text:\n gen_text = trunc_text.group(1)\n gen_text = gen_text.lstrip('\\n')\n if destination_path:\n f.write(\"{}\\n{}\".format(gen_text, sample_delim))\n if not return_as_list and not destination_path:\n print(\"{}\\n{}\".format(gen_text, sample_delim), end='')\n gen_texts.append(gen_text)\n\n if destination_path:\n f.close()\n\n if return_as_list:\n return gen_texts\n\n\ndef generate_to_file(sess,\n run_name='run1',\n checkpoint_dir='checkpoint',\n model_name=None,\n model_dir='models',\n truncate=None,\n destination_path='gpt_2_gen_texts.txt',\n sample_delim='=' * 20 + '\\n',\n prefix=None,\n seed=None,\n nsamples=1,\n batch_size=1,\n length=1023,\n temperature=0.7,\n top_k=0,\n top_p=0.0,\n include_prefix=True):\n \"\"\"Generates the texts to a file.\n\n sample_delim separates texts: set to '' if each text is a small document.\n\n Adapted from https://github.com/minimaxir/textgenrnn/blob/master/textgenrnn/textgenrnn.py\n \"\"\"\n\n generate(sess=sess,\n run_name=run_name,\n checkpoint_dir=checkpoint_dir,\n model_name=model_name,\n model_dir=model_dir,\n return_as_list=False,\n truncate=truncate,\n destination_path=destination_path,\n sample_delim=sample_delim,\n prefix=prefix,\n seed=seed,\n nsamples=nsamples,\n batch_size=batch_size,\n length=length,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n include_prefix=include_prefix)\n\n\ndef mount_gdrive():\n \"\"\"Mounts the user's Google Drive in Colaboratory.\"\"\"\n assert 'google.colab' in sys.modules, \"You must be in Colaboratory to mount your Google Drive\"\n\n drive.mount('/content/drive')\n\n\ndef is_mounted():\n \"\"\"Checks if the Google Drive is mounted.\"\"\"\n assert os.path.isdir('/content/drive'), \"You must mount first using mount_gdrive()\"\n\n\ndef get_tarfile_name(checkpoint_folder):\n \"\"\"Converts a folder path into a filename for a .tar archive\"\"\"\n tarfile_name = checkpoint_folder.replace(os.path.sep, '_') + '.tar'\n\n return tarfile_name\n\n\ndef copy_checkpoint_to_gdrive(run_name='run1', copy_folder=False):\n \"\"\"Copies the checkpoint folder to a mounted Google Drive.\"\"\"\n is_mounted()\n\n checkpoint_folder = os.path.join('checkpoint', run_name)\n\n if copy_folder:\n shutil.copytree(checkpoint_folder, \"/content/drive/My Drive/\" + checkpoint_folder)\n else:\n file_path = get_tarfile_name(checkpoint_folder)\n\n # Reference: https://stackoverflow.com/a/17081026\n with 
tarfile.open(file_path, 'w') as tar:\n tar.add(checkpoint_folder)\n\n shutil.copyfile(file_path, \"/content/drive/My Drive/\" + file_path)\n\n\ndef copy_checkpoint_from_gdrive(run_name='run1', copy_folder=False):\n \"\"\"Copies the checkpoint folder from a mounted Google Drive.\"\"\"\n is_mounted()\n\n checkpoint_folder = os.path.join('checkpoint', run_name)\n\n if copy_folder:\n shutil.copytree(\"/content/drive/My Drive/\" + checkpoint_folder, checkpoint_folder)\n else:\n file_path = get_tarfile_name(checkpoint_folder)\n\n shutil.copyfile(\"/content/drive/My Drive/\" + file_path, file_path)\n\n with tarfile.open(file_path, 'r') as tar:\n tar.extractall()\n\n\ndef copy_file_to_gdrive(file_path):\n \"\"\"Copies a file to a mounted Google Drive.\"\"\"\n is_mounted()\n\n shutil.copyfile(file_path, \"/content/drive/My Drive/\" + file_path)\n\n\ndef copy_file_from_gdrive(file_path):\n \"\"\"Copies a file from a mounted Google Drive.\"\"\"\n is_mounted()\n\n shutil.copyfile(\"/content/drive/My Drive/\" + file_path, file_path)\n\n\ndef is_gpt2_downloaded(model_dir='models', model_name='124M'):\n \"\"\"Checks if the original model + associated files are present in folder.\"\"\"\n\n for filename in ['checkpoint', 'encoder.json', 'hparams.json',\n 'model.ckpt.data-00000-of-00001', 'model.ckpt.index',\n 'model.ckpt.meta', 'vocab.bpe']:\n if not os.path.isfile(os.path.join(model_dir, model_name, filename)):\n return False\n return True\n\n\ndef encode_csv(csv_path, out_path='csv_encoded.txt', header=True,\n start_token=\"<|startoftext|>\",\n end_token=\"<|endoftext|>\"):\n \"\"\"Encodes a single-column CSV to a format suitable for gpt-2-simple.\n Automatically adds the specified prefix and suffix tokens.\n \"\"\"\n\n with open(csv_path, 'r', encoding='utf8', errors='ignore') as f:\n with open(out_path, 'w', encoding='utf8', errors='ignore') as w:\n if header:\n f.readline()\n reader = csv.reader(f)\n for row in reader:\n w.write(start_token + row[0] + end_token + \"\\n\")\n\n\ndef encode_dataset(file_path, model_dir='models', out_path='text_encoded.npz',\n model_name=\"124M\",\n combine=50000):\n \"\"\"Preencodes a text document into chunks and compresses it,\n saving time when generated.\n\n Adapted from https://github.com/nshepperd/gpt-2/blob/finetuning/encode.py\n \"\"\"\n\n model_path = os.path.join(model_dir, model_name)\n enc = encoder.get_encoder(model_path)\n print('Reading files')\n chunks = load_dataset(enc, file_path, combine)\n print('Writing', out_path)\n np.savez_compressed(out_path, *chunks)\n\n\ndef cmd():\n \"\"\"Function called when invoking from the terminal.\"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Easily retrain OpenAI's GPT-2 text-generating model on new texts. 
(https://github.com/minimaxir/gpt-2-simple)\"\n )\n\n # Explicit arguments\n \n parser.add_argument(\n '--mode', help='Mode for using the CLI (either \"finetune\" or \"generate\") [Required]', nargs='?')\n parser.add_argument(\n '--run_name', help=\"[finetune/generate] Run number to save/load the model\",\n nargs='?', default='run1')\n parser.add_argument(\n '--checkpoint_dir', help=\"[finetune] Path of the checkpoint directory\",\n nargs='?', default='checkpoint')\n parser.add_argument(\n '--model_name', help=\"[finetune] Name of the GPT-2 model to finetune\",\n nargs='?', default='124M')\n parser.add_argument(\n '--model_dir', help=\"[finetune] Path of directory of the GPT-2 model to finetune\",\n nargs='?', default='models')\n parser.add_argument(\n '--dataset', help=\"[finetune] Path to the source text.\",\n nargs='?', default=None)\n parser.add_argument(\n '--steps', help=\"[finetune] Number of steps to train (-1 for infinite)\",\n nargs='?', default=-1)\n parser.add_argument(\n '--restore_from', help=\"[finetune] Whether to load model 'fresh' or from 'latest' checkpoint.\",\n nargs='?', default='latest')\n parser.add_argument(\n '--sample_every', help=\"[finetune] After how many steps to print sample\",\n nargs='?', default=1000000, type=int)\n parser.add_argument(\n '--save_every', help=\"[finetune] After how many steps to save checkpoint\",\n nargs='?', default=100, type=int)\n parser.add_argument(\n '--print_every', help=\"[finetune] After how many steps to print progress\",\n nargs='?', default=10, type=int)\n parser.add_argument(\n '--optimizer', help=\"[finetune] Optimizer to use for finetuning (adam or sgd)\",\n nargs='?', default='adam')\n parser.add_argument(\n '--overwrite', help=\"[finetune] Overwrite existing model when continuing training\",\n nargs='?', default=False, type=lambda x: (str(x).lower() == 'true'))\n parser.add_argument(\n '--nfiles', help=\"[generate] How many files to generate.\",\n nargs='?', default=1, type=int)\n parser.add_argument(\n '--nsamples', help=\"[generate] How many texts to generate.\",\n nargs='?', default=1, type=int)\n parser.add_argument(\n '--folder', help=\"[generate] Folder to save the generated files\",\n nargs='?', default=\"gen\", type=str)\n parser.add_argument(\n '--length', help=\"[generate] Length (tokens) of the generated texts\",\n nargs='?', default=1023, type=int)\n parser.add_argument(\n '--temperature', help=\"[generate] Temperature of the generated texts\",\n nargs='?', default=0.7, type=float)\n parser.add_argument(\n '--top_k', help=\"[generate] Sample only from top k tokens\",\n nargs='?', default=0, type=int)\n parser.add_argument(\n '--top_p', help=\"[generate] Sample from top p prob (overrides top_k if nonzero)\",\n nargs='?', default=0.0, type=float)\n parser.add_argument(\n '--batch_size', help=\"[generate] Batch size for generation (increase for GPUs)\",\n nargs='?', default=1, type=int)\n parser.add_argument(\n '--prefix', help=\"[generate] Prefix for generated texts\",\n nargs='?', default=None)\n parser.add_argument(\n '--truncate', help=\"[generate] Truncation for generated texts\",\n nargs='?', default=None)\n # https://stackoverflow.com/a/46951029\n parser.add_argument(\n '--include_prefix', help=\"[generate] Include prefix when truncating.\",\n nargs='?', default=True, type=lambda x: (str(x).lower() == 'true'))\n parser.add_argument(\n '--sample_delim', help=\"[generate] Delimiter between each generated sample.\",\n nargs='?', default='=' * 20 + '\\n', type=str)\n parser.add_argument(\n '--multi_gpu', 
help=\"[generate/finetune] Attempt to allocate multiple GPUs for running.\",\n nargs='?', default=True, type=lambda x: (str(x).lower() == 'true'))\n\n # Positional arguments\n parser.add_argument('mode', nargs='?')\n parser.add_argument('dataset', nargs='?')\n\n args = parser.parse_args()\n assert args.mode in ['finetune', 'generate'], \"Mode must be 'finetune' or 'generate'\"\n\n if args.mode == 'finetune':\n assert args.dataset is not None, \"You need to provide a dataset.\"\n\n cmd_finetune(dataset=args.dataset, run_name=args.run_name,\n checkpoint_dir=args.checkpoint_dir,\n model_name=args.model_name,\n model_dir=args.model_dir,\n steps=args.steps, restore_from=args.restore_from,\n sample_every=args.sample_every,\n save_every=args.save_every,\n print_every=args.print_every,\n optimizer=args.optimizer,\n overwrite=args.overwrite,\n multi_gpu=args.multi_gpu)\n if args.mode == \"generate\":\n cmd_generate(nfiles=args.nfiles, nsamples=args.nsamples,\n folder=args.folder, length=args.length,\n temperature=args.temperature, batch_size=args.batch_size,\n prefix=args.prefix, truncate=args.truncate,\n include_prefix=args.include_prefix,\n sample_delim=args.sample_delim, run_name=args.run_name,\n checkpoint_dir=args.checkpoint_dir,\n top_k=args.top_k, top_p=args.top_p, multi_gpu=args.multi_gpu)\n\n\ndef cmd_finetune(dataset, run_name, checkpoint_dir, model_name, model_dir, steps,\n restore_from, sample_every,\n save_every, print_every, optimizer, overwrite, multi_gpu):\n \"\"\"Wrapper script for finetuning the model via the CLI.\"\"\"\n\n if not is_gpt2_downloaded(model_dir=model_dir, model_name=model_name):\n download_gpt2(model_dir=model_dir, model_name=model_name)\n\n sess = start_tf_sess()\n finetune(sess, dataset=dataset, run_name=run_name,\n checkpoint_dir=checkpoint_dir,\n model_name=model_name,\n model_dir=model_dir,\n steps=steps, restore_from=restore_from,\n sample_every=sample_every, save_every=save_every,\n print_every=print_every,\n optimizer=optimizer,\n overwrite=overwrite,\n multi_gpu=multi_gpu)\n\n\ndef cmd_generate(nfiles, nsamples, folder,\n length, temperature, batch_size,\n prefix, truncate, include_prefix,\n sample_delim, run_name,\n checkpoint_dir,\n top_k, top_p, multi_gpu):\n \"\"\"Wrapper script for generating text via the CLI.\n The files are generated into a folder, which can be downloaded\n recursively by downloading the entire folder.\n \"\"\"\n\n sess = start_tf_sess()\n load_gpt2(sess, run_name=run_name, checkpoint_dir=checkpoint_dir, multi_gpu=multi_gpu)\n\n try:\n os.mkdir(folder)\n except:\n shutil.rmtree(folder)\n os.mkdir(folder)\n\n for _ in trange(nfiles):\n gen_file = os.path.join(folder,\n 'gpt2_gentext_{:%Y%m%d_%H%M%S}.txt'.format(datetime.utcnow()))\n\n generate_to_file(sess,\n run_name=run_name,\n checkpoint_dir=checkpoint_dir,\n destination_path=gen_file,\n length=length,\n temperature=temperature,\n nsamples=nsamples,\n batch_size=batch_size,\n prefix=prefix,\n truncate=truncate,\n include_prefix=include_prefix,\n sample_delim=sample_delim,\n top_k=top_k,\n top_p=top_p\n )\n"
]
| [
[
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.train.latest_checkpoint",
"tensorflow.compat.v1.train.AdamOptimizer",
"numpy.random.seed",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensorflow.gradients",
"tensorflow.compat.v1.Session",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.reset_default_graph",
"numpy.savez_compressed",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
]
]
|
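A minimal usage sketch for the functions defined in gpt_2.py above, assuming they are re-exported at package level as in the upstream gpt-2-simple project; the corpus file name and step count are placeholders.

import gpt_2_simple as gpt2

gpt2.download_gpt2(model_name='124M')          # fetch the base checkpoint files
sess = gpt2.start_tf_sess()
gpt2.finetune(sess, dataset='corpus.txt', model_name='124M', steps=100)
text = gpt2.generate(sess, return_as_list=True)[0]
print(text)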
madhukar-m-rao/deephyper | [
"d280701d9e4cae3e639be054bf1c5ef918d9a1a7"
]
| [
"deephyper/search/hps/automl/classifier/autosklearn1/run.py"
]
| [
"import inspect\nfrom inspect import signature\nfrom pprint import pprint\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nfrom deephyper.search.hps.automl.classifier.mapping import CLASSIFIERS\nfrom deephyper.search.nas.model.preprocessing import minmaxstdscaler\n\n\ndef run(config: dict, load_data: callable) -> float:\n \"\"\"Run function which can be used for AutoML classification.\n\n Args:\n config (dict): [description]\n load_data (callable): [description]\n\n Returns:\n float: [description]\n \"\"\"\n seed = 42\n config[\"random_state\"] = seed\n\n X, y = load_data()\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.33, random_state=seed\n )\n\n preproc = minmaxstdscaler()\n X_train = preproc.fit_transform(X_train)\n X_test = preproc.transform(X_test)\n\n mapping = CLASSIFIERS\n\n clf_class = mapping[config[\"classifier\"]]\n\n # keep parameters possible for the current classifier\n sig = signature(clf_class)\n clf_allowed_params = list(sig.parameters.keys())\n clf_params = {\n k: v\n for k, v in config.items()\n if k in clf_allowed_params and not (v in [\"nan\", \"NA\"])\n }\n\n if \"n_jobs\" in clf_allowed_params: # performance parameter\n clf_params[\"n_jobs\"] = 8\n\n try: # good practice to manage the fail value yourself...\n clf = clf_class(**clf_params)\n\n clf.fit(X_train, y_train)\n\n fit_is_complete = True\n except:\n fit_is_complete = False\n\n if fit_is_complete:\n y_pred = clf.predict(X_test)\n acc = accuracy_score(y_test, y_pred)\n else:\n acc = -1.0\n\n return acc\n"
]
| [
[
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score"
]
]
|
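The run() function above filters the hyperparameter config down to the arguments the chosen classifier actually accepts. A self-contained sketch of that inspect.signature trick; RandomForestClassifier and the config keys are used here only as examples.

from inspect import signature
from sklearn.ensemble import RandomForestClassifier

config = {"classifier": "RandomForest", "n_estimators": 50, "bogus_key": 1}
allowed = set(signature(RandomForestClassifier).parameters)
clf_params = {k: v for k, v in config.items() if k in allowed}
clf = RandomForestClassifier(**clf_params)   # 'classifier' and 'bogus_key' are dropped
print(clf.n_estimators)                      # 50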
MiVaVo/Antispoof-3d | [
"03ee614f4daf85069ce22c80cb6ed4642bdf762e"
]
| [
"src/icp/utils.py"
]
| [
"import copy\nimport functools\n\nimport numpy as np\nimport open3d as o3d\n\n\ndef points_to_pointcloud(func):\n @functools.wraps(func)\n def ensure_is_pointcloud(*args, **kwargs):\n if 'left_points_array' in kwargs.keys() and 'right_points_array' in kwargs.keys():\n if not isinstance(kwargs['left_points_array'], o3d.geometry.PointCloud):\n kwargs['left_points_array'] = xyz_to_pointcloud(kwargs['left_points_array'])\n kwargs['right_points_array'] = xyz_to_pointcloud(kwargs['right_points_array'])\n else:\n if isinstance(args[0], RigidICPRegistration):\n increment = 1\n else:\n increment = 0\n if not isinstance(args[0 + increment], o3d.geometry.PointCloud):\n args = list(args)\n args[0 + increment] = xyz_to_pointcloud(args[0 + increment])\n args[1 + increment] = xyz_to_pointcloud(args[1 + increment])\n args = tuple(args)\n return func(*args, **kwargs)\n\n return ensure_is_pointcloud\n\n\ndef xyz_to_pointcloud(*args):\n if len(args) == 3:\n array = np.asarray([args[0], args[1], args[2]]).T\n else:\n array = args[0]\n array = np.asarray(array)\n\n any_shape_is_3 = np.asarray(list(array.shape)) == 3\n if np.any(any_shape_is_3):\n array = array.T if any_shape_is_3[0] else array\n point_cloud_instance = o3d.geometry.PointCloud()\n point_cloud_instance.points = o3d.utility.Vector3dVector(array)\n return point_cloud_instance\n\n\nclass RigidICPRegistration():\n def __init__(self):\n self.threshold = 0.2\n self.trans_init = np.asarray([[1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.]])\n self.estimation = o3d.registration.TransformationEstimationPointToPoint(with_scaling=True)\n self.criteria = o3d.registration.ICPConvergenceCriteria(max_iteration=200)\n\n @points_to_pointcloud\n def register(self, left_points_array, right_points_array):\n print(np.asanyarray(left_points_array.points))\n print(np.asanyarray(right_points_array.points))\n\n self.reg_p2p = o3d.registration.registration_icp(\n left_points_array,\n right_points_array,\n self.threshold,\n self.trans_init,\n self.estimation,\n self.criteria)\n\n\n@points_to_pointcloud\ndef draw_registration_result(left_points_array, right_points_array, transformation):\n left_points_pointcloud = copy.deepcopy(left_points_array)\n right_points_pointcloud = copy.deepcopy(right_points_array)\n left_points_pointcloud.paint_uniform_color([1, 0.706, 0])\n right_points_pointcloud.paint_uniform_color([0, 0.651, 0.929])\n if transformation is not None:\n left_points_pointcloud.transform(transformation)\n o3d.visualization.draw_geometries([left_points_pointcloud, right_points_pointcloud], width=640, height=480)\n\n\ndef concatenate(**words):\n result = \"\"\n for arg in words.values():\n result += arg\n return result\n"
]
| [
[
"numpy.any",
"numpy.asarray",
"numpy.asanyarray"
]
]
|
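A short sketch of the numpy-to-open3d conversion that xyz_to_pointcloud() above performs, assuming open3d is installed; the random points are placeholders.

import numpy as np
import open3d as o3d

points = np.random.rand(100, 3)                  # N x 3 array of xyz coordinates
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
print(np.asarray(pcd.points).shape)              # (100, 3)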
upscale-project/hslink_phy | [
"741f78da673d2e633da05d292aa6645125ebae32"
]
| [
"DaVE/mLingua/examples_ncsim/serdes/ch_response/plot.py"
]
| [
"#! /usr/bin/env python \n\n\"\"\"\n    Plot the channel pulse response together with the TX pulse input\n    and save the figure to channel.eps\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pylab as plt\nimport matplotlib\n\nfont = {'size': 19}\nmatplotlib.rc('font', **font)\n\ndef main():\n    ts = 2.0e-9 # time start\n    te = 11e-9 # time end\n\n    # load verilog simulation data for the channel output and the TX output,\n    # then crop both traces to the [ts, te] window\n    data = np.loadtxt('ch_out.txt')\n    t = data[:,0]\n    y = data[:,1]\n    y = y[t>=ts]\n    t = t[t>=ts]\n    y = y[t<=te]\n    t = t[t<=te]\n    data = np.loadtxt('tx_out.txt')\n    t1 = data[:,0]\n    y1 = data[:,1]\n    y1 = y1[t1>=ts]\n    t1 = t1[t1>=ts]\n    y1 = y1[t1<=te]\n    t1 = t1[t1<=te]\n    t1 = np.array([ts]+list(t1)+[te])\n    y1 = np.array([y1[0]]+list(y1)+[y1[-1]])\n\n    # plot time, value pairs at which events occur\n    ax1 = plt.subplot(2,1,1)\n    plt.plot(t*1e9, y, 'o-r', markersize=5, label='channel output')\n    plt.ylabel('Channel output [V]')\n    plt.legend(loc=1)\n    ax1.set_xlim([ts*1e9,te*1e9])\n    plt.title('Pulse (200 psec wide) response')\n\n    ax2 = plt.subplot(2,1,2)\n    plt.plot(t1*1e9, y1, 'o-b', markersize=5)\n    plt.ylabel('Pulse input [V]')\n    plt.tight_layout()\n    ax2.set_xlim([ts*1e9,te*1e9])\n    ax2.set_ylim([-1.1,1.1])\n    plt.savefig('channel.eps')\n    plt.close()\n\n\nif __name__ == \"__main__\":\n    main()\n"
]
| [
[
"matplotlib.pylab.savefig",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.legend",
"matplotlib.pylab.close",
"matplotlib.rc",
"numpy.loadtxt",
"matplotlib.pylab.subplot",
"matplotlib.pylab.tight_layout",
"matplotlib.pylab.title",
"matplotlib.pylab.plot"
]
]
|
songquanpeng/BigGAN-PyTorch | [
"6988f1f3ccfa4f6794ce269f056422da4ce9baf6"
]
| [
"BigGAN.py"
]
| [
"import numpy as np\nimport math\nimport functools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn import Parameter as P\n\nimport layers\nfrom sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d\n\n\n# Architectures for G\n# Attention is passed in in the format '32_64' to mean applying an attention\n# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.\ndef G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):\n arch = {}\n arch[512] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2, 1]],\n 'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1, 1]],\n 'upsample' : [True] * 7,\n 'resolution' : [8, 16, 32, 64, 128, 256, 512],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,10)}}\n arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],\n 'upsample' : [True] * 6,\n 'resolution' : [8, 16, 32, 64, 128, 256],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,9)}}\n arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],\n 'upsample' : [True] * 5,\n 'resolution' : [8, 16, 32, 64, 128],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,8)}}\n arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2]],\n 'upsample' : [True] * 4,\n 'resolution' : [8, 16, 32, 64],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,7)}}\n arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],\n 'out_channels' : [ch * item for item in [4, 4, 4]],\n 'upsample' : [True] * 3,\n 'resolution' : [8, 16, 32],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,6)}}\n\n return arch\n\nclass Generator(nn.Module):\n def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,\n G_kernel_size=3, G_attn='64', n_classes=1000,\n num_G_SVs=1, num_G_SV_itrs=1,\n G_shared=True, shared_dim=0, hier=False,\n cross_replica=False, mybn=False,\n G_activation=nn.ReLU(inplace=False),\n G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,\n BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,\n G_init='ortho', skip_init=False, no_optim=False,\n G_param='SN', norm_style='bn',\n **kwargs):\n super(Generator, self).__init__()\n # Channel width mulitplier\n self.ch = G_ch\n # Dimensionality of the latent space\n self.dim_z = dim_z\n # The initial spatial dimensions\n self.bottom_width = bottom_width\n # Resolution of the output\n self.resolution = resolution\n # Kernel size?\n self.kernel_size = G_kernel_size\n # Attention?\n self.attention = G_attn\n # number of classes, for use in categorical conditional generation\n self.n_classes = n_classes\n # Use shared embeddings?\n self.G_shared = G_shared\n # Dimensionality of the shared embedding? 
Unused if not using G_shared\n self.shared_dim = shared_dim if shared_dim > 0 else dim_z\n # Hierarchical latent space?\n self.hier = hier\n # Cross replica batchnorm?\n self.cross_replica = cross_replica\n # Use my batchnorm?\n self.mybn = mybn\n # nonlinearity for residual blocks\n self.activation = G_activation\n # Initialization style\n self.init = G_init\n # Parameterization style\n self.G_param = G_param\n # Normalization style\n self.norm_style = norm_style\n # Epsilon for BatchNorm?\n self.BN_eps = BN_eps\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # fp16?\n self.fp16 = G_fp16\n # Architecture dict\n self.arch = G_arch(self.ch, self.attention)[resolution]\n\n # If using hierarchical latents, adjust z\n if self.hier:\n # Number of places z slots into\n self.num_slots = len(self.arch['in_channels']) + 1\n self.z_chunk_size = (self.dim_z // self.num_slots)\n # Recalculate latent dimensionality for even splitting into chunks\n self.dim_z = self.z_chunk_size * self.num_slots\n else:\n self.num_slots = 1\n self.z_chunk_size = 0\n\n # Which convs, batchnorms, and linear layers to use\n if self.G_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n else:\n self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)\n self.which_linear = nn.Linear\n \n # We use a non-spectral-normed embedding here regardless;\n # For some reason applying SN to G's embedding seems to randomly cripple G\n self.which_embedding = nn.Embedding\n bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared\n else self.which_embedding)\n self.which_bn = functools.partial(layers.ccbn,\n which_linear=bn_linear,\n cross_replica=self.cross_replica,\n mybn=self.mybn,\n input_size=(self.shared_dim + self.z_chunk_size if self.G_shared\n else self.n_classes),\n norm_style=self.norm_style,\n eps=self.BN_eps)\n\n\n # Prepare model\n # If not using shared embeddings, self.shared is just a passthrough\n self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared \n else layers.identity())\n # First linear layer\n self.linear = self.which_linear(self.dim_z // self.num_slots,\n self.arch['in_channels'][0] * (self.bottom_width **2))\n\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n # while the inner loop is over a given block\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n which_bn=self.which_bn,\n activation=self.activation,\n upsample=(functools.partial(F.interpolate, scale_factor=2)\n if self.arch['upsample'][index] else None))]]\n\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]\n\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n\n # output layer: batchnorm-relu-conv.\n # Consider using a non-spectral conv 
here\n self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],\n cross_replica=self.cross_replica,\n mybn=self.mybn),\n self.activation,\n self.which_conv(self.arch['out_channels'][-1], 3))\n\n # Initialize weights. Optionally skip init for testing.\n if not skip_init:\n self.init_weights()\n\n # Set up optimizer\n # If this is an EMA copy, no need for an optim, so just return now\n if no_optim:\n return\n self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps\n if G_mixed_precision:\n print('Using fp16 adam in G...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d) \n or isinstance(module, nn.Linear) \n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n print('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n print('Param count for G''s initialized parameters: %d' % self.param_count)\n\n # Note on this forward function: we pass in a y vector which has\n # already been passed through G.shared to enable easy class-wise\n # interpolation later. If we passed in the one-hot and then ran it through\n # G.shared in this forward function, it would be harder to handle.\n def forward(self, z, y):\n # If hierarchical, concatenate zs and ys\n if self.hier:\n zs = torch.split(z, self.z_chunk_size, 1)\n z = zs[0]\n ys = [torch.cat([y, item], 1) for item in zs[1:]]\n else:\n ys = [y] * len(self.blocks)\n \n # First linear layer\n h = self.linear(z)\n # Reshape\n h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)\n \n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n # Second inner loop in case block has multiple layers\n for block in blocklist:\n h = block(h, ys[index])\n \n # Apply batchnorm-relu-conv-tanh at output\n return torch.tanh(self.output_layer(h))\n\n\n# Discriminator architecture, same paradigm as G's above\ndef D_arch(ch=64, attention='64',ksize='333333', dilation='111111'):\n arch = {}\n arch[256] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 8, 16]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 8, 16, 16]],\n 'downsample' : [True] * 6 + [False],\n 'resolution' : [128, 64, 32, 16, 8, 4, 4 ],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[128] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 16]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 16, 16]],\n 'downsample' : [True] * 5 + [False],\n 'resolution' : [64, 32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[64] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 16]],\n 'downsample' : [True] * 4 + [False],\n 'resolution' : [32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) 
for item in attention.split('_')]\n for i in range(2,7)}}\n arch[32] = {'in_channels' : [3] + [item * ch for item in [4, 4, 4]],\n 'out_channels' : [item * ch for item in [4, 4, 4, 4]],\n 'downsample' : [True, True, False, False],\n 'resolution' : [16, 16, 16, 16],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,6)}}\n return arch\n\nclass Discriminator(nn.Module):\n\n def __init__(self, D_ch=64, D_wide=True, resolution=128,\n D_kernel_size=3, D_attn='64', n_classes=1000,\n num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),\n D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,\n SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,\n D_init='ortho', skip_init=False, D_param='SN', **kwargs):\n super(Discriminator, self).__init__()\n # Width multiplier\n self.ch = D_ch\n # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?\n self.D_wide = D_wide\n # Resolution\n self.resolution = resolution\n # Kernel size\n self.kernel_size = D_kernel_size\n # Attention?\n self.attention = D_attn\n # Number of classes\n self.n_classes = n_classes\n # Activation\n self.activation = D_activation\n # Initialization style\n self.init = D_init\n # Parameterization style\n self.D_param = D_param\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # Fp16?\n self.fp16 = D_fp16\n # Architecture\n self.arch = D_arch(self.ch, self.attention)[resolution]\n\n # Which convs, batchnorms, and linear layers to use\n # No option to turn off SN in D right now\n if self.D_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_embedding = functools.partial(layers.SNEmbedding,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n # Prepare model\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n wide=self.D_wide,\n activation=self.activation,\n preactivation=(index > 0),\n downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],\n self.which_conv)]\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n # Linear output layer. The output dimension is typically 1, but may be\n # larger if we're e.g. 
turning this into a VAE with an inference output\n self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)\n # Embedding for projection discrimination\n self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])\n\n # Initialize weights\n if not skip_init:\n self.init_weights()\n\n # Set up optimizer\n self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps\n if D_mixed_precision:\n print('Using fp16 adam in D...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d)\n or isinstance(module, nn.Linear)\n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n print('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n print('Param count for D''s initialized parameters: %d' % self.param_count)\n\n def forward(self, x, y=None):\n # Stick x into h for cleaner for loops without flow control\n h = x\n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n for block in blocklist:\n h = block(h)\n # Apply global sum pooling as in SN-GAN\n h = torch.sum(self.activation(h), [2, 3])\n # Get initial class-unconditional output\n out = self.linear(h)\n # Get projection of final featureset onto class vectors and add to evidence\n out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)\n return out\n\n# Parallelized G_D to minimize cross-gpu communication\n# Without this, Generator outputs would get all-gathered and then rebroadcast.\nclass G_D(nn.Module):\n def __init__(self, G, D):\n super(G_D, self).__init__()\n self.G = G\n self.D = D\n\n def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,\n split_D=False): \n # If training G, enable grad tape\n with torch.set_grad_enabled(train_G):\n # Get Generator output given noise\n G_z = self.G(z, self.G.shared(gy))\n # Cast as necessary\n if self.G.fp16 and not self.D.fp16:\n G_z = G_z.float()\n if self.D.fp16 and not self.G.fp16:\n G_z = G_z.half()\n # Split_D means to run D once with real data and once with fake,\n # rather than concatenating along the batch dimension.\n if split_D:\n D_fake = self.D(G_z, gy)\n if x is not None:\n D_real = self.D(x, dy)\n return D_fake, D_real\n else:\n if return_G_z:\n return D_fake, G_z\n else:\n return D_fake\n # If real data is provided, concatenate it with the Generator's output\n # along the batch dimension for improved efficiency.\n else:\n D_input = torch.cat([G_z, x], 0) if x is not None else G_z\n D_class = torch.cat([gy, dy], 0) if dy is not None else gy\n # Get Discriminator output\n D_out = self.D(D_input, D_class)\n if x is not None:\n return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real\n else:\n if return_G_z:\n return D_out, G_z\n else:\n return D_out\n"
]
| [
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.AvgPool2d",
"torch.split",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.init.normal_",
"torch.nn.init.orthogonal_",
"torch.set_grad_enabled"
]
]
|
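For reference, a self-contained illustration of the hierarchical-latent handling in Generator.forward() above: the noise vector is split into per-block chunks and each chunk is concatenated with the shared class embedding. The dimensions below are arbitrary examples, not BigGAN defaults.

import torch

dim_z, num_slots, shared_dim = 120, 6, 128
z = torch.randn(4, dim_z)                    # a batch of 4 latent vectors
y = torch.randn(4, shared_dim)               # shared class embeddings
z_chunk = dim_z // num_slots
zs = torch.split(z, z_chunk, dim=1)          # the first chunk feeds the linear layer
z0 = zs[0]
ys = [torch.cat([y, chunk], dim=1) for chunk in zs[1:]]   # conditioning per block
print(z0.shape, ys[0].shape)                 # torch.Size([4, 20]) torch.Size([4, 148])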
Koriban/Hummingbot-Demex | [
"0ed19e41285b41999eb4ea7c69b5c4a16722e00b"
]
| [
"test/test_pmm_take_if_cross.py"
]
| [
"#!/usr/bin/env python\n\nfrom os.path import join, realpath\nimport sys; sys.path.insert(0, realpath(join(__file__, \"../../\")))\n\nfrom typing import List\nfrom decimal import Decimal\nimport logging; logging.basicConfig(level=logging.ERROR)\nimport pandas as pd\nimport unittest\n\nfrom hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple\nfrom hummingsim.backtest.backtest_market import BacktestMarket\nfrom hummingsim.backtest.market import QuantizationParams\nfrom hummingsim.backtest.mock_order_book_loader import MockOrderBookLoader\nfrom hummingbot.core.clock import Clock, ClockMode\nfrom hummingbot.core.event.event_logger import EventLogger\nfrom hummingbot.core.event.events import (\n MarketEvent,\n OrderBookTradeEvent,\n TradeType\n)\nfrom hummingbot.strategy.pure_market_making.pure_market_making import PureMarketMakingStrategy\nfrom hummingbot.strategy.pure_market_making.order_book_asset_price_delegate import OrderBookAssetPriceDelegate\nfrom hummingbot.core.data_type.order_book import OrderBook\nfrom hummingbot.core.data_type.order_book_row import OrderBookRow\n\n\n# Update the orderbook so that the top bids and asks are lower than actual for a wider bid ask spread\n# this basially removes the orderbook entries above top bid and below top ask\ndef simulate_order_book_widening(order_book: OrderBook, top_bid: float, top_ask: float):\n bid_diffs: List[OrderBookRow] = []\n ask_diffs: List[OrderBookRow] = []\n update_id: int = order_book.last_diff_uid + 1\n for row in order_book.bid_entries():\n if row.price > top_bid:\n bid_diffs.append(OrderBookRow(row.price, 0, update_id))\n else:\n break\n for row in order_book.ask_entries():\n if row.price < top_ask:\n ask_diffs.append(OrderBookRow(row.price, 0, update_id))\n else:\n break\n order_book.apply_diffs(bid_diffs, ask_diffs, update_id)\n\n\nclass PureMMTakeIfCrossUnitTest(unittest.TestCase):\n start: pd.Timestamp = pd.Timestamp(\"2019-01-01\", tz=\"UTC\")\n end: pd.Timestamp = pd.Timestamp(\"2019-01-01 01:00:00\", tz=\"UTC\")\n start_timestamp: float = start.timestamp()\n end_timestamp: float = end.timestamp()\n trading_pair = \"HBOT-ETH\"\n base_asset = trading_pair.split(\"-\")[0]\n quote_asset = trading_pair.split(\"-\")[1]\n\n def setUp(self):\n self.clock_tick_size = 1\n self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp)\n self.market: BacktestMarket = BacktestMarket()\n self.book_data: MockOrderBookLoader = MockOrderBookLoader(self.trading_pair, self.base_asset, self.quote_asset)\n self.mid_price = 100\n self.bid_spread = 0.01\n self.ask_spread = 0.01\n self.order_refresh_time = 30\n self.book_data.set_balanced_order_book(mid_price=self.mid_price,\n min_price=1,\n max_price=200,\n price_step_size=1,\n volume_step_size=10)\n self.market.add_data(self.book_data)\n self.market.set_balance(\"HBOT\", 500)\n self.market.set_balance(\"ETH\", 5000)\n self.market.set_quantization_param(\n QuantizationParams(\n self.trading_pair, 6, 6, 6, 6\n )\n )\n self.market_info = MarketTradingPairTuple(self.market, self.trading_pair,\n self.base_asset, self.quote_asset)\n self.clock.add_iterator(self.market)\n self.order_fill_logger: EventLogger = EventLogger()\n self.cancel_order_logger: EventLogger = EventLogger()\n self.market.add_listener(MarketEvent.OrderFilled, self.order_fill_logger)\n self.market.add_listener(MarketEvent.OrderCancelled, self.cancel_order_logger)\n\n self.ext_market: BacktestMarket = BacktestMarket()\n self.ext_data: MockOrderBookLoader = 
MockOrderBookLoader(self.trading_pair, self.base_asset, self.quote_asset)\n self.ext_market_info: MarketTradingPairTuple = MarketTradingPairTuple(\n self.ext_market, self.trading_pair, self.base_asset, self.quote_asset\n )\n self.ext_data.set_balanced_order_book(mid_price=100, min_price=1, max_price=400, price_step_size=1,\n volume_step_size=100)\n self.ext_market.add_data(self.ext_data)\n self.order_book_asset_del = OrderBookAssetPriceDelegate(self.ext_market, self.trading_pair)\n\n self.one_level_strategy = PureMarketMakingStrategy(\n self.market_info,\n bid_spread=Decimal(\"0.01\"),\n ask_spread=Decimal(\"0.01\"),\n order_amount=Decimal(\"1\"),\n order_refresh_time=3.0,\n filled_order_delay=3.0,\n order_refresh_tolerance_pct=-1,\n minimum_spread=-1,\n asset_price_delegate=self.order_book_asset_del,\n take_if_crossed=True\n )\n\n def simulate_maker_market_trade(self, is_buy: bool, quantity: Decimal, price: Decimal):\n order_book = self.market.get_order_book(self.trading_pair)\n trade_event = OrderBookTradeEvent(\n self.trading_pair,\n self.clock.current_timestamp,\n TradeType.BUY if is_buy else TradeType.SELL,\n price,\n quantity\n )\n order_book.apply_trade(trade_event)\n\n def test_strategy_take_if_crossed_bid_order(self):\n simulate_order_book_widening(self.ext_market.get_order_book(self.trading_pair), 120.0, 130.0)\n self.strategy = self.one_level_strategy\n self.clock.add_iterator(self.strategy)\n self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)\n self.assertEqual(0, len(self.order_fill_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_buys))\n self.assertEqual(1, len(self.strategy.active_sells))\n\n self.clock.backtest_til(\n self.start_timestamp + 2 * self.clock_tick_size\n )\n self.assertEqual(1, len(self.order_fill_logger.event_log))\n self.assertEqual(0, len(self.strategy.active_buys))\n self.assertEqual(1, len(self.strategy.active_sells))\n\n self.clock.backtest_til(\n self.start_timestamp + 7 * self.clock_tick_size\n )\n self.assertEqual(2, len(self.order_fill_logger.event_log))\n self.assertEqual(0, len(self.strategy.active_buys))\n self.assertEqual(1, len(self.strategy.active_sells))\n\n self.clock.backtest_til(\n self.start_timestamp + 10 * self.clock_tick_size\n )\n self.assertEqual(3, len(self.order_fill_logger.event_log))\n self.assertEqual(0, len(self.strategy.active_buys))\n self.assertEqual(1, len(self.strategy.active_sells))\n self.order_fill_logger.clear()\n\n def test_strategy_take_if_crossed_ask_order(self):\n simulate_order_book_widening(self.ext_market.get_order_book(self.trading_pair), 80.0, 90.0)\n self.strategy = self.one_level_strategy\n self.clock.add_iterator(self.strategy)\n\n self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)\n self.assertEqual(0, len(self.order_fill_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_buys))\n self.assertEqual(1, len(self.strategy.active_sells))\n\n self.clock.backtest_til(\n self.start_timestamp + 2 * self.clock_tick_size\n )\n self.assertEqual(1, len(self.order_fill_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_buys))\n self.assertEqual(0, len(self.strategy.active_sells))\n\n self.clock.backtest_til(\n self.start_timestamp + 6 * self.clock_tick_size\n )\n self.assertEqual(2, len(self.order_fill_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_buys))\n self.assertEqual(0, len(self.strategy.active_sells))\n\n self.clock.backtest_til(\n self.start_timestamp + 10 * self.clock_tick_size\n )\n self.assertEqual(3, 
len(self.order_fill_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_buys))\n self.assertEqual(0, len(self.strategy.active_sells))\n self.order_fill_logger.clear()\n"
]
| [
[
"pandas.Timestamp"
]
]
|
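The record above tags `pandas.Timestamp` as the only external API call extracted from the Hummingbot backtest file. As a minimal, self-contained sketch (not taken from that repository), this is the pattern the test class relies on: building timezone-aware timestamps and converting them to epoch floats for a simulated clock window.

```python
import pandas as pd

# Build timezone-aware timestamps for a one-hour backtest window.
start: pd.Timestamp = pd.Timestamp("2019-01-01", tz="UTC")
end: pd.Timestamp = pd.Timestamp("2019-01-01 01:00:00", tz="UTC")

# .timestamp() returns seconds since the Unix epoch as a float,
# which is the form a simulated clock typically consumes.
start_ts: float = start.timestamp()
end_ts: float = end.timestamp()

print(start_ts, end_ts, end_ts - start_ts)  # the difference is 3600.0 seconds
```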
changyu98/GoogLeNet-PyTorch | [
"a2fae2b8b14e830a3f64c81bc4e62dadb6cfe5b7"
]
| [
"examples/simple/test.py"
]
| [
"# Copyright 2020 Lorna Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Example\nIn this simple example, we load an image, pre-process it, and classify it with a pretrained GoogLeNet.\n\"\"\"\n\nimport json\n\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom googlenet import GoogLeNet\n\nimage_size = 224\n\n# Open image\nimg = Image.open('panda.jpg')\n\n# Preprocess image\ntfms = transforms.Compose([transforms.Resize(image_size), transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ])\nimg = tfms(img).unsqueeze(0)\n\n# Load class names\nlabels_map = json.load(open('labels_map.txt'))\nlabels_map = [labels_map[str(i)] for i in range(1000)]\n\n# Classify with AlexNet\nprint(\"=> loading checkpoint 'googlenet'.\")\nmodel = GoogLeNet.from_pretrained('googlenet')\nprint(\"=> loaded checkpoint 'googlenet'.\")\nmodel.eval()\nwith torch.no_grad():\n logits = model(img)\npreds = torch.topk(logits, k=5).indices.squeeze(0).tolist()\n\nprint('-----')\nfor idx in preds:\n label = labels_map[idx]\n prob = torch.softmax(logits, dim=1)[0, idx].item()\n print('{:<75} ({:.2f}%)'.format(label, prob * 100))\n"
]
| [
[
"torch.no_grad",
"torch.softmax",
"torch.topk"
]
]
|
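The GoogLeNet record above lists `torch.no_grad`, `torch.softmax`, and `torch.topk`. Below is a hedged sketch of that inference pattern; a throwaway linear layer stands in for the pretrained GoogLeNet so the snippet stays self-contained and runnable.

```python
import torch

# Stand-in "classifier": logits over 1000 classes for a batch of one input.
# In the record above these logits come from GoogLeNet.from_pretrained('googlenet').
model = torch.nn.Linear(224 * 224 * 3, 1000)
img = torch.randn(1, 224 * 224 * 3)

with torch.no_grad():                 # disable autograd bookkeeping for inference
    logits = model(img)

probs = torch.softmax(logits, dim=1)  # convert logits to class probabilities
top5 = torch.topk(logits, k=5)        # indices and values of the 5 largest logits

for idx in top5.indices.squeeze(0).tolist():
    print(f"class {idx}: {probs[0, idx].item() * 100:.2f}%")
```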
polltooh/FineGrainedAction | [
"4582b4179e643119448c7c20ab06044fb211163e"
]
| [
"nn/test_nn_fc7.py"
]
| [
"import tensorflow as tf\nfrom bvlc_alexnet_fc7 import AlexNet\nimport nt\nimport numpy as np\nimport utility_function as uf\nimport os\nimport time\nimport cv2\nimport image_io\nimport sys\nimport math\n# the dimension of the final layer = feature dim\nNN_DIM = 100\n\n# TEST_TXT = 'file_list_test_nba_dunk_fc7.txt'\nTEST_TXT = 'file_list_train_fc7.txt'\n# TEST_TXT = 'file_list_train.txt'\nRES_TXT = 'test_res_nba_dunk_fc7.txt'\nTRAIN = False\nSHUFFLE_DATA = False\nBATCH_SIZE = 50\nFEATURE_ROW = 227\nFEATURE_COL = 227\nLABEL_DIM = 27\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('train_log_dir','logs',\n '''directory wherer to write event logs''')\n\n# will be changed in the program\ntf.app.flags.DEFINE_integer('max_training_iter', 1000,'''the max number of training iteration''')\ntf.app.flags.DEFINE_float('init_learning_rate',0.001,'''initial learning rate''')\ntf.app.flags.DEFINE_string('model_dir', 'model_logs_fc7','''directory where to save the model''')\n\ndef write_to_file(name_list, value):\n with open(RES_TXT, \"w\") as f:\n for i in range(len(name_list)):\n f.write(name_list[i].replace(\".fc7\", \".jpg\"))\n f.write(\" \")\n f.write(str(value[i]))\n f.write(\"\\n\")\n\ndef calculate_iter():\n with open(TEST_TXT, 'r') as f:\n s = f.read()\n s_l = s.split('\\n')\n total_num = len(s_l)\n \n FLAGS.max_training_iter = int(total_num / BATCH_SIZE) + 1\n print(FLAGS.max_training_iter)\n\ndef filequeue_to_batch_data(filename_queue, line_reader, batch_size = BATCH_SIZE):\n \n key, next_line = line_reader.read(filename_queue)\n query_image_name, retrieve_image_name, label = tf.decode_csv(\n next_line, [tf.constant([], dtype=tf.string), tf.constant([], dtype=tf.string),\n tf.constant([], dtype = tf.int32)], field_delim=\" \")\n \n reverse_channel = True # for pre-trained purpose\n\n query_tensor = uf.read_binary(query_image_name, 4096)\n retrieve_tensor = uf.read_binary(retrieve_image_name, 4096)\n\n batch_query_image, batch_retrieve_image, batch_label, batch_retrieve_image_name = tf.train.batch([\n query_tensor, retrieve_tensor, label, retrieve_image_name], batch_size=batch_size)\n \n batch_image = tf.concat(0,[batch_query_image, batch_retrieve_image])\n\n return batch_image, batch_label, batch_retrieve_image_name\n\n\ndef train():\n calculate_iter()\n\n train_filenamequeue=tf.train.string_input_producer([TEST_TXT], shuffle=SHUFFLE_DATA)\n\n line_reader = tf.TextLineReader()\n batch_image, batch_label, batch_image_name = filequeue_to_batch_data(train_filenamequeue, line_reader)\n\n global_step = tf.Variable(0, name = 'global_step', trainable = False)\n image_data_ph = tf.placeholder(tf.float32, shape = (2 * BATCH_SIZE, 4096))\n label_ph = tf.placeholder(tf.int32, shape = (BATCH_SIZE))\n\n # net = AlexNet({'data':image_data_ph})\n\n infer = nt.inference3(image_data_ph, NN_DIM)\n\n eva = nt.evaluation(infer, BATCH_SIZE)\n\n saver = tf.train.Saver()\n\n sess = tf.Session()\n\n init_op = tf.initialize_all_variables()\n sess.run(init_op)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord = coord, sess = sess)\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)\n print(ckpt.all_model_checkpoint_paths[-1])\n\n if ckpt and ckpt.all_model_checkpoint_paths[-1]:\n saver.restore(sess, ckpt.all_model_checkpoint_paths[-1])\n else:\n print('no check point, start from begining')\n\n name_list = list()\n dist_list = list()\n for i in xrange(FLAGS.max_training_iter):\n batch_image_v, batch_label_v , batch_image_name_v = sess.run([\n batch_image, batch_label, 
batch_image_name])\n feed_data = {image_data_ph: batch_image_v}\n eva_v = sess.run(eva, feed_dict = feed_data)\n name_list = name_list + batch_image_name_v.tolist()\n dist_list = dist_list + eva_v.tolist()\n if i % 100 == 0:\n print(\"i:%d\"%(i))\n\n write_to_file(name_list, dist_list)\n\ndef main(argv = None):\n train()\n\nif __name__ == '__main__':\n if (len(sys.argv) >= 2):\n global TEST_TXT\n TEST_TXT = sys.argv[1]\n if (len(sys.argv) > 2):\n global RES_TXT\n RES_TXT = sys.argv[2]\n\n tf.app.run()\n"
]
| [
[
"tensorflow.train.start_queue_runners",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.concat",
"tensorflow.train.batch",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.TextLineReader",
"tensorflow.initialize_all_variables",
"tensorflow.train.Coordinator",
"tensorflow.Session",
"tensorflow.Variable",
"tensorflow.train.Saver",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.string_input_producer",
"tensorflow.constant",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.placeholder",
"tensorflow.app.run"
]
]
|
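The record above is TensorFlow 1.x queue-runner code, and most of the tagged APIs (`tf.train.string_input_producer`, `tf.train.batch`, `tf.train.Coordinator`, and friends) were removed in TensorFlow 2.x. The sketch below covers only the graph-and-session subset of those tags (`tf.placeholder`, `tf.Variable`, `tf.Session`, `tf.initialize_all_variables`, `tf.train.Saver`) and assumes a TensorFlow 1.x installation; it is an illustration, not a reduction of the file itself.

```python
# Assumes TensorFlow 1.x; these symbols do not exist under TF 2.x eager mode.
import tensorflow as tf

x_ph = tf.placeholder(tf.float32, shape=(None, 4096))        # fc7-style feature input
w = tf.Variable(tf.zeros([4096, 100]), name="weights")       # projection to a 100-d embedding
y = tf.matmul(x_ph, w)

saver = tf.train.Saver()                                      # would save/restore 'w'
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())                   # old alias of global_variables_initializer
    out = sess.run(y, feed_dict={x_ph: [[0.0] * 4096]})
    print(out.shape)                                          # (1, 100)
```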
soham97/Predictive-Threat-Intelligence | [
"fa0bfbea905c9179aa67791d26f1f219e59a1b32"
]
| [
"codes/kafka/flask-server/deploy_functions.py"
]
| [
"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom tqdm import tqdm_notebook as tqdm\n\nenc = LabelEncoder()\nenc.fit(['cowrie.session.connect', 'cowrie.client.version',\n 'cowrie.login.success', 'cowrie.session.closed',\n 'cowrie.login.failed', 'cowrie.log.closed',\n 'cowrie.direct-tcpip.request', 'cowrie.log.open',\n 'cowrie.command.input/other', 'cowrie.command.success',\n 'cowrie.command.failed', 'cowrie.command.input/delete',\n 'cowrie.command.input/dir_sudo', 'cowrie.command.input/write',\n 'cowrie.direct-tcpip.data', 'cowrie.client.size',\n 'cowrie.session.file_download', 'cowrie.command.input/system',\n 'cowrie.session.input'])\n\ndef array_to_df(d):\n k = pd.DataFrame(d)\n return k \n\ndef find_inp_type(eventid, inp):\n if eventid != \"cowrie.command.input\":\n return eventid\n if inp.find(\"rm\")!=-1:\n return \"cowrie.command.input/delete\"\n if inp.find(\"nano\")!=-1 or inp.find(\"gedit\")!=-1 or inp.find(\"cp\")!=-1 or inp.find(\"mv\")!=-1 or inp.find(\"mkdir\")!=-1:\n return \"cowrie.command.input/write\"\n if inp.find(\"sudo\")!=-1 or inp.find(\"cd\")!=-1 or inp.find(\"pwd\")!=-1 or inp.find(\"ls\")!=-1:\n return \"cowrie.command.input/dir_sudo\"\n if inp.find(\"free\")!=-1 or inp.find(\"uname\")!=-1 or inp.find(\"history\")!=-1 or inp.find(\"df\")!=-1 or inp.find(\"du\")!=-1 or inp.find(\"top\")!=-1 or inp.find(\"lsb_release\")!=-1:\n return \"cowrie.command.input/system\"\n if inp.find(\"adduser\")!=-1 or inp.find(\"passwd\")!=-1:\n return \"cowrie.command.input/adduser\"\n return \"cowrie.command.input/other\"\n\ndef proc_data(df):\n df['eventid'] = df.apply(lambda x: find_inp_type(x['eventid'], x['input']), axis=1)\n df['eventid'] = enc.transform(df['eventid'])\n df_list = df.groupby('session', as_index=False).agg(lambda x: x.tolist())\n a = df_list[['session', 'eventid']]\n del df_list,df\n seq = []\n for i in tqdm(a.index):\n i_seq = []\n for j in a.loc[i, 'eventid']:\n i_seq.append(j)\n seq.append(i_seq)\n return seq\n\ndef predict_seq_probability(mod, seq, length):\n if(len(seq) <= length):\n return mod.likelihood(seq)\n else:\n return mod.likelihood(seq[len(seq)-length:])\n \ndef predict_next_state(mod, seq, length):\n prob = []\n for i in range(1,19):\n seq.append(i)\n if(len(seq) <= length):\n prob.append(mod.likelihood(seq))\n else:\n prob.append(mod.likelihood(seq[len(seq)-length:]))\n seq = seq[:-1]\n return np.argmax(prob,axis=0) + 1\n\ndef mc_predict_seq_probability(mod, seq):\n prob = 1\n for i in range(0, len(seq)-1):\n state = seq[i]\n next_state = seq[i+1]\n prob *= mod.transition[state][next_state]\n return prob\n\ndef mc_predict_next_state(mod, seq):\n state = seq[-1]\n# if np.max(mod.transition[state]) == 0:\n# return None\n return np.argmax(mod.transition[state])"
]
| [
[
"pandas.DataFrame",
"sklearn.preprocessing.LabelEncoder",
"numpy.argmax"
]
]
|
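The record above tags `pandas.DataFrame`, `sklearn.preprocessing.LabelEncoder`, and `numpy.argmax`. A small self-contained sketch of that combination follows; the three Cowrie event names are a shortened stand-in for the full label list in the file, and the score vector is invented for illustration.

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Encode honeypot event names as integer states, as the deploy script does.
enc = LabelEncoder()
enc.fit(["cowrie.session.connect", "cowrie.login.failed", "cowrie.session.closed"])

df = pd.DataFrame({
    "session": ["s1", "s1", "s2"],
    "eventid": ["cowrie.session.connect", "cowrie.login.failed", "cowrie.session.closed"],
})
df["eventid"] = enc.transform(df["eventid"])   # replace names with encoded states

# Pick the most likely "next state" from a vector of per-state scores.
scores = np.array([0.1, 0.7, 0.2])
print(df)
print("predicted next state:", np.argmax(scores))
```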
Hunter8moon/h8m2 | [
"9cb2ced9d701650258f5e4d14e6036a3b56b0b96"
]
| [
"source/util/image_util.py"
]
| [
"import os\nfrom random import randint, random\n\nimport numpy as np\nfrom PIL import Image\n\n\nclass ImageUtil:\n @staticmethod\n def file_to_array(file_path, width, height, augment=True):\n \"\"\"\n Loads a image from disk and returns an np.array pf that image.\n\n :param file_path: Path to the image to load.\n :param width: Width to resize the image to.\n :param height: Height to resize the image to.\n :param augment: Wether to randomly crop the image or not.\n :return: An np.array of the image.\n \"\"\"\n\n im = Image.open(file_path)\n im = im.convert('RGB')\n\n if augment:\n im = ImageUtil.augment(im, height, width)\n\n if im.size != (width, height):\n im = im.resize((width, height))\n\n img = np.asarray(im, dtype=np.uint8)\n img = img / 127.5 - 1.\n return img\n\n @staticmethod\n def augment(im, height, width):\n x_add = randint(0, 30)\n y_add = randint(0, 30)\n x = randint(0, x_add)\n y = randint(0, y_add)\n\n # TODO flipping?\n if random() >= 0.5:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n\n im = im.resize((width + x_add, height + y_add))\n im = im.crop((x, y, x + width, y + height))\n return im\n\n @staticmethod\n def array_to_image(image_array):\n \"\"\"\n Converts an np.array to a PIL Image.\n\n :param image_array: np.array of the image.\n :return: An PIL image of the array.\n \"\"\"\n img = image_array\n img = img * 127.5 + 127.5\n img = img.astype(np.uint8)\n return Image.fromarray(img)\n\n @staticmethod\n def save(image, out_dir, filename):\n \"\"\"\n Saves the image in .png format.\n :param image: The PIL image to save.\n :param out_dir: The directory to save to. Will be created if it does not exist.\n :param filename: The filename of the image.\n \"\"\"\n\n directory = f\"{out_dir}/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n image.save(f\"{directory}{filename}.png\")\n\n @staticmethod\n def make_snapshot_image(images, width, height):\n \"\"\"\n Lays a 2d array of images out in a grid.\n\n :param images: The 2d array of images.\n :param width: The width of the images\n :param height: The height of the images.\n :return: An PIL image of the images layed out in a grid.\n \"\"\"\n\n n_r = len(images)\n n_c = len(images[0])\n snapshot = Image.new('RGB', (width * n_c, height * n_r))\n\n r = 0\n for imgs in images:\n c = 0\n for img in imgs:\n img = ImageUtil.array_to_image(img[0])\n snapshot.paste(img, (width * c, height * r))\n c += 1\n r += 1\n\n return snapshot\n"
]
| [
[
"numpy.asarray"
]
]
|
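The image-utility record above tags only `numpy.asarray`; the detail worth seeing around it is the scaling onto roughly [-1, 1] before the array is fed to a network. A minimal sketch, with a tiny synthetic pixel grid standing in for the decoded PIL image:

```python
import numpy as np

# Stand-in for a decoded RGB image (height x width x channels, values 0-255).
pixels = [[[0, 127, 255]] * 4] * 4

img = np.asarray(pixels, dtype=np.uint8)   # nested lists -> ndarray, as in ImageUtil.file_to_array
scaled = img / 127.5 - 1.0                 # map [0, 255] onto [-1, 1] for the network

print(img.shape, scaled.dtype)             # (4, 4, 3) float64
print(scaled.min(), scaled.max())          # -1.0 ... 1.0
```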
jawad26/numpy | [
"07447fd215ebffbce2f4e516ef02629e91fca6b0"
]
| [
"numpy/ma/core.py"
]
| [
"\"\"\"\nnumpy.ma : a package to handle missing or invalid values.\n\nThis package was initially written for numarray by Paul F. Dubois\nat Lawrence Livermore National Laboratory.\nIn 2006, the package was completely rewritten by Pierre Gerard-Marchant\n(University of Georgia) to make the MaskedArray class a subclass of ndarray,\nand to improve support of structured arrays.\n\n\nCopyright 1999, 2000, 2001 Regents of the University of California.\nReleased for unlimited redistribution.\n\n* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.\n* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant\n (pgmdevlist_AT_gmail_DOT_com)\n* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)\n\n.. moduleauthor:: Pierre Gerard-Marchant\n\n\"\"\"\n# pylint: disable-msg=E1002\nimport builtins\nimport inspect\nimport operator\nimport warnings\nimport textwrap\nimport re\nfrom functools import reduce\n\nimport numpy as np\nimport numpy.core.umath as umath\nimport numpy.core.numerictypes as ntypes\nfrom numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue\nfrom numpy import array as narray\nfrom numpy.lib.function_base import angle\nfrom numpy.compat import (\n getargspec, formatargspec, long, unicode, bytes\n )\nfrom numpy import expand_dims\nfrom numpy.core.numeric import normalize_axis_tuple\n\n\n__all__ = [\n 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',\n 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',\n 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos',\n 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',\n 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray',\n 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil',\n 'choose', 'clip', 'common_fill_value', 'compress', 'compressed',\n 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',\n 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',\n 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp',\n 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',\n 'flatten_structured_array', 'floor', 'floor_divide', 'fmod',\n 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',\n 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',\n 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',\n 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',\n 'less', 'less_equal', 'log', 'log10', 'log2',\n 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',\n 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',\n 'masked_array', 'masked_equal', 'masked_greater',\n 'masked_greater_equal', 'masked_inside', 'masked_invalid',\n 'masked_less', 'masked_less_equal', 'masked_not_equal',\n 'masked_object', 'masked_outside', 'masked_print_option',\n 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',\n 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',\n 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',\n 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod',\n 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',\n 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',\n 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',\n 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',\n 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',\n 'var', 'where', 'zeros', 'zeros_like',\n 
]\n\nMaskType = np.bool_\nnomask = MaskType(0)\n\nclass MaskedArrayFutureWarning(FutureWarning):\n pass\n\ndef _deprecate_argsort_axis(arr):\n \"\"\"\n Adjust the axis passed to argsort, warning if necessary\n\n Parameters\n ----------\n arr\n The array which argsort was called on\n\n np.ma.argsort has a long-term bug where the default of the axis argument\n is wrong (gh-8701), which now must be kept for backwards compatibility.\n Thankfully, this only makes a difference when arrays are 2- or more-\n dimensional, so we only need a warning then.\n \"\"\"\n if arr.ndim <= 1:\n # no warning needed - but switch to -1 anyway, to avoid surprising\n # subclasses, which are more likely to implement scalar axes.\n return -1\n else:\n # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default\n warnings.warn(\n \"In the future the default for argsort will be axis=-1, not the \"\n \"current None, to match its documentation and np.argsort. \"\n \"Explicitly pass -1 or None to silence this warning.\",\n MaskedArrayFutureWarning, stacklevel=3)\n return None\n\n\ndef doc_note(initialdoc, note):\n \"\"\"\n Adds a Notes section to an existing docstring.\n\n \"\"\"\n if initialdoc is None:\n return\n if note is None:\n return initialdoc\n\n notesplit = re.split(r'\\n\\s*?Notes\\n\\s*?-----', inspect.cleandoc(initialdoc))\n notedoc = \"\\n\\nNotes\\n-----\\n%s\\n\" % inspect.cleandoc(note)\n\n return ''.join(notesplit[:1] + [notedoc] + notesplit[1:])\n\n\ndef get_object_signature(obj):\n \"\"\"\n Get the signature from obj\n\n \"\"\"\n try:\n sig = formatargspec(*getargspec(obj))\n except TypeError:\n sig = ''\n return sig\n\n\n###############################################################################\n# Exceptions #\n###############################################################################\n\n\nclass MAError(Exception):\n \"\"\"\n Class for masked array related errors.\n\n \"\"\"\n pass\n\n\nclass MaskError(MAError):\n \"\"\"\n Class for mask related errors.\n\n \"\"\"\n pass\n\n\n###############################################################################\n# Filling options #\n###############################################################################\n\n\n# b: boolean - c: complex - f: floats - i: integer - O: object - S: string\ndefault_filler = {'b': True,\n 'c': 1.e20 + 0.0j,\n 'f': 1.e20,\n 'i': 999999,\n 'O': '?',\n 'S': b'N/A',\n 'u': 999999,\n 'V': b'???',\n 'U': u'N/A'\n }\n\n# Add datetime64 and timedelta64 types\nfor v in [\"Y\", \"M\", \"W\", \"D\", \"h\", \"m\", \"s\", \"ms\", \"us\", \"ns\", \"ps\",\n \"fs\", \"as\"]:\n default_filler[\"M8[\" + v + \"]\"] = np.datetime64(\"NaT\", v)\n default_filler[\"m8[\" + v + \"]\"] = np.timedelta64(\"NaT\", v)\n\nfloat_types_list = [np.half, np.single, np.double, np.longdouble,\n np.csingle, np.cdouble, np.clongdouble]\nmax_filler = ntypes._minvals\nmax_filler.update([(k, -np.inf) for k in float_types_list[:4]])\nmax_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]])\n\nmin_filler = ntypes._maxvals\nmin_filler.update([(k, +np.inf) for k in float_types_list[:4]])\nmin_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]])\n\ndel float_types_list\n\ndef _recursive_fill_value(dtype, f):\n \"\"\"\n Recursively produce a fill value for `dtype`, calling f on scalar dtypes\n \"\"\"\n if dtype.names is not None:\n vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names)\n return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d\n elif dtype.subdtype:\n subtype, 
shape = dtype.subdtype\n subval = _recursive_fill_value(subtype, f)\n return np.full(shape, subval)\n else:\n return f(dtype)\n\n\ndef _get_dtype_of(obj):\n \"\"\" Convert the argument for *_fill_value into a dtype \"\"\"\n if isinstance(obj, np.dtype):\n return obj\n elif hasattr(obj, 'dtype'):\n return obj.dtype\n else:\n return np.asanyarray(obj).dtype\n\n\ndef default_fill_value(obj):\n \"\"\"\n Return the default fill value for the argument object.\n\n The default filling value depends on the datatype of the input\n array or the type of the input scalar:\n\n ======== ========\n datatype default\n ======== ========\n bool True\n int 999999\n float 1.e20\n complex 1.e20+0j\n object '?'\n string 'N/A'\n ======== ========\n\n For structured types, a structured scalar is returned, with each field the\n default fill value for its type.\n\n For subarray types, the fill value is an array of the same size containing\n the default scalar fill value.\n\n Parameters\n ----------\n obj : ndarray, dtype or scalar\n The array data-type or scalar for which the default fill value\n is returned.\n\n Returns\n -------\n fill_value : scalar\n The default fill value.\n\n Examples\n --------\n >>> np.ma.default_fill_value(1)\n 999999\n >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))\n 1e+20\n >>> np.ma.default_fill_value(np.dtype(complex))\n (1e+20+0j)\n\n \"\"\"\n def _scalar_fill_value(dtype):\n if dtype.kind in 'Mm':\n return default_filler.get(dtype.str[1:], '?')\n else:\n return default_filler.get(dtype.kind, '?')\n\n dtype = _get_dtype_of(obj)\n return _recursive_fill_value(dtype, _scalar_fill_value)\n\n\ndef _extremum_fill_value(obj, extremum, extremum_name):\n\n def _scalar_fill_value(dtype):\n try:\n return extremum[dtype]\n except KeyError as e:\n raise TypeError(\n f\"Unsuitable type {dtype} for calculating {extremum_name}.\"\n ) from None\n\n dtype = _get_dtype_of(obj)\n return _recursive_fill_value(dtype, _scalar_fill_value)\n\n\ndef minimum_fill_value(obj):\n \"\"\"\n Return the maximum value that can be represented by the dtype of an object.\n\n This function is useful for calculating a fill value suitable for\n taking the minimum of an array with a given dtype.\n\n Parameters\n ----------\n obj : ndarray, dtype or scalar\n An object that can be queried for it's numeric type.\n\n Returns\n -------\n val : scalar\n The maximum representable value.\n\n Raises\n ------\n TypeError\n If `obj` isn't a suitable numeric type.\n\n See Also\n --------\n maximum_fill_value : The inverse function.\n set_fill_value : Set the filling value of a masked array.\n MaskedArray.fill_value : Return current fill value.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.int8()\n >>> ma.minimum_fill_value(a)\n 127\n >>> a = np.int32()\n >>> ma.minimum_fill_value(a)\n 2147483647\n\n An array of numeric data can also be passed.\n\n >>> a = np.array([1, 2, 3], dtype=np.int8)\n >>> ma.minimum_fill_value(a)\n 127\n >>> a = np.array([1, 2, 3], dtype=np.float32)\n >>> ma.minimum_fill_value(a)\n inf\n\n \"\"\"\n return _extremum_fill_value(obj, min_filler, \"minimum\")\n\n\ndef maximum_fill_value(obj):\n \"\"\"\n Return the minimum value that can be represented by the dtype of an object.\n\n This function is useful for calculating a fill value suitable for\n taking the maximum of an array with a given dtype.\n\n Parameters\n ----------\n obj : ndarray, dtype or scalar\n An object that can be queried for it's numeric type.\n\n Returns\n -------\n val : scalar\n The minimum representable value.\n\n 
Raises\n ------\n TypeError\n If `obj` isn't a suitable numeric type.\n\n See Also\n --------\n minimum_fill_value : The inverse function.\n set_fill_value : Set the filling value of a masked array.\n MaskedArray.fill_value : Return current fill value.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.int8()\n >>> ma.maximum_fill_value(a)\n -128\n >>> a = np.int32()\n >>> ma.maximum_fill_value(a)\n -2147483648\n\n An array of numeric data can also be passed.\n\n >>> a = np.array([1, 2, 3], dtype=np.int8)\n >>> ma.maximum_fill_value(a)\n -128\n >>> a = np.array([1, 2, 3], dtype=np.float32)\n >>> ma.maximum_fill_value(a)\n -inf\n\n \"\"\"\n return _extremum_fill_value(obj, max_filler, \"maximum\")\n\n\ndef _recursive_set_fill_value(fillvalue, dt):\n \"\"\"\n Create a fill value for a structured dtype.\n\n Parameters\n ----------\n fillvalue : scalar or array_like\n Scalar or array representing the fill value. If it is of shorter\n length than the number of fields in dt, it will be resized.\n dt : dtype\n The structured dtype for which to create the fill value.\n\n Returns\n -------\n val: tuple\n A tuple of values corresponding to the structured fill value.\n\n \"\"\"\n fillvalue = np.resize(fillvalue, len(dt.names))\n output_value = []\n for (fval, name) in zip(fillvalue, dt.names):\n cdtype = dt[name]\n if cdtype.subdtype:\n cdtype = cdtype.subdtype[0]\n\n if cdtype.names is not None:\n output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))\n else:\n output_value.append(np.array(fval, dtype=cdtype).item())\n return tuple(output_value)\n\n\ndef _check_fill_value(fill_value, ndtype):\n \"\"\"\n Private function validating the given `fill_value` for the given dtype.\n\n If fill_value is None, it is set to the default corresponding to the dtype.\n\n If fill_value is not None, its value is forced to the given dtype.\n\n The result is always a 0d array.\n\n \"\"\"\n ndtype = np.dtype(ndtype)\n if fill_value is None:\n fill_value = default_fill_value(ndtype)\n elif ndtype.names is not None:\n if isinstance(fill_value, (ndarray, np.void)):\n try:\n fill_value = np.array(fill_value, copy=False, dtype=ndtype)\n except ValueError as e:\n err_msg = \"Unable to transform %s to dtype %s\"\n raise ValueError(err_msg % (fill_value, ndtype)) from e\n else:\n fill_value = np.asarray(fill_value, dtype=object)\n fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),\n dtype=ndtype)\n else:\n if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'):\n # Note this check doesn't work if fill_value is not a scalar\n err_msg = \"Cannot set fill value of string with array of dtype %s\"\n raise TypeError(err_msg % ndtype)\n else:\n # In case we want to convert 1e20 to int.\n # Also in case of converting string arrays.\n try:\n fill_value = np.array(fill_value, copy=False, dtype=ndtype)\n except (OverflowError, ValueError) as e:\n # Raise TypeError instead of OverflowError or ValueError.\n # OverflowError is seldom used, and the real problem here is\n # that the passed fill_value is not compatible with the ndtype.\n err_msg = \"Cannot convert fill_value %s to dtype %s\"\n raise TypeError(err_msg % (fill_value, ndtype)) from e\n return np.array(fill_value)\n\n\ndef set_fill_value(a, fill_value):\n \"\"\"\n Set the filling value of a, if a is a masked array.\n\n This function changes the fill value of the masked array `a` in place.\n If `a` is not a masked array, the function returns silently, without\n doing anything.\n\n Parameters\n ----------\n a : array_like\n Input 
array.\n fill_value : dtype\n Filling value. A consistency test is performed to make sure\n the value is compatible with the dtype of `a`.\n\n Returns\n -------\n None\n Nothing returned by this function.\n\n See Also\n --------\n maximum_fill_value : Return the default fill value for a dtype.\n MaskedArray.fill_value : Return current fill value.\n MaskedArray.set_fill_value : Equivalent method.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(5)\n >>> a\n array([0, 1, 2, 3, 4])\n >>> a = ma.masked_where(a < 3, a)\n >>> a\n masked_array(data=[--, --, --, 3, 4],\n mask=[ True, True, True, False, False],\n fill_value=999999)\n >>> ma.set_fill_value(a, -999)\n >>> a\n masked_array(data=[--, --, --, 3, 4],\n mask=[ True, True, True, False, False],\n fill_value=-999)\n\n Nothing happens if `a` is not a masked array.\n\n >>> a = list(range(5))\n >>> a\n [0, 1, 2, 3, 4]\n >>> ma.set_fill_value(a, 100)\n >>> a\n [0, 1, 2, 3, 4]\n >>> a = np.arange(5)\n >>> a\n array([0, 1, 2, 3, 4])\n >>> ma.set_fill_value(a, 100)\n >>> a\n array([0, 1, 2, 3, 4])\n\n \"\"\"\n if isinstance(a, MaskedArray):\n a.set_fill_value(fill_value)\n return\n\n\ndef get_fill_value(a):\n \"\"\"\n Return the filling value of a, if any. Otherwise, returns the\n default filling value for that type.\n\n \"\"\"\n if isinstance(a, MaskedArray):\n result = a.fill_value\n else:\n result = default_fill_value(a)\n return result\n\n\ndef common_fill_value(a, b):\n \"\"\"\n Return the common filling value of two masked arrays, if any.\n\n If ``a.fill_value == b.fill_value``, return the fill value,\n otherwise return None.\n\n Parameters\n ----------\n a, b : MaskedArray\n The masked arrays for which to compare fill values.\n\n Returns\n -------\n fill_value : scalar or None\n The common fill value, or None.\n\n Examples\n --------\n >>> x = np.ma.array([0, 1.], fill_value=3)\n >>> y = np.ma.array([0, 1.], fill_value=3)\n >>> np.ma.common_fill_value(x, y)\n 3.0\n\n \"\"\"\n t1 = get_fill_value(a)\n t2 = get_fill_value(b)\n if t1 == t2:\n return t1\n return None\n\n\ndef filled(a, fill_value=None):\n \"\"\"\n Return input as an array with masked data replaced by a fill value.\n\n If `a` is not a `MaskedArray`, `a` itself is returned.\n If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to\n ``a.fill_value``.\n\n Parameters\n ----------\n a : MaskedArray or array_like\n An input object.\n fill_value : array_like, optional.\n Can be scalar or non-scalar. If non-scalar, the\n resulting filled array should be broadcastable\n over input array. Default is None.\n\n Returns\n -------\n a : ndarray\n The filled array.\n\n See Also\n --------\n compressed\n\n Examples\n --------\n >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],\n ... [1, 0, 0],\n ... [0, 0, 0]])\n >>> x.filled()\n array([[999999, 1, 2],\n [999999, 4, 5],\n [ 6, 7, 8]])\n >>> x.filled(fill_value=333)\n array([[333, 1, 2],\n [333, 4, 5],\n [ 6, 7, 8]])\n >>> x.filled(fill_value=np.arange(3))\n array([[0, 1, 2],\n [0, 4, 5],\n [6, 7, 8]])\n\n \"\"\"\n if hasattr(a, 'filled'):\n return a.filled(fill_value)\n\n elif isinstance(a, ndarray):\n # Should we check for contiguity ? 
and a.flags['CONTIGUOUS']:\n return a\n elif isinstance(a, dict):\n return np.array(a, 'O')\n else:\n return np.array(a)\n\n\ndef get_masked_subclass(*arrays):\n \"\"\"\n Return the youngest subclass of MaskedArray from a list of (masked) arrays.\n\n In case of siblings, the first listed takes over.\n\n \"\"\"\n if len(arrays) == 1:\n arr = arrays[0]\n if isinstance(arr, MaskedArray):\n rcls = type(arr)\n else:\n rcls = MaskedArray\n else:\n arrcls = [type(a) for a in arrays]\n rcls = arrcls[0]\n if not issubclass(rcls, MaskedArray):\n rcls = MaskedArray\n for cls in arrcls[1:]:\n if issubclass(cls, rcls):\n rcls = cls\n # Don't return MaskedConstant as result: revert to MaskedArray\n if rcls.__name__ == 'MaskedConstant':\n return MaskedArray\n return rcls\n\n\ndef getdata(a, subok=True):\n \"\"\"\n Return the data of a masked array as an ndarray.\n\n Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,\n else return `a` as a ndarray or subclass (depending on `subok`) if not.\n\n Parameters\n ----------\n a : array_like\n Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.\n subok : bool\n Whether to force the output to be a `pure` ndarray (False) or to\n return a subclass of ndarray if appropriate (True, default).\n\n See Also\n --------\n getmask : Return the mask of a masked array, or nomask.\n getmaskarray : Return the mask of a masked array, or full array of False.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = ma.masked_equal([[1,2],[3,4]], 2)\n >>> a\n masked_array(\n data=[[1, --],\n [3, 4]],\n mask=[[False, True],\n [False, False]],\n fill_value=2)\n >>> ma.getdata(a)\n array([[1, 2],\n [3, 4]])\n\n Equivalently use the ``MaskedArray`` `data` attribute.\n\n >>> a.data\n array([[1, 2],\n [3, 4]])\n\n \"\"\"\n try:\n data = a._data\n except AttributeError:\n data = np.array(a, copy=False, subok=subok)\n if not subok:\n return data.view(ndarray)\n return data\n\n\nget_data = getdata\n\n\ndef fix_invalid(a, mask=nomask, copy=True, fill_value=None):\n \"\"\"\n Return input with invalid data masked and replaced by a fill value.\n\n Invalid data means values of `nan`, `inf`, etc.\n\n Parameters\n ----------\n a : array_like\n Input array, a (subclass of) ndarray.\n mask : sequence, optional\n Mask. Must be convertible to an array of booleans with the same\n shape as `data`. True indicates a masked (i.e. invalid) data.\n copy : bool, optional\n Whether to use a copy of `a` (True) or to fix `a` in place (False).\n Default is True.\n fill_value : scalar, optional\n Value used for fixing invalid data. 
Default is None, in which case\n the ``a.fill_value`` is used.\n\n Returns\n -------\n b : MaskedArray\n The input array with invalid entries fixed.\n\n Notes\n -----\n A copy is performed by default.\n\n Examples\n --------\n >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)\n >>> x\n masked_array(data=[--, -1.0, nan, inf],\n mask=[ True, False, False, False],\n fill_value=1e+20)\n >>> np.ma.fix_invalid(x)\n masked_array(data=[--, -1.0, --, --],\n mask=[ True, False, True, True],\n fill_value=1e+20)\n\n >>> fixed = np.ma.fix_invalid(x)\n >>> fixed.data\n array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20])\n >>> x.data\n array([ 1., -1., nan, inf])\n\n \"\"\"\n a = masked_array(a, copy=copy, mask=mask, subok=True)\n invalid = np.logical_not(np.isfinite(a._data))\n if not invalid.any():\n return a\n a._mask |= invalid\n if fill_value is None:\n fill_value = a.fill_value\n a._data[invalid] = fill_value\n return a\n\ndef is_string_or_list_of_strings(val):\n return (isinstance(val, str) or\n (isinstance(val, list) and val and\n builtins.all(isinstance(s, str) for s in val)))\n\n###############################################################################\n# Ufuncs #\n###############################################################################\n\n\nufunc_domain = {}\nufunc_fills = {}\n\n\nclass _DomainCheckInterval:\n \"\"\"\n Define a valid interval, so that :\n\n ``domain_check_interval(a,b)(x) == True`` where\n ``x < a`` or ``x > b``.\n\n \"\"\"\n\n def __init__(self, a, b):\n \"domain_check_interval(a,b)(x) = true where x < a or y > b\"\n if a > b:\n (a, b) = (b, a)\n self.a = a\n self.b = b\n\n def __call__(self, x):\n \"Execute the call behavior.\"\n # nans at masked positions cause RuntimeWarnings, even though\n # they are masked. To avoid this we suppress warnings.\n with np.errstate(invalid='ignore'):\n return umath.logical_or(umath.greater(x, self.b),\n umath.less(x, self.a))\n\n\nclass _DomainTan:\n \"\"\"\n Define a valid interval for the `tan` function, so that:\n\n ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``\n\n \"\"\"\n\n def __init__(self, eps):\n \"domain_tan(eps) = true where abs(cos(x)) < eps)\"\n self.eps = eps\n\n def __call__(self, x):\n \"Executes the call behavior.\"\n with np.errstate(invalid='ignore'):\n return umath.less(umath.absolute(umath.cos(x)), self.eps)\n\n\nclass _DomainSafeDivide:\n \"\"\"\n Define a domain for safe division.\n\n \"\"\"\n\n def __init__(self, tolerance=None):\n self.tolerance = tolerance\n\n def __call__(self, a, b):\n # Delay the selection of the tolerance to here in order to reduce numpy\n # import times. 
The calculation of these parameters is a substantial\n # component of numpy's import time.\n if self.tolerance is None:\n self.tolerance = np.finfo(float).tiny\n # don't call ma ufuncs from __array_wrap__ which would fail for scalars\n a, b = np.asarray(a), np.asarray(b)\n with np.errstate(invalid='ignore'):\n return umath.absolute(a) * self.tolerance >= umath.absolute(b)\n\n\nclass _DomainGreater:\n \"\"\"\n DomainGreater(v)(x) is True where x <= v.\n\n \"\"\"\n\n def __init__(self, critical_value):\n \"DomainGreater(v)(x) = true where x <= v\"\n self.critical_value = critical_value\n\n def __call__(self, x):\n \"Executes the call behavior.\"\n with np.errstate(invalid='ignore'):\n return umath.less_equal(x, self.critical_value)\n\n\nclass _DomainGreaterEqual:\n \"\"\"\n DomainGreaterEqual(v)(x) is True where x < v.\n\n \"\"\"\n\n def __init__(self, critical_value):\n \"DomainGreaterEqual(v)(x) = true where x < v\"\n self.critical_value = critical_value\n\n def __call__(self, x):\n \"Executes the call behavior.\"\n with np.errstate(invalid='ignore'):\n return umath.less(x, self.critical_value)\n\n\nclass _MaskedUFunc:\n def __init__(self, ufunc):\n self.f = ufunc\n self.__doc__ = ufunc.__doc__\n self.__name__ = ufunc.__name__\n\n def __str__(self):\n return f\"Masked version of {self.f}\"\n\n\nclass _MaskedUnaryOperation(_MaskedUFunc):\n \"\"\"\n Defines masked version of unary operations, where invalid values are\n pre-masked.\n\n Parameters\n ----------\n mufunc : callable\n The function for which to define a masked version. Made available\n as ``_MaskedUnaryOperation.f``.\n fill : scalar, optional\n Filling value, default is 0.\n domain : class instance\n Domain for the function. Should be one of the ``_Domain*``\n classes. Default is None.\n\n \"\"\"\n\n def __init__(self, mufunc, fill=0, domain=None):\n super().__init__(mufunc)\n self.fill = fill\n self.domain = domain\n ufunc_domain[mufunc] = domain\n ufunc_fills[mufunc] = fill\n\n def __call__(self, a, *args, **kwargs):\n \"\"\"\n Execute the call behavior.\n\n \"\"\"\n d = getdata(a)\n # Deal with domain\n if self.domain is not None:\n # Case 1.1. : Domained function\n # nans at masked positions cause RuntimeWarnings, even though\n # they are masked. To avoid this we suppress warnings.\n with np.errstate(divide='ignore', invalid='ignore'):\n result = self.f(d, *args, **kwargs)\n # Make a mask\n m = ~umath.isfinite(result)\n m |= self.domain(d)\n m |= getmask(a)\n else:\n # Case 1.2. : Function without a domain\n # Get the result and the mask\n with np.errstate(divide='ignore', invalid='ignore'):\n result = self.f(d, *args, **kwargs)\n m = getmask(a)\n\n if not result.ndim:\n # Case 2.1. : The result is scalarscalar\n if m:\n return masked\n return result\n\n if m is not nomask:\n # Case 2.2. The result is an array\n # We need to fill the invalid data back w/ the input Now,\n # that's plain silly: in C, we would just skip the element and\n # keep the original, but we do have to do it that way in Python\n\n # In case result has a lower dtype than the inputs (as in\n # equal)\n try:\n np.copyto(result, d, where=m)\n except TypeError:\n pass\n # Transform to\n masked_result = result.view(get_masked_subclass(a))\n masked_result._mask = m\n masked_result._update_from(a)\n return masked_result\n\n\nclass _MaskedBinaryOperation(_MaskedUFunc):\n \"\"\"\n Define masked version of binary operations, where invalid\n values are pre-masked.\n\n Parameters\n ----------\n mbfunc : function\n The function for which to define a masked version. 
Made available\n as ``_MaskedBinaryOperation.f``.\n domain : class instance\n Default domain for the function. Should be one of the ``_Domain*``\n classes. Default is None.\n fillx : scalar, optional\n Filling value for the first argument, default is 0.\n filly : scalar, optional\n Filling value for the second argument, default is 0.\n\n \"\"\"\n\n def __init__(self, mbfunc, fillx=0, filly=0):\n \"\"\"\n abfunc(fillx, filly) must be defined.\n\n abfunc(x, filly) = x for all x to enable reduce.\n\n \"\"\"\n super().__init__(mbfunc)\n self.fillx = fillx\n self.filly = filly\n ufunc_domain[mbfunc] = None\n ufunc_fills[mbfunc] = (fillx, filly)\n\n def __call__(self, a, b, *args, **kwargs):\n \"\"\"\n Execute the call behavior.\n\n \"\"\"\n # Get the data, as ndarray\n (da, db) = (getdata(a), getdata(b))\n # Get the result\n with np.errstate():\n np.seterr(divide='ignore', invalid='ignore')\n result = self.f(da, db, *args, **kwargs)\n # Get the mask for the result\n (ma, mb) = (getmask(a), getmask(b))\n if ma is nomask:\n if mb is nomask:\n m = nomask\n else:\n m = umath.logical_or(getmaskarray(a), mb)\n elif mb is nomask:\n m = umath.logical_or(ma, getmaskarray(b))\n else:\n m = umath.logical_or(ma, mb)\n\n # Case 1. : scalar\n if not result.ndim:\n if m:\n return masked\n return result\n\n # Case 2. : array\n # Revert result to da where masked\n if m is not nomask and m.any():\n # any errors, just abort; impossible to guarantee masked values\n try:\n np.copyto(result, da, casting='unsafe', where=m)\n except Exception:\n pass\n\n # Transforms to a (subclass of) MaskedArray\n masked_result = result.view(get_masked_subclass(a, b))\n masked_result._mask = m\n if isinstance(a, MaskedArray):\n masked_result._update_from(a)\n elif isinstance(b, MaskedArray):\n masked_result._update_from(b)\n return masked_result\n\n def reduce(self, target, axis=0, dtype=None):\n \"\"\"\n Reduce `target` along the given `axis`.\n\n \"\"\"\n tclass = get_masked_subclass(target)\n m = getmask(target)\n t = filled(target, self.filly)\n if t.shape == ():\n t = t.reshape(1)\n if m is not nomask:\n m = make_mask(m, copy=True)\n m.shape = (1,)\n\n if m is nomask:\n tr = self.f.reduce(t, axis)\n mr = nomask\n else:\n tr = self.f.reduce(t, axis, dtype=dtype)\n mr = umath.logical_and.reduce(m, axis)\n\n if not tr.shape:\n if mr:\n return masked\n else:\n return tr\n masked_tr = tr.view(tclass)\n masked_tr._mask = mr\n return masked_tr\n\n def outer(self, a, b):\n \"\"\"\n Return the function applied to the outer product of a and b.\n\n \"\"\"\n (da, db) = (getdata(a), getdata(b))\n d = self.f.outer(da, db)\n ma = getmask(a)\n mb = getmask(b)\n if ma is nomask and mb is nomask:\n m = nomask\n else:\n ma = getmaskarray(a)\n mb = getmaskarray(b)\n m = umath.logical_or.outer(ma, mb)\n if (not m.ndim) and m:\n return masked\n if m is not nomask:\n np.copyto(d, da, where=m)\n if not d.shape:\n return d\n masked_d = d.view(get_masked_subclass(a, b))\n masked_d._mask = m\n return masked_d\n\n def accumulate(self, target, axis=0):\n \"\"\"Accumulate `target` along `axis` after filling with y fill\n value.\n\n \"\"\"\n tclass = get_masked_subclass(target)\n t = filled(target, self.filly)\n result = self.f.accumulate(t, axis)\n masked_result = result.view(tclass)\n return masked_result\n\n\n\nclass _DomainedBinaryOperation(_MaskedUFunc):\n \"\"\"\n Define binary operations that have a domain, like divide.\n\n They have no reduce, outer or accumulate.\n\n Parameters\n ----------\n mbfunc : function\n The function for which to define a 
masked version. Made available\n as ``_DomainedBinaryOperation.f``.\n domain : class instance\n Default domain for the function. Should be one of the ``_Domain*``\n classes.\n fillx : scalar, optional\n Filling value for the first argument, default is 0.\n filly : scalar, optional\n Filling value for the second argument, default is 0.\n\n \"\"\"\n\n def __init__(self, dbfunc, domain, fillx=0, filly=0):\n \"\"\"abfunc(fillx, filly) must be defined.\n abfunc(x, filly) = x for all x to enable reduce.\n \"\"\"\n super().__init__(dbfunc)\n self.domain = domain\n self.fillx = fillx\n self.filly = filly\n ufunc_domain[dbfunc] = domain\n ufunc_fills[dbfunc] = (fillx, filly)\n\n def __call__(self, a, b, *args, **kwargs):\n \"Execute the call behavior.\"\n # Get the data\n (da, db) = (getdata(a), getdata(b))\n # Get the result\n with np.errstate(divide='ignore', invalid='ignore'):\n result = self.f(da, db, *args, **kwargs)\n # Get the mask as a combination of the source masks and invalid\n m = ~umath.isfinite(result)\n m |= getmask(a)\n m |= getmask(b)\n # Apply the domain\n domain = ufunc_domain.get(self.f, None)\n if domain is not None:\n m |= domain(da, db)\n # Take care of the scalar case first\n if not m.ndim:\n if m:\n return masked\n else:\n return result\n # When the mask is True, put back da if possible\n # any errors, just abort; impossible to guarantee masked values\n try:\n np.copyto(result, 0, casting='unsafe', where=m)\n # avoid using \"*\" since this may be overlaid\n masked_da = umath.multiply(m, da)\n # only add back if it can be cast safely\n if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):\n result += masked_da\n except Exception:\n pass\n\n # Transforms to a (subclass of) MaskedArray\n masked_result = result.view(get_masked_subclass(a, b))\n masked_result._mask = m\n if isinstance(a, MaskedArray):\n masked_result._update_from(a)\n elif isinstance(b, MaskedArray):\n masked_result._update_from(b)\n return masked_result\n\n\n# Unary ufuncs\nexp = _MaskedUnaryOperation(umath.exp)\nconjugate = _MaskedUnaryOperation(umath.conjugate)\nsin = _MaskedUnaryOperation(umath.sin)\ncos = _MaskedUnaryOperation(umath.cos)\narctan = _MaskedUnaryOperation(umath.arctan)\narcsinh = _MaskedUnaryOperation(umath.arcsinh)\nsinh = _MaskedUnaryOperation(umath.sinh)\ncosh = _MaskedUnaryOperation(umath.cosh)\ntanh = _MaskedUnaryOperation(umath.tanh)\nabs = absolute = _MaskedUnaryOperation(umath.absolute)\nangle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base\nfabs = _MaskedUnaryOperation(umath.fabs)\nnegative = _MaskedUnaryOperation(umath.negative)\nfloor = _MaskedUnaryOperation(umath.floor)\nceil = _MaskedUnaryOperation(umath.ceil)\naround = _MaskedUnaryOperation(np.round_)\nlogical_not = _MaskedUnaryOperation(umath.logical_not)\n\n# Domained unary ufuncs\nsqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,\n _DomainGreaterEqual(0.0))\nlog = _MaskedUnaryOperation(umath.log, 1.0,\n _DomainGreater(0.0))\nlog2 = _MaskedUnaryOperation(umath.log2, 1.0,\n _DomainGreater(0.0))\nlog10 = _MaskedUnaryOperation(umath.log10, 1.0,\n _DomainGreater(0.0))\ntan = _MaskedUnaryOperation(umath.tan, 0.0,\n _DomainTan(1e-35))\narcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,\n _DomainCheckInterval(-1.0, 1.0))\narccos = _MaskedUnaryOperation(umath.arccos, 0.0,\n _DomainCheckInterval(-1.0, 1.0))\narccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,\n _DomainGreaterEqual(1.0))\narctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,\n _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))\n\n# Binary ufuncs\nadd = 
_MaskedBinaryOperation(umath.add)\nsubtract = _MaskedBinaryOperation(umath.subtract)\nmultiply = _MaskedBinaryOperation(umath.multiply, 1, 1)\narctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)\nequal = _MaskedBinaryOperation(umath.equal)\nequal.reduce = None\nnot_equal = _MaskedBinaryOperation(umath.not_equal)\nnot_equal.reduce = None\nless_equal = _MaskedBinaryOperation(umath.less_equal)\nless_equal.reduce = None\ngreater_equal = _MaskedBinaryOperation(umath.greater_equal)\ngreater_equal.reduce = None\nless = _MaskedBinaryOperation(umath.less)\nless.reduce = None\ngreater = _MaskedBinaryOperation(umath.greater)\ngreater.reduce = None\nlogical_and = _MaskedBinaryOperation(umath.logical_and)\nalltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce\nlogical_or = _MaskedBinaryOperation(umath.logical_or)\nsometrue = logical_or.reduce\nlogical_xor = _MaskedBinaryOperation(umath.logical_xor)\nbitwise_and = _MaskedBinaryOperation(umath.bitwise_and)\nbitwise_or = _MaskedBinaryOperation(umath.bitwise_or)\nbitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)\nhypot = _MaskedBinaryOperation(umath.hypot)\n\n# Domained binary ufuncs\ndivide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)\ntrue_divide = _DomainedBinaryOperation(umath.true_divide,\n _DomainSafeDivide(), 0, 1)\nfloor_divide = _DomainedBinaryOperation(umath.floor_divide,\n _DomainSafeDivide(), 0, 1)\nremainder = _DomainedBinaryOperation(umath.remainder,\n _DomainSafeDivide(), 0, 1)\nfmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)\nmod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)\n\n\n###############################################################################\n# Mask creation functions #\n###############################################################################\n\n\ndef _replace_dtype_fields_recursive(dtype, primitive_dtype):\n \"Private function allowing recursion in _replace_dtype_fields.\"\n _recurse = _replace_dtype_fields_recursive\n\n # Do we have some name fields ?\n if dtype.names is not None:\n descr = []\n for name in dtype.names:\n field = dtype.fields[name]\n if len(field) == 3:\n # Prepend the title to the name\n name = (field[-1], name)\n descr.append((name, _recurse(field[0], primitive_dtype)))\n new_dtype = np.dtype(descr)\n\n # Is this some kind of composite a la (float,2)\n elif dtype.subdtype:\n descr = list(dtype.subdtype)\n descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)\n new_dtype = np.dtype(tuple(descr))\n\n # this is a primitive type, so do a direct replacement\n else:\n new_dtype = primitive_dtype\n\n # preserve identity of dtypes\n if new_dtype == dtype:\n new_dtype = dtype\n\n return new_dtype\n\n\ndef _replace_dtype_fields(dtype, primitive_dtype):\n \"\"\"\n Construct a dtype description list from a given dtype.\n\n Returns a new dtype object, with all fields and subtypes in the given type\n recursively replaced with `primitive_dtype`.\n\n Arguments are coerced to dtypes first.\n \"\"\"\n dtype = np.dtype(dtype)\n primitive_dtype = np.dtype(primitive_dtype)\n return _replace_dtype_fields_recursive(dtype, primitive_dtype)\n\n\ndef make_mask_descr(ndtype):\n \"\"\"\n Construct a dtype description list from a given dtype.\n\n Returns a new dtype object, with the type of all fields in `ndtype` to a\n boolean type. 
Field names are not altered.\n\n Parameters\n ----------\n ndtype : dtype\n The dtype to convert.\n\n Returns\n -------\n result : dtype\n A dtype that looks like `ndtype`, the type of all fields is boolean.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> dtype = np.dtype({'names':['foo', 'bar'],\n ... 'formats':[np.float32, np.int64]})\n >>> dtype\n dtype([('foo', '<f4'), ('bar', '<i8')])\n >>> ma.make_mask_descr(dtype)\n dtype([('foo', '|b1'), ('bar', '|b1')])\n >>> ma.make_mask_descr(np.float32)\n dtype('bool')\n\n \"\"\"\n return _replace_dtype_fields(ndtype, MaskType)\n\n\ndef getmask(a):\n \"\"\"\n Return the mask of a masked array, or nomask.\n\n Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the\n mask is not `nomask`, else return `nomask`. To guarantee a full array\n of booleans of the same shape as a, use `getmaskarray`.\n\n Parameters\n ----------\n a : array_like\n Input `MaskedArray` for which the mask is required.\n\n See Also\n --------\n getdata : Return the data of a masked array as an ndarray.\n getmaskarray : Return the mask of a masked array, or full array of False.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = ma.masked_equal([[1,2],[3,4]], 2)\n >>> a\n masked_array(\n data=[[1, --],\n [3, 4]],\n mask=[[False, True],\n [False, False]],\n fill_value=2)\n >>> ma.getmask(a)\n array([[False, True],\n [False, False]])\n\n Equivalently use the `MaskedArray` `mask` attribute.\n\n >>> a.mask\n array([[False, True],\n [False, False]])\n\n Result when mask == `nomask`\n\n >>> b = ma.masked_array([[1,2],[3,4]])\n >>> b\n masked_array(\n data=[[1, 2],\n [3, 4]],\n mask=False,\n fill_value=999999)\n >>> ma.nomask\n False\n >>> ma.getmask(b) == ma.nomask\n True\n >>> b.mask == ma.nomask\n True\n\n \"\"\"\n return getattr(a, '_mask', nomask)\n\n\nget_mask = getmask\n\n\ndef getmaskarray(arr):\n \"\"\"\n Return the mask of a masked array, or full boolean array of False.\n\n Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and\n the mask is not `nomask`, else return a full boolean array of False of\n the same shape as `arr`.\n\n Parameters\n ----------\n arr : array_like\n Input `MaskedArray` for which the mask is required.\n\n See Also\n --------\n getmask : Return the mask of a masked array, or nomask.\n getdata : Return the data of a masked array as an ndarray.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = ma.masked_equal([[1,2],[3,4]], 2)\n >>> a\n masked_array(\n data=[[1, --],\n [3, 4]],\n mask=[[False, True],\n [False, False]],\n fill_value=2)\n >>> ma.getmaskarray(a)\n array([[False, True],\n [False, False]])\n\n Result when mask == ``nomask``\n\n >>> b = ma.masked_array([[1,2],[3,4]])\n >>> b\n masked_array(\n data=[[1, 2],\n [3, 4]],\n mask=False,\n fill_value=999999)\n >>> ma.getmaskarray(b)\n array([[False, False],\n [False, False]])\n\n \"\"\"\n mask = getmask(arr)\n if mask is nomask:\n mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))\n return mask\n\n\ndef is_mask(m):\n \"\"\"\n Return True if m is a valid, standard mask.\n\n This function does not check the contents of the input, only that the\n type is MaskType. 
In particular, this function returns False if the\n mask has a flexible dtype.\n\n Parameters\n ----------\n m : array_like\n Array to test.\n\n Returns\n -------\n result : bool\n True if `m.dtype.type` is MaskType, False otherwise.\n\n See Also\n --------\n ma.isMaskedArray : Test whether input is an instance of MaskedArray.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)\n >>> m\n masked_array(data=[--, 1, --, 2, 3],\n mask=[ True, False, True, False, False],\n fill_value=0)\n >>> ma.is_mask(m)\n False\n >>> ma.is_mask(m.mask)\n True\n\n Input must be an ndarray (or have similar attributes)\n for it to be considered a valid mask.\n\n >>> m = [False, True, False]\n >>> ma.is_mask(m)\n False\n >>> m = np.array([False, True, False])\n >>> m\n array([False, True, False])\n >>> ma.is_mask(m)\n True\n\n Arrays with complex dtypes don't return True.\n\n >>> dtype = np.dtype({'names':['monty', 'pithon'],\n ... 'formats':[bool, bool]})\n >>> dtype\n dtype([('monty', '|b1'), ('pithon', '|b1')])\n >>> m = np.array([(True, False), (False, True), (True, False)],\n ... dtype=dtype)\n >>> m\n array([( True, False), (False, True), ( True, False)],\n dtype=[('monty', '?'), ('pithon', '?')])\n >>> ma.is_mask(m)\n False\n\n \"\"\"\n try:\n return m.dtype.type is MaskType\n except AttributeError:\n return False\n\n\ndef _shrink_mask(m):\n \"\"\"\n Shrink a mask to nomask if possible\n \"\"\"\n if m.dtype.names is None and not m.any():\n return nomask\n else:\n return m\n\n\ndef make_mask(m, copy=False, shrink=True, dtype=MaskType):\n \"\"\"\n Create a boolean mask from an array.\n\n Return `m` as a boolean mask, creating a copy if necessary or requested.\n The function can accept any sequence that is convertible to integers,\n or ``nomask``. Does not require that contents must be 0s and 1s, values\n of 0 are interpreted as False, everything else as True.\n\n Parameters\n ----------\n m : array_like\n Potential mask.\n copy : bool, optional\n Whether to return a copy of `m` (True) or `m` itself (False).\n shrink : bool, optional\n Whether to shrink `m` to ``nomask`` if all its values are False.\n dtype : dtype, optional\n Data-type of the output mask. By default, the output mask has a\n dtype of MaskType (bool). If the dtype is flexible, each field has\n a boolean dtype. This is ignored when `m` is ``nomask``, in which\n case ``nomask`` is always returned.\n\n Returns\n -------\n result : ndarray\n A boolean mask derived from `m`.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> m = [True, False, True, True]\n >>> ma.make_mask(m)\n array([ True, False, True, True])\n >>> m = [1, 0, 1, 1]\n >>> ma.make_mask(m)\n array([ True, False, True, True])\n >>> m = [1, 0, 2, -3]\n >>> ma.make_mask(m)\n array([ True, False, True, True])\n\n Effect of the `shrink` parameter.\n\n >>> m = np.zeros(4)\n >>> m\n array([0., 0., 0., 0.])\n >>> ma.make_mask(m)\n False\n >>> ma.make_mask(m, shrink=False)\n array([False, False, False, False])\n\n Using a flexible `dtype`.\n\n >>> m = [1, 0, 1, 1]\n >>> n = [0, 1, 0, 0]\n >>> arr = []\n >>> for man, mouse in zip(m, n):\n ... arr.append((man, mouse))\n >>> arr\n [(1, 0), (0, 1), (1, 0), (1, 0)]\n >>> dtype = np.dtype({'names':['man', 'mouse'],\n ... 
'formats':[np.int64, np.int64]})\n >>> arr = np.array(arr, dtype=dtype)\n >>> arr\n array([(1, 0), (0, 1), (1, 0), (1, 0)],\n dtype=[('man', '<i8'), ('mouse', '<i8')])\n >>> ma.make_mask(arr, dtype=dtype)\n array([(True, False), (False, True), (True, False), (True, False)],\n dtype=[('man', '|b1'), ('mouse', '|b1')])\n\n \"\"\"\n if m is nomask:\n return nomask\n\n # Make sure the input dtype is valid.\n dtype = make_mask_descr(dtype)\n\n # legacy boolean special case: \"existence of fields implies true\"\n if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:\n return np.ones(m.shape, dtype=dtype)\n\n # Fill the mask in case there are missing data; turn it into an ndarray.\n result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)\n # Bas les masques !\n if shrink:\n result = _shrink_mask(result)\n return result\n\n\ndef make_mask_none(newshape, dtype=None):\n \"\"\"\n Return a boolean mask of the given shape, filled with False.\n\n This function returns a boolean ndarray with all entries False, that can\n be used in common mask manipulations. If a complex dtype is specified, the\n type of each field is converted to a boolean type.\n\n Parameters\n ----------\n newshape : tuple\n A tuple indicating the shape of the mask.\n dtype : {None, dtype}, optional\n If None, use a MaskType instance. Otherwise, use a new datatype with\n the same fields as `dtype`, converted to boolean types.\n\n Returns\n -------\n result : ndarray\n An ndarray of appropriate shape and dtype, filled with False.\n\n See Also\n --------\n make_mask : Create a boolean mask from an array.\n make_mask_descr : Construct a dtype description list from a given dtype.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> ma.make_mask_none((3,))\n array([False, False, False])\n\n Defining a more complex dtype.\n\n >>> dtype = np.dtype({'names':['foo', 'bar'],\n ... 'formats':[np.float32, np.int64]})\n >>> dtype\n dtype([('foo', '<f4'), ('bar', '<i8')])\n >>> ma.make_mask_none((3,), dtype=dtype)\n array([(False, False), (False, False), (False, False)],\n dtype=[('foo', '|b1'), ('bar', '|b1')])\n\n \"\"\"\n if dtype is None:\n result = np.zeros(newshape, dtype=MaskType)\n else:\n result = np.zeros(newshape, dtype=make_mask_descr(dtype))\n return result\n\n\ndef _recursive_mask_or(m1, m2, newmask):\n names = m1.dtype.names\n for name in names:\n current1 = m1[name]\n if current1.dtype.names is not None:\n _recursive_mask_or(current1, m2[name], newmask[name])\n else:\n umath.logical_or(current1, m2[name], newmask[name])\n\n\ndef mask_or(m1, m2, copy=False, shrink=True):\n \"\"\"\n Combine two masks with the ``logical_or`` operator.\n\n The result may be a view on `m1` or `m2` if the other is `nomask`\n (i.e. False).\n\n Parameters\n ----------\n m1, m2 : array_like\n Input masks.\n copy : bool, optional\n If copy is False and one of the inputs is `nomask`, return a view\n of the other input mask. Defaults to False.\n shrink : bool, optional\n Whether to shrink the output to `nomask` if all its values are\n False. 
Defaults to True.\n\n Returns\n -------\n mask : output mask\n The result masks values that are masked in either `m1` or `m2`.\n\n Raises\n ------\n ValueError\n If `m1` and `m2` have different flexible dtypes.\n\n Examples\n --------\n >>> m1 = np.ma.make_mask([0, 1, 1, 0])\n >>> m2 = np.ma.make_mask([1, 0, 0, 0])\n >>> np.ma.mask_or(m1, m2)\n array([ True, True, True, False])\n\n \"\"\"\n\n if (m1 is nomask) or (m1 is False):\n dtype = getattr(m2, 'dtype', MaskType)\n return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)\n if (m2 is nomask) or (m2 is False):\n dtype = getattr(m1, 'dtype', MaskType)\n return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)\n if m1 is m2 and is_mask(m1):\n return m1\n (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))\n if dtype1 != dtype2:\n raise ValueError(\"Incompatible dtypes '%s'<>'%s'\" % (dtype1, dtype2))\n if dtype1.names is not None:\n # Allocate an output mask array with the properly broadcast shape.\n newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)\n _recursive_mask_or(m1, m2, newmask)\n return newmask\n return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)\n\n\ndef flatten_mask(mask):\n \"\"\"\n Returns a completely flattened version of the mask, where nested fields\n are collapsed.\n\n Parameters\n ----------\n mask : array_like\n Input array, which will be interpreted as booleans.\n\n Returns\n -------\n flattened_mask : ndarray of bools\n The flattened input.\n\n Examples\n --------\n >>> mask = np.array([0, 0, 1])\n >>> np.ma.flatten_mask(mask)\n array([False, False, True])\n\n >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])\n >>> np.ma.flatten_mask(mask)\n array([False, False, False, True])\n\n >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]\n >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)\n >>> np.ma.flatten_mask(mask)\n array([False, False, False, False, False, True])\n\n \"\"\"\n\n def _flatmask(mask):\n \"Flatten the mask and returns a (maybe nested) sequence of booleans.\"\n mnames = mask.dtype.names\n if mnames is not None:\n return [flatten_mask(mask[name]) for name in mnames]\n else:\n return mask\n\n def _flatsequence(sequence):\n \"Generates a flattened version of the sequence.\"\n try:\n for element in sequence:\n if hasattr(element, '__iter__'):\n yield from _flatsequence(element)\n else:\n yield element\n except TypeError:\n yield sequence\n\n mask = np.asarray(mask)\n flattened = _flatsequence(_flatmask(mask))\n return np.array([_ for _ in flattened], dtype=bool)\n\n\ndef _check_mask_axis(mask, axis, keepdims=np._NoValue):\n \"Check whether there are masked values along the given axis\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n if mask is not nomask:\n return mask.all(axis=axis, **kwargs)\n return nomask\n\n\n###############################################################################\n# Masking functions #\n###############################################################################\n\ndef masked_where(condition, a, copy=True):\n \"\"\"\n Mask an array where a condition is met.\n\n Return `a` as an array masked where `condition` is True.\n Any masked values of `a` or `condition` are also masked in the output.\n\n Parameters\n ----------\n condition : array_like\n Masking condition. 
When `condition` tests floating point values for\n equality, consider using ``masked_values`` instead.\n a : array_like\n Array to mask.\n copy : bool\n If True (default) make a copy of `a` in the result. If False modify\n `a` in place and return a view.\n\n Returns\n -------\n result : MaskedArray\n The result of masking `a` where `condition` is True.\n\n See Also\n --------\n masked_values : Mask using floating point equality.\n masked_equal : Mask where equal to a given value.\n masked_not_equal : Mask where `not` equal to a given value.\n masked_less_equal : Mask where less than or equal to a given value.\n masked_greater_equal : Mask where greater than or equal to a given value.\n masked_less : Mask where less than a given value.\n masked_greater : Mask where greater than a given value.\n masked_inside : Mask inside a given interval.\n masked_outside : Mask outside a given interval.\n masked_invalid : Mask invalid values (NaNs or infs).\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(4)\n >>> a\n array([0, 1, 2, 3])\n >>> ma.masked_where(a <= 2, a)\n masked_array(data=[--, --, --, 3],\n mask=[ True, True, True, False],\n fill_value=999999)\n\n Mask array `b` conditional on `a`.\n\n >>> b = ['a', 'b', 'c', 'd']\n >>> ma.masked_where(a == 2, b)\n masked_array(data=['a', 'b', --, 'd'],\n mask=[False, False, True, False],\n fill_value='N/A',\n dtype='<U1')\n\n Effect of the `copy` argument.\n\n >>> c = ma.masked_where(a <= 2, a)\n >>> c\n masked_array(data=[--, --, --, 3],\n mask=[ True, True, True, False],\n fill_value=999999)\n >>> c[0] = 99\n >>> c\n masked_array(data=[99, --, --, 3],\n mask=[False, True, True, False],\n fill_value=999999)\n >>> a\n array([0, 1, 2, 3])\n >>> c = ma.masked_where(a <= 2, a, copy=False)\n >>> c[0] = 99\n >>> c\n masked_array(data=[99, --, --, 3],\n mask=[False, True, True, False],\n fill_value=999999)\n >>> a\n array([99, 1, 2, 3])\n\n When `condition` or `a` contain masked values.\n\n >>> a = np.arange(4)\n >>> a = ma.masked_where(a == 2, a)\n >>> a\n masked_array(data=[0, 1, --, 3],\n mask=[False, False, True, False],\n fill_value=999999)\n >>> b = np.arange(4)\n >>> b = ma.masked_where(b == 0, b)\n >>> b\n masked_array(data=[--, 1, 2, 3],\n mask=[ True, False, False, False],\n fill_value=999999)\n >>> ma.masked_where(a == 3, b)\n masked_array(data=[--, 1, --, --],\n mask=[ True, False, True, True],\n fill_value=999999)\n\n \"\"\"\n # Make sure that condition is a valid standard-type mask.\n cond = make_mask(condition, shrink=False)\n a = np.array(a, copy=copy, subok=True)\n\n (cshape, ashape) = (cond.shape, a.shape)\n if cshape and cshape != ashape:\n raise IndexError(\"Inconsistent shape between the condition and the input\"\n \" (got %s and %s)\" % (cshape, ashape))\n if hasattr(a, '_mask'):\n cond = mask_or(cond, a._mask)\n cls = type(a)\n else:\n cls = MaskedArray\n result = a.view(cls)\n # Assign to *.mask so that structured masks are handled correctly.\n result.mask = _shrink_mask(cond)\n # There is no view of a boolean so when 'a' is a MaskedArray with nomask\n # the update to the result's mask has no effect.\n if not copy and hasattr(a, '_mask') and getmask(a) is nomask:\n a._mask = result._mask.view()\n return result\n\n\ndef masked_greater(x, value, copy=True):\n \"\"\"\n Mask an array where greater than a given value.\n\n This function is a shortcut to ``masked_where``, with\n `condition` = (x > value).\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Examples\n --------\n >>> import 
numpy.ma as ma\n >>> a = np.arange(4)\n >>> a\n array([0, 1, 2, 3])\n >>> ma.masked_greater(a, 2)\n masked_array(data=[0, 1, 2, --],\n mask=[False, False, False, True],\n fill_value=999999)\n\n \"\"\"\n return masked_where(greater(x, value), x, copy=copy)\n\n\ndef masked_greater_equal(x, value, copy=True):\n \"\"\"\n Mask an array where greater than or equal to a given value.\n\n This function is a shortcut to ``masked_where``, with\n `condition` = (x >= value).\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(4)\n >>> a\n array([0, 1, 2, 3])\n >>> ma.masked_greater_equal(a, 2)\n masked_array(data=[0, 1, --, --],\n mask=[False, False, True, True],\n fill_value=999999)\n\n \"\"\"\n return masked_where(greater_equal(x, value), x, copy=copy)\n\n\ndef masked_less(x, value, copy=True):\n \"\"\"\n Mask an array where less than a given value.\n\n This function is a shortcut to ``masked_where``, with\n `condition` = (x < value).\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(4)\n >>> a\n array([0, 1, 2, 3])\n >>> ma.masked_less(a, 2)\n masked_array(data=[--, --, 2, 3],\n mask=[ True, True, False, False],\n fill_value=999999)\n\n \"\"\"\n return masked_where(less(x, value), x, copy=copy)\n\n\ndef masked_less_equal(x, value, copy=True):\n \"\"\"\n Mask an array where less than or equal to a given value.\n\n This function is a shortcut to ``masked_where``, with\n `condition` = (x <= value).\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(4)\n >>> a\n array([0, 1, 2, 3])\n >>> ma.masked_less_equal(a, 2)\n masked_array(data=[--, --, --, 3],\n mask=[ True, True, True, False],\n fill_value=999999)\n\n \"\"\"\n return masked_where(less_equal(x, value), x, copy=copy)\n\n\ndef masked_not_equal(x, value, copy=True):\n \"\"\"\n Mask an array where `not` equal to a given value.\n\n This function is a shortcut to ``masked_where``, with\n `condition` = (x != value).\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(4)\n >>> a\n array([0, 1, 2, 3])\n >>> ma.masked_not_equal(a, 2)\n masked_array(data=[--, --, 2, --],\n mask=[ True, True, False, True],\n fill_value=999999)\n\n \"\"\"\n return masked_where(not_equal(x, value), x, copy=copy)\n\n\ndef masked_equal(x, value, copy=True):\n \"\"\"\n Mask an array where equal to a given value.\n\n This function is a shortcut to ``masked_where``, with\n `condition` = (x == value). For floating point arrays,\n consider using ``masked_values(x, value)``.\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n masked_values : Mask using floating point equality.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(4)\n >>> a\n array([0, 1, 2, 3])\n >>> ma.masked_equal(a, 2)\n masked_array(data=[0, 1, --, 3],\n mask=[False, False, True, False],\n fill_value=2)\n\n \"\"\"\n output = masked_where(equal(x, value), x, copy=copy)\n output.fill_value = value\n return output\n\n\ndef masked_inside(x, v1, v2, copy=True):\n \"\"\"\n Mask an array inside a given interval.\n\n Shortcut to ``masked_where``, where `condition` is True for `x` inside\n the interval [v1,v2] (v1 <= x <= v2). 
The boundaries `v1` and `v2`\n can be given in either order.\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Notes\n -----\n The array `x` is prefilled with its filling value.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]\n >>> ma.masked_inside(x, -0.3, 0.3)\n masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],\n mask=[False, False, True, True, False, False],\n fill_value=1e+20)\n\n The order of `v1` and `v2` doesn't matter.\n\n >>> ma.masked_inside(x, 0.3, -0.3)\n masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],\n mask=[False, False, True, True, False, False],\n fill_value=1e+20)\n\n \"\"\"\n if v2 < v1:\n (v1, v2) = (v2, v1)\n xf = filled(x)\n condition = (xf >= v1) & (xf <= v2)\n return masked_where(condition, x, copy=copy)\n\n\ndef masked_outside(x, v1, v2, copy=True):\n \"\"\"\n Mask an array outside a given interval.\n\n Shortcut to ``masked_where``, where `condition` is True for `x` outside\n the interval [v1,v2] (x < v1)|(x > v2).\n The boundaries `v1` and `v2` can be given in either order.\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Notes\n -----\n The array `x` is prefilled with its filling value.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]\n >>> ma.masked_outside(x, -0.3, 0.3)\n masked_array(data=[--, --, 0.01, 0.2, --, --],\n mask=[ True, True, False, False, True, True],\n fill_value=1e+20)\n\n The order of `v1` and `v2` doesn't matter.\n\n >>> ma.masked_outside(x, 0.3, -0.3)\n masked_array(data=[--, --, 0.01, 0.2, --, --],\n mask=[ True, True, False, False, True, True],\n fill_value=1e+20)\n\n \"\"\"\n if v2 < v1:\n (v1, v2) = (v2, v1)\n xf = filled(x)\n condition = (xf < v1) | (xf > v2)\n return masked_where(condition, x, copy=copy)\n\n\ndef masked_object(x, value, copy=True, shrink=True):\n \"\"\"\n Mask the array `x` where the data are exactly equal to value.\n\n This function is similar to `masked_values`, but only suitable\n for object arrays: for floating point, use `masked_values` instead.\n\n Parameters\n ----------\n x : array_like\n Array to mask\n value : object\n Comparison value\n copy : {True, False}, optional\n Whether to return a copy of `x`.\n shrink : {True, False}, optional\n Whether to collapse a mask full of False to nomask\n\n Returns\n -------\n result : MaskedArray\n The result of masking `x` where equal to `value`.\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n masked_equal : Mask where equal to a given value (integers).\n masked_values : Mask using floating point equality.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> food = np.array(['green_eggs', 'ham'], dtype=object)\n >>> # don't eat spoiled food\n >>> eat = ma.masked_object(food, 'green_eggs')\n >>> eat\n masked_array(data=[--, 'ham'],\n mask=[ True, False],\n fill_value='green_eggs',\n dtype=object)\n >>> # plain ol` ham is boring\n >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)\n >>> eat = ma.masked_object(fresh_food, 'green_eggs')\n >>> eat\n masked_array(data=['cheese', 'ham', 'pineapple'],\n mask=False,\n fill_value='green_eggs',\n dtype=object)\n\n Note that `mask` is set to ``nomask`` if possible.\n\n >>> eat\n masked_array(data=['cheese', 'ham', 'pineapple'],\n mask=False,\n fill_value='green_eggs',\n dtype=object)\n\n \"\"\"\n if isMaskedArray(x):\n condition = umath.equal(x._data, value)\n mask = x._mask\n else:\n condition = umath.equal(np.asarray(x), value)\n 
mask = nomask\n mask = mask_or(mask, make_mask(condition, shrink=shrink))\n return masked_array(x, mask=mask, copy=copy, fill_value=value)\n\n\ndef masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):\n \"\"\"\n Mask using floating point equality.\n\n Return a MaskedArray, masked where the data in array `x` are approximately\n equal to `value`, determined using `isclose`. The default tolerances for\n `masked_values` are the same as those for `isclose`.\n\n For integer types, exact equality is used, in the same way as\n `masked_equal`.\n\n The fill_value is set to `value` and the mask is set to ``nomask`` if\n possible.\n\n Parameters\n ----------\n x : array_like\n Array to mask.\n value : float\n Masking value.\n rtol, atol : float, optional\n Tolerance parameters passed on to `isclose`\n copy : bool, optional\n Whether to return a copy of `x`.\n shrink : bool, optional\n Whether to collapse a mask full of False to ``nomask``.\n\n Returns\n -------\n result : MaskedArray\n The result of masking `x` where approximately equal to `value`.\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n masked_equal : Mask where equal to a given value (integers).\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> x = np.array([1, 1.1, 2, 1.1, 3])\n >>> ma.masked_values(x, 1.1)\n masked_array(data=[1.0, --, 2.0, --, 3.0],\n mask=[False, True, False, True, False],\n fill_value=1.1)\n\n Note that `mask` is set to ``nomask`` if possible.\n\n >>> ma.masked_values(x, 1.5)\n masked_array(data=[1. , 1.1, 2. , 1.1, 3. ],\n mask=False,\n fill_value=1.5)\n\n For integers, the fill value will be different in general to the\n result of ``masked_equal``.\n\n >>> x = np.arange(5)\n >>> x\n array([0, 1, 2, 3, 4])\n >>> ma.masked_values(x, 2)\n masked_array(data=[0, 1, --, 3, 4],\n mask=[False, False, True, False, False],\n fill_value=2)\n >>> ma.masked_equal(x, 2)\n masked_array(data=[0, 1, --, 3, 4],\n mask=[False, False, True, False, False],\n fill_value=2)\n\n \"\"\"\n xnew = filled(x, value)\n if np.issubdtype(xnew.dtype, np.floating):\n mask = np.isclose(xnew, value, atol=atol, rtol=rtol)\n else:\n mask = umath.equal(xnew, value)\n ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value)\n if shrink:\n ret.shrink_mask()\n return ret\n\n\ndef masked_invalid(a, copy=True):\n \"\"\"\n Mask an array where invalid values occur (NaNs or infs).\n\n This function is a shortcut to ``masked_where``, with\n `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.\n Only applies to arrays with a dtype where NaNs or infs make sense\n (i.e. 
floating point types), but accepts any array_like object.\n\n See Also\n --------\n masked_where : Mask where a condition is met.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.arange(5, dtype=float)\n >>> a[2] = np.NaN\n >>> a[3] = np.PINF\n >>> a\n array([ 0., 1., nan, inf, 4.])\n >>> ma.masked_invalid(a)\n masked_array(data=[0.0, 1.0, --, --, 4.0],\n mask=[False, False, True, True, False],\n fill_value=1e+20)\n\n \"\"\"\n a = np.array(a, copy=copy, subok=True)\n mask = getattr(a, '_mask', None)\n if mask is not None:\n condition = ~(np.isfinite(getdata(a)))\n if mask is not nomask:\n condition |= mask\n cls = type(a)\n else:\n condition = ~(np.isfinite(a))\n cls = MaskedArray\n result = a.view(cls)\n result._mask = condition\n return result\n\n\n###############################################################################\n# Printing options #\n###############################################################################\n\n\nclass _MaskedPrintOption:\n \"\"\"\n Handle the string used to represent missing data in a masked array.\n\n \"\"\"\n\n def __init__(self, display):\n \"\"\"\n Create the masked_print_option object.\n\n \"\"\"\n self._display = display\n self._enabled = True\n\n def display(self):\n \"\"\"\n Display the string to print for masked values.\n\n \"\"\"\n return self._display\n\n def set_display(self, s):\n \"\"\"\n Set the string to print for masked values.\n\n \"\"\"\n self._display = s\n\n def enabled(self):\n \"\"\"\n Is the use of the display value enabled?\n\n \"\"\"\n return self._enabled\n\n def enable(self, shrink=1):\n \"\"\"\n Set the enabling shrink to `shrink`.\n\n \"\"\"\n self._enabled = shrink\n\n def __str__(self):\n return str(self._display)\n\n __repr__ = __str__\n\n# if you single index into a masked location you get this object.\nmasked_print_option = _MaskedPrintOption('--')\n\n\ndef _recursive_printoption(result, mask, printopt):\n \"\"\"\n Puts printoptions in result where mask is True.\n\n Private function allowing for recursion\n\n \"\"\"\n names = result.dtype.names\n if names is not None:\n for name in names:\n curdata = result[name]\n curmask = mask[name]\n _recursive_printoption(curdata, curmask, printopt)\n else:\n np.copyto(result, printopt, where=mask)\n return\n\n# For better or worse, these end in a newline\n_legacy_print_templates = dict(\n long_std=textwrap.dedent(\"\"\"\\\n masked_%(name)s(data =\n %(data)s,\n %(nlen)s mask =\n %(mask)s,\n %(nlen)s fill_value = %(fill)s)\n \"\"\"),\n long_flx=textwrap.dedent(\"\"\"\\\n masked_%(name)s(data =\n %(data)s,\n %(nlen)s mask =\n %(mask)s,\n %(nlen)s fill_value = %(fill)s,\n %(nlen)s dtype = %(dtype)s)\n \"\"\"),\n short_std=textwrap.dedent(\"\"\"\\\n masked_%(name)s(data = %(data)s,\n %(nlen)s mask = %(mask)s,\n %(nlen)s fill_value = %(fill)s)\n \"\"\"),\n short_flx=textwrap.dedent(\"\"\"\\\n masked_%(name)s(data = %(data)s,\n %(nlen)s mask = %(mask)s,\n %(nlen)s fill_value = %(fill)s,\n %(nlen)s dtype = %(dtype)s)\n \"\"\")\n)\n\n###############################################################################\n# MaskedArray class #\n###############################################################################\n\n\ndef _recursive_filled(a, mask, fill_value):\n \"\"\"\n Recursively fill `a` with `fill_value`.\n\n \"\"\"\n names = a.dtype.names\n for name in names:\n current = a[name]\n if current.dtype.names is not None:\n _recursive_filled(current, mask[name], fill_value[name])\n else:\n np.copyto(current, fill_value[name], where=mask[name])\n\n\ndef 
flatten_structured_array(a):\n \"\"\"\n Flatten a structured array.\n\n The data type of the output is chosen such that it can represent all of the\n (nested) fields.\n\n Parameters\n ----------\n a : structured array\n\n Returns\n -------\n output : masked array or ndarray\n A flattened masked array if the input is a masked array, otherwise a\n standard ndarray.\n\n Examples\n --------\n >>> ndtype = [('a', int), ('b', float)]\n >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)\n >>> np.ma.flatten_structured_array(a)\n array([[1., 1.],\n [2., 2.]])\n\n \"\"\"\n\n def flatten_sequence(iterable):\n \"\"\"\n Flattens a compound of nested iterables.\n\n \"\"\"\n for elm in iter(iterable):\n if hasattr(elm, '__iter__'):\n yield from flatten_sequence(elm)\n else:\n yield elm\n\n a = np.asanyarray(a)\n inishape = a.shape\n a = a.ravel()\n if isinstance(a, MaskedArray):\n out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])\n out = out.view(MaskedArray)\n out._mask = np.array([tuple(flatten_sequence(d.item()))\n for d in getmaskarray(a)])\n else:\n out = np.array([tuple(flatten_sequence(d.item())) for d in a])\n if len(inishape) > 1:\n newshape = list(out.shape)\n newshape[0] = inishape\n out.shape = tuple(flatten_sequence(newshape))\n return out\n\n\ndef _arraymethod(funcname, onmask=True):\n \"\"\"\n Return a class method wrapper around a basic array method.\n\n Creates a class method which returns a masked array, where the new\n ``_data`` array is the output of the corresponding basic method called\n on the original ``_data``.\n\n If `onmask` is True, the new mask is the output of the method called\n on the initial mask. Otherwise, the new mask is just a reference\n to the initial mask.\n\n Parameters\n ----------\n funcname : str\n Name of the function to apply on data.\n onmask : bool\n Whether the mask must be processed also (True) or left\n alone (False). Default is True. Make available as `_onmask`\n attribute.\n\n Returns\n -------\n method : instancemethod\n Class method wrapper of the specified basic array method.\n\n \"\"\"\n def wrapped_method(self, *args, **params):\n result = getattr(self._data, funcname)(*args, **params)\n result = result.view(type(self))\n result._update_from(self)\n mask = self._mask\n if not onmask:\n result.__setmask__(mask)\n elif mask is not nomask:\n # __setmask__ makes a copy, which we don't want\n result._mask = getattr(mask, funcname)(*args, **params)\n return result\n methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)\n if methdoc is not None:\n wrapped_method.__doc__ = methdoc.__doc__\n wrapped_method.__name__ = funcname\n return wrapped_method\n\n\nclass MaskedIterator:\n \"\"\"\n Flat iterator object to iterate over masked arrays.\n\n A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array\n `x`. It allows iterating over the array as if it were a 1-D array,\n either in a for-loop or by calling its `next` method.\n\n Iteration is done in C-contiguous style, with the last index varying the\n fastest. The iterator can also be indexed using basic slicing or\n advanced indexing.\n\n See Also\n --------\n MaskedArray.flat : Return a flat iterator over an array.\n MaskedArray.flatten : Returns a flattened copy of an array.\n\n Notes\n -----\n `MaskedIterator` is not exported by the `ma` module. 
Instead of\n instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.\n\n Examples\n --------\n >>> x = np.ma.array(arange(6).reshape(2, 3))\n >>> fl = x.flat\n >>> type(fl)\n <class 'numpy.ma.core.MaskedIterator'>\n >>> for item in fl:\n ... print(item)\n ...\n 0\n 1\n 2\n 3\n 4\n 5\n\n Extracting more than a single element b indexing the `MaskedIterator`\n returns a masked array:\n\n >>> fl[2:4]\n masked_array(data = [2 3],\n mask = False,\n fill_value = 999999)\n\n \"\"\"\n\n def __init__(self, ma):\n self.ma = ma\n self.dataiter = ma._data.flat\n\n if ma._mask is nomask:\n self.maskiter = None\n else:\n self.maskiter = ma._mask.flat\n\n def __iter__(self):\n return self\n\n def __getitem__(self, indx):\n result = self.dataiter.__getitem__(indx).view(type(self.ma))\n if self.maskiter is not None:\n _mask = self.maskiter.__getitem__(indx)\n if isinstance(_mask, ndarray):\n # set shape to match that of data; this is needed for matrices\n _mask.shape = result.shape\n result._mask = _mask\n elif isinstance(_mask, np.void):\n return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)\n elif _mask: # Just a scalar, masked\n return masked\n return result\n\n # This won't work if ravel makes a copy\n def __setitem__(self, index, value):\n self.dataiter[index] = getdata(value)\n if self.maskiter is not None:\n self.maskiter[index] = getmaskarray(value)\n\n def __next__(self):\n \"\"\"\n Return the next value, or raise StopIteration.\n\n Examples\n --------\n >>> x = np.ma.array([3, 2], mask=[0, 1])\n >>> fl = x.flat\n >>> next(fl)\n 3\n >>> next(fl)\n masked\n >>> next(fl)\n Traceback (most recent call last):\n ...\n StopIteration\n\n \"\"\"\n d = next(self.dataiter)\n if self.maskiter is not None:\n m = next(self.maskiter)\n if isinstance(m, np.void):\n return mvoid(d, mask=m, hardmask=self.ma._hardmask)\n elif m: # Just a scalar, masked\n return masked\n return d\n\n\nclass MaskedArray(ndarray):\n \"\"\"\n An array class with possibly masked values.\n\n Masked values of True exclude the corresponding element from any\n computation.\n\n Construction::\n\n x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,\n ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,\n shrink=True, order=None)\n\n Parameters\n ----------\n data : array_like\n Input data.\n mask : sequence, optional\n Mask. Must be convertible to an array of booleans with the same\n shape as `data`. True indicates a masked (i.e. invalid) data.\n dtype : dtype, optional\n Data type of the output.\n If `dtype` is None, the type of the data argument (``data.dtype``)\n is used. If `dtype` is not None and different from ``data.dtype``,\n a copy is performed.\n copy : bool, optional\n Whether to copy the input data (True), or to use a reference instead.\n Default is False.\n subok : bool, optional\n Whether to return a subclass of `MaskedArray` if possible (True) or a\n plain `MaskedArray`. Default is True.\n ndmin : int, optional\n Minimum number of dimensions. Default is 0.\n fill_value : scalar, optional\n Value used to fill in the masked values when necessary.\n If None, a default based on the data-type is used.\n keep_mask : bool, optional\n Whether to combine `mask` with the mask of the input data, if any\n (True), or to use only `mask` for the output (False). Default is True.\n hard_mask : bool, optional\n Whether to use a hard mask or not. With a hard mask, masked values\n cannot be unmasked. Default is False.\n shrink : bool, optional\n Whether to force compression of an empty mask. 
Default is True.\n order : {'C', 'F', 'A'}, optional\n Specify the order of the array. If order is 'C', then the array\n will be in C-contiguous order (last-index varies the fastest).\n If order is 'F', then the returned array will be in\n Fortran-contiguous order (first-index varies the fastest).\n If order is 'A' (default), then the returned array may be\n in any order (either C-, Fortran-contiguous, or even discontiguous),\n unless a copy is required, in which case it will be C-contiguous.\n\n Examples\n --------\n\n The ``mask`` can be initialized with an array of boolean values\n with the same shape as ``data``.\n\n >>> data = np.arange(6).reshape((2, 3))\n >>> np.ma.MaskedArray(data, mask=[[False, True, False],\n ... [False, False, True]])\n masked_array(\n data=[[0, --, 2],\n [3, 4, --]],\n mask=[[False, True, False],\n [False, False, True]],\n fill_value=999999)\n\n Alternatively, the ``mask`` can be initialized to homogeneous boolean\n array with the same shape as ``data`` by passing in a scalar\n boolean value:\n\n >>> np.ma.MaskedArray(data, mask=False)\n masked_array(\n data=[[0, 1, 2],\n [3, 4, 5]],\n mask=[[False, False, False],\n [False, False, False]],\n fill_value=999999)\n\n >>> np.ma.MaskedArray(data, mask=True)\n masked_array(\n data=[[--, --, --],\n [--, --, --]],\n mask=[[ True, True, True],\n [ True, True, True]],\n fill_value=999999,\n dtype=int64)\n\n .. note::\n The recommended practice for initializing ``mask`` with a scalar\n boolean value is to use ``True``/``False`` rather than\n ``np.True_``/``np.False_``. The reason is :attr:`nomask`\n is represented internally as ``np.False_``.\n\n >>> np.False_ is np.ma.nomask\n True\n\n \"\"\"\n\n __array_priority__ = 15\n _defaultmask = nomask\n _defaulthardmask = False\n _baseclass = ndarray\n\n # Maximum number of elements per axis used when printing an array. The\n # 1d case is handled separately because we need more values in this case.\n _print_width = 100\n _print_width_1d = 1500\n\n def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,\n subok=True, ndmin=0, fill_value=None, keep_mask=True,\n hard_mask=None, shrink=True, order=None):\n \"\"\"\n Create a new masked array from scratch.\n\n Notes\n -----\n A masked array can also be created by taking a .view(MaskedArray).\n\n \"\"\"\n # Process data.\n _data = np.array(data, dtype=dtype, copy=copy,\n order=order, subok=True, ndmin=ndmin)\n _baseclass = getattr(data, '_baseclass', type(_data))\n # Check that we're not erasing the mask.\n if isinstance(data, MaskedArray) and (data.shape != _data.shape):\n copy = True\n\n # Here, we copy the _view_, so that we can attach new properties to it\n # we must never do .view(MaskedConstant), as that would create a new\n # instance of np.ma.masked, which make identity comparison fail\n if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant):\n _data = ndarray.view(_data, type(data))\n else:\n _data = ndarray.view(_data, cls)\n # Process mask.\n # Type of the mask\n mdtype = make_mask_descr(_data.dtype)\n\n if mask is nomask:\n # Case 1. 
: no mask in input.\n # Erase the current mask ?\n if not keep_mask:\n # With a reduced version\n if shrink:\n _data._mask = nomask\n # With full version\n else:\n _data._mask = np.zeros(_data.shape, dtype=mdtype)\n # Check whether we missed something\n elif isinstance(data, (tuple, list)):\n try:\n # If data is a sequence of masked array\n mask = np.array(\n [getmaskarray(np.asanyarray(m, dtype=_data.dtype))\n for m in data], dtype=mdtype)\n except ValueError:\n # If data is nested\n mask = nomask\n # Force shrinking of the mask if needed (and possible)\n if (mdtype == MaskType) and mask.any():\n _data._mask = mask\n _data._sharedmask = False\n else:\n _data._sharedmask = not copy\n if copy:\n _data._mask = _data._mask.copy()\n # Reset the shape of the original mask\n if getmask(data) is not nomask:\n data._mask.shape = data.shape\n else:\n # Case 2. : With a mask in input.\n # If mask is boolean, create an array of True or False\n if mask is True and mdtype == MaskType:\n mask = np.ones(_data.shape, dtype=mdtype)\n elif mask is False and mdtype == MaskType:\n mask = np.zeros(_data.shape, dtype=mdtype)\n else:\n # Read the mask with the current mdtype\n try:\n mask = np.array(mask, copy=copy, dtype=mdtype)\n # Or assume it's a sequence of bool/int\n except TypeError:\n mask = np.array([tuple([m] * len(mdtype)) for m in mask],\n dtype=mdtype)\n # Make sure the mask and the data have the same shape\n if mask.shape != _data.shape:\n (nd, nm) = (_data.size, mask.size)\n if nm == 1:\n mask = np.resize(mask, _data.shape)\n elif nm == nd:\n mask = np.reshape(mask, _data.shape)\n else:\n msg = \"Mask and data not compatible: data size is %i, \" + \\\n \"mask size is %i.\"\n raise MaskError(msg % (nd, nm))\n copy = True\n # Set the mask to the new value\n if _data._mask is nomask:\n _data._mask = mask\n _data._sharedmask = not copy\n else:\n if not keep_mask:\n _data._mask = mask\n _data._sharedmask = not copy\n else:\n if _data.dtype.names is not None:\n def _recursive_or(a, b):\n \"do a|=b on each field of a, recursively\"\n for name in a.dtype.names:\n (af, bf) = (a[name], b[name])\n if af.dtype.names is not None:\n _recursive_or(af, bf)\n else:\n af |= bf\n\n _recursive_or(_data._mask, mask)\n else:\n _data._mask = np.logical_or(mask, _data._mask)\n _data._sharedmask = False\n # Update fill_value.\n if fill_value is None:\n fill_value = getattr(data, '_fill_value', None)\n # But don't run the check unless we have something to check.\n if fill_value is not None:\n _data._fill_value = _check_fill_value(fill_value, _data.dtype)\n # Process extra options ..\n if hard_mask is None:\n _data._hardmask = getattr(data, '_hardmask', False)\n else:\n _data._hardmask = hard_mask\n _data._baseclass = _baseclass\n return _data\n\n\n def _update_from(self, obj):\n \"\"\"\n Copies some attributes of obj to self.\n\n \"\"\"\n if isinstance(obj, ndarray):\n _baseclass = type(obj)\n else:\n _baseclass = ndarray\n # We need to copy the _basedict to avoid backward propagation\n _optinfo = {}\n _optinfo.update(getattr(obj, '_optinfo', {}))\n _optinfo.update(getattr(obj, '_basedict', {}))\n if not isinstance(obj, MaskedArray):\n _optinfo.update(getattr(obj, '__dict__', {}))\n _dict = dict(_fill_value=getattr(obj, '_fill_value', None),\n _hardmask=getattr(obj, '_hardmask', False),\n _sharedmask=getattr(obj, '_sharedmask', False),\n _isfield=getattr(obj, '_isfield', False),\n _baseclass=getattr(obj, '_baseclass', _baseclass),\n _optinfo=_optinfo,\n _basedict=_optinfo)\n self.__dict__.update(_dict)\n 
self.__dict__.update(_optinfo)\n return\n\n def __array_finalize__(self, obj):\n \"\"\"\n Finalizes the masked array.\n\n \"\"\"\n # Get main attributes.\n self._update_from(obj)\n\n # We have to decide how to initialize self.mask, based on\n # obj.mask. This is very difficult. There might be some\n # correspondence between the elements in the array we are being\n # created from (= obj) and us. Or there might not. This method can\n # be called in all kinds of places for all kinds of reasons -- could\n # be empty_like, could be slicing, could be a ufunc, could be a view.\n # The numpy subclassing interface simply doesn't give us any way\n # to know, which means that at best this method will be based on\n # guesswork and heuristics. To make things worse, there isn't even any\n # clear consensus about what the desired behavior is. For instance,\n # most users think that np.empty_like(marr) -- which goes via this\n # method -- should return a masked array with an empty mask (see\n # gh-3404 and linked discussions), but others disagree, and they have\n # existing code which depends on empty_like returning an array that\n # matches the input mask.\n #\n # Historically our algorithm was: if the template object mask had the\n # same *number of elements* as us, then we used *it's mask object\n # itself* as our mask, so that writes to us would also write to the\n # original array. This is horribly broken in multiple ways.\n #\n # Now what we do instead is, if the template object mask has the same\n # number of elements as us, and we do not have the same base pointer\n # as the template object (b/c views like arr[...] should keep the same\n # mask), then we make a copy of the template object mask and use\n # that. This is also horribly broken but somewhat less so. Maybe.\n if isinstance(obj, ndarray):\n # XX: This looks like a bug -- shouldn't it check self.dtype\n # instead?\n if obj.dtype.names is not None:\n _mask = getmaskarray(obj)\n else:\n _mask = getmask(obj)\n\n # If self and obj point to exactly the same data, then probably\n # self is a simple view of obj (e.g., self = obj[...]), so they\n # should share the same mask. (This isn't 100% reliable, e.g. self\n # could be the first row of obj, or have strange strides, but as a\n # heuristic it's not bad.) In all other cases, we make a copy of\n # the mask, so that future modifications to 'self' do not end up\n # side-effecting 'obj' as well.\n if (_mask is not nomask and obj.__array_interface__[\"data\"][0]\n != self.__array_interface__[\"data\"][0]):\n # We should make a copy. But we could get here via astype,\n # in which case the mask might need a new dtype as well\n # (e.g., changing to or from a structured dtype), and the\n # order could have changed. 
So, change the mask type if\n # needed and use astype instead of copy.\n if self.dtype == obj.dtype:\n _mask_dtype = _mask.dtype\n else:\n _mask_dtype = make_mask_descr(self.dtype)\n\n if self.flags.c_contiguous:\n order = \"C\"\n elif self.flags.f_contiguous:\n order = \"F\"\n else:\n order = \"K\"\n\n _mask = _mask.astype(_mask_dtype, order)\n else:\n # Take a view so shape changes, etc., do not propagate back.\n _mask = _mask.view()\n else:\n _mask = nomask\n\n self._mask = _mask\n # Finalize the mask\n if self._mask is not nomask:\n try:\n self._mask.shape = self.shape\n except ValueError:\n self._mask = nomask\n except (TypeError, AttributeError):\n # When _mask.shape is not writable (because it's a void)\n pass\n\n # Finalize the fill_value\n if self._fill_value is not None:\n self._fill_value = _check_fill_value(self._fill_value, self.dtype)\n elif self.dtype.names is not None:\n # Finalize the default fill_value for structured arrays\n self._fill_value = _check_fill_value(None, self.dtype)\n\n def __array_wrap__(self, obj, context=None):\n \"\"\"\n Special hook for ufuncs.\n\n Wraps the numpy array and sets the mask according to context.\n\n \"\"\"\n if obj is self: # for in-place operations\n result = obj\n else:\n result = obj.view(type(self))\n result._update_from(self)\n\n if context is not None:\n result._mask = result._mask.copy()\n func, args, out_i = context\n # args sometimes contains outputs (gh-10459), which we don't want\n input_args = args[:func.nin]\n m = reduce(mask_or, [getmaskarray(arg) for arg in input_args])\n # Get the domain mask\n domain = ufunc_domain.get(func, None)\n if domain is not None:\n # Take the domain, and make sure it's a ndarray\n with np.errstate(divide='ignore', invalid='ignore'):\n d = filled(domain(*input_args), True)\n\n if d.any():\n # Fill the result where the domain is wrong\n try:\n # Binary domain: take the last value\n fill_value = ufunc_fills[func][-1]\n except TypeError:\n # Unary domain: just use this one\n fill_value = ufunc_fills[func]\n except KeyError:\n # Domain not recognized, use fill_value instead\n fill_value = self.fill_value\n\n np.copyto(result, fill_value, where=d)\n\n # Update the mask\n if m is nomask:\n m = d\n else:\n # Don't modify inplace, we risk back-propagation\n m = (m | d)\n\n # Make sure the mask has the proper size\n if result is not self and result.shape == () and m:\n return masked\n else:\n result._mask = m\n result._sharedmask = False\n\n return result\n\n def view(self, dtype=None, type=None, fill_value=None):\n \"\"\"\n Return a view of the MaskedArray data.\n\n Parameters\n ----------\n dtype : data-type or ndarray sub-class, optional\n Data-type descriptor of the returned view, e.g., float32 or int16.\n The default, None, results in the view having the same data-type\n as `a`. As with ``ndarray.view``, dtype can also be specified as\n an ndarray sub-class, which then specifies the type of the\n returned object (this is equivalent to setting the ``type``\n parameter).\n type : Python type, optional\n Type of the returned view, either ndarray or a subclass. 
The\n default None results in type preservation.\n fill_value : scalar, optional\n The value to use for invalid entries (None by default).\n If None, then this argument is inferred from the passed `dtype`, or\n in its absence the original array, as discussed in the notes below.\n\n See Also\n --------\n numpy.ndarray.view : Equivalent method on ndarray object.\n\n Notes\n -----\n\n ``a.view()`` is used two different ways:\n\n ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view\n of the array's memory with a different data-type. This can cause a\n reinterpretation of the bytes of memory.\n\n ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just\n returns an instance of `ndarray_subclass` that looks at the same array\n (same shape, dtype, etc.) This does not cause a reinterpretation of the\n memory.\n\n If `fill_value` is not specified, but `dtype` is specified (and is not\n an ndarray sub-class), the `fill_value` of the MaskedArray will be\n reset. If neither `fill_value` nor `dtype` are specified (or if\n `dtype` is an ndarray sub-class), then the fill value is preserved.\n Finally, if `fill_value` is specified, but `dtype` is not, the fill\n value is set to the specified value.\n\n For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of\n bytes per entry than the previous dtype (for example, converting a\n regular array to a structured array), then the behavior of the view\n cannot be predicted just from the superficial appearance of ``a`` (shown\n by ``print(a)``). It also depends on exactly how ``a`` is stored in\n memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus\n defined as a slice or transpose, etc., the view may give different\n results.\n \"\"\"\n\n if dtype is None:\n if type is None:\n output = ndarray.view(self)\n else:\n output = ndarray.view(self, type)\n elif type is None:\n try:\n if issubclass(dtype, ndarray):\n output = ndarray.view(self, dtype)\n dtype = None\n else:\n output = ndarray.view(self, dtype)\n except TypeError:\n output = ndarray.view(self, dtype)\n else:\n output = ndarray.view(self, dtype, type)\n\n # also make the mask be a view (so attr changes to the view's\n # mask do no affect original object's mask)\n # (especially important to avoid affecting np.masked singleton)\n if getmask(output) is not nomask:\n output._mask = output._mask.view()\n\n # Make sure to reset the _fill_value if needed\n if getattr(output, '_fill_value', None) is not None:\n if fill_value is None:\n if dtype is None:\n pass # leave _fill_value as is\n else:\n output._fill_value = None\n else:\n output.fill_value = fill_value\n return output\n\n def __getitem__(self, indx):\n \"\"\"\n x.__getitem__(y) <==> x[y]\n\n Return the item described by i, as a masked array.\n\n \"\"\"\n # We could directly use ndarray.__getitem__ on self.\n # But then we would have to modify __array_finalize__ to prevent the\n # mask of being reshaped if it hasn't been set up properly yet\n # So it's easier to stick to the current version\n dout = self.data[indx]\n _mask = self._mask\n\n def _is_scalar(m):\n return not isinstance(m, np.ndarray)\n\n def _scalar_heuristic(arr, elem):\n \"\"\"\n Return whether `elem` is a scalar result of indexing `arr`, or None\n if undecidable without promoting nomask to a full mask\n \"\"\"\n # obviously a scalar\n if not isinstance(elem, np.ndarray):\n return True\n\n # object array scalar indexing can return anything\n elif arr.dtype.type is np.object_:\n if arr.dtype is not elem.dtype:\n # elem is an 
array, but dtypes do not match, so must be\n # an element\n return True\n\n # well-behaved subclass that only returns 0d arrays when\n # expected - this is not a scalar\n elif type(arr).__getitem__ == ndarray.__getitem__:\n return False\n\n return None\n\n if _mask is not nomask:\n # _mask cannot be a subclass, so it tells us whether we should\n # expect a scalar. It also cannot be of dtype object.\n mout = _mask[indx]\n scalar_expected = _is_scalar(mout)\n\n else:\n # attempt to apply the heuristic to avoid constructing a full mask\n mout = nomask\n scalar_expected = _scalar_heuristic(self.data, dout)\n if scalar_expected is None:\n # heuristics have failed\n # construct a full array, so we can be certain. This is costly.\n # we could also fall back on ndarray.__getitem__(self.data, indx)\n scalar_expected = _is_scalar(getmaskarray(self)[indx])\n\n # Did we extract a single item?\n if scalar_expected:\n # A record\n if isinstance(dout, np.void):\n # We should always re-cast to mvoid, otherwise users can\n # change masks on rows that already have masked values, but not\n # on rows that have no masked values, which is inconsistent.\n return mvoid(dout, mask=mout, hardmask=self._hardmask)\n\n # special case introduced in gh-5962\n elif (self.dtype.type is np.object_ and\n isinstance(dout, np.ndarray) and\n dout is not masked):\n # If masked, turn into a MaskedArray, with everything masked.\n if mout:\n return MaskedArray(dout, mask=True)\n else:\n return dout\n\n # Just a scalar\n else:\n if mout:\n return masked\n else:\n return dout\n else:\n # Force dout to MA\n dout = dout.view(type(self))\n # Inherit attributes from self\n dout._update_from(self)\n # Check the fill_value\n if is_string_or_list_of_strings(indx):\n if self._fill_value is not None:\n dout._fill_value = self._fill_value[indx]\n\n # Something like gh-15895 has happened if this check fails.\n # _fill_value should always be an ndarray.\n if not isinstance(dout._fill_value, np.ndarray):\n raise RuntimeError('Internal NumPy error.')\n # If we're indexing a multidimensional field in a\n # structured array (such as dtype(\"(2,)i2,(2,)i1\")),\n # dimensionality goes up (M[field].ndim == M.ndim +\n # M.dtype[field].ndim). That's fine for\n # M[field] but problematic for M[field].fill_value\n # which should have shape () to avoid breaking several\n # methods. There is no great way out, so set to\n # first element. See issue #6723.\n if dout._fill_value.ndim > 0:\n if not (dout._fill_value ==\n dout._fill_value.flat[0]).all():\n warnings.warn(\n \"Upon accessing multidimensional field \"\n f\"{indx!s}, need to keep dimensionality \"\n \"of fill_value at 0. Discarding \"\n \"heterogeneous fill_value and setting \"\n f\"all to {dout._fill_value[0]!s}.\",\n stacklevel=2)\n # Need to use `.flat[0:1].squeeze(...)` instead of just\n # `.flat[0]` to ensure the result is a 0d array and not\n # a scalar.\n dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0)\n dout._isfield = True\n # Update the mask if needed\n if mout is not nomask:\n # set shape to match that of data; this is needed for matrices\n dout._mask = reshape(mout, dout.shape)\n dout._sharedmask = True\n # Note: Don't try to check for m.any(), that'll take too long\n return dout\n\n def __setitem__(self, indx, value):\n \"\"\"\n x.__setitem__(i, y) <==> x[i]=y\n\n Set item described by index. 
If value is masked, masks those\n locations.\n\n \"\"\"\n if self is masked:\n raise MaskError('Cannot alter the masked element.')\n _data = self._data\n _mask = self._mask\n if isinstance(indx, str):\n _data[indx] = value\n if _mask is nomask:\n self._mask = _mask = make_mask_none(self.shape, self.dtype)\n _mask[indx] = getmask(value)\n return\n\n _dtype = _data.dtype\n\n if value is masked:\n # The mask wasn't set: create a full version.\n if _mask is nomask:\n _mask = self._mask = make_mask_none(self.shape, _dtype)\n # Now, set the mask to its value.\n if _dtype.names is not None:\n _mask[indx] = tuple([True] * len(_dtype.names))\n else:\n _mask[indx] = True\n return\n\n # Get the _data part of the new value\n dval = getattr(value, '_data', value)\n # Get the _mask part of the new value\n mval = getmask(value)\n if _dtype.names is not None and mval is nomask:\n mval = tuple([False] * len(_dtype.names))\n if _mask is nomask:\n # Set the data, then the mask\n _data[indx] = dval\n if mval is not nomask:\n _mask = self._mask = make_mask_none(self.shape, _dtype)\n _mask[indx] = mval\n elif not self._hardmask:\n # Set the data, then the mask\n if (isinstance(indx, masked_array) and\n not isinstance(value, masked_array)):\n _data[indx.data] = dval\n else:\n _data[indx] = dval\n _mask[indx] = mval\n elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):\n indx = indx * umath.logical_not(_mask)\n _data[indx] = dval\n else:\n if _dtype.names is not None:\n err_msg = \"Flexible 'hard' masks are not yet supported.\"\n raise NotImplementedError(err_msg)\n mindx = mask_or(_mask[indx], mval, copy=True)\n dindx = self._data[indx]\n if dindx.size > 1:\n np.copyto(dindx, dval, where=~mindx)\n elif mindx is nomask:\n dindx = dval\n _data[indx] = dindx\n _mask[indx] = mindx\n return\n\n # Define so that we can overwrite the setter.\n @property\n def dtype(self):\n return super().dtype\n\n @dtype.setter\n def dtype(self, dtype):\n super(MaskedArray, type(self)).dtype.__set__(self, dtype)\n if self._mask is not nomask:\n self._mask = self._mask.view(make_mask_descr(dtype), ndarray)\n # Try to reset the shape of the mask (if we don't have a void).\n # This raises a ValueError if the dtype change won't work.\n try:\n self._mask.shape = self.shape\n except (AttributeError, TypeError):\n pass\n\n @property\n def shape(self):\n return super().shape\n\n @shape.setter\n def shape(self, shape):\n super(MaskedArray, type(self)).shape.__set__(self, shape)\n # Cannot use self._mask, since it may not (yet) exist when a\n # masked matrix sets the shape.\n if getmask(self) is not nomask:\n self._mask.shape = self.shape\n\n def __setmask__(self, mask, copy=False):\n \"\"\"\n Set the mask.\n\n \"\"\"\n idtype = self.dtype\n current_mask = self._mask\n if mask is masked:\n mask = True\n\n if current_mask is nomask:\n # Make sure the mask is set\n # Just don't do anything if there's nothing to do.\n if mask is nomask:\n return\n current_mask = self._mask = make_mask_none(self.shape, idtype)\n\n if idtype.names is None:\n # No named fields.\n # Hardmask: don't unmask the data\n if self._hardmask:\n current_mask |= mask\n # Softmask: set everything to False\n # If it's obviously a compatible scalar, use a quick update\n # method.\n elif isinstance(mask, (int, float, np.bool_, np.number)):\n current_mask[...] 
= mask\n # Otherwise fall back to the slower, general purpose way.\n else:\n current_mask.flat = mask\n else:\n # Named fields w/\n mdtype = current_mask.dtype\n mask = np.array(mask, copy=False)\n # Mask is a singleton\n if not mask.ndim:\n # It's a boolean : make a record\n if mask.dtype.kind == 'b':\n mask = np.array(tuple([mask.item()] * len(mdtype)),\n dtype=mdtype)\n # It's a record: make sure the dtype is correct\n else:\n mask = mask.astype(mdtype)\n # Mask is a sequence\n else:\n # Make sure the new mask is a ndarray with the proper dtype\n try:\n mask = np.array(mask, copy=copy, dtype=mdtype)\n # Or assume it's a sequence of bool/int\n except TypeError:\n mask = np.array([tuple([m] * len(mdtype)) for m in mask],\n dtype=mdtype)\n # Hardmask: don't unmask the data\n if self._hardmask:\n for n in idtype.names:\n current_mask[n] |= mask[n]\n # Softmask: set everything to False\n # If it's obviously a compatible scalar, use a quick update\n # method.\n elif isinstance(mask, (int, float, np.bool_, np.number)):\n current_mask[...] = mask\n # Otherwise fall back to the slower, general purpose way.\n else:\n current_mask.flat = mask\n # Reshape if needed\n if current_mask.shape:\n current_mask.shape = self.shape\n return\n\n _set_mask = __setmask__\n\n @property\n def mask(self):\n \"\"\" Current mask. \"\"\"\n\n # We could try to force a reshape, but that wouldn't work in some\n # cases.\n # Return a view so that the dtype and shape cannot be changed in place\n # This still preserves nomask by identity\n return self._mask.view()\n\n @mask.setter\n def mask(self, value):\n self.__setmask__(value)\n\n @property\n def recordmask(self):\n \"\"\"\n Get or set the mask of the array if it has no named fields. For\n structured arrays, returns a ndarray of booleans where entries are\n ``True`` if **all** the fields are masked, ``False`` otherwise:\n\n >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)],\n ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],\n ... dtype=[('a', int), ('b', int)])\n >>> x.recordmask\n array([False, False, True, False, False])\n \"\"\"\n\n _mask = self._mask.view(ndarray)\n if _mask.dtype.names is None:\n return _mask\n return np.all(flatten_structured_array(_mask), axis=-1)\n\n @recordmask.setter\n def recordmask(self, mask):\n raise NotImplementedError(\"Coming soon: setting the mask per records!\")\n\n def harden_mask(self):\n \"\"\"\n Force the mask to hard.\n\n Whether the mask of a masked array is hard or soft is determined by\n its `~ma.MaskedArray.hardmask` property. `harden_mask` sets\n `~ma.MaskedArray.hardmask` to ``True``.\n\n See Also\n --------\n ma.MaskedArray.hardmask\n\n \"\"\"\n self._hardmask = True\n return self\n\n def soften_mask(self):\n \"\"\"\n Force the mask to soft.\n\n Whether the mask of a masked array is hard or soft is determined by\n its `~ma.MaskedArray.hardmask` property. `soften_mask` sets\n `~ma.MaskedArray.hardmask` to ``False``.\n\n See Also\n --------\n ma.MaskedArray.hardmask\n\n \"\"\"\n self._hardmask = False\n return self\n\n @property\n def hardmask(self):\n \"\"\" Hardness of the mask \"\"\"\n return self._hardmask\n\n def unshare_mask(self):\n \"\"\"\n Copy the mask and set the sharedmask flag to False.\n\n Whether the mask is shared between masked arrays can be seen from\n the `sharedmask` property. 
`unshare_mask` ensures the mask is not shared.\n A copy of the mask is only made if it was shared.\n\n See Also\n --------\n sharedmask\n\n \"\"\"\n if self._sharedmask:\n self._mask = self._mask.copy()\n self._sharedmask = False\n return self\n\n @property\n def sharedmask(self):\n \"\"\" Share status of the mask (read-only). \"\"\"\n return self._sharedmask\n\n def shrink_mask(self):\n \"\"\"\n Reduce a mask to nomask when possible.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)\n >>> x.mask\n array([[False, False],\n [False, False]])\n >>> x.shrink_mask()\n masked_array(\n data=[[1, 2],\n [3, 4]],\n mask=False,\n fill_value=999999)\n >>> x.mask\n False\n\n \"\"\"\n self._mask = _shrink_mask(self._mask)\n return self\n\n @property\n def baseclass(self):\n \"\"\" Class of the underlying data (read-only). \"\"\"\n return self._baseclass\n\n def _get_data(self):\n \"\"\"\n Returns the underlying data, as a view of the masked array.\n\n If the underlying data is a subclass of :class:`numpy.ndarray`, it is\n returned as such.\n\n >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])\n >>> x.data\n matrix([[1, 2],\n [3, 4]])\n\n The type of the data can be accessed through the :attr:`baseclass`\n attribute.\n \"\"\"\n return ndarray.view(self, self._baseclass)\n\n _data = property(fget=_get_data)\n data = property(fget=_get_data)\n\n @property\n def flat(self):\n \"\"\" Return a flat iterator, or set a flattened version of self to value. \"\"\"\n return MaskedIterator(self)\n\n @flat.setter\n def flat(self, value):\n y = self.ravel()\n y[:] = value\n\n @property\n def fill_value(self):\n \"\"\"\n The filling value of the masked array is a scalar. When setting, None\n will set to a default based on the data type.\n\n Examples\n --------\n >>> for dt in [np.int32, np.int64, np.float64, np.complex128]:\n ... np.ma.array([0, 1], dtype=dt).get_fill_value()\n ...\n 999999\n 999999\n 1e+20\n (1e+20+0j)\n\n >>> x = np.ma.array([0, 1.], fill_value=-np.inf)\n >>> x.fill_value\n -inf\n >>> x.fill_value = np.pi\n >>> x.fill_value\n 3.1415926535897931 # may vary\n\n Reset to default:\n\n >>> x.fill_value = None\n >>> x.fill_value\n 1e+20\n\n \"\"\"\n if self._fill_value is None:\n self._fill_value = _check_fill_value(None, self.dtype)\n\n # Temporary workaround to account for the fact that str and bytes\n # scalars cannot be indexed with (), whereas all other numpy\n # scalars can. See issues #7259 and #7267.\n # The if-block can be removed after #7267 has been fixed.\n if isinstance(self._fill_value, ndarray):\n return self._fill_value[()]\n return self._fill_value\n\n @fill_value.setter\n def fill_value(self, value=None):\n target = _check_fill_value(value, self.dtype)\n if not target.ndim == 0:\n # 2019-11-12, 1.18.0\n warnings.warn(\n \"Non-scalar arrays for the fill value are deprecated. Use \"\n \"arrays with scalar values instead. 
The filled function \"\n \"still supports any array as `fill_value`.\",\n DeprecationWarning, stacklevel=2)\n\n _fill_value = self._fill_value\n if _fill_value is None:\n # Create the attribute if it was undefined\n self._fill_value = target\n else:\n # Don't overwrite the attribute, just fill it (for propagation)\n _fill_value[()] = target\n\n # kept for compatibility\n get_fill_value = fill_value.fget\n set_fill_value = fill_value.fset\n\n def filled(self, fill_value=None):\n \"\"\"\n Return a copy of self, with masked values filled with a given value.\n **However**, if there are no masked values to fill, self will be\n returned instead as an ndarray.\n\n Parameters\n ----------\n fill_value : array_like, optional\n The value to use for invalid entries. Can be scalar or non-scalar.\n If non-scalar, the resulting ndarray must be broadcastable over\n input array. Default is None, in which case, the `fill_value`\n attribute of the array is used instead.\n\n Returns\n -------\n filled_array : ndarray\n A copy of ``self`` with invalid entries replaced by *fill_value*\n (be it the function argument or the attribute of ``self``), or\n ``self`` itself as an ndarray if there are no invalid entries to\n be replaced.\n\n Notes\n -----\n The result is **not** a MaskedArray!\n\n Examples\n --------\n >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)\n >>> x.filled()\n array([ 1, 2, -999, 4, -999])\n >>> x.filled(fill_value=1000)\n array([ 1, 2, 1000, 4, 1000])\n >>> type(x.filled())\n <class 'numpy.ndarray'>\n\n Subclassing is preserved. This means that if, e.g., the data part of\n the masked array is a recarray, `filled` returns a recarray:\n\n >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)\n >>> m = np.ma.array(x, mask=[(True, False), (False, True)])\n >>> m.filled()\n rec.array([(999999, 2), ( -3, 999999)],\n dtype=[('f0', '<i8'), ('f1', '<i8')])\n \"\"\"\n m = self._mask\n if m is nomask:\n return self._data\n\n if fill_value is None:\n fill_value = self.fill_value\n else:\n fill_value = _check_fill_value(fill_value, self.dtype)\n\n if self is masked_singleton:\n return np.asanyarray(fill_value)\n\n if m.dtype.names is not None:\n result = self._data.copy('K')\n _recursive_filled(result, self._mask, fill_value)\n elif not m.any():\n return self._data\n else:\n result = self._data.copy('K')\n try:\n np.copyto(result, fill_value, where=m)\n except (TypeError, AttributeError):\n fill_value = narray(fill_value, dtype=object)\n d = result.astype(object)\n result = np.choose(m, (d, fill_value))\n except IndexError:\n # ok, if scalar\n if self._data.shape:\n raise\n elif m:\n result = np.array(fill_value, dtype=self.dtype)\n else:\n result = self._data\n return result\n\n def compressed(self):\n \"\"\"\n Return all the non-masked data as a 1-D array.\n\n Returns\n -------\n data : ndarray\n A new `ndarray` holding the non-masked data is returned.\n\n Notes\n -----\n The result is **not** a MaskedArray!\n\n Examples\n --------\n >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)\n >>> x.compressed()\n array([0, 1])\n >>> type(x.compressed())\n <class 'numpy.ndarray'>\n\n \"\"\"\n data = ndarray.ravel(self._data)\n if self._mask is not nomask:\n data = data.compress(np.logical_not(ndarray.ravel(self._mask)))\n return data\n\n def compress(self, condition, axis=None, out=None):\n \"\"\"\n Return `a` where condition is ``True``.\n\n If condition is a `~ma.MaskedArray`, missing values are considered\n as ``False``.\n\n Parameters\n ----------\n condition : var\n 
Boolean 1-d array selecting which entries to return. If len(condition)\n is less than the size of a along the axis, then output is truncated\n to length of condition array.\n axis : {None, int}, optional\n Axis along which the operation must be performed.\n out : {None, ndarray}, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type will be cast if\n necessary.\n\n Returns\n -------\n result : MaskedArray\n A :class:`~ma.MaskedArray` object.\n\n Notes\n -----\n Please note the difference with :meth:`compressed` !\n The output of :meth:`compress` has a mask, the output of\n :meth:`compressed` does not.\n\n Examples\n --------\n >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)\n >>> x\n masked_array(\n data=[[1, --, 3],\n [--, 5, --],\n [7, --, 9]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n >>> x.compress([1, 0, 1])\n masked_array(data=[1, 3],\n mask=[False, False],\n fill_value=999999)\n\n >>> x.compress([1, 0, 1], axis=1)\n masked_array(\n data=[[1, 3],\n [--, --],\n [7, 9]],\n mask=[[False, False],\n [ True, True],\n [False, False]],\n fill_value=999999)\n\n \"\"\"\n # Get the basic components\n (_data, _mask) = (self._data, self._mask)\n\n # Force the condition to a regular ndarray and forget the missing\n # values.\n condition = np.asarray(condition)\n\n _new = _data.compress(condition, axis=axis, out=out).view(type(self))\n _new._update_from(self)\n if _mask is not nomask:\n _new._mask = _mask.compress(condition, axis=axis)\n return _new\n\n def _insert_masked_print(self):\n \"\"\"\n Replace masked values with masked_print_option, casting all innermost\n dtypes to object.\n \"\"\"\n if masked_print_option.enabled():\n mask = self._mask\n if mask is nomask:\n res = self._data\n else:\n # convert to object array to make filled work\n data = self._data\n # For big arrays, to avoid a costly conversion to the\n # object dtype, extract the corners before the conversion.\n print_width = (self._print_width if self.ndim > 1\n else self._print_width_1d)\n for axis in range(self.ndim):\n if data.shape[axis] > print_width:\n ind = print_width // 2\n arr = np.split(data, (ind, -ind), axis=axis)\n data = np.concatenate((arr[0], arr[2]), axis=axis)\n arr = np.split(mask, (ind, -ind), axis=axis)\n mask = np.concatenate((arr[0], arr[2]), axis=axis)\n\n rdtype = _replace_dtype_fields(self.dtype, \"O\")\n res = data.astype(rdtype)\n _recursive_printoption(res, mask, masked_print_option)\n else:\n res = self.filled(self.fill_value)\n return res\n\n def __str__(self):\n return str(self._insert_masked_print())\n\n def __repr__(self):\n \"\"\"\n Literal string representation.\n\n \"\"\"\n if self._baseclass is np.ndarray:\n name = 'array'\n else:\n name = self._baseclass.__name__\n\n\n # 2016-11-19: Demoted to legacy format\n if np.core.arrayprint._get_legacy_print_mode() <= 113:\n is_long = self.ndim > 1\n parameters = dict(\n name=name,\n nlen=\" \" * len(name),\n data=str(self),\n mask=str(self._mask),\n fill=str(self.fill_value),\n dtype=str(self.dtype)\n )\n is_structured = bool(self.dtype.names)\n key = '{}_{}'.format(\n 'long' if is_long else 'short',\n 'flx' if is_structured else 'std'\n )\n return _legacy_print_templates[key] % parameters\n\n prefix = f\"masked_{name}(\"\n\n dtype_needed = (\n not np.core.arrayprint.dtype_is_implied(self.dtype) or\n np.all(self.mask) or\n self.size == 0\n )\n\n # determine which keyword args need to be shown\n 
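# The repr always lists 'data', 'mask' and 'fill_value'; 'dtype' is appended only when dtype_needed is True (dtype not implied, array fully masked, or size 0).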
keys = ['data', 'mask', 'fill_value']\n if dtype_needed:\n keys.append('dtype')\n\n # array has only one row (non-column)\n is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1])\n\n # choose what to indent each keyword with\n min_indent = 2\n if is_one_row:\n # first key on the same line as the type, remaining keys\n # aligned by equals\n indents = {}\n indents[keys[0]] = prefix\n for k in keys[1:]:\n n = builtins.max(min_indent, len(prefix + keys[0]) - len(k))\n indents[k] = ' ' * n\n prefix = '' # absorbed into the first indent\n else:\n # each key on its own line, indented by two spaces\n indents = {k: ' ' * min_indent for k in keys}\n prefix = prefix + '\\n' # first key on the next line\n\n # format the field values\n reprs = {}\n reprs['data'] = np.array2string(\n self._insert_masked_print(),\n separator=\", \",\n prefix=indents['data'] + 'data=',\n suffix=',')\n reprs['mask'] = np.array2string(\n self._mask,\n separator=\", \",\n prefix=indents['mask'] + 'mask=',\n suffix=',')\n reprs['fill_value'] = repr(self.fill_value)\n if dtype_needed:\n reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype)\n\n # join keys with values and indentations\n result = ',\\n'.join(\n '{}{}={}'.format(indents[k], k, reprs[k])\n for k in keys\n )\n return prefix + result + ')'\n\n def _delegate_binop(self, other):\n # This emulates the logic in\n # private/binop_override.h:forward_binop_should_defer\n if isinstance(other, type(self)):\n return False\n array_ufunc = getattr(other, \"__array_ufunc__\", False)\n if array_ufunc is False:\n other_priority = getattr(other, \"__array_priority__\", -1000000)\n return self.__array_priority__ < other_priority\n else:\n # If array_ufunc is not None, it will be called inside the ufunc;\n # None explicitly tells us to not call the ufunc, i.e., defer.\n return array_ufunc is None\n\n def _comparison(self, other, compare):\n \"\"\"Compare self with other using operator.eq or operator.ne.\n\n When either of the elements is masked, the result is masked as well,\n but the underlying boolean data are still set, with self and other\n considered equal if both are masked, and unequal otherwise.\n\n For structured arrays, all fields are combined, with masked values\n ignored. The result is masked if all fields were masked, with self\n and other considered equal only if both were fully masked.\n \"\"\"\n omask = getmask(other)\n smask = self.mask\n mask = mask_or(smask, omask, copy=True)\n\n odata = getdata(other)\n if mask.dtype.names is not None:\n # For possibly masked structured arrays we need to be careful,\n # since the standard structured array comparison will use all\n # fields, masked or not. To avoid masked fields influencing the\n # outcome, we set all masked fields in self to other, so they'll\n # count as equal. 
To prepare, we ensure we have the right shape.\n broadcast_shape = np.broadcast(self, odata).shape\n sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)\n sbroadcast._mask = mask\n sdata = sbroadcast.filled(odata)\n # Now take care of the mask; the merged mask should have an item\n # masked if all fields were masked (in one and/or other).\n mask = (mask == np.ones((), mask.dtype))\n\n else:\n # For regular arrays, just use the data as they come.\n sdata = self.data\n\n check = compare(sdata, odata)\n\n if isinstance(check, (np.bool_, bool)):\n return masked if mask else check\n\n if mask is not nomask:\n # Adjust elements that were masked, which should be treated\n # as equal if masked in both, unequal if masked in one.\n # Note that this works automatically for structured arrays too.\n check = np.where(mask, compare(smask, omask), check)\n if mask.shape != check.shape:\n # Guarantee consistency of the shape, making a copy since the\n # the mask may need to get written to later.\n mask = np.broadcast_to(mask, check.shape).copy()\n\n check = check.view(type(self))\n check._update_from(self)\n check._mask = mask\n\n # Cast fill value to bool_ if needed. If it cannot be cast, the\n # default boolean fill value is used.\n if check._fill_value is not None:\n try:\n fill = _check_fill_value(check._fill_value, np.bool_)\n except (TypeError, ValueError):\n fill = _check_fill_value(None, np.bool_)\n check._fill_value = fill\n\n return check\n\n def __eq__(self, other):\n \"\"\"Check whether other equals self elementwise.\n\n When either of the elements is masked, the result is masked as well,\n but the underlying boolean data are still set, with self and other\n considered equal if both are masked, and unequal otherwise.\n\n For structured arrays, all fields are combined, with masked values\n ignored. The result is masked if all fields were masked, with self\n and other considered equal only if both were fully masked.\n \"\"\"\n return self._comparison(other, operator.eq)\n\n def __ne__(self, other):\n \"\"\"Check whether other does not equal self elementwise.\n\n When either of the elements is masked, the result is masked as well,\n but the underlying boolean data are still set, with self and other\n considered equal if both are masked, and unequal otherwise.\n\n For structured arrays, all fields are combined, with masked values\n ignored. 
The result is masked if all fields were masked, with self\n and other considered equal only if both were fully masked.\n \"\"\"\n return self._comparison(other, operator.ne)\n\n def __add__(self, other):\n \"\"\"\n Add self to other, and return a new masked array.\n\n \"\"\"\n if self._delegate_binop(other):\n return NotImplemented\n return add(self, other)\n\n def __radd__(self, other):\n \"\"\"\n Add other to self, and return a new masked array.\n\n \"\"\"\n # In analogy with __rsub__ and __rdiv__, use original order:\n # we get here from `other + self`.\n return add(other, self)\n\n def __sub__(self, other):\n \"\"\"\n Subtract other from self, and return a new masked array.\n\n \"\"\"\n if self._delegate_binop(other):\n return NotImplemented\n return subtract(self, other)\n\n def __rsub__(self, other):\n \"\"\"\n Subtract self from other, and return a new masked array.\n\n \"\"\"\n return subtract(other, self)\n\n def __mul__(self, other):\n \"Multiply self by other, and return a new masked array.\"\n if self._delegate_binop(other):\n return NotImplemented\n return multiply(self, other)\n\n def __rmul__(self, other):\n \"\"\"\n Multiply other by self, and return a new masked array.\n\n \"\"\"\n # In analogy with __rsub__ and __rdiv__, use original order:\n # we get here from `other * self`.\n return multiply(other, self)\n\n def __div__(self, other):\n \"\"\"\n Divide other into self, and return a new masked array.\n\n \"\"\"\n if self._delegate_binop(other):\n return NotImplemented\n return divide(self, other)\n\n def __truediv__(self, other):\n \"\"\"\n Divide other into self, and return a new masked array.\n\n \"\"\"\n if self._delegate_binop(other):\n return NotImplemented\n return true_divide(self, other)\n\n def __rtruediv__(self, other):\n \"\"\"\n Divide self into other, and return a new masked array.\n\n \"\"\"\n return true_divide(other, self)\n\n def __floordiv__(self, other):\n \"\"\"\n Divide other into self, and return a new masked array.\n\n \"\"\"\n if self._delegate_binop(other):\n return NotImplemented\n return floor_divide(self, other)\n\n def __rfloordiv__(self, other):\n \"\"\"\n Divide self into other, and return a new masked array.\n\n \"\"\"\n return floor_divide(other, self)\n\n def __pow__(self, other):\n \"\"\"\n Raise self to the power other, masking the potential NaNs/Infs\n\n \"\"\"\n if self._delegate_binop(other):\n return NotImplemented\n return power(self, other)\n\n def __rpow__(self, other):\n \"\"\"\n Raise other to the power self, masking the potential NaNs/Infs\n\n \"\"\"\n return power(other, self)\n\n def __iadd__(self, other):\n \"\"\"\n Add other to self in-place.\n\n \"\"\"\n m = getmask(other)\n if self._mask is nomask:\n if m is not nomask and m.any():\n self._mask = make_mask_none(self.shape, self.dtype)\n self._mask += m\n else:\n if m is not nomask:\n self._mask += m\n self._data.__iadd__(np.where(self._mask, self.dtype.type(0),\n getdata(other)))\n return self\n\n def __isub__(self, other):\n \"\"\"\n Subtract other from self in-place.\n\n \"\"\"\n m = getmask(other)\n if self._mask is nomask:\n if m is not nomask and m.any():\n self._mask = make_mask_none(self.shape, self.dtype)\n self._mask += m\n elif m is not nomask:\n self._mask += m\n self._data.__isub__(np.where(self._mask, self.dtype.type(0),\n getdata(other)))\n return self\n\n def __imul__(self, other):\n \"\"\"\n Multiply self by other in-place.\n\n \"\"\"\n m = getmask(other)\n if self._mask is nomask:\n if m is not nomask and m.any():\n self._mask = 
make_mask_none(self.shape, self.dtype)\n self._mask += m\n elif m is not nomask:\n self._mask += m\n self._data.__imul__(np.where(self._mask, self.dtype.type(1),\n getdata(other)))\n return self\n\n def __idiv__(self, other):\n \"\"\"\n Divide self by other in-place.\n\n \"\"\"\n other_data = getdata(other)\n dom_mask = _DomainSafeDivide().__call__(self._data, other_data)\n other_mask = getmask(other)\n new_mask = mask_or(other_mask, dom_mask)\n # The following 3 lines control the domain filling\n if dom_mask.any():\n (_, fval) = ufunc_fills[np.divide]\n other_data = np.where(dom_mask, fval, other_data)\n self._mask |= new_mask\n self._data.__idiv__(np.where(self._mask, self.dtype.type(1),\n other_data))\n return self\n\n def __ifloordiv__(self, other):\n \"\"\"\n Floor divide self by other in-place.\n\n \"\"\"\n other_data = getdata(other)\n dom_mask = _DomainSafeDivide().__call__(self._data, other_data)\n other_mask = getmask(other)\n new_mask = mask_or(other_mask, dom_mask)\n # The following 3 lines control the domain filling\n if dom_mask.any():\n (_, fval) = ufunc_fills[np.floor_divide]\n other_data = np.where(dom_mask, fval, other_data)\n self._mask |= new_mask\n self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),\n other_data))\n return self\n\n def __itruediv__(self, other):\n \"\"\"\n True divide self by other in-place.\n\n \"\"\"\n other_data = getdata(other)\n dom_mask = _DomainSafeDivide().__call__(self._data, other_data)\n other_mask = getmask(other)\n new_mask = mask_or(other_mask, dom_mask)\n # The following 3 lines control the domain filling\n if dom_mask.any():\n (_, fval) = ufunc_fills[np.true_divide]\n other_data = np.where(dom_mask, fval, other_data)\n self._mask |= new_mask\n self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),\n other_data))\n return self\n\n def __ipow__(self, other):\n \"\"\"\n Raise self to the power other, in place.\n\n \"\"\"\n other_data = getdata(other)\n other_mask = getmask(other)\n with np.errstate(divide='ignore', invalid='ignore'):\n self._data.__ipow__(np.where(self._mask, self.dtype.type(1),\n other_data))\n invalid = np.logical_not(np.isfinite(self._data))\n if invalid.any():\n if self._mask is not nomask:\n self._mask |= invalid\n else:\n self._mask = invalid\n np.copyto(self._data, self.fill_value, where=invalid)\n new_mask = mask_or(other_mask, invalid)\n self._mask = mask_or(self._mask, new_mask)\n return self\n\n def __float__(self):\n \"\"\"\n Convert to float.\n\n \"\"\"\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted \"\n \"to Python scalars\")\n elif self._mask:\n warnings.warn(\"Warning: converting a masked element to nan.\", stacklevel=2)\n return np.nan\n return float(self.item())\n\n def __int__(self):\n \"\"\"\n Convert to int.\n\n \"\"\"\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted \"\n \"to Python scalars\")\n elif self._mask:\n raise MaskError('Cannot convert masked element to a Python int.')\n return int(self.item())\n\n @property\n def imag(self):\n \"\"\"\n The imaginary part of the masked array.\n\n This property is a view on the imaginary part of this `MaskedArray`.\n\n See Also\n --------\n real\n\n Examples\n --------\n >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])\n >>> x.imag\n masked_array(data=[1.0, --, 1.6],\n mask=[False, True, False],\n fill_value=1e+20)\n\n \"\"\"\n result = self._data.imag.view(type(self))\n result.__setmask__(self._mask)\n return result\n\n # kept for compatibility\n 
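# Like get_fill_value/set_fill_value earlier, get_imag and get_real are legacy aliases that simply expose the corresponding property getters.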
get_imag = imag.fget\n\n @property\n def real(self):\n \"\"\"\n The real part of the masked array.\n\n This property is a view on the real part of this `MaskedArray`.\n\n See Also\n --------\n imag\n\n Examples\n --------\n >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])\n >>> x.real\n masked_array(data=[1.0, --, 3.45],\n mask=[False, True, False],\n fill_value=1e+20)\n\n \"\"\"\n result = self._data.real.view(type(self))\n result.__setmask__(self._mask)\n return result\n\n # kept for compatibility\n get_real = real.fget\n\n def count(self, axis=None, keepdims=np._NoValue):\n \"\"\"\n Count the non-masked elements of the array along the given axis.\n\n Parameters\n ----------\n axis : None or int or tuple of ints, optional\n Axis or axes along which the count is performed.\n The default, None, performs the count over all\n the dimensions of the input array. `axis` may be negative, in\n which case it counts from the last to the first axis.\n\n .. versionadded:: 1.10.0\n\n If this is a tuple of ints, the count is performed on multiple\n axes, instead of a single axis or all the axes as before.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the array.\n\n Returns\n -------\n result : ndarray or scalar\n An array with the same shape as the input array, with the specified\n axis removed. If the array is a 0-d array, or if `axis` is None, a\n scalar is returned.\n\n See Also\n --------\n ma.count_masked : Count masked elements in array or along a given axis.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = ma.arange(6).reshape((2, 3))\n >>> a[1, :] = ma.masked\n >>> a\n masked_array(\n data=[[0, 1, 2],\n [--, --, --]],\n mask=[[False, False, False],\n [ True, True, True]],\n fill_value=999999)\n >>> a.count()\n 3\n\n When the `axis` keyword is specified an array of appropriate size is\n returned.\n\n >>> a.count(axis=0)\n array([1, 1, 1])\n >>> a.count(axis=1)\n array([3, 0])\n\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n m = self._mask\n # special case for matrices (we assume no other subclasses modify\n # their dimensions)\n if isinstance(self.data, np.matrix):\n if m is nomask:\n m = np.zeros(self.shape, dtype=np.bool_)\n m = m.view(type(self.data))\n\n if m is nomask:\n # compare to _count_reduce_items in _methods.py\n\n if self.shape == ():\n if axis not in (None, 0):\n raise np.AxisError(axis=axis, ndim=self.ndim)\n return 1\n elif axis is None:\n if kwargs.get('keepdims', False):\n return np.array(self.size, dtype=np.intp, ndmin=self.ndim)\n return self.size\n\n axes = normalize_axis_tuple(axis, self.ndim)\n items = 1\n for ax in axes:\n items *= self.shape[ax]\n\n if kwargs.get('keepdims', False):\n out_dims = list(self.shape)\n for a in axes:\n out_dims[a] = 1\n else:\n out_dims = [d for n, d in enumerate(self.shape)\n if n not in axes]\n # make sure to return a 0-d array if axis is supplied\n return np.full(out_dims, items, dtype=np.intp)\n\n # take care of the masked singleton\n if self is masked:\n return 0\n\n return (~m).sum(axis=axis, dtype=np.intp, **kwargs)\n\n def ravel(self, order='C'):\n \"\"\"\n Returns a 1D version of self, as a view.\n\n Parameters\n ----------\n order : {'C', 'F', 'A', 'K'}, optional\n The elements of `a` are read using this index order. 
'C' means to\n index the elements in C-like order, with the last axis index\n changing fastest, back to the first axis index changing slowest.\n 'F' means to index the elements in Fortran-like index order, with\n the first index changing fastest, and the last index changing\n slowest. Note that the 'C' and 'F' options take no account of the\n memory layout of the underlying array, and only refer to the order\n of axis indexing. 'A' means to read the elements in Fortran-like\n index order if `m` is Fortran *contiguous* in memory, C-like order\n otherwise. 'K' means to read the elements in the order they occur\n in memory, except for reversing the data when strides are negative.\n By default, 'C' index order is used.\n\n Returns\n -------\n MaskedArray\n Output view is of shape ``(self.size,)`` (or\n ``(np.ma.product(self.shape),)``).\n\n Examples\n --------\n >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)\n >>> x\n masked_array(\n data=[[1, --, 3],\n [--, 5, --],\n [7, --, 9]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n >>> x.ravel()\n masked_array(data=[1, --, 3, --, 5, --, 7, --, 9],\n mask=[False, True, False, True, False, True, False, True,\n False],\n fill_value=999999)\n\n \"\"\"\n r = ndarray.ravel(self._data, order=order).view(type(self))\n r._update_from(self)\n if self._mask is not nomask:\n r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape)\n else:\n r._mask = nomask\n return r\n\n\n def reshape(self, *s, **kwargs):\n \"\"\"\n Give a new shape to the array without changing its data.\n\n Returns a masked array containing the same data, but with a new shape.\n The result is a view on the original array; if this is not possible, a\n ValueError is raised.\n\n Parameters\n ----------\n shape : int or tuple of ints\n The new shape should be compatible with the original shape. If an\n integer is supplied, then the result will be a 1-D array of that\n length.\n order : {'C', 'F'}, optional\n Determines whether the array data should be viewed as in C\n (row-major) or FORTRAN (column-major) order.\n\n Returns\n -------\n reshaped_array : array\n A new view on the array.\n\n See Also\n --------\n reshape : Equivalent function in the masked array module.\n numpy.ndarray.reshape : Equivalent method on ndarray object.\n numpy.reshape : Equivalent function in the NumPy module.\n\n Notes\n -----\n The reshaping operation cannot guarantee that a copy will not be made,\n to modify the shape in place, use ``a.shape = s``\n\n Examples\n --------\n >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])\n >>> x\n masked_array(\n data=[[--, 2],\n [3, --]],\n mask=[[ True, False],\n [False, True]],\n fill_value=999999)\n >>> x = x.reshape((4,1))\n >>> x\n masked_array(\n data=[[--],\n [2],\n [3],\n [--]],\n mask=[[ True],\n [False],\n [False],\n [ True]],\n fill_value=999999)\n\n \"\"\"\n kwargs.update(order=kwargs.get('order', 'C'))\n result = self._data.reshape(*s, **kwargs).view(type(self))\n result._update_from(self)\n mask = self._mask\n if mask is not nomask:\n result._mask = mask.reshape(*s, **kwargs)\n return result\n\n def resize(self, newshape, refcheck=True, order=False):\n \"\"\"\n .. warning::\n\n This method does nothing, except raise a ValueError exception. A\n masked array does not own its data and therefore cannot safely be\n resized in place. 
Use the `numpy.ma.resize` function instead.\n\n This method is difficult to implement safely and may be deprecated in\n future releases of NumPy.\n\n \"\"\"\n # Note : the 'order' keyword looks broken, let's just drop it\n errmsg = \"A masked array does not own its data \"\\\n \"and therefore cannot be resized.\\n\" \\\n \"Use the numpy.ma.resize function instead.\"\n raise ValueError(errmsg)\n\n def put(self, indices, values, mode='raise'):\n \"\"\"\n Set storage-indexed locations to corresponding values.\n\n Sets self._data.flat[n] = values[n] for each n in indices.\n If `values` is shorter than `indices` then it will repeat.\n If `values` has some masked values, the initial mask is updated\n in consequence, else the corresponding values are unmasked.\n\n Parameters\n ----------\n indices : 1-D array_like\n Target indices, interpreted as integers.\n values : array_like\n Values to place in self._data copy at target indices.\n mode : {'raise', 'wrap', 'clip'}, optional\n Specifies how out-of-bounds indices will behave.\n 'raise' : raise an error.\n 'wrap' : wrap around.\n 'clip' : clip to the range.\n\n Notes\n -----\n `values` can be a scalar or length 1 array.\n\n Examples\n --------\n >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)\n >>> x\n masked_array(\n data=[[1, --, 3],\n [--, 5, --],\n [7, --, 9]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n >>> x.put([0,4,8],[10,20,30])\n >>> x\n masked_array(\n data=[[10, --, 3],\n [--, 20, --],\n [7, --, 30]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n\n >>> x.put(4,999)\n >>> x\n masked_array(\n data=[[10, --, 3],\n [--, 999, --],\n [7, --, 30]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n\n \"\"\"\n # Hard mask: Get rid of the values/indices that fall on masked data\n if self._hardmask and self._mask is not nomask:\n mask = self._mask[indices]\n indices = narray(indices, copy=False)\n values = narray(values, copy=False, subok=True)\n values.resize(indices.shape)\n indices = indices[~mask]\n values = values[~mask]\n\n self._data.put(indices, values, mode=mode)\n\n # short circuit if neither self nor values are masked\n if self._mask is nomask and getmask(values) is nomask:\n return\n\n m = getmaskarray(self)\n\n if getmask(values) is nomask:\n m.put(indices, False, mode=mode)\n else:\n m.put(indices, values._mask, mode=mode)\n m = make_mask(m, copy=False, shrink=True)\n self._mask = m\n return\n\n def ids(self):\n \"\"\"\n Return the addresses of the data and mask areas.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])\n >>> x.ids()\n (166670640, 166659832) # may vary\n\n If the array has no mask, the address of `nomask` is returned. 
This address\n is typically not close to the data in memory:\n\n >>> x = np.ma.array([1, 2, 3])\n >>> x.ids()\n (166691080, 3083169284) # may vary\n\n \"\"\"\n if self._mask is nomask:\n return (self.ctypes.data, id(nomask))\n return (self.ctypes.data, self._mask.ctypes.data)\n\n def iscontiguous(self):\n \"\"\"\n Return a boolean indicating whether the data is contiguous.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n >>> x = np.ma.array([1, 2, 3])\n >>> x.iscontiguous()\n True\n\n `iscontiguous` returns one of the flags of the masked array:\n\n >>> x.flags\n C_CONTIGUOUS : True\n F_CONTIGUOUS : True\n OWNDATA : False\n WRITEABLE : True\n ALIGNED : True\n WRITEBACKIFCOPY : False\n UPDATEIFCOPY : False\n\n \"\"\"\n return self.flags['CONTIGUOUS']\n\n def all(self, axis=None, out=None, keepdims=np._NoValue):\n \"\"\"\n Returns True if all elements evaluate to True.\n\n The output array is masked where all the values along the given axis\n are masked: if the output would have been a scalar and that all the\n values are masked, then the output is `masked`.\n\n Refer to `numpy.all` for full documentation.\n\n See Also\n --------\n numpy.ndarray.all : corresponding function for ndarrays\n numpy.all : equivalent function\n\n Examples\n --------\n >>> np.ma.array([1,2,3]).all()\n True\n >>> a = np.ma.array([1,2,3], mask=True)\n >>> (a.all() is np.ma.masked)\n True\n\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n mask = _check_mask_axis(self._mask, axis, **kwargs)\n if out is None:\n d = self.filled(True).all(axis=axis, **kwargs).view(type(self))\n if d.ndim:\n d.__setmask__(mask)\n elif mask:\n return masked\n return d\n self.filled(True).all(axis=axis, out=out, **kwargs)\n if isinstance(out, MaskedArray):\n if out.ndim or mask:\n out.__setmask__(mask)\n return out\n\n def any(self, axis=None, out=None, keepdims=np._NoValue):\n \"\"\"\n Returns True if any of the elements of `a` evaluate to True.\n\n Masked values are considered as False during computation.\n\n Refer to `numpy.any` for full documentation.\n\n See Also\n --------\n numpy.ndarray.any : corresponding function for ndarrays\n numpy.any : equivalent function\n\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n mask = _check_mask_axis(self._mask, axis, **kwargs)\n if out is None:\n d = self.filled(False).any(axis=axis, **kwargs).view(type(self))\n if d.ndim:\n d.__setmask__(mask)\n elif mask:\n d = masked\n return d\n self.filled(False).any(axis=axis, out=out, **kwargs)\n if isinstance(out, MaskedArray):\n if out.ndim or mask:\n out.__setmask__(mask)\n return out\n\n def nonzero(self):\n \"\"\"\n Return the indices of unmasked elements that are not zero.\n\n Returns a tuple of arrays, one for each dimension, containing the\n indices of the non-zero elements in that dimension. 
The corresponding\n non-zero values can be obtained with::\n\n a[a.nonzero()]\n\n To group the indices by element, rather than dimension, use\n instead::\n\n np.transpose(a.nonzero())\n\n The result of this is always a 2d array, with a row for each non-zero\n element.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n tuple_of_arrays : tuple\n Indices of elements that are non-zero.\n\n See Also\n --------\n numpy.nonzero :\n Function operating on ndarrays.\n flatnonzero :\n Return indices that are non-zero in the flattened version of the input\n array.\n numpy.ndarray.nonzero :\n Equivalent ndarray method.\n count_nonzero :\n Counts the number of non-zero elements in the input array.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> x = ma.array(np.eye(3))\n >>> x\n masked_array(\n data=[[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]],\n mask=False,\n fill_value=1e+20)\n >>> x.nonzero()\n (array([0, 1, 2]), array([0, 1, 2]))\n\n Masked elements are ignored.\n\n >>> x[1, 1] = ma.masked\n >>> x\n masked_array(\n data=[[1.0, 0.0, 0.0],\n [0.0, --, 0.0],\n [0.0, 0.0, 1.0]],\n mask=[[False, False, False],\n [False, True, False],\n [False, False, False]],\n fill_value=1e+20)\n >>> x.nonzero()\n (array([0, 2]), array([0, 2]))\n\n Indices can also be grouped by element.\n\n >>> np.transpose(x.nonzero())\n array([[0, 0],\n [2, 2]])\n\n A common use for ``nonzero`` is to find the indices of an array, where\n a condition is True. Given an array `a`, the condition `a` > 3 is a\n boolean array and since False is interpreted as 0, ma.nonzero(a > 3)\n yields the indices of the `a` where the condition is true.\n\n >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])\n >>> a > 3\n masked_array(\n data=[[False, False, False],\n [ True, True, True],\n [ True, True, True]],\n mask=False,\n fill_value=True)\n >>> ma.nonzero(a > 3)\n (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))\n\n The ``nonzero`` method of the condition array can also be called.\n\n >>> (a > 3).nonzero()\n (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))\n\n \"\"\"\n return narray(self.filled(0), copy=False).nonzero()\n\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n \"\"\"\n (this docstring should be overwritten)\n \"\"\"\n #!!!: implement out + test!\n m = self._mask\n if m is nomask:\n result = super().trace(offset=offset, axis1=axis1, axis2=axis2,\n out=out)\n return result.astype(dtype)\n else:\n D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)\n return D.astype(dtype).filled(0).sum(axis=-1, out=out)\n trace.__doc__ = ndarray.trace.__doc__\n\n def dot(self, b, out=None, strict=False):\n \"\"\"\n a.dot(b, out=None)\n\n Masked dot product of two arrays. Note that `out` and `strict` are\n located in different positions than in `ma.dot`. In order to\n maintain compatibility with the functional version, it is\n recommended that the optional arguments be treated as keyword only.\n At some point that may be mandatory.\n\n .. versionadded:: 1.10.0\n\n Parameters\n ----------\n b : masked_array_like\n Inputs array.\n out : masked_array, optional\n Output argument. This must have the exact kind that would be\n returned if it was not used. In particular, it must have the\n right type, must be C-contiguous, and its dtype must be the\n dtype that would be returned for `ma.dot(a,b)`. This is a\n performance feature. 
Therefore, if these conditions are not\n met, an exception is raised, instead of attempting to be\n flexible.\n strict : bool, optional\n Whether masked data are propagated (True) or set to 0 (False)\n for the computation. Default is False. Propagating the mask\n means that if a masked value appears in a row or column, the\n whole row or column is considered masked.\n\n .. versionadded:: 1.10.2\n\n See Also\n --------\n numpy.ma.dot : equivalent function\n\n \"\"\"\n return dot(self, b, out=out, strict=strict)\n\n def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):\n \"\"\"\n Return the sum of the array elements over the given axis.\n\n Masked elements are set to 0 internally.\n\n Refer to `numpy.sum` for full documentation.\n\n See Also\n --------\n numpy.ndarray.sum : corresponding function for ndarrays\n numpy.sum : equivalent function\n\n Examples\n --------\n >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)\n >>> x\n masked_array(\n data=[[1, --, 3],\n [--, 5, --],\n [7, --, 9]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n >>> x.sum()\n 25\n >>> x.sum(axis=1)\n masked_array(data=[4, 5, 16],\n mask=[False, False, False],\n fill_value=999999)\n >>> x.sum(axis=0)\n masked_array(data=[8, 5, 12],\n mask=[False, False, False],\n fill_value=999999)\n >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))\n <class 'numpy.int64'>\n\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n _mask = self._mask\n newmask = _check_mask_axis(_mask, axis, **kwargs)\n # No explicit output\n if out is None:\n result = self.filled(0).sum(axis, dtype=dtype, **kwargs)\n rndim = getattr(result, 'ndim', 0)\n if rndim:\n result = result.view(type(self))\n result.__setmask__(newmask)\n elif newmask:\n result = masked\n return result\n # Explicit output\n result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)\n if isinstance(out, MaskedArray):\n outmask = getmask(out)\n if outmask is nomask:\n outmask = out._mask = make_mask_none(out.shape)\n outmask.flat = newmask\n return out\n\n def cumsum(self, axis=None, dtype=None, out=None):\n \"\"\"\n Return the cumulative sum of the array elements over the given axis.\n\n Masked values are set to 0 internally during the computation.\n However, their position is saved, and the result will be masked at\n the same locations.\n\n Refer to `numpy.cumsum` for full documentation.\n\n Notes\n -----\n The mask is lost if `out` is not a valid :class:`ma.MaskedArray` !\n\n Arithmetic is modular when using integer types, and no error is\n raised on overflow.\n\n See Also\n --------\n numpy.ndarray.cumsum : corresponding function for ndarrays\n numpy.cumsum : equivalent function\n\n Examples\n --------\n >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])\n >>> marr.cumsum()\n masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],\n mask=[False, False, False, True, True, True, False, False,\n False, False],\n fill_value=999999)\n\n \"\"\"\n result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)\n if out is not None:\n if isinstance(out, MaskedArray):\n out.__setmask__(self.mask)\n return out\n result = result.view(type(self))\n result.__setmask__(self._mask)\n return result\n\n def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):\n \"\"\"\n Return the product of the array elements over the given axis.\n\n Masked elements are set to 1 internally for computation.\n\n Refer to `numpy.prod` for full documentation.\n\n 
Notes\n -----\n Arithmetic is modular when using integer types, and no error is raised\n on overflow.\n\n See Also\n --------\n numpy.ndarray.prod : corresponding function for ndarrays\n numpy.prod : equivalent function\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n _mask = self._mask\n newmask = _check_mask_axis(_mask, axis, **kwargs)\n # No explicit output\n if out is None:\n result = self.filled(1).prod(axis, dtype=dtype, **kwargs)\n rndim = getattr(result, 'ndim', 0)\n if rndim:\n result = result.view(type(self))\n result.__setmask__(newmask)\n elif newmask:\n result = masked\n return result\n # Explicit output\n result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)\n if isinstance(out, MaskedArray):\n outmask = getmask(out)\n if outmask is nomask:\n outmask = out._mask = make_mask_none(out.shape)\n outmask.flat = newmask\n return out\n product = prod\n\n def cumprod(self, axis=None, dtype=None, out=None):\n \"\"\"\n Return the cumulative product of the array elements over the given axis.\n\n Masked values are set to 1 internally during the computation.\n However, their position is saved, and the result will be masked at\n the same locations.\n\n Refer to `numpy.cumprod` for full documentation.\n\n Notes\n -----\n The mask is lost if `out` is not a valid MaskedArray !\n\n Arithmetic is modular when using integer types, and no error is\n raised on overflow.\n\n See Also\n --------\n numpy.ndarray.cumprod : corresponding function for ndarrays\n numpy.cumprod : equivalent function\n \"\"\"\n result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)\n if out is not None:\n if isinstance(out, MaskedArray):\n out.__setmask__(self._mask)\n return out\n result = result.view(type(self))\n result.__setmask__(self._mask)\n return result\n\n def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):\n \"\"\"\n Returns the average of the array elements along given axis.\n\n Masked entries are ignored, and result elements which are not\n finite will be masked.\n\n Refer to `numpy.mean` for full documentation.\n\n See Also\n --------\n numpy.ndarray.mean : corresponding function for ndarrays\n numpy.mean : Equivalent function\n numpy.ma.average : Weighted average.\n\n Examples\n --------\n >>> a = np.ma.array([1,2,3], mask=[False, False, True])\n >>> a\n masked_array(data=[1, 2, --],\n mask=[False, False, True],\n fill_value=999999)\n >>> a.mean()\n 1.5\n\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n if self._mask is nomask:\n result = super().mean(axis=axis, dtype=dtype, **kwargs)[()]\n else:\n dsum = self.sum(axis=axis, dtype=dtype, **kwargs)\n cnt = self.count(axis=axis, **kwargs)\n if cnt.shape == () and (cnt == 0):\n result = masked\n else:\n result = dsum * 1. / cnt\n if out is not None:\n out.flat = result\n if isinstance(out, MaskedArray):\n outmask = getmask(out)\n if outmask is nomask:\n outmask = out._mask = make_mask_none(out.shape)\n outmask.flat = getmask(result)\n return out\n return result\n\n def anom(self, axis=None, dtype=None):\n \"\"\"\n Compute the anomalies (deviations from the arithmetic mean)\n along the given axis.\n\n Returns an array of anomalies, with the same shape as the input and\n where the arithmetic mean is computed along the given axis.\n\n Parameters\n ----------\n axis : int, optional\n Axis over which the anomalies are taken.\n The default is to use the mean of the flattened array as reference.\n dtype : dtype, optional\n Type to use in computing the variance. 
For arrays of integer type\n the default is float32; for arrays of float types it is the same as\n the array type.\n\n See Also\n --------\n mean : Compute the mean of the array.\n\n Examples\n --------\n >>> a = np.ma.array([1,2,3])\n >>> a.anom()\n masked_array(data=[-1., 0., 1.],\n mask=False,\n fill_value=1e+20)\n\n \"\"\"\n m = self.mean(axis, dtype)\n if not axis:\n return self - m\n else:\n return self - expand_dims(m, axis)\n\n def var(self, axis=None, dtype=None, out=None, ddof=0,\n keepdims=np._NoValue):\n \"\"\"\n Returns the variance of the array elements along given axis.\n\n Masked entries are ignored, and result elements which are not\n finite will be masked.\n\n Refer to `numpy.var` for full documentation.\n\n See Also\n --------\n numpy.ndarray.var : corresponding function for ndarrays\n numpy.var : Equivalent function\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n # Easy case: nomask, business as usual\n if self._mask is nomask:\n ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof,\n **kwargs)[()]\n if out is not None:\n if isinstance(out, MaskedArray):\n out.__setmask__(nomask)\n return out\n return ret\n\n # Some data are masked, yay!\n cnt = self.count(axis=axis, **kwargs) - ddof\n danom = self - self.mean(axis, dtype, keepdims=True)\n if iscomplexobj(self):\n danom = umath.absolute(danom) ** 2\n else:\n danom *= danom\n dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))\n # Apply the mask if it's not a scalar\n if dvar.ndim:\n dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))\n dvar._update_from(self)\n elif getmask(dvar):\n # Make sure that masked is returned when the scalar is masked.\n dvar = masked\n if out is not None:\n if isinstance(out, MaskedArray):\n out.flat = 0\n out.__setmask__(True)\n elif out.dtype.kind in 'biu':\n errmsg = \"Masked data information would be lost in one or \"\\\n \"more location.\"\n raise MaskError(errmsg)\n else:\n out.flat = np.nan\n return out\n # In case with have an explicit output\n if out is not None:\n # Set the data\n out.flat = dvar\n # Set the mask if needed\n if isinstance(out, MaskedArray):\n out.__setmask__(dvar.mask)\n return out\n return dvar\n var.__doc__ = np.var.__doc__\n\n def std(self, axis=None, dtype=None, out=None, ddof=0,\n keepdims=np._NoValue):\n \"\"\"\n Returns the standard deviation of the array elements along given axis.\n\n Masked entries are ignored.\n\n Refer to `numpy.std` for full documentation.\n\n See Also\n --------\n numpy.ndarray.std : corresponding function for ndarrays\n numpy.std : Equivalent function\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n dvar = self.var(axis, dtype, out, ddof, **kwargs)\n if dvar is not masked:\n if out is not None:\n np.power(out, 0.5, out=out, casting='unsafe')\n return out\n dvar = sqrt(dvar)\n return dvar\n\n def round(self, decimals=0, out=None):\n \"\"\"\n Return each element rounded to the given number of decimals.\n\n Refer to `numpy.around` for full documentation.\n\n See Also\n --------\n numpy.ndarray.round : corresponding function for ndarrays\n numpy.around : equivalent function\n \"\"\"\n result = self._data.round(decimals=decimals, out=out).view(type(self))\n if result.ndim > 0:\n result._mask = self._mask\n result._update_from(self)\n elif self._mask:\n # Return masked when the scalar is masked\n result = masked\n # No explicit output: we're done\n if out is None:\n return result\n if isinstance(out, MaskedArray):\n out.__setmask__(self._mask)\n 
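# The explicit output already carries the propagated mask at this point; it is returned unchanged.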
return out\n\n def argsort(self, axis=np._NoValue, kind=None, order=None,\n endwith=True, fill_value=None):\n \"\"\"\n Return an ndarray of indices that sort the array along the\n specified axis. Masked values are filled beforehand to\n `fill_value`.\n\n Parameters\n ----------\n axis : int, optional\n Axis along which to sort. If None, the default, the flattened array\n is used.\n\n .. versionchanged:: 1.13.0\n Previously, the default was documented to be -1, but that was\n in error. At some future date, the default will change to -1, as\n originally intended.\n Until then, the axis should be given explicitly when\n ``arr.ndim > 1``, to avoid a FutureWarning.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional\n The sorting algorithm used.\n order : list, optional\n When `a` is an array with fields defined, this argument specifies\n which fields to compare first, second, etc. Not all fields need be\n specified.\n endwith : {True, False}, optional\n Whether missing values (if any) should be treated as the largest values\n (True) or the smallest values (False)\n When the array contains unmasked values at the same extremes of the\n datatype, the ordering of these values and the masked values is\n undefined.\n fill_value : scalar or None, optional\n Value used internally for the masked values.\n If ``fill_value`` is not None, it supersedes ``endwith``.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sort `a` along the specified axis.\n In other words, ``a[index_array]`` yields a sorted `a`.\n\n See Also\n --------\n ma.MaskedArray.sort : Describes sorting algorithms used.\n lexsort : Indirect stable sort with multiple keys.\n numpy.ndarray.sort : Inplace sort.\n\n Notes\n -----\n See `sort` for notes on the different sorting algorithms.\n\n Examples\n --------\n >>> a = np.ma.array([3,2,1], mask=[False, False, True])\n >>> a\n masked_array(data=[3, 2, --],\n mask=[False, False, True],\n fill_value=999999)\n >>> a.argsort()\n array([1, 0, 2])\n\n \"\"\"\n\n # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default\n if axis is np._NoValue:\n axis = _deprecate_argsort_axis(self)\n\n if fill_value is None:\n if endwith:\n # nan > inf\n if np.issubdtype(self.dtype, np.floating):\n fill_value = np.nan\n else:\n fill_value = minimum_fill_value(self)\n else:\n fill_value = maximum_fill_value(self)\n\n filled = self.filled(fill_value)\n return filled.argsort(axis=axis, kind=kind, order=order)\n\n def argmin(self, axis=None, fill_value=None, out=None, *,\n keepdims=np._NoValue):\n \"\"\"\n Return array of indices to the minimum values along the given axis.\n\n Parameters\n ----------\n axis : {None, integer}\n If None, the index is into the flattened array, otherwise along\n the specified axis\n fill_value : scalar or None, optional\n Value used to fill in the masked values. If None, the output of\n minimum_fill_value(self._data) is used instead.\n out : {None, array}, optional\n Array into which the result can be placed. Its type is preserved\n and it must be of the right shape to hold the output.\n\n Returns\n -------\n ndarray or scalar\n If multi-dimension input, returns a new ndarray of indices to the\n minimum values along the given axis. 
Otherwise, returns a scalar\n of index to the minimum values along the given axis.\n\n Examples\n --------\n >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])\n >>> x.shape = (2,2)\n >>> x\n masked_array(\n data=[[--, --],\n [2, 3]],\n mask=[[ True, True],\n [False, False]],\n fill_value=999999)\n >>> x.argmin(axis=0, fill_value=-1)\n array([0, 0])\n >>> x.argmin(axis=0, fill_value=9)\n array([1, 1])\n\n \"\"\"\n if fill_value is None:\n fill_value = minimum_fill_value(self)\n d = self.filled(fill_value).view(ndarray)\n keepdims = False if keepdims is np._NoValue else bool(keepdims)\n return d.argmin(axis, out=out, keepdims=keepdims)\n\n def argmax(self, axis=None, fill_value=None, out=None, *,\n keepdims=np._NoValue):\n \"\"\"\n Returns array of indices of the maximum values along the given axis.\n Masked values are treated as if they had the value fill_value.\n\n Parameters\n ----------\n axis : {None, integer}\n If None, the index is into the flattened array, otherwise along\n the specified axis\n fill_value : scalar or None, optional\n Value used to fill in the masked values. If None, the output of\n maximum_fill_value(self._data) is used instead.\n out : {None, array}, optional\n Array into which the result can be placed. Its type is preserved\n and it must be of the right shape to hold the output.\n\n Returns\n -------\n index_array : {integer_array}\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3)\n >>> a.argmax()\n 5\n >>> a.argmax(0)\n array([1, 1, 1])\n >>> a.argmax(1)\n array([2, 2])\n\n \"\"\"\n if fill_value is None:\n fill_value = maximum_fill_value(self._data)\n d = self.filled(fill_value).view(ndarray)\n keepdims = False if keepdims is np._NoValue else bool(keepdims)\n return d.argmax(axis, out=out, keepdims=keepdims)\n\n def sort(self, axis=-1, kind=None, order=None,\n endwith=True, fill_value=None):\n \"\"\"\n Sort the array, in-place\n\n Parameters\n ----------\n a : array_like\n Array to be sorted.\n axis : int, optional\n Axis along which to sort. If None, the array is flattened before\n sorting. The default is -1, which sorts along the last axis.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional\n The sorting algorithm used.\n order : list, optional\n When `a` is a structured array, this argument specifies which fields\n to compare first, second, and so on. 
This list does not need to\n include all of the fields.\n endwith : {True, False}, optional\n Whether missing values (if any) should be treated as the largest values\n (True) or the smallest values (False)\n When the array contains unmasked values sorting at the same extremes of the\n datatype, the ordering of these values and the masked values is\n undefined.\n fill_value : scalar or None, optional\n Value used internally for the masked values.\n If ``fill_value`` is not None, it supersedes ``endwith``.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n See Also\n --------\n numpy.ndarray.sort : Method to sort an array in-place.\n argsort : Indirect sort.\n lexsort : Indirect stable sort on multiple keys.\n searchsorted : Find elements in a sorted array.\n\n Notes\n -----\n See ``sort`` for notes on the different sorting algorithms.\n\n Examples\n --------\n >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])\n >>> # Default\n >>> a.sort()\n >>> a\n masked_array(data=[1, 3, 5, --, --],\n mask=[False, False, False, True, True],\n fill_value=999999)\n\n >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])\n >>> # Put missing values in the front\n >>> a.sort(endwith=False)\n >>> a\n masked_array(data=[--, --, 1, 3, 5],\n mask=[ True, True, False, False, False],\n fill_value=999999)\n\n >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])\n >>> # fill_value takes over endwith\n >>> a.sort(endwith=False, fill_value=3)\n >>> a\n masked_array(data=[1, --, --, 3, 5],\n mask=[False, True, True, False, False],\n fill_value=999999)\n\n \"\"\"\n if self._mask is nomask:\n ndarray.sort(self, axis=axis, kind=kind, order=order)\n return\n\n if self is masked:\n return\n\n sidx = self.argsort(axis=axis, kind=kind, order=order,\n fill_value=fill_value, endwith=endwith)\n\n self[...] = np.take_along_axis(self, sidx, axis=axis)\n\n def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):\n \"\"\"\n Return the minimum along a given axis.\n\n Parameters\n ----------\n axis : {None, int}, optional\n Axis along which to operate. By default, ``axis`` is None and the\n flattened input is used.\n out : array_like, optional\n Alternative output array in which to place the result. Must be of\n the same shape and buffer length as the expected output.\n fill_value : scalar or None, optional\n Value used to fill in the masked values.\n If None, use the output of `minimum_fill_value`.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. 
With this option,\n the result will broadcast correctly against the array.\n\n Returns\n -------\n amin : array_like\n New array holding the result.\n If ``out`` was specified, ``out`` is returned.\n\n See Also\n --------\n ma.minimum_fill_value\n Returns the minimum filling value for a given datatype.\n\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n _mask = self._mask\n newmask = _check_mask_axis(_mask, axis, **kwargs)\n if fill_value is None:\n fill_value = minimum_fill_value(self)\n # No explicit output\n if out is None:\n result = self.filled(fill_value).min(\n axis=axis, out=out, **kwargs).view(type(self))\n if result.ndim:\n # Set the mask\n result.__setmask__(newmask)\n # Get rid of Infs\n if newmask.ndim:\n np.copyto(result, result.fill_value, where=newmask)\n elif newmask:\n result = masked\n return result\n # Explicit output\n result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)\n if isinstance(out, MaskedArray):\n outmask = getmask(out)\n if outmask is nomask:\n outmask = out._mask = make_mask_none(out.shape)\n outmask.flat = newmask\n else:\n if out.dtype.kind in 'biu':\n errmsg = \"Masked data information would be lost in one or more\"\\\n \" location.\"\n raise MaskError(errmsg)\n np.copyto(out, np.nan, where=newmask)\n return out\n\n # unique to masked arrays\n def mini(self, axis=None):\n \"\"\"\n Return the array minimum along the specified axis.\n\n .. deprecated:: 1.13.0\n This function is identical to both:\n\n * ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)``\n * ``np.ma.minimum.reduce(self, axis=axis)``\n\n Typically though, ``self.min(axis=axis)`` is sufficient.\n\n Parameters\n ----------\n axis : int, optional\n The axis along which to find the minima. Default is None, in which case\n the minimum value in the whole array is returned.\n\n Returns\n -------\n min : scalar or MaskedArray\n If `axis` is None, the result is a scalar. Otherwise, if `axis` is\n given and the array is at least 2-D, the result is a masked array with\n dimension one smaller than the array on which `mini` is called.\n\n Examples\n --------\n >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)\n >>> x\n masked_array(\n data=[[0, --],\n [2, 3],\n [4, --]],\n mask=[[False, True],\n [False, False],\n [False, True]],\n fill_value=999999)\n >>> x.mini()\n masked_array(data=0,\n mask=False,\n fill_value=999999)\n >>> x.mini(axis=0)\n masked_array(data=[0, 3],\n mask=[False, False],\n fill_value=999999)\n >>> x.mini(axis=1)\n masked_array(data=[0, 2, 4],\n mask=[False, False, False],\n fill_value=999999)\n\n There is a small difference between `mini` and `min`:\n\n >>> x[:,1].mini(axis=0)\n masked_array(data=3,\n mask=False,\n fill_value=999999)\n >>> x[:,1].min(axis=0)\n 3\n \"\"\"\n\n # 2016-04-13, 1.13.0, gh-8764\n warnings.warn(\n \"`mini` is deprecated; use the `min` method or \"\n \"`np.ma.minimum.reduce instead.\",\n DeprecationWarning, stacklevel=2)\n return minimum.reduce(self, axis)\n\n def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):\n \"\"\"\n Return the maximum along a given axis.\n\n Parameters\n ----------\n axis : {None, int}, optional\n Axis along which to operate. By default, ``axis`` is None and the\n flattened input is used.\n out : array_like, optional\n Alternative output array in which to place the result. 
Must\n be of the same shape and buffer length as the expected output.\n fill_value : scalar or None, optional\n Value used to fill in the masked values.\n If None, use the output of maximum_fill_value().\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the array.\n\n Returns\n -------\n amax : array_like\n New array holding the result.\n If ``out`` was specified, ``out`` is returned.\n\n See Also\n --------\n ma.maximum_fill_value\n Returns the maximum filling value for a given datatype.\n\n \"\"\"\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n _mask = self._mask\n newmask = _check_mask_axis(_mask, axis, **kwargs)\n if fill_value is None:\n fill_value = maximum_fill_value(self)\n # No explicit output\n if out is None:\n result = self.filled(fill_value).max(\n axis=axis, out=out, **kwargs).view(type(self))\n if result.ndim:\n # Set the mask\n result.__setmask__(newmask)\n # Get rid of Infs\n if newmask.ndim:\n np.copyto(result, result.fill_value, where=newmask)\n elif newmask:\n result = masked\n return result\n # Explicit output\n result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)\n if isinstance(out, MaskedArray):\n outmask = getmask(out)\n if outmask is nomask:\n outmask = out._mask = make_mask_none(out.shape)\n outmask.flat = newmask\n else:\n\n if out.dtype.kind in 'biu':\n errmsg = \"Masked data information would be lost in one or more\"\\\n \" location.\"\n raise MaskError(errmsg)\n np.copyto(out, np.nan, where=newmask)\n return out\n\n def ptp(self, axis=None, out=None, fill_value=None, keepdims=False):\n \"\"\"\n Return (maximum - minimum) along the given dimension\n (i.e. peak-to-peak value).\n\n .. warning::\n `ptp` preserves the data type of the array. This means the\n return value for an input of signed integers with n bits\n (e.g. `np.int8`, `np.int16`, etc) is also a signed integer\n with n bits. In that case, peak-to-peak values greater than\n ``2**(n-1)-1`` will be returned as negative values. An example\n with a work-around is shown below.\n\n Parameters\n ----------\n axis : {None, int}, optional\n Axis along which to find the peaks. If None (default) the\n flattened array is used.\n out : {None, array_like}, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output\n but the type will be cast if necessary.\n fill_value : scalar or None, optional\n Value used to fill in the masked values.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the array.\n\n Returns\n -------\n ptp : ndarray.\n A new array holding the result, unless ``out`` was\n specified, in which case a reference to ``out`` is returned.\n\n Examples\n --------\n >>> x = np.ma.MaskedArray([[4, 9, 2, 10],\n ... [6, 9, 7, 12]])\n\n >>> x.ptp(axis=1)\n masked_array(data=[8, 6],\n mask=False,\n fill_value=999999)\n\n >>> x.ptp(axis=0)\n masked_array(data=[2, 0, 5, 2],\n mask=False,\n fill_value=999999)\n\n >>> x.ptp()\n 10\n\n This example shows that a negative value can be returned when\n the input is an array of signed integers.\n\n >>> y = np.ma.MaskedArray([[1, 127],\n ... [0, 127],\n ... [-1, 127],\n ... 
[-2, 127]], dtype=np.int8)\n >>> y.ptp(axis=1)\n masked_array(data=[ 126, 127, -128, -127],\n mask=False,\n fill_value=999999,\n dtype=int8)\n\n A work-around is to use the `view()` method to view the result as\n unsigned integers with the same bit width:\n\n >>> y.ptp(axis=1).view(np.uint8)\n masked_array(data=[126, 127, 128, 129],\n mask=False,\n fill_value=999999,\n dtype=uint8)\n \"\"\"\n if out is None:\n result = self.max(axis=axis, fill_value=fill_value,\n keepdims=keepdims)\n result -= self.min(axis=axis, fill_value=fill_value,\n keepdims=keepdims)\n return result\n out.flat = self.max(axis=axis, out=out, fill_value=fill_value,\n keepdims=keepdims)\n min_value = self.min(axis=axis, fill_value=fill_value,\n keepdims=keepdims)\n np.subtract(out, min_value, out=out, casting='unsafe')\n return out\n\n def partition(self, *args, **kwargs):\n warnings.warn(\"Warning: 'partition' will ignore the 'mask' \"\n f\"of the {self.__class__.__name__}.\",\n stacklevel=2)\n return super().partition(*args, **kwargs)\n\n def argpartition(self, *args, **kwargs):\n warnings.warn(\"Warning: 'argpartition' will ignore the 'mask' \"\n f\"of the {self.__class__.__name__}.\",\n stacklevel=2)\n return super().argpartition(*args, **kwargs)\n\n def take(self, indices, axis=None, out=None, mode='raise'):\n \"\"\"\n \"\"\"\n (_data, _mask) = (self._data, self._mask)\n cls = type(self)\n # Make sure the indices are not masked\n maskindices = getmask(indices)\n if maskindices is not nomask:\n indices = indices.filled(0)\n # Get the data, promoting scalars to 0d arrays with [...] so that\n # .view works correctly\n if out is None:\n out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)\n else:\n np.take(_data, indices, axis=axis, mode=mode, out=out)\n # Get the mask\n if isinstance(out, MaskedArray):\n if _mask is nomask:\n outmask = maskindices\n else:\n outmask = _mask.take(indices, axis=axis, mode=mode)\n outmask |= maskindices\n out.__setmask__(outmask)\n # demote 0d arrays back to scalars, for consistency with ndarray.take\n return out[()]\n\n # Array methods\n copy = _arraymethod('copy')\n diagonal = _arraymethod('diagonal')\n flatten = _arraymethod('flatten')\n repeat = _arraymethod('repeat')\n squeeze = _arraymethod('squeeze')\n swapaxes = _arraymethod('swapaxes')\n T = property(fget=lambda self: self.transpose())\n transpose = _arraymethod('transpose')\n\n def tolist(self, fill_value=None):\n \"\"\"\n Return the data portion of the masked array as a hierarchical Python list.\n\n Data items are converted to the nearest compatible Python type.\n Masked values are converted to `fill_value`. If `fill_value` is None,\n the corresponding entries in the output list will be ``None``.\n\n Parameters\n ----------\n fill_value : scalar, optional\n The value to use for invalid entries. Default is None.\n\n Returns\n -------\n result : list\n The Python list representation of the masked array.\n\n Examples\n --------\n >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)\n >>> x.tolist()\n [[1, None, 3], [None, 5, None], [7, None, 9]]\n >>> x.tolist(-999)\n [[1, -999, 3], [-999, 5, -999], [7, -999, 9]]\n\n \"\"\"\n _mask = self._mask\n # No mask ? 
Just return .data.tolist ?\n if _mask is nomask:\n return self._data.tolist()\n # Explicit fill_value: fill the array and get the list\n if fill_value is not None:\n return self.filled(fill_value).tolist()\n # Structured array.\n names = self.dtype.names\n if names:\n result = self._data.astype([(_, object) for _ in names])\n for n in names:\n result[n][_mask[n]] = None\n return result.tolist()\n # Standard arrays.\n if _mask is nomask:\n return [None]\n # Set temps to save time when dealing w/ marrays.\n inishape = self.shape\n result = np.array(self._data.ravel(), dtype=object)\n result[_mask.ravel()] = None\n result.shape = inishape\n return result.tolist()\n\n def tostring(self, fill_value=None, order='C'):\n r\"\"\"\n A compatibility alias for `tobytes`, with exactly the same behavior.\n\n Despite its name, it returns `bytes` not `str`\\ s.\n\n .. deprecated:: 1.19.0\n \"\"\"\n # 2020-03-30, Numpy 1.19.0\n warnings.warn(\n \"tostring() is deprecated. Use tobytes() instead.\",\n DeprecationWarning, stacklevel=2)\n\n return self.tobytes(fill_value, order=order)\n\n def tobytes(self, fill_value=None, order='C'):\n \"\"\"\n Return the array data as a string containing the raw bytes in the array.\n\n The array is filled with a fill value before the string conversion.\n\n .. versionadded:: 1.9.0\n\n Parameters\n ----------\n fill_value : scalar, optional\n Value used to fill in the masked values. Default is None, in which\n case `MaskedArray.fill_value` is used.\n order : {'C','F','A'}, optional\n Order of the data item in the copy. Default is 'C'.\n\n - 'C' -- C order (row major).\n - 'F' -- Fortran order (column major).\n - 'A' -- Any, current order of array.\n - None -- Same as 'A'.\n\n See Also\n --------\n numpy.ndarray.tobytes\n tolist, tofile\n\n Notes\n -----\n As for `ndarray.tobytes`, information about the shape, dtype, etc.,\n but also about `fill_value`, will be lost.\n\n Examples\n --------\n >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])\n >>> x.tobytes()\n b'\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00?B\\\\x0f\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00?B\\\\x0f\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x04\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00'\n\n \"\"\"\n return self.filled(fill_value).tobytes(order=order)\n\n def tofile(self, fid, sep=\"\", format=\"%s\"):\n \"\"\"\n Save a masked array to a file in binary format.\n\n .. warning::\n This function is not implemented yet.\n\n Raises\n ------\n NotImplementedError\n When `tofile` is called.\n\n \"\"\"\n raise NotImplementedError(\"MaskedArray.tofile() not implemented yet.\")\n\n def toflex(self):\n \"\"\"\n Transforms a masked array into a flexible-type array.\n\n The flexible type array that is returned will have two fields:\n\n * the ``_data`` field stores the ``_data`` part of the array.\n * the ``_mask`` field stores the ``_mask`` part of the array.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n record : ndarray\n A new flexible-type `ndarray` with two fields: the first element\n containing a value, the second element containing the corresponding\n mask boolean. The returned record shape matches self.shape.\n\n Notes\n -----\n A side-effect of transforming a masked array into a flexible `ndarray` is\n that meta information (``fill_value``, ...) 
will be lost.\n\n Examples\n --------\n >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)\n >>> x\n masked_array(\n data=[[1, --, 3],\n [--, 5, --],\n [7, --, 9]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n >>> x.toflex()\n array([[(1, False), (2, True), (3, False)],\n [(4, True), (5, False), (6, True)],\n [(7, False), (8, True), (9, False)]],\n dtype=[('_data', '<i8'), ('_mask', '?')])\n\n \"\"\"\n # Get the basic dtype.\n ddtype = self.dtype\n # Make sure we have a mask\n _mask = self._mask\n if _mask is None:\n _mask = make_mask_none(self.shape, ddtype)\n # And get its dtype\n mdtype = self._mask.dtype\n\n record = np.ndarray(shape=self.shape,\n dtype=[('_data', ddtype), ('_mask', mdtype)])\n record['_data'] = self._data\n record['_mask'] = self._mask\n return record\n torecords = toflex\n\n # Pickling\n def __getstate__(self):\n \"\"\"Return the internal state of the masked array, for pickling\n purposes.\n\n \"\"\"\n cf = 'CF'[self.flags.fnc]\n data_state = super().__reduce__()[2]\n return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)\n\n def __setstate__(self, state):\n \"\"\"Restore the internal state of the masked array, for\n pickling purposes. ``state`` is typically the output of the\n ``__getstate__`` output, and is a 5-tuple:\n\n - class name\n - a tuple giving the shape of the data\n - a typecode for the data\n - a binary string for the data\n - a binary string for the mask.\n\n \"\"\"\n (_, shp, typ, isf, raw, msk, flv) = state\n super().__setstate__((shp, typ, isf, raw))\n self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))\n self.fill_value = flv\n\n def __reduce__(self):\n \"\"\"Return a 3-tuple for pickling a MaskedArray.\n\n \"\"\"\n return (_mareconstruct,\n (self.__class__, self._baseclass, (0,), 'b',),\n self.__getstate__())\n\n def __deepcopy__(self, memo=None):\n from copy import deepcopy\n copied = MaskedArray.__new__(type(self), self, copy=True)\n if memo is None:\n memo = {}\n memo[id(self)] = copied\n for (k, v) in self.__dict__.items():\n copied.__dict__[k] = deepcopy(v, memo)\n return copied\n\n\ndef _mareconstruct(subtype, baseclass, baseshape, basetype,):\n \"\"\"Internal function that builds a new MaskedArray from the\n information stored in a pickle.\n\n \"\"\"\n _data = ndarray.__new__(baseclass, baseshape, basetype)\n _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))\n return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)\n\n\nclass mvoid(MaskedArray):\n \"\"\"\n Fake a 'void' object to use for masked array with structured dtypes.\n \"\"\"\n\n def __new__(self, data, mask=nomask, dtype=None, fill_value=None,\n hardmask=False, copy=False, subok=True):\n _data = np.array(data, copy=copy, subok=subok, dtype=dtype)\n _data = _data.view(self)\n _data._hardmask = hardmask\n if mask is not nomask:\n if isinstance(mask, np.void):\n _data._mask = mask\n else:\n try:\n # Mask is already a 0D array\n _data._mask = np.void(mask)\n except TypeError:\n # Transform the mask to a void\n mdtype = make_mask_descr(dtype)\n _data._mask = np.array(mask, dtype=mdtype)[()]\n if fill_value is not None:\n _data.fill_value = fill_value\n return _data\n\n @property\n def _data(self):\n # Make sure that the _data part is a np.void\n return super()._data[()]\n\n def __getitem__(self, indx):\n \"\"\"\n Get the index.\n\n \"\"\"\n m = self._mask\n if isinstance(m[indx], ndarray):\n # Can happen when indx is a multi-dimensional field:\n # A = 
ma.masked_array(data=[([0,1],)], mask=[([True,\n # False],)], dtype=[(\"A\", \">i2\", (2,))])\n # x = A[0]; y = x[\"A\"]; then y.mask[\"A\"].size==2\n # and we can not say masked/unmasked.\n # The result is no longer mvoid!\n # See also issue #6724.\n return masked_array(\n data=self._data[indx], mask=m[indx],\n fill_value=self._fill_value[indx],\n hard_mask=self._hardmask)\n if m is not nomask and m[indx]:\n return masked\n return self._data[indx]\n\n def __setitem__(self, indx, value):\n self._data[indx] = value\n if self._hardmask:\n self._mask[indx] |= getattr(value, \"_mask\", False)\n else:\n self._mask[indx] = getattr(value, \"_mask\", False)\n\n def __str__(self):\n m = self._mask\n if m is nomask:\n return str(self._data)\n\n rdtype = _replace_dtype_fields(self._data.dtype, \"O\")\n data_arr = super()._data\n res = data_arr.astype(rdtype)\n _recursive_printoption(res, self._mask, masked_print_option)\n return str(res)\n\n __repr__ = __str__\n\n def __iter__(self):\n \"Defines an iterator for mvoid\"\n (_data, _mask) = (self._data, self._mask)\n if _mask is nomask:\n yield from _data\n else:\n for (d, m) in zip(_data, _mask):\n if m:\n yield masked\n else:\n yield d\n\n def __len__(self):\n return self._data.__len__()\n\n def filled(self, fill_value=None):\n \"\"\"\n Return a copy with masked fields filled with a given value.\n\n Parameters\n ----------\n fill_value : array_like, optional\n The value to use for invalid entries. Can be scalar or\n non-scalar. If latter is the case, the filled array should\n be broadcastable over input array. Default is None, in\n which case the `fill_value` attribute is used instead.\n\n Returns\n -------\n filled_void\n A `np.void` object\n\n See Also\n --------\n MaskedArray.filled\n\n \"\"\"\n return asarray(self).filled(fill_value)[()]\n\n def tolist(self):\n \"\"\"\n Transforms the mvoid object into a tuple.\n\n Masked fields are replaced by None.\n\n Returns\n -------\n returned_tuple\n Tuple of fields\n \"\"\"\n _mask = self._mask\n if _mask is nomask:\n return self._data.tolist()\n result = []\n for (d, m) in zip(self._data, self._mask):\n if m:\n result.append(None)\n else:\n # .item() makes sure we return a standard Python object\n result.append(d.item())\n return tuple(result)\n\n\n##############################################################################\n# Shortcuts #\n##############################################################################\n\n\ndef isMaskedArray(x):\n \"\"\"\n Test whether input is an instance of MaskedArray.\n\n This function returns True if `x` is an instance of MaskedArray\n and returns False otherwise. 
Any object is accepted as input.\n\n Parameters\n ----------\n x : object\n Object to test.\n\n Returns\n -------\n result : bool\n True if `x` is a MaskedArray.\n\n See Also\n --------\n isMA : Alias to isMaskedArray.\n isarray : Alias to isMaskedArray.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.eye(3, 3)\n >>> a\n array([[ 1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 1.]])\n >>> m = ma.masked_values(a, 0)\n >>> m\n masked_array(\n data=[[1.0, --, --],\n [--, 1.0, --],\n [--, --, 1.0]],\n mask=[[False, True, True],\n [ True, False, True],\n [ True, True, False]],\n fill_value=0.0)\n >>> ma.isMaskedArray(a)\n False\n >>> ma.isMaskedArray(m)\n True\n >>> ma.isMaskedArray([0, 1, 2])\n False\n\n \"\"\"\n return isinstance(x, MaskedArray)\n\n\nisarray = isMaskedArray\nisMA = isMaskedArray # backward compatibility\n\n\nclass MaskedConstant(MaskedArray):\n # the lone np.ma.masked instance\n __singleton = None\n\n @classmethod\n def __has_singleton(cls):\n # second case ensures `cls.__singleton` is not just a view on the\n # superclass singleton\n return cls.__singleton is not None and type(cls.__singleton) is cls\n\n def __new__(cls):\n if not cls.__has_singleton():\n # We define the masked singleton as a float for higher precedence.\n # Note that it can be tricky sometimes w/ type comparison\n data = np.array(0.)\n mask = np.array(True)\n\n # prevent any modifications\n data.flags.writeable = False\n mask.flags.writeable = False\n\n # don't fall back on MaskedArray.__new__(MaskedConstant), since\n # that might confuse it - this way, the construction is entirely\n # within our control\n cls.__singleton = MaskedArray(data, mask=mask).view(cls)\n\n return cls.__singleton\n\n def __array_finalize__(self, obj):\n if not self.__has_singleton():\n # this handles the `.view` in __new__, which we want to copy across\n # properties normally\n return super().__array_finalize__(obj)\n elif self is self.__singleton:\n # not clear how this can happen, play it safe\n pass\n else:\n # everywhere else, we want to downcast to MaskedArray, to prevent a\n # duplicate maskedconstant.\n self.__class__ = MaskedArray\n MaskedArray.__array_finalize__(self, obj)\n\n def __array_prepare__(self, obj, context=None):\n return self.view(MaskedArray).__array_prepare__(obj, context)\n\n def __array_wrap__(self, obj, context=None):\n return self.view(MaskedArray).__array_wrap__(obj, context)\n\n def __str__(self):\n return str(masked_print_option._display)\n\n def __repr__(self):\n if self is MaskedConstant.__singleton:\n return 'masked'\n else:\n # it's a subclass, or something is wrong, make it obvious\n return object.__repr__(self)\n\n def __format__(self, format_spec):\n # Replace ndarray.__format__ with the default, which supports no format characters.\n # Supporting format characters is unwise here, because we do not know what type\n # the user was expecting - better to not guess.\n try:\n return object.__format__(self, format_spec)\n except TypeError:\n # 2020-03-23, NumPy 1.19.0\n warnings.warn(\n \"Format strings passed to MaskedConstant are ignored, but in future may \"\n \"error or produce different behavior\",\n FutureWarning, stacklevel=2\n )\n return object.__format__(self, \"\")\n\n def __reduce__(self):\n \"\"\"Override of MaskedArray's __reduce__.\n \"\"\"\n return (self.__class__, ())\n\n # inplace operations have no effect. 
We have to override them to avoid\n # trying to modify the readonly data and mask arrays\n def __iop__(self, other):\n return self\n __iadd__ = \\\n __isub__ = \\\n __imul__ = \\\n __ifloordiv__ = \\\n __itruediv__ = \\\n __ipow__ = \\\n __iop__\n del __iop__ # don't leave this around\n\n def copy(self, *args, **kwargs):\n \"\"\" Copy is a no-op on the maskedconstant, as it is a scalar \"\"\"\n # maskedconstant is a scalar, so copy doesn't need to copy. There's\n # precedent for this with `np.bool_` scalars.\n return self\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, memo):\n return self\n\n def __setattr__(self, attr, value):\n if not self.__has_singleton():\n # allow the singleton to be initialized\n return super().__setattr__(attr, value)\n elif self is self.__singleton:\n raise AttributeError(\n f\"attributes of {self!r} are not writeable\")\n else:\n # duplicate instance - we can end up here from __array_finalize__,\n # where we set the __class__ attribute\n return super().__setattr__(attr, value)\n\n\nmasked = masked_singleton = MaskedConstant()\nmasked_array = MaskedArray\n\n\ndef array(data, dtype=None, copy=False, order=None,\n mask=nomask, fill_value=None, keep_mask=True,\n hard_mask=False, shrink=True, subok=True, ndmin=0):\n \"\"\"\n Shortcut to MaskedArray.\n\n The options are in a different order for convenience and backwards\n compatibility.\n\n \"\"\"\n return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,\n subok=subok, keep_mask=keep_mask,\n hard_mask=hard_mask, fill_value=fill_value,\n ndmin=ndmin, shrink=shrink, order=order)\narray.__doc__ = masked_array.__doc__\n\n\ndef is_masked(x):\n \"\"\"\n Determine whether input has masked values.\n\n Accepts any object as input, but always returns False unless the\n input is a MaskedArray containing masked values.\n\n Parameters\n ----------\n x : array_like\n Array to check for masked values.\n\n Returns\n -------\n result : bool\n True if `x` is a MaskedArray with masked values, False otherwise.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)\n >>> x\n masked_array(data=[--, 1, --, 2, 3],\n mask=[ True, False, True, False, False],\n fill_value=0)\n >>> ma.is_masked(x)\n True\n >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)\n >>> x\n masked_array(data=[0, 1, 0, 2, 3],\n mask=False,\n fill_value=42)\n >>> ma.is_masked(x)\n False\n\n Always returns False if `x` isn't a MaskedArray.\n\n >>> x = [False, True, False]\n >>> ma.is_masked(x)\n False\n >>> x = 'a string'\n >>> ma.is_masked(x)\n False\n\n \"\"\"\n m = getmask(x)\n if m is nomask:\n return False\n elif m.any():\n return True\n return False\n\n\n##############################################################################\n# Extrema functions #\n##############################################################################\n\n\nclass _extrema_operation(_MaskedUFunc):\n \"\"\"\n Generic class for maximum/minimum functions.\n\n .. note::\n This is the base class for `_maximum_operation` and\n `_minimum_operation`.\n\n \"\"\"\n def __init__(self, ufunc, compare, fill_value):\n super().__init__(ufunc)\n self.compare = compare\n self.fill_value_func = fill_value\n\n def __call__(self, a, b=None):\n \"Executes the call behavior.\"\n if b is None:\n # 2016-04-13, 1.13.0\n warnings.warn(\n f\"Single-argument form of np.ma.{self.__name__} is deprecated. 
Use \"\n f\"np.ma.{self.__name__}.reduce instead.\",\n DeprecationWarning, stacklevel=2)\n return self.reduce(a)\n return where(self.compare(a, b), a, b)\n\n def reduce(self, target, axis=np._NoValue):\n \"Reduce target along the given axis.\"\n target = narray(target, copy=False, subok=True)\n m = getmask(target)\n\n if axis is np._NoValue and target.ndim > 1:\n # 2017-05-06, Numpy 1.13.0: warn on axis default\n warnings.warn(\n f\"In the future the default for ma.{self.__name__}.reduce will be axis=0, \"\n f\"not the current None, to match np.{self.__name__}.reduce. \"\n \"Explicitly pass 0 or None to silence this warning.\",\n MaskedArrayFutureWarning, stacklevel=2)\n axis = None\n\n if axis is not np._NoValue:\n kwargs = dict(axis=axis)\n else:\n kwargs = dict()\n\n if m is nomask:\n t = self.f.reduce(target, **kwargs)\n else:\n target = target.filled(\n self.fill_value_func(target)).view(type(target))\n t = self.f.reduce(target, **kwargs)\n m = umath.logical_and.reduce(m, **kwargs)\n if hasattr(t, '_mask'):\n t._mask = m\n elif m:\n t = masked\n return t\n\n def outer(self, a, b):\n \"Return the function applied to the outer product of a and b.\"\n ma = getmask(a)\n mb = getmask(b)\n if ma is nomask and mb is nomask:\n m = nomask\n else:\n ma = getmaskarray(a)\n mb = getmaskarray(b)\n m = logical_or.outer(ma, mb)\n result = self.f.outer(filled(a), filled(b))\n if not isinstance(result, MaskedArray):\n result = result.view(MaskedArray)\n result._mask = m\n return result\n\ndef min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n try:\n return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs)\n except (AttributeError, TypeError):\n # If obj doesn't have a min method, or if the method doesn't accept a\n # fill_value argument\n return asanyarray(obj).min(axis=axis, fill_value=fill_value,\n out=out, **kwargs)\nmin.__doc__ = MaskedArray.min.__doc__\n\ndef max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n\n try:\n return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs)\n except (AttributeError, TypeError):\n # If obj doesn't have a max method, or if the method doesn't accept a\n # fill_value argument\n return asanyarray(obj).max(axis=axis, fill_value=fill_value,\n out=out, **kwargs)\nmax.__doc__ = MaskedArray.max.__doc__\n\n\ndef ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n try:\n return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs)\n except (AttributeError, TypeError):\n # If obj doesn't have a ptp method or if the method doesn't accept\n # a fill_value argument\n return asanyarray(obj).ptp(axis=axis, fill_value=fill_value,\n out=out, **kwargs)\nptp.__doc__ = MaskedArray.ptp.__doc__\n\n\n##############################################################################\n# Definition of functions from the corresponding methods #\n##############################################################################\n\n\nclass _frommethod:\n \"\"\"\n Define functions from existing MaskedArray methods.\n\n Parameters\n ----------\n methodname : str\n Name of the method to transform.\n\n \"\"\"\n\n def __init__(self, methodname, reversed=False):\n self.__name__ = methodname\n self.__doc__ = self.getdoc()\n self.reversed = reversed\n\n def getdoc(self):\n \"Return the doc of the function (from the doc 
of the method).\"\n meth = getattr(MaskedArray, self.__name__, None) or\\\n getattr(np, self.__name__, None)\n signature = self.__name__ + get_object_signature(meth)\n if meth is not None:\n doc = \"\"\" %s\\n%s\"\"\" % (\n signature, getattr(meth, '__doc__', None))\n return doc\n\n def __call__(self, a, *args, **params):\n if self.reversed:\n args = list(args)\n a, args[0] = args[0], a\n\n marr = asanyarray(a)\n method_name = self.__name__\n method = getattr(type(marr), method_name, None)\n if method is None:\n # use the corresponding np function\n method = getattr(np, method_name)\n\n return method(marr, *args, **params)\n\n\nall = _frommethod('all')\nanomalies = anom = _frommethod('anom')\nany = _frommethod('any')\ncompress = _frommethod('compress', reversed=True)\ncumprod = _frommethod('cumprod')\ncumsum = _frommethod('cumsum')\ncopy = _frommethod('copy')\ndiagonal = _frommethod('diagonal')\nharden_mask = _frommethod('harden_mask')\nids = _frommethod('ids')\nmaximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)\nmean = _frommethod('mean')\nminimum = _extrema_operation(umath.minimum, less, minimum_fill_value)\nnonzero = _frommethod('nonzero')\nprod = _frommethod('prod')\nproduct = _frommethod('prod')\nravel = _frommethod('ravel')\nrepeat = _frommethod('repeat')\nshrink_mask = _frommethod('shrink_mask')\nsoften_mask = _frommethod('soften_mask')\nstd = _frommethod('std')\nsum = _frommethod('sum')\nswapaxes = _frommethod('swapaxes')\n#take = _frommethod('take')\ntrace = _frommethod('trace')\nvar = _frommethod('var')\n\ncount = _frommethod('count')\n\ndef take(a, indices, axis=None, out=None, mode='raise'):\n \"\"\"\n \"\"\"\n a = masked_array(a)\n return a.take(indices, axis=axis, out=out, mode=mode)\n\n\ndef power(a, b, third=None):\n \"\"\"\n Returns element-wise base array raised to power from second array.\n\n This is the masked array version of `numpy.power`. 
For details see\n `numpy.power`.\n\n See Also\n --------\n numpy.power\n\n Notes\n -----\n The *out* argument to `numpy.power` is not supported, `third` has to be\n None.\n\n \"\"\"\n if third is not None:\n raise MaskError(\"3-argument power not supported.\")\n # Get the masks\n ma = getmask(a)\n mb = getmask(b)\n m = mask_or(ma, mb)\n # Get the rawdata\n fa = getdata(a)\n fb = getdata(b)\n # Get the type of the result (so that we preserve subclasses)\n if isinstance(a, MaskedArray):\n basetype = type(a)\n else:\n basetype = MaskedArray\n # Get the result and view it as a (subclass of) MaskedArray\n with np.errstate(divide='ignore', invalid='ignore'):\n result = np.where(m, fa, umath.power(fa, fb)).view(basetype)\n result._update_from(a)\n # Find where we're in trouble w/ NaNs and Infs\n invalid = np.logical_not(np.isfinite(result.view(ndarray)))\n # Add the initial mask\n if m is not nomask:\n if not result.ndim:\n return masked\n result._mask = np.logical_or(m, invalid)\n # Fix the invalid parts\n if invalid.any():\n if not result.ndim:\n return masked\n elif result._mask is nomask:\n result._mask = invalid\n result._data[invalid] = result.fill_value\n return result\n\nargmin = _frommethod('argmin')\nargmax = _frommethod('argmax')\n\ndef argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None):\n \"Function version of the eponymous method.\"\n a = np.asanyarray(a)\n\n # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default\n if axis is np._NoValue:\n axis = _deprecate_argsort_axis(a)\n\n if isinstance(a, MaskedArray):\n return a.argsort(axis=axis, kind=kind, order=order,\n endwith=endwith, fill_value=fill_value)\n else:\n return a.argsort(axis=axis, kind=kind, order=order)\nargsort.__doc__ = MaskedArray.argsort.__doc__\n\ndef sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None):\n \"\"\"\n Return a sorted copy of the masked array.\n\n Equivalent to creating a copy of the array\n and applying the MaskedArray ``sort()`` method.\n\n Refer to ``MaskedArray.sort`` for the full documentation\n\n See Also\n --------\n MaskedArray.sort : equivalent method\n \"\"\"\n a = np.array(a, copy=True, subok=True)\n if axis is None:\n a = a.flatten()\n axis = 0\n\n if isinstance(a, MaskedArray):\n a.sort(axis=axis, kind=kind, order=order,\n endwith=endwith, fill_value=fill_value)\n else:\n a.sort(axis=axis, kind=kind, order=order)\n return a\n\n\ndef compressed(x):\n \"\"\"\n Return all the non-masked data as a 1-D array.\n\n This function is equivalent to calling the \"compressed\" method of a\n `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details.\n\n See Also\n --------\n ma.MaskedArray.compressed : Equivalent method.\n\n \"\"\"\n return asanyarray(x).compressed()\n\n\ndef concatenate(arrays, axis=0):\n \"\"\"\n Concatenate a sequence of arrays along the given axis.\n\n Parameters\n ----------\n arrays : sequence of array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. 
Default is 0.\n\n Returns\n -------\n result : MaskedArray\n The concatenated array with any masked entries preserved.\n\n See Also\n --------\n numpy.concatenate : Equivalent function in the top-level NumPy module.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = ma.arange(3)\n >>> a[1] = ma.masked\n >>> b = ma.arange(2, 5)\n >>> a\n masked_array(data=[0, --, 2],\n mask=[False, True, False],\n fill_value=999999)\n >>> b\n masked_array(data=[2, 3, 4],\n mask=False,\n fill_value=999999)\n >>> ma.concatenate([a, b])\n masked_array(data=[0, --, 2, 2, 3, 4],\n mask=[False, True, False, False, False, False],\n fill_value=999999)\n\n \"\"\"\n d = np.concatenate([getdata(a) for a in arrays], axis)\n rcls = get_masked_subclass(*arrays)\n data = d.view(rcls)\n # Check whether one of the arrays has a non-empty mask.\n for x in arrays:\n if getmask(x) is not nomask:\n break\n else:\n return data\n # OK, so we have to concatenate the masks\n dm = np.concatenate([getmaskarray(a) for a in arrays], axis)\n dm = dm.reshape(d.shape)\n\n # If we decide to keep a '_shrinkmask' option, we want to check that\n # all of them are True, and then check for dm.any()\n data._mask = _shrink_mask(dm)\n return data\n\n\ndef diag(v, k=0):\n \"\"\"\n Extract a diagonal or construct a diagonal array.\n\n This function is the equivalent of `numpy.diag` that takes masked\n values into account, see `numpy.diag` for details.\n\n See Also\n --------\n numpy.diag : Equivalent function for ndarrays.\n\n \"\"\"\n output = np.diag(v, k).view(MaskedArray)\n if getmask(v) is not nomask:\n output._mask = np.diag(v._mask, k)\n return output\n\n\ndef left_shift(a, n):\n \"\"\"\n Shift the bits of an integer to the left.\n\n This is the masked array version of `numpy.left_shift`, for details\n see that function.\n\n See Also\n --------\n numpy.left_shift\n\n \"\"\"\n m = getmask(a)\n if m is nomask:\n d = umath.left_shift(filled(a), n)\n return masked_array(d)\n else:\n d = umath.left_shift(filled(a, 0), n)\n return masked_array(d, mask=m)\n\n\ndef right_shift(a, n):\n \"\"\"\n Shift the bits of an integer to the right.\n\n This is the masked array version of `numpy.right_shift`, for details\n see that function.\n\n See Also\n --------\n numpy.right_shift\n\n \"\"\"\n m = getmask(a)\n if m is nomask:\n d = umath.right_shift(filled(a), n)\n return masked_array(d)\n else:\n d = umath.right_shift(filled(a, 0), n)\n return masked_array(d, mask=m)\n\n\ndef put(a, indices, values, mode='raise'):\n \"\"\"\n Set storage-indexed locations to corresponding values.\n\n This function is equivalent to `MaskedArray.put`, see that method\n for details.\n\n See Also\n --------\n MaskedArray.put\n\n \"\"\"\n # We can't use 'frommethod', the order of arguments is different\n try:\n return a.put(indices, values, mode=mode)\n except AttributeError:\n return narray(a, copy=False).put(indices, values, mode=mode)\n\n\ndef putmask(a, mask, values): # , mode='raise'):\n \"\"\"\n Changes elements of an array based on conditional and input values.\n\n This is the masked array version of `numpy.putmask`, for details see\n `numpy.putmask`.\n\n See Also\n --------\n numpy.putmask\n\n Notes\n -----\n Using a masked array as `values` will **not** transform a `ndarray` into\n a `MaskedArray`.\n\n \"\"\"\n # We can't use 'frommethod', the order of arguments is different\n if not isinstance(a, MaskedArray):\n a = a.view(MaskedArray)\n (valdata, valmask) = (getdata(values), getmask(values))\n if getmask(a) is nomask:\n if valmask is not nomask:\n 
a._sharedmask = True\n a._mask = make_mask_none(a.shape, a.dtype)\n np.copyto(a._mask, valmask, where=mask)\n elif a._hardmask:\n if valmask is not nomask:\n m = a._mask.copy()\n np.copyto(m, valmask, where=mask)\n a.mask |= m\n else:\n if valmask is nomask:\n valmask = getmaskarray(values)\n np.copyto(a._mask, valmask, where=mask)\n np.copyto(a._data, valdata, where=mask)\n return\n\n\ndef transpose(a, axes=None):\n \"\"\"\n Permute the dimensions of an array.\n\n This function is exactly equivalent to `numpy.transpose`.\n\n See Also\n --------\n numpy.transpose : Equivalent function in top-level NumPy module.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> x = ma.arange(4).reshape((2,2))\n >>> x[1, 1] = ma.masked\n >>> x\n masked_array(\n data=[[0, 1],\n [2, --]],\n mask=[[False, False],\n [False, True]],\n fill_value=999999)\n\n >>> ma.transpose(x)\n masked_array(\n data=[[0, 2],\n [1, --]],\n mask=[[False, False],\n [False, True]],\n fill_value=999999)\n \"\"\"\n # We can't use 'frommethod', as 'transpose' doesn't take keywords\n try:\n return a.transpose(axes)\n except AttributeError:\n return narray(a, copy=False).transpose(axes).view(MaskedArray)\n\n\ndef reshape(a, new_shape, order='C'):\n \"\"\"\n Returns an array containing the same data with a new shape.\n\n Refer to `MaskedArray.reshape` for full documentation.\n\n See Also\n --------\n MaskedArray.reshape : equivalent function\n\n \"\"\"\n # We can't use 'frommethod', it whine about some parameters. Dmmit.\n try:\n return a.reshape(new_shape, order=order)\n except AttributeError:\n _tmp = narray(a, copy=False).reshape(new_shape, order=order)\n return _tmp.view(MaskedArray)\n\n\ndef resize(x, new_shape):\n \"\"\"\n Return a new masked array with the specified size and shape.\n\n This is the masked equivalent of the `numpy.resize` function. The new\n array is filled with repeated copies of `x` (in the order that the\n data are stored in memory). 
If `x` is masked, the new array will be\n masked, and the new mask will be a repetition of the old one.\n\n See Also\n --------\n numpy.resize : Equivalent function in the top level NumPy module.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = ma.array([[1, 2] ,[3, 4]])\n >>> a[0, 1] = ma.masked\n >>> a\n masked_array(\n data=[[1, --],\n [3, 4]],\n mask=[[False, True],\n [False, False]],\n fill_value=999999)\n >>> np.resize(a, (3, 3))\n masked_array(\n data=[[1, 2, 3],\n [4, 1, 2],\n [3, 4, 1]],\n mask=False,\n fill_value=999999)\n >>> ma.resize(a, (3, 3))\n masked_array(\n data=[[1, --, 3],\n [4, 1, --],\n [3, 4, 1]],\n mask=[[False, True, False],\n [False, False, True],\n [False, False, False]],\n fill_value=999999)\n\n A MaskedArray is always returned, regardless of the input type.\n\n >>> a = np.array([[1, 2] ,[3, 4]])\n >>> ma.resize(a, (3, 3))\n masked_array(\n data=[[1, 2, 3],\n [4, 1, 2],\n [3, 4, 1]],\n mask=False,\n fill_value=999999)\n\n \"\"\"\n # We can't use _frommethods here, as N.resize is notoriously whiny.\n m = getmask(x)\n if m is not nomask:\n m = np.resize(m, new_shape)\n result = np.resize(x, new_shape).view(get_masked_subclass(x))\n if result.ndim:\n result._mask = m\n return result\n\n\ndef ndim(obj):\n \"\"\"\n maskedarray version of the numpy function.\n\n \"\"\"\n return np.ndim(getdata(obj))\n\nndim.__doc__ = np.ndim.__doc__\n\n\ndef shape(obj):\n \"maskedarray version of the numpy function.\"\n return np.shape(getdata(obj))\nshape.__doc__ = np.shape.__doc__\n\n\ndef size(obj, axis=None):\n \"maskedarray version of the numpy function.\"\n return np.size(getdata(obj), axis)\nsize.__doc__ = np.size.__doc__\n\n\n##############################################################################\n# Extra functions #\n##############################################################################\n\n\ndef where(condition, x=_NoValue, y=_NoValue):\n \"\"\"\n Return a masked array with elements from `x` or `y`, depending on condition.\n\n .. note::\n When only `condition` is provided, this function is identical to\n `nonzero`. The rest of this documentation covers only the case where\n all three arguments are provided.\n\n Parameters\n ----------\n condition : array_like, bool\n Where True, yield `x`, otherwise yield `y`.\n x, y : array_like, optional\n Values from which to choose. `x`, `y` and `condition` need to be\n broadcastable to some shape.\n\n Returns\n -------\n out : MaskedArray\n An masked array with `masked` elements where the condition is masked,\n elements from `x` where `condition` is True, and elements from `y`\n elsewhere.\n\n See Also\n --------\n numpy.where : Equivalent function in the top-level NumPy module.\n nonzero : The function that is called when x and y are omitted\n\n Examples\n --------\n >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],\n ... [1, 0, 1],\n ... 
[0, 1, 0]])\n >>> x\n masked_array(\n data=[[0.0, --, 2.0],\n [--, 4.0, --],\n [6.0, --, 8.0]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=1e+20)\n >>> np.ma.where(x > 5, x, -3.1416)\n masked_array(\n data=[[-3.1416, --, -3.1416],\n [--, -3.1416, --],\n [6.0, --, 8.0]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=1e+20)\n\n \"\"\"\n\n # handle the single-argument case\n missing = (x is _NoValue, y is _NoValue).count(True)\n if missing == 1:\n raise ValueError(\"Must provide both 'x' and 'y' or neither.\")\n if missing == 2:\n return nonzero(condition)\n\n # we only care if the condition is true - false or masked pick y\n cf = filled(condition, False)\n xd = getdata(x)\n yd = getdata(y)\n\n # we need the full arrays here for correct final dimensions\n cm = getmaskarray(condition)\n xm = getmaskarray(x)\n ym = getmaskarray(y)\n\n # deal with the fact that masked.dtype == float64, but we don't actually\n # want to treat it as that.\n if x is masked and y is not masked:\n xd = np.zeros((), dtype=yd.dtype)\n xm = np.ones((), dtype=ym.dtype)\n elif y is masked and x is not masked:\n yd = np.zeros((), dtype=xd.dtype)\n ym = np.ones((), dtype=xm.dtype)\n\n data = np.where(cf, xd, yd)\n mask = np.where(cf, xm, ym)\n mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)\n\n # collapse the mask, for backwards compatibility\n mask = _shrink_mask(mask)\n\n return masked_array(data, mask=mask)\n\n\ndef choose(indices, choices, out=None, mode='raise'):\n \"\"\"\n Use an index array to construct a new array from a list of choices.\n\n Given an array of integers and a list of n choice arrays, this method\n will create a new array that merges each of the choice arrays. Where a\n value in `index` is i, the new array will have the value that choices[i]\n contains in the same place.\n\n Parameters\n ----------\n indices : ndarray of ints\n This array must contain integers in ``[0, n-1]``, where n is the\n number of choices.\n choices : sequence of arrays\n Choice arrays. The index array and all of the choices should be\n broadcastable to the same shape.\n out : array, optional\n If provided, the result will be inserted into this array. 
It should\n be of the appropriate shape and `dtype`.\n mode : {'raise', 'wrap', 'clip'}, optional\n Specifies how out-of-bounds indices will behave.\n\n * 'raise' : raise an error\n * 'wrap' : wrap around\n * 'clip' : clip to the range\n\n Returns\n -------\n merged_array : array\n\n See Also\n --------\n choose : equivalent function\n\n Examples\n --------\n >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])\n >>> a = np.array([2, 1, 0])\n >>> np.ma.choose(a, choice)\n masked_array(data=[3, 2, 1],\n mask=False,\n fill_value=999999)\n\n \"\"\"\n def fmask(x):\n \"Returns the filled array, or True if masked.\"\n if x is masked:\n return True\n return filled(x)\n\n def nmask(x):\n \"Returns the mask, True if ``masked``, False if ``nomask``.\"\n if x is masked:\n return True\n return getmask(x)\n # Get the indices.\n c = filled(indices, 0)\n # Get the masks.\n masks = [nmask(x) for x in choices]\n data = [fmask(x) for x in choices]\n # Construct the mask\n outputmask = np.choose(c, masks, mode=mode)\n outputmask = make_mask(mask_or(outputmask, getmask(indices)),\n copy=False, shrink=True)\n # Get the choices.\n d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)\n if out is not None:\n if isinstance(out, MaskedArray):\n out.__setmask__(outputmask)\n return out\n d.__setmask__(outputmask)\n return d\n\n\ndef round_(a, decimals=0, out=None):\n \"\"\"\n Return a copy of a, rounded to 'decimals' places.\n\n When 'decimals' is negative, it specifies the number of positions\n to the left of the decimal point. The real and imaginary parts of\n complex numbers are rounded separately. Nothing is done if the\n array is not of float type and 'decimals' is greater than or equal\n to 0.\n\n Parameters\n ----------\n decimals : int\n Number of decimals to round to. May be negative.\n out : array_like\n Existing array to use for output.\n If not given, returns a default copy of a.\n\n Notes\n -----\n If out is given and does not have a mask attribute, the mask of a\n is lost!\n\n \"\"\"\n if out is None:\n return np.round_(a, decimals, out)\n else:\n np.round_(getdata(a), decimals, out)\n if hasattr(out, '_mask'):\n out._mask = getmask(a)\n return out\nround = round_\n\n\n# Needed by dot, so move here from extras.py. It will still be exported\n# from extras.py for compatibility.\ndef mask_rowcols(a, axis=None):\n \"\"\"\n Mask rows and/or columns of a 2D array that contain masked values.\n\n Mask whole rows and/or columns of a 2D array that contain\n masked values. The masking behavior is selected using the\n `axis` parameter.\n\n - If `axis` is None, rows *and* columns are masked.\n - If `axis` is 0, only rows are masked.\n - If `axis` is 1 or -1, only columns are masked.\n\n Parameters\n ----------\n a : array_like, MaskedArray\n The array to mask. If not a MaskedArray instance (or if no array\n elements are masked). The result is a MaskedArray with `mask` set\n to `nomask` (False). Must be a 2D array.\n axis : int, optional\n Axis along which to perform the operation. 
If None, applies to a\n flattened version of the array.\n\n Returns\n -------\n a : MaskedArray\n A modified version of the input array, masked depending on the value\n of the `axis` parameter.\n\n Raises\n ------\n NotImplementedError\n If input array `a` is not 2D.\n\n See Also\n --------\n mask_rows : Mask rows of a 2D array that contain masked values.\n mask_cols : Mask cols of a 2D array that contain masked values.\n masked_where : Mask where a condition is met.\n\n Notes\n -----\n The input array's mask is modified by this function.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = np.zeros((3, 3), dtype=int)\n >>> a[1, 1] = 1\n >>> a\n array([[0, 0, 0],\n [0, 1, 0],\n [0, 0, 0]])\n >>> a = ma.masked_equal(a, 1)\n >>> a\n masked_array(\n data=[[0, 0, 0],\n [0, --, 0],\n [0, 0, 0]],\n mask=[[False, False, False],\n [False, True, False],\n [False, False, False]],\n fill_value=1)\n >>> ma.mask_rowcols(a)\n masked_array(\n data=[[0, --, 0],\n [--, --, --],\n [0, --, 0]],\n mask=[[False, True, False],\n [ True, True, True],\n [False, True, False]],\n fill_value=1)\n\n \"\"\"\n a = array(a, subok=False)\n if a.ndim != 2:\n raise NotImplementedError(\"mask_rowcols works for 2D arrays only.\")\n m = getmask(a)\n # Nothing is masked: return a\n if m is nomask or not m.any():\n return a\n maskedval = m.nonzero()\n a._mask = a._mask.copy()\n if not axis:\n a[np.unique(maskedval[0])] = masked\n if axis in [None, 1, -1]:\n a[:, np.unique(maskedval[1])] = masked\n return a\n\n\n# Include masked dot here to avoid import problems in getting it from\n# extras.py. Note that it is not included in __all__, but rather exported\n# from extras in order to avoid backward compatibility problems.\ndef dot(a, b, strict=False, out=None):\n \"\"\"\n Return the dot product of two arrays.\n\n This function is the equivalent of `numpy.dot` that takes masked values\n into account. Note that `strict` and `out` are in different position\n than in the method version. In order to maintain compatibility with the\n corresponding method, it is recommended that the optional arguments be\n treated as keyword only. At some point that may be mandatory.\n\n .. note::\n Works only with 2-D arrays at the moment.\n\n\n Parameters\n ----------\n a, b : masked_array_like\n Inputs arrays.\n strict : bool, optional\n Whether masked data are propagated (True) or set to 0 (False) for\n the computation. Default is False. Propagating the mask means that\n if a masked value appears in a row or column, the whole row or\n column is considered masked.\n out : masked_array, optional\n Output argument. This must have the exact kind that would be returned\n if it was not used. In particular, it must have the right type, must be\n C-contiguous, and its dtype must be the dtype that would be returned\n for `dot(a,b)`. This is a performance feature. Therefore, if these\n conditions are not met, an exception is raised, instead of attempting\n to be flexible.\n\n .. 
versionadded:: 1.10.2\n\n See Also\n --------\n numpy.dot : Equivalent function for ndarrays.\n\n Examples\n --------\n >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])\n >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])\n >>> np.ma.dot(a, b)\n masked_array(\n data=[[21, 26],\n [45, 64]],\n mask=[[False, False],\n [False, False]],\n fill_value=999999)\n >>> np.ma.dot(a, b, strict=True)\n masked_array(\n data=[[--, --],\n [--, 64]],\n mask=[[ True, True],\n [ True, False]],\n fill_value=999999)\n\n \"\"\"\n # !!!: Works only with 2D arrays. There should be a way to get it to run\n # with higher dimension\n if strict and (a.ndim == 2) and (b.ndim == 2):\n a = mask_rowcols(a, 0)\n b = mask_rowcols(b, 1)\n am = ~getmaskarray(a)\n bm = ~getmaskarray(b)\n\n if out is None:\n d = np.dot(filled(a, 0), filled(b, 0))\n m = ~np.dot(am, bm)\n if d.ndim == 0:\n d = np.asarray(d)\n r = d.view(get_masked_subclass(a, b))\n r.__setmask__(m)\n return r\n else:\n d = np.dot(filled(a, 0), filled(b, 0), out._data)\n if out.mask.shape != d.shape:\n out._mask = np.empty(d.shape, MaskType)\n np.dot(am, bm, out._mask)\n np.logical_not(out._mask, out._mask)\n return out\n\n\ndef inner(a, b):\n \"\"\"\n Returns the inner product of a and b for arrays of floating point types.\n\n Like the generic NumPy equivalent the product sum is over the last dimension\n of a and b. The first argument is not conjugated.\n\n \"\"\"\n fa = filled(a, 0)\n fb = filled(b, 0)\n if fa.ndim == 0:\n fa.shape = (1,)\n if fb.ndim == 0:\n fb.shape = (1,)\n return np.inner(fa, fb).view(MaskedArray)\ninner.__doc__ = doc_note(np.inner.__doc__,\n \"Masked values are replaced by 0.\")\ninnerproduct = inner\n\n\ndef outer(a, b):\n \"maskedarray version of the numpy function.\"\n fa = filled(a, 0).ravel()\n fb = filled(b, 0).ravel()\n d = np.outer(fa, fb)\n ma = getmask(a)\n mb = getmask(b)\n if ma is nomask and mb is nomask:\n return masked_array(d)\n ma = getmaskarray(a)\n mb = getmaskarray(b)\n m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)\n return masked_array(d, mask=m)\nouter.__doc__ = doc_note(np.outer.__doc__,\n \"Masked values are replaced by 0.\")\nouterproduct = outer\n\n\ndef _convolve_or_correlate(f, a, v, mode, propagate_mask):\n \"\"\"\n Helper function for ma.correlate and ma.convolve\n \"\"\"\n if propagate_mask:\n # results which are contributed to by either item in any pair being invalid\n mask = (\n f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)\n | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)\n )\n data = f(getdata(a), getdata(v), mode=mode)\n else:\n # results which are not contributed to by any pair of valid elements\n mask = ~f(~getmaskarray(a), ~getmaskarray(v))\n data = f(filled(a, 0), filled(v, 0), mode=mode)\n\n return masked_array(data, mask=mask)\n\n\ndef correlate(a, v, mode='valid', propagate_mask=True):\n \"\"\"\n Cross-correlation of two 1-dimensional sequences.\n\n Parameters\n ----------\n a, v : array_like\n Input sequences.\n mode : {'valid', 'same', 'full'}, optional\n Refer to the `np.convolve` docstring. 
Note that the default\n is 'valid', unlike `convolve`, which uses 'full'.\n propagate_mask : bool\n If True, then a result element is masked if any masked element contributes towards it.\n If False, then a result element is only masked if no non-masked element\n contribute towards it\n\n Returns\n -------\n out : MaskedArray\n Discrete cross-correlation of `a` and `v`.\n\n See Also\n --------\n numpy.correlate : Equivalent function in the top-level NumPy module.\n \"\"\"\n return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)\n\n\ndef convolve(a, v, mode='full', propagate_mask=True):\n \"\"\"\n Returns the discrete, linear convolution of two one-dimensional sequences.\n\n Parameters\n ----------\n a, v : array_like\n Input sequences.\n mode : {'valid', 'same', 'full'}, optional\n Refer to the `np.convolve` docstring.\n propagate_mask : bool\n If True, then if any masked element is included in the sum for a result\n element, then the result is masked.\n If False, then the result element is only masked if no non-masked cells\n contribute towards it\n\n Returns\n -------\n out : MaskedArray\n Discrete, linear convolution of `a` and `v`.\n\n See Also\n --------\n numpy.convolve : Equivalent function in the top-level NumPy module.\n \"\"\"\n return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)\n\n\ndef allequal(a, b, fill_value=True):\n \"\"\"\n Return True if all entries of a and b are equal, using\n fill_value as a truth value where either or both are masked.\n\n Parameters\n ----------\n a, b : array_like\n Input arrays to compare.\n fill_value : bool, optional\n Whether masked values in a or b are considered equal (True) or not\n (False).\n\n Returns\n -------\n y : bool\n Returns True if the two arrays are equal within the given\n tolerance, False otherwise. If either array contains NaN,\n then False is returned.\n\n See Also\n --------\n all, any\n numpy.ma.allclose\n\n Examples\n --------\n >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])\n >>> a\n masked_array(data=[10000000000.0, 1e-07, --],\n mask=[False, False, True],\n fill_value=1e+20)\n\n >>> b = np.array([1e10, 1e-7, -42.0])\n >>> b\n array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])\n >>> np.ma.allequal(a, b, fill_value=False)\n False\n >>> np.ma.allequal(a, b)\n True\n\n \"\"\"\n m = mask_or(getmask(a), getmask(b))\n if m is nomask:\n x = getdata(a)\n y = getdata(b)\n d = umath.equal(x, y)\n return d.all()\n elif fill_value:\n x = getdata(a)\n y = getdata(b)\n d = umath.equal(x, y)\n dm = array(d, mask=m, copy=False)\n return dm.filled(True).all(None)\n else:\n return False\n\n\ndef allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):\n \"\"\"\n Returns True if two arrays are element-wise equal within a tolerance.\n\n This function is equivalent to `allclose` except that masked values\n are treated as equal (default) or unequal, depending on the `masked_equal`\n argument.\n\n Parameters\n ----------\n a, b : array_like\n Input arrays to compare.\n masked_equal : bool, optional\n Whether masked values in `a` and `b` are considered equal (True) or not\n (False). They are considered equal by default.\n rtol : float, optional\n Relative tolerance. The relative difference is equal to ``rtol * b``.\n Default is 1e-5.\n atol : float, optional\n Absolute tolerance. The absolute difference is equal to `atol`.\n Default is 1e-8.\n\n Returns\n -------\n y : bool\n Returns True if the two arrays are equal within the given\n tolerance, False otherwise. 
If either array contains NaN, then\n False is returned.\n\n See Also\n --------\n all, any\n numpy.allclose : the non-masked `allclose`.\n\n Notes\n -----\n If the following equation is element-wise True, then `allclose` returns\n True::\n\n absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))\n\n Return True if all elements of `a` and `b` are equal subject to\n given tolerances.\n\n Examples\n --------\n >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])\n >>> a\n masked_array(data=[10000000000.0, 1e-07, --],\n mask=[False, False, True],\n fill_value=1e+20)\n >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])\n >>> np.ma.allclose(a, b)\n False\n\n >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])\n >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])\n >>> np.ma.allclose(a, b)\n True\n >>> np.ma.allclose(a, b, masked_equal=False)\n False\n\n Masked values are not compared directly.\n\n >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])\n >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])\n >>> np.ma.allclose(a, b)\n True\n >>> np.ma.allclose(a, b, masked_equal=False)\n False\n\n \"\"\"\n x = masked_array(a, copy=False)\n y = masked_array(b, copy=False)\n\n # make sure y is an inexact type to avoid abs(MIN_INT); will cause\n # casting of x later.\n # NOTE: We explicitly allow timedelta, which used to work. This could\n # possibly be deprecated. See also gh-18286.\n # timedelta works if `atol` is an integer or also a timedelta.\n # Although, the default tolerances are unlikely to be useful\n if y.dtype.kind != \"m\":\n dtype = np.result_type(y, 1.)\n if y.dtype != dtype:\n y = masked_array(y, dtype=dtype, copy=False)\n\n m = mask_or(getmask(x), getmask(y))\n xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)\n # If we have some infs, they should fall at the same place.\n if not np.all(xinf == filled(np.isinf(y), False)):\n return False\n # No infs at all\n if not np.any(xinf):\n d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),\n masked_equal)\n return np.all(d)\n\n if not np.all(filled(x[xinf] == y[xinf], masked_equal)):\n return False\n x = x[~xinf]\n y = y[~xinf]\n\n d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),\n masked_equal)\n\n return np.all(d)\n\n\ndef asarray(a, dtype=None, order=None):\n \"\"\"\n Convert the input to a masked array of the given data-type.\n\n No copy is performed if the input is already an `ndarray`. If `a` is\n a subclass of `MaskedArray`, a base class `MaskedArray` is returned.\n\n Parameters\n ----------\n a : array_like\n Input data, in any form that can be converted to a masked array. This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples\n of lists, ndarrays and masked arrays.\n dtype : dtype, optional\n By default, the data-type is inferred from the input data.\n order : {'C', 'F'}, optional\n Whether to use row-major ('C') or column-major ('FORTRAN') memory\n representation. 
Default is 'C'.\n\n Returns\n -------\n out : MaskedArray\n Masked array interpretation of `a`.\n\n See Also\n --------\n asanyarray : Similar to `asarray`, but conserves subclasses.\n\n Examples\n --------\n >>> x = np.arange(10.).reshape(2, 5)\n >>> x\n array([[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]])\n >>> np.ma.asarray(x)\n masked_array(\n data=[[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]],\n mask=False,\n fill_value=1e+20)\n >>> type(np.ma.asarray(x))\n <class 'numpy.ma.core.MaskedArray'>\n\n \"\"\"\n order = order or 'C'\n return masked_array(a, dtype=dtype, copy=False, keep_mask=True,\n subok=False, order=order)\n\n\ndef asanyarray(a, dtype=None):\n \"\"\"\n Convert the input to a masked array, conserving subclasses.\n\n If `a` is a subclass of `MaskedArray`, its class is conserved.\n No copy is performed if the input is already an `ndarray`.\n\n Parameters\n ----------\n a : array_like\n Input data, in any form that can be converted to an array.\n dtype : dtype, optional\n By default, the data-type is inferred from the input data.\n order : {'C', 'F'}, optional\n Whether to use row-major ('C') or column-major ('FORTRAN') memory\n representation. Default is 'C'.\n\n Returns\n -------\n out : MaskedArray\n MaskedArray interpretation of `a`.\n\n See Also\n --------\n asarray : Similar to `asanyarray`, but does not conserve subclass.\n\n Examples\n --------\n >>> x = np.arange(10.).reshape(2, 5)\n >>> x\n array([[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]])\n >>> np.ma.asanyarray(x)\n masked_array(\n data=[[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]],\n mask=False,\n fill_value=1e+20)\n >>> type(np.ma.asanyarray(x))\n <class 'numpy.ma.core.MaskedArray'>\n\n \"\"\"\n # workaround for #8666, to preserve identity. Ideally the bottom line\n # would handle this for us.\n if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):\n return a\n return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)\n\n\n##############################################################################\n# Pickling #\n##############################################################################\n\ndef _pickle_warn(method):\n # NumPy 1.15.0, 2017-12-10\n warnings.warn(\n f\"np.ma.{method} is deprecated, use pickle.{method} instead\",\n DeprecationWarning, stacklevel=3)\n\n\ndef fromfile(file, dtype=float, count=-1, sep=''):\n raise NotImplementedError(\n \"fromfile() not yet implemented for a MaskedArray.\")\n\n\ndef fromflex(fxarray):\n \"\"\"\n Build a masked array from a suitable flexible-type array.\n\n The input array has to have a data-type with ``_data`` and ``_mask``\n fields. This type of array is output by `MaskedArray.toflex`.\n\n Parameters\n ----------\n fxarray : ndarray\n The structured input array, containing ``_data`` and ``_mask``\n fields. 
If present, other fields are discarded.\n\n Returns\n -------\n result : MaskedArray\n The constructed masked array.\n\n See Also\n --------\n MaskedArray.toflex : Build a flexible-type array from a masked array.\n\n Examples\n --------\n >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)\n >>> rec = x.toflex()\n >>> rec\n array([[(0, False), (1, True), (2, False)],\n [(3, True), (4, False), (5, True)],\n [(6, False), (7, True), (8, False)]],\n dtype=[('_data', '<i8'), ('_mask', '?')])\n >>> x2 = np.ma.fromflex(rec)\n >>> x2\n masked_array(\n data=[[0, --, 2],\n [--, 4, --],\n [6, --, 8]],\n mask=[[False, True, False],\n [ True, False, True],\n [False, True, False]],\n fill_value=999999)\n\n Extra fields can be present in the structured array but are discarded:\n\n >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]\n >>> rec2 = np.zeros((2, 2), dtype=dt)\n >>> rec2\n array([[(0, False, 0.), (0, False, 0.)],\n [(0, False, 0.), (0, False, 0.)]],\n dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])\n >>> y = np.ma.fromflex(rec2)\n >>> y\n masked_array(\n data=[[0, 0],\n [0, 0]],\n mask=[[False, False],\n [False, False]],\n fill_value=999999,\n dtype=int32)\n\n \"\"\"\n return masked_array(fxarray['_data'], mask=fxarray['_mask'])\n\n\nclass _convert2ma:\n\n \"\"\"\n Convert functions from numpy to numpy.ma.\n\n Parameters\n ----------\n _methodname : string\n Name of the method to transform.\n\n \"\"\"\n __doc__ = None\n\n def __init__(self, funcname, np_ret, np_ma_ret, params=None):\n self._func = getattr(np, funcname)\n self.__doc__ = self.getdoc(np_ret, np_ma_ret)\n self._extras = params or {}\n\n def getdoc(self, np_ret, np_ma_ret):\n \"Return the doc of the function (from the doc of the method).\"\n doc = getattr(self._func, '__doc__', None)\n sig = get_object_signature(self._func)\n if doc:\n doc = self._replace_return_type(doc, np_ret, np_ma_ret)\n # Add the signature of the function at the beginning of the doc\n if sig:\n sig = \"%s%s\\n\" % (self._func.__name__, sig)\n doc = sig + doc\n return doc\n\n def _replace_return_type(self, doc, np_ret, np_ma_ret):\n \"\"\"\n Replace documentation of ``np`` function's return type.\n\n Replaces it with the proper type for the ``np.ma`` function.\n\n Parameters\n ----------\n doc : str\n The documentation of the ``np`` method.\n np_ret : str\n The return type string of the ``np`` method that we want to\n replace. (e.g. \"out : ndarray\")\n np_ma_ret : str\n The return type string of the ``np.ma`` method.\n (e.g. \"out : MaskedArray\")\n \"\"\"\n if np_ret not in doc:\n raise RuntimeError(\n f\"Failed to replace `{np_ret}` with `{np_ma_ret}`. \"\n f\"The documentation string for return type, {np_ret}, is not \"\n f\"found in the docstring for `np.{self._func.__name__}`. 
\"\n f\"Fix the docstring for `np.{self._func.__name__}` or \"\n \"update the expected string for return type.\"\n )\n\n return doc.replace(np_ret, np_ma_ret)\n\n def __call__(self, *args, **params):\n # Find the common parameters to the call and the definition\n _extras = self._extras\n common_params = set(params).intersection(_extras)\n # Drop the common parameters from the call\n for p in common_params:\n _extras[p] = params.pop(p)\n # Get the result\n result = self._func.__call__(*args, **params).view(MaskedArray)\n if \"fill_value\" in common_params:\n result.fill_value = _extras.get(\"fill_value\", None)\n if \"hardmask\" in common_params:\n result._hardmask = bool(_extras.get(\"hard_mask\", False))\n return result\n\n\narange = _convert2ma(\n 'arange', \n params=dict(fill_value=None, hardmask=False),\n np_ret='arange : ndarray',\n np_ma_ret='arange : MaskedArray',\n)\nclip = _convert2ma(\n 'clip',\n params=dict(fill_value=None, hardmask=False),\n np_ret='clipped_array : ndarray',\n np_ma_ret='clipped_array : MaskedArray',\n)\ndiff = _convert2ma(\n 'diff',\n params=dict(fill_value=None, hardmask=False),\n np_ret='diff : ndarray',\n np_ma_ret='diff : MaskedArray',\n)\nempty = _convert2ma(\n 'empty', \n params=dict(fill_value=None, hardmask=False),\n np_ret='out : ndarray',\n np_ma_ret='out : MaskedArray',\n)\nempty_like = _convert2ma(\n 'empty_like',\n np_ret='out : ndarray',\n np_ma_ret='out : MaskedArray',\n)\nfrombuffer = _convert2ma(\n 'frombuffer',\n np_ret='out : ndarray',\n np_ma_ret='out: MaskedArray',\n)\nfromfunction = _convert2ma(\n 'fromfunction',\n np_ret='fromfunction : any',\n np_ma_ret='fromfunction: MaskedArray',\n)\nidentity = _convert2ma(\n 'identity', \n params=dict(fill_value=None, hardmask=False),\n np_ret='out : ndarray',\n np_ma_ret='out : MaskedArray',\n)\nindices = _convert2ma(\n 'indices',\n params=dict(fill_value=None, hardmask=False),\n np_ret='grid : one ndarray or tuple of ndarrays',\n np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays',\n)\nones = _convert2ma(\n 'ones',\n params=dict(fill_value=None, hardmask=False),\n np_ret='out : ndarray',\n np_ma_ret='out : MaskedArray',\n)\nones_like = _convert2ma(\n 'ones_like',\n np_ret='out : ndarray',\n np_ma_ret='out : MaskedArray',\n)\nsqueeze = _convert2ma(\n 'squeeze',\n params=dict(fill_value=None, hardmask=False),\n np_ret='squeezed : ndarray',\n np_ma_ret='squeezed : MaskedArray',\n)\nzeros = _convert2ma(\n 'zeros',\n params=dict(fill_value=None, hardmask=False),\n np_ret='out : ndarray',\n np_ma_ret='out : MaskedArray',\n)\nzeros_like = _convert2ma(\n 'zeros_like',\n np_ret='out : ndarray',\n np_ma_ret='out : MaskedArray',\n)\n\n\ndef append(a, b, axis=None):\n \"\"\"Append values to the end of an array.\n\n .. versionadded:: 1.9.0\n\n Parameters\n ----------\n a : array_like\n Values are appended to a copy of this array.\n b : array_like\n These values are appended to a copy of `a`. It must be of the\n correct shape (the same shape as `a`, excluding `axis`). If `axis`\n is not specified, `b` can be any shape and will be flattened\n before use.\n axis : int, optional\n The axis along which `v` are appended. If `axis` is not given,\n both `a` and `b` are flattened before use.\n\n Returns\n -------\n append : MaskedArray\n A copy of `a` with `b` appended to `axis`. Note that `append`\n does not occur in-place: a new array is allocated and filled. 
If\n `axis` is None, the result is a flattened array.\n\n See Also\n --------\n numpy.append : Equivalent function in the top-level NumPy module.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> a = ma.masked_values([1, 2, 3], 2)\n >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)\n >>> ma.append(a, b)\n masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9],\n mask=[False, True, False, False, False, False, True, False,\n False],\n fill_value=999999)\n \"\"\"\n return concatenate([a, b], axis)\n"
]
| [
[
"numpy.core.umath.equal",
"numpy.ndarray.sort",
"numpy.isclose",
"numpy.dot",
"numpy.void",
"numpy.core.umath.less",
"numpy.core.umath.greater",
"numpy.where",
"numpy.resize",
"numpy.iscomplexobj",
"numpy.finfo",
"numpy.unique",
"numpy.outer",
"numpy.issubdtype",
"numpy.inner",
"numpy.broadcast_to",
"numpy.dtype",
"numpy.concatenate",
"numpy.full",
"numpy.core.umath.less_equal",
"numpy.empty",
"numpy.ndarray.ravel",
"numpy.seterr",
"numpy.core.umath.isfinite",
"numpy.take",
"numpy.core.umath.cos",
"numpy.core.umath.multiply",
"numpy.can_cast",
"numpy.isfinite",
"numpy.expand_dims",
"numpy.core.arrayprint.dtype_is_implied",
"numpy.logical_or",
"numpy.array",
"numpy.copyto",
"numpy.core.umath.logical_or.outer",
"numpy.reshape",
"numpy.zeros",
"numpy.core.umath.logical_not",
"numpy.core.umath.logical_or",
"numpy.shape",
"numpy.core.numeric.normalize_axis_tuple",
"numpy.ndarray",
"numpy.take_along_axis",
"numpy.subtract",
"numpy.timedelta64",
"numpy.core.arrayprint.dtype_short_repr",
"numpy.power",
"numpy.ndarray.view",
"numpy.ndarray.__new__",
"numpy.datetime64",
"numpy.array2string",
"numpy.logical_not",
"numpy.result_type",
"numpy.core.umath.power",
"numpy.round_",
"numpy.isinf",
"numpy.asarray",
"numpy.errstate",
"numpy.broadcast",
"numpy.ones",
"numpy.choose",
"numpy.core.umath.absolute",
"numpy.AxisError",
"numpy.split",
"numpy.core.umath.logical_and.reduce",
"numpy.any",
"numpy.core.arrayprint._get_legacy_print_mode",
"numpy.all",
"numpy.diag",
"numpy.asanyarray",
"numpy.compat.getargspec"
]
]
|
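The numpy.ma.core excerpt in the row above documents how `allclose` treats masked entries and how `append` flattens its inputs. A minimal standalone sketch of those two behaviours, built only from the public numpy.ma calls that already appear in the docstrings above:

import numpy as np
import numpy.ma as ma

# Masked positions are ignored by default (masked_equal=True), so the
# differing third elements (42.0 vs -42.0, both masked) do not matter.
a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
print(ma.allclose(a, b))                      # True
print(ma.allclose(a, b, masked_equal=False))  # False

# append flattens both inputs when axis is None and keeps their masks.
x = ma.masked_values([1, 2, 3], 2)
y = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
print(ma.append(x, y))  # masked entries print as --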
giuliovv/cryptotrading | [
"9627f68ef318e17f14784e3275a849885fb3710a"
]
| [
"utils/technical.py"
]
| [
"import numpy as np\nimport pandas as pd\nfrom tqdm.auto import tqdm\n\nfrom utils import ultimate_cycle\n\n# OSCILLATORS\n\ndef macd(prices: pd.Series, long: int, short: int, strategy=False, getgains=False, winning=False, commissions=0.005) -> pd.Series:\n '''\n Return the MACD\n\n :param pd.Series prices: Prices of the stock\n :param int long: Long moving average length\n :param int short: Short moving average length\n :param bool strategy: If strategy should be returned\n :param bool getgains: If gains should be returned\n :param bool winning: If policy gain - no strategy gain should be returned\n :param float commissions: Percentage commissions per transaction\n '''\n if prices.index.duplicated().any():\n raise ValueError(\"There are some duplicate indexes.\")\n macdvalues = prices.rolling(short).mean() - prices.rolling(long).mean()\n if winning:\n positive = macdvalues > 0\n policy = positive.shift(1) != positive\n if positive.iloc[0]:\n policy.iloc[0] = 1\n gain = gains(prices=prices, policy=policy, commissions=commissions)\n diff = (prices.iloc[-1]/prices.iloc[0]) - 1\n return gain.sum() - diff * 100\n if strategy:\n positive = macdvalues > 0\n return positive.shift(1) != positive\n if getgains:\n positive = macdvalues > 0\n policy = positive.shift(1) != positive\n if positive.iloc[0]:\n policy.iloc[0] = 1\n return gains(prices=prices, policy=policy, commissions=commissions)\n return macdvalues\n \ndef ultimate(prices: pd.Series, low: pd.Series, high: pd.Series, buylevel=30, selllevel=70, days=7, strategy=False, getgains=False, winning=False, commissions=0.005, mingain=0, accelerate=True, firstopportunity=False, stoploss=0) -> pd.Series:\n '''\n Return the Ultimate oscillator\n\n :param pd.Series prices: Prices of the stock\n :param pd.Series low: Long moving average length\n :param pd.Series high: Short moving average length\n :param int days: Days for moving sum\n :param bool strategy: If strategy should be returned\n :param bool getgains: If gains should be returned\n :param bool winning: If policy gain - no strategy gain should be returned\n :param float commissions: Percentage commissions per transaction\n :param bool accelerate: If uses cython\n :param float mingain: Minimum gain to sell\n :param bool firstopportunity: If sell first time you have mingain\n :param float stoploss: Maximum percentage loss\n '''\n if prices.index.duplicated().any():\n raise ValueError(\"There are some duplicate indexes.\")\n bp = prices - np.minimum(prices.shift(1), low)\n tr = np.maximum(high, prices.shift(1)) - np.minimum(prices.shift(1), low)\n avg1 = bp.rolling(days).sum()/tr.rolling(days).sum()\n avg2 = bp.rolling(2*days).sum()/tr.rolling(2*days).sum()\n avg3 = bp.rolling(3*days).sum()/tr.rolling(3*days).sum()\n ult = 100 * (4*avg1 + 2*avg2 + avg3)/7\n if mingain == 0 and not firstopportunity and stoploss == 0:\n prices = prices.loc[~ult.isna()]\n ult = ult.dropna()\n if winning or strategy or getgains:\n buy = ult < buylevel\n sell = ult > selllevel\n policy = getpolicy(buy=buy, sell=sell, prices=prices, mingain=mingain, stoploss=stoploss, accelerate=accelerate, firstopportunity=firstopportunity)\n else:\n return ult\n if winning:\n gain = gains(prices=prices, policy=policy, commissions=commissions)\n diff = (prices.iloc[-1]/prices.iloc[0]) - 1\n return gain.sum() - diff * 100\n if strategy:\n return policy\n if getgains:\n return gains(prices=prices, policy=policy, commissions=commissions)\n\ndef bollinger_bands(prices: pd.Series, k=1, period=1000, strategy=False, getgains=False, 
winning=False, commissions=0.005, accelerate=True, mingain=0, firstopportunity=False, stoploss=0) -> (pd.Series, pd.Series):\n '''\n Return the Bollinger bands\n\n :param pd.Series prices: Prices of the stock\n :param int k: How many standard deviations out\n :param int period: Period for moving average \n :param bool strategy: If strategy should be returned\n :param bool getgains: If gains should be returned\n :param bool winning: If policy gain - no strategy gain should be returned\n :param float commissions: Percentage commissions per transaction\n :param bool accelerate: If uses cython\n :param float mingain: Minimum gain to sell\n :param bool firstopportunity: If sell first time you have mingain\n :param float stoploss: Maximum percentage loss\n '''\n std = prices.rolling(period).std()\n mean = prices.rolling(period).mean()\n upperband = mean + std*k\n lowerband = mean - std*k\n if strategy or getgains or winning:\n sell = prices > upperband\n buy = prices < lowerband\n policy = getpolicy(buy=buy, sell=sell, prices=prices, mingain=mingain, stoploss=stoploss, accelerate=accelerate, firstopportunity=firstopportunity)\n if winning:\n gain = gains(prices=prices, policy=policy, commissions=commissions)\n diff = (prices.iloc[-1]/prices.iloc[0]) - 1\n return gain.sum() - diff * 100\n if strategy:\n return policy\n if getgains:\n return gains(prices=prices, policy=policy, commissions=commissions)\n return lowerband, upperband\n\ndef williams(prices: pd.Series, low: pd.Series, high: pd.Series, buylevel=-80, selllevel=-20, days=10, strategy=False, getgains=False, winning=False, commissions=0.005, mingain=0, accelerate=True, firstopportunity=False, stoploss=0) -> pd.Series:\n '''\n Return the Williams %R oscillator\n\n :param pd.Series prices: Prices of the stock\n :param pd.Series low: Long moving average length\n :param pd.Series high: Short moving average length\n :param int days: Days for moving sum\n :param bool strategy: If strategy should be returned\n :param bool getgains: If gains should be returned\n :param bool winning: If policy gain - no strategy gain should be returned\n :param float commissions: Percentage commissions per transaction\n :param bool accelerate: If uses cython\n :param float mingain: Minimum gain to sell\n :param bool firstopportunity: If sell first time you have mingain\n :param float stoploss: Maximum percentage loss\n '''\n if prices.index.duplicated().any():\n raise ValueError(\"There are some duplicate indexes.\")\n high_N = high.rolling(days).max()\n low_N = low.rolling(days).min()\n R = -100*(high_N - prices)/(high_N - low_N)\n if winning or strategy or getgains:\n buy = R > buylevel\n sell = R < selllevel\n policy = getpolicy(buy=buy, sell=sell, prices=prices, mingain=mingain, stoploss=stoploss, accelerate=accelerate, firstopportunity=firstopportunity)\n else:\n return R\n if winning:\n gain = gains(prices=prices, policy=policy, commissions=commissions)\n diff = (prices.iloc[-1]/prices.iloc[0]) - 1\n return gain.sum() - diff * 100\n if strategy:\n return policy\n if getgains:\n return gains(prices=prices, policy=policy, commissions=commissions)\n\ndef momentum(prices: pd.Series, period=10, strategy=False, getgains=False, winning=False, commissions=0.005) -> pd.Series:\n '''\n Return the Momentum\n\n :param pd.Series prices: Prices of the stock\n :param int period: Days for moving average\n :param bool strategy: If strategy should be returned\n :param bool getgains: If gains should be returned\n :param bool winning: If policy gain - no strategy gain should be 
returned\n :param float commissions: Percentage commissions per transaction\n '''\n if prices.index.duplicated().any():\n raise ValueError(\"There are some duplicate indexes.\")\n momentum = prices.rolling(period).mean().pct_change()\n if winning or strategy or getgains:\n buy = momentum > 0\n sell = momentum < 0\n buy_ = buy.shift(1) != buy\n sell_ = sell.shift(1) != sell\n sell_.loc[:buy_[buy_].iloc[0].index] = 0\n policy = buy_ | sell_\n else:\n return momentum\n if winning:\n gain = gains(prices=prices, policy=policy, commissions=commissions)\n diff = (prices.iloc[-1]/prices.iloc[0]) - 1\n return gain.sum() - diff * 100\n if strategy:\n return policy\n if getgains:\n return gains(prices=prices, policy=policy, commissions=commissions)\n\n# UTILS\n\ndef gains(prices: pd.Series, policy: pd.Series, budget=100, commissions=0.005) -> pd.Series:\n '''\n Return the gains\n\n :param pd.Series prices: Prices of the stock\n :param pd.Series policy: True when buy or sell\n :param float budget: My budget\n :param float commissions: Percentage commissions per transaction\n '''\n prices = prices.loc[policy.index]\n buy = prices[policy].iloc[::2]\n sell = prices[policy].iloc[1::2]\n buy = buy.iloc[:sell.size].values\n gains = (sell/buy) - 1\n return (gains - commissions*2)*budget\n\ndef getpolicy(buy: pd.Series, sell: pd.Series, prices: pd.Series, mingain=0, stoploss=0, accelerate=True, firstopportunity=False) -> pd.Series:\n \"\"\"\n Return the policy given all the moments sell or buy is True\n\n :param pd.Series buy: When the buy pricinple is respected\n :param pd.Series sell: When the sell pricinple is respected\n :param float mingain: Minimum gain to sell\n :param float stoploss: Maximum percentage loss\n :param bool accelerate: If use cython\n :param bool firstopportunity: If sell first time you have mingain, MUST USE ACCELERATE\n \"\"\"\n if firstopportunity and not accelerate:\n print(\"Changing accelerate to True to use firstopportunity.\")\n accelerate = True\n buys = buy.shift(1) != buy\n sells = sell.shift(1) != sell\n policy = pd.Series(np.zeros(buy.size), index=buy.index)\n if accelerate:\n buys.reset_index(drop=True, inplace=True)\n sells.reset_index(drop=True, inplace=True)\n index = buys[buys | sells].index.to_numpy()\n if mingain == 0 and stoploss == 0:\n policy_ = ultimate_cycle.ultimate_cycle(policy.to_numpy(), buys.to_numpy(), sells.to_numpy(), index)\n elif not firstopportunity and stoploss == 0:\n policy_ = ultimate_cycle.cycle_checkgain(policy.to_numpy(), buys.to_numpy(), sells.to_numpy(), index, prices.to_numpy(), mingain)\n else:\n policy_ = ultimate_cycle.cycle_absolutegain(policy.to_numpy(dtype=bool), buys.to_numpy(dtype=bool), buys[buys].index.to_numpy(dtype=np.int32), prices.to_numpy(dtype=np.float32), mingain, stoploss)\n policy = pd.Series(policy_, index=policy.index)\n else:\n token = 1\n buy_price = 0\n for idx in tqdm(buys[buys | sells].index):\n if token and buys.loc[idx]:\n policy.loc[idx] = 1\n token = 0\n buy_price = prices.loc[idx]\n elif not token and sells.loc[idx] and mingain*(prices.loc[idx]/buy_price) >= mingain*(1 + mingain):\n policy.loc[idx] = 1\n token = 1\n return policy == 1"
]
| [
[
"numpy.zeros",
"pandas.Series"
]
]
|
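The `macd` oscillator in utils/technical.py above is simply the difference of two rolling means, with the trading signal taken where that difference changes sign. A short sketch of the same computation on a synthetic price series (the window lengths and the random price path are illustrative, not taken from the repo):

import numpy as np
import pandas as pd

prices = pd.Series(100 + np.cumsum(np.random.randn(500)))  # synthetic price path

short, long_ = 12, 26
macd = prices.rolling(short).mean() - prices.rolling(long_).mean()

# Signal where MACD crosses zero, mirroring `positive.shift(1) != positive`
positive = macd > 0
crossings = positive.shift(1) != positive
print(int(crossings.sum()), "sign changes flagged")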
avant1/server | [
"effbc03644de60ed97242811e0933e3611f14cd8"
]
| [
"qa/L0_infer/infer_test.py"
]
| [
"# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nsys.path.append(\"../common\")\n\nimport unittest\nimport numpy as np\nimport infer_util as iu\nimport test_util as tu\nimport os\n\nfrom tritonclientutils import *\n\nTEST_SYSTEM_SHARED_MEMORY = bool(\n int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))\nTEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY',\n 0)))\nCPU_ONLY = (os.environ.get('TRITON_SERVER_CPU_ONLY') is not None)\n\nUSE_GRPC = (os.environ.get('USE_GRPC', 1) != \"0\")\nUSE_HTTP = (os.environ.get('USE_HTTP', 1) != \"0\")\nassert USE_GRPC or USE_HTTP, \"USE_GRPC or USE_HTTP must be non-zero\"\n\nBACKENDS = os.environ.get('BACKENDS',\n \"graphdef savedmodel onnx libtorch plan python\")\nENSEMBLES = bool(int(os.environ.get('ENSEMBLES', 1)))\nOS_WINDOWS = bool(int(os.environ.get('OS_WINDOWS', 0)))\n\nnp_dtype_string = np.dtype(object)\n\n\nclass InferTest(tu.TestResultCollector):\n\n def _full_exact(self, input_dtype, output0_dtype, output1_dtype,\n output0_raw, output1_raw, swap):\n\n def _infer_exact_helper(tester,\n pf,\n tensor_shape,\n batch_size,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=True,\n output1_raw=True,\n model_version=None,\n swap=False,\n outputs=(\"OUTPUT0\", \"OUTPUT1\"),\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_http_json_tensors=True,\n skip_request_id_check=True,\n use_streaming=True,\n correlation_id=0):\n for bs in (1, batch_size):\n # model that does not support batching\n if bs == 1:\n iu.infer_exact(\n tester,\n pf + \"_nobatch\",\n tensor_shape,\n bs,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n model_version=model_version,\n swap=swap,\n outputs=outputs,\n use_http=use_http,\n use_grpc=use_grpc,\n use_http_json_tensors=use_http_json_tensors,\n skip_request_id_check=skip_request_id_check,\n use_streaming=use_streaming,\n correlation_id=correlation_id,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n # 
model that supports batching\n iu.infer_exact(\n tester,\n pf, (bs,) + tensor_shape,\n bs,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n model_version=model_version,\n swap=swap,\n outputs=outputs,\n use_http=use_http,\n use_grpc=use_grpc,\n use_http_json_tensors=use_http_json_tensors,\n skip_request_id_check=skip_request_id_check,\n use_streaming=use_streaming,\n correlation_id=correlation_id,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n input_size = 16\n\n all_ensemble_prefix = [\"simple_\", \"sequence_\", \"fan_\"]\n ensemble_prefix = [\"\"]\n if ENSEMBLES and OS_WINDOWS:\n for prefix in all_ensemble_prefix:\n if tu.validate_for_ensemble_model(prefix, input_dtype,\n output0_dtype, output1_dtype,\n (input_size,), (input_size,),\n (input_size,)):\n ensemble_prefix.append(prefix)\n\n if tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,\n (input_size,), (input_size,),\n (input_size,)):\n for prefix in ensemble_prefix:\n for pf in [\"graphdef\", \"savedmodel\"]:\n if pf in BACKENDS:\n _infer_exact_helper(self,\n prefix + pf, (input_size,),\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n if not CPU_ONLY and tu.validate_for_trt_model(\n input_dtype, output0_dtype, output1_dtype, (input_size, 1, 1),\n (input_size, 1, 1), (input_size, 1, 1)):\n for prefix in ensemble_prefix:\n if 'plan' in BACKENDS:\n if input_dtype == np.int8:\n _infer_exact_helper(self,\n prefix + 'plan', (input_size, 1, 1),\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n else:\n _infer_exact_helper(self,\n prefix + 'plan', (input_size,),\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n if tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,\n (input_size,), (input_size,),\n (input_size,)):\n for prefix in ensemble_prefix:\n if 'onnx' in BACKENDS:\n _infer_exact_helper(self,\n prefix + 'onnx', (input_size,),\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n if tu.validate_for_libtorch_model(input_dtype, output0_dtype,\n output1_dtype, (input_size,),\n (input_size,), (input_size,)):\n for prefix in ensemble_prefix:\n if 'libtorch' in BACKENDS:\n _infer_exact_helper(self,\n prefix + 'libtorch', (input_size,),\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n if prefix == \"\":\n if 'python' in BACKENDS:\n _infer_exact_helper(self,\n prefix + 'python', (input_size,),\n 8,\n input_dtype,\n output0_dtype,\n output1_dtype,\n output0_raw=output0_raw,\n output1_raw=output1_raw,\n swap=swap)\n\n def test_raw_bbb(self):\n self._full_exact(np.int8,\n np.int8,\n np.int8,\n output0_raw=True,\n output1_raw=True,\n swap=True)\n\n def test_raw_sss(self):\n self._full_exact(np.int16,\n np.int16,\n np.int16,\n output0_raw=True,\n output1_raw=True,\n swap=True)\n\n def test_raw_iii(self):\n self._full_exact(np.int32,\n np.int32,\n np.int32,\n output0_raw=True,\n output1_raw=True,\n swap=True)\n\n def test_raw_lll(self):\n self._full_exact(np.int64,\n np.int64,\n np.int64,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_hhh(self):\n self._full_exact(np.float16,\n np.float16,\n np.float16,\n 
output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_fff(self):\n self._full_exact(np.float32,\n np.float32,\n np.float32,\n output0_raw=True,\n output1_raw=True,\n swap=True)\n\n def test_raw_hff(self):\n self._full_exact(np.float16,\n np.float32,\n np.float32,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_bii(self):\n self._full_exact(np.int8,\n np.int32,\n np.int32,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_ibb(self):\n self._full_exact(np.int32,\n np.int8,\n np.int8,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_ibs(self):\n self._full_exact(np.int32,\n np.int8,\n np.int16,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_iff(self):\n self._full_exact(np.int32,\n np.float32,\n np.float32,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_fii(self):\n self._full_exact(np.float32,\n np.int32,\n np.int32,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_ihs(self):\n self._full_exact(np.int32,\n np.float16,\n np.int16,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_ooo(self):\n self._full_exact(np_dtype_string,\n np_dtype_string,\n np_dtype_string,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_oii(self):\n self._full_exact(np_dtype_string,\n np.int32,\n np.int32,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_oio(self):\n self._full_exact(np_dtype_string,\n np.int32,\n np_dtype_string,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_ooi(self):\n self._full_exact(np_dtype_string,\n np_dtype_string,\n np.int32,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_ioo(self):\n self._full_exact(np.int32,\n np_dtype_string,\n np_dtype_string,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_iio(self):\n self._full_exact(np.int32,\n np.int32,\n np_dtype_string,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n def test_raw_ioi(self):\n self._full_exact(np.int32,\n np_dtype_string,\n np.int32,\n output0_raw=True,\n output1_raw=True,\n swap=False)\n\n # shared memory does not support class output\n if not (TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY):\n\n def test_class_bbb(self):\n self._full_exact(np.int8,\n np.int8,\n np.int8,\n output0_raw=False,\n output1_raw=False,\n swap=True)\n\n def test_class_sss(self):\n self._full_exact(np.int16,\n np.int16,\n np.int16,\n output0_raw=False,\n output1_raw=False,\n swap=True)\n\n def test_class_iii(self):\n self._full_exact(np.int32,\n np.int32,\n np.int32,\n output0_raw=False,\n output1_raw=False,\n swap=True)\n\n def test_class_lll(self):\n self._full_exact(np.int64,\n np.int64,\n np.int64,\n output0_raw=False,\n output1_raw=False,\n swap=False)\n\n def test_class_fff(self):\n self._full_exact(np.float32,\n np.float32,\n np.float32,\n output0_raw=False,\n output1_raw=False,\n swap=True)\n\n def test_class_iff(self):\n self._full_exact(np.int32,\n np.float32,\n np.float32,\n output0_raw=False,\n output1_raw=False,\n swap=False)\n\n def test_mix_bbb(self):\n self._full_exact(np.int8,\n np.int8,\n np.int8,\n output0_raw=True,\n output1_raw=False,\n swap=True)\n\n def test_mix_sss(self):\n self._full_exact(np.int16,\n np.int16,\n np.int16,\n output0_raw=False,\n output1_raw=True,\n swap=True)\n\n def test_mix_iii(self):\n self._full_exact(np.int32,\n np.int32,\n np.int32,\n output0_raw=True,\n output1_raw=False,\n swap=True)\n\n def 
test_mix_lll(self):\n self._full_exact(np.int64,\n np.int64,\n np.int64,\n output0_raw=False,\n output1_raw=True,\n swap=False)\n\n def test_mix_fff(self):\n self._full_exact(np.float32,\n np.float32,\n np.float32,\n output0_raw=True,\n output1_raw=False,\n swap=True)\n\n def test_mix_iff(self):\n self._full_exact(np.int32,\n np.float32,\n np.float32,\n output0_raw=False,\n output1_raw=True,\n swap=False)\n\n def test_raw_version_latest_1(self):\n input_size = 16\n tensor_shape = (1, input_size)\n\n # There are 3 versions of graphdef_int8_int8_int8 but\n # only version 3 should be available\n for platform in ('graphdef', 'savedmodel'):\n if platform not in BACKENDS:\n continue\n try:\n iu.infer_exact(\n self,\n platform,\n tensor_shape,\n 1,\n np.int8,\n np.int8,\n np.int8,\n model_version=1,\n swap=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n except InferenceServerException as ex:\n self.assertTrue(\n ex.message().startswith(\"Request for unknown model\"))\n\n try:\n iu.infer_exact(\n self,\n platform,\n tensor_shape,\n 1,\n np.int8,\n np.int8,\n np.int8,\n model_version=2,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n except InferenceServerException as ex:\n self.assertTrue(\n ex.message().startswith(\"Request for unknown model\"))\n\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.int8,\n np.int8,\n np.int8,\n model_version=3,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n def test_raw_version_latest_2(self):\n input_size = 16\n tensor_shape = (1, input_size)\n\n # There are 3 versions of graphdef_int16_int16_int16 but only\n # versions 2 and 3 should be available\n for platform in ('graphdef', 'savedmodel'):\n if platform not in BACKENDS:\n continue\n try:\n iu.infer_exact(\n self,\n platform,\n tensor_shape,\n 1,\n np.int16,\n np.int16,\n np.int16,\n model_version=1,\n swap=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n except InferenceServerException as ex:\n self.assertTrue(\n ex.message().startswith(\"Request for unknown model\"))\n\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.int16,\n np.int16,\n np.int16,\n model_version=2,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.int16,\n np.int16,\n np.int16,\n model_version=3,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n def test_raw_version_all(self):\n input_size = 16\n tensor_shape = (1, input_size)\n\n # There are 3 versions of *_int32_int32_int32 and all should\n # be available.\n for platform in ('graphdef', 'savedmodel'):\n if platform not in BACKENDS:\n continue\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.int32,\n np.int32,\n np.int32,\n model_version=1,\n swap=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n iu.infer_exact(self,\n platform,\n 
tensor_shape,\n 1,\n np.int32,\n np.int32,\n np.int32,\n model_version=2,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.int32,\n np.int32,\n np.int32,\n model_version=3,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n def test_raw_version_specific_1(self):\n input_size = 16\n tensor_shape = (1, input_size)\n\n # There are 3 versions of *_float16_float16_float16 but only\n # version 1 should be available.\n for platform in ('graphdef', 'savedmodel'):\n if platform not in BACKENDS:\n continue\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.float16,\n np.float16,\n np.float16,\n model_version=1,\n swap=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n try:\n iu.infer_exact(\n self,\n platform,\n tensor_shape,\n 1,\n np.float16,\n np.float16,\n np.float16,\n model_version=2,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n except InferenceServerException as ex:\n self.assertTrue(\n ex.message().startswith(\"Request for unknown model\"))\n\n try:\n iu.infer_exact(\n self,\n platform,\n tensor_shape,\n 1,\n np.float16,\n np.float16,\n np.float16,\n model_version=3,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n except InferenceServerException as ex:\n self.assertTrue(\n ex.message().startswith(\"Request for unknown model\"))\n\n def test_raw_version_specific_1_3(self):\n input_size = 16\n\n # There are 3 versions of *_float32_float32_float32 but only\n # versions 1 and 3 should be available.\n for platform in ('graphdef', 'savedmodel', 'plan'):\n if platform == 'plan' and CPU_ONLY:\n continue\n if platform not in BACKENDS:\n continue\n tensor_shape = (1, input_size)\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.float32,\n np.float32,\n np.float32,\n model_version=1,\n swap=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n try:\n iu.infer_exact(\n self,\n platform,\n tensor_shape,\n 1,\n np.float32,\n np.float32,\n np.float32,\n model_version=2,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n except InferenceServerException as ex:\n self.assertTrue(\n ex.message().startswith(\"Request for unknown model\"))\n\n iu.infer_exact(self,\n platform,\n tensor_shape,\n 1,\n np.float32,\n np.float32,\n np.float32,\n model_version=3,\n swap=True,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n if ENSEMBLES:\n if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):\n\n def test_ensemble_mix_platform(self):\n # Skip on CPU only machine as TensorRT model is used in this ensemble\n if CPU_ONLY:\n return\n for bs in (1, 8):\n iu.infer_exact(\n self,\n \"mix_platform\", (bs, 16),\n bs,\n np.float32,\n np.float32,\n np.float32,\n use_http=USE_HTTP,\n 
use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n if \"graphdef\" in BACKENDS:\n\n def test_ensemble_mix_type(self):\n for bs in (1, 8):\n iu.infer_exact(\n self,\n \"mix_type\", (bs, 16),\n bs,\n np.int32,\n np.float32,\n np.float32,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):\n\n def test_ensemble_mix_ensemble(self):\n for bs in (1, 8):\n iu.infer_exact(\n self,\n \"mix_ensemble\", (bs, 16),\n bs,\n np.int32,\n np.float32,\n np.float32,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n if all(x in BACKENDS for x in ['graphdef',]) and not OS_WINDOWS:\n\n def test_ensemble_mix_batch_nobatch(self):\n base_names = [\"batch_to_nobatch\", \"nobatch_to_batch\"]\n for name in base_names:\n for bs in (1, 8):\n iu.infer_exact(\n self,\n name, (bs, 16),\n bs,\n np.float32,\n np.float32,\n np.float32,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n iu.infer_exact(\n self,\n name + \"_nobatch\", (8, 16),\n 1,\n np.float32,\n np.float32,\n np.float32,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n # batch -> nobatch -> batch\n for bs in (1, 8):\n iu.infer_exact(\n self,\n \"mix_nobatch_batch\", (bs, 16),\n bs,\n np.float32,\n np.float32,\n np.float32,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n if not (TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY):\n\n def test_ensemble_label_lookup(self):\n if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):\n # Ensemble needs to look up label from the actual model\n for bs in (1, 8):\n iu.infer_exact(\n self,\n \"mix_platform\", (bs, 16),\n bs,\n np.float32,\n np.float32,\n np.float32,\n output0_raw=False,\n output1_raw=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):\n # Label from the actual model will be passed along the nested ensemble\n for bs in (1, 8):\n iu.infer_exact(\n self,\n \"mix_ensemble\", (bs, 16),\n bs,\n np.int32,\n np.float32,\n np.float32,\n output0_raw=False,\n output1_raw=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n if \"graphdef\" in BACKENDS:\n # If label file is provided, it will use the provided label file directly\n try:\n iu.infer_exact(\n self,\n \"wrong_label\", (1, 16),\n 1,\n np.int32,\n np.float32,\n np.float32,\n output0_raw=False,\n output1_raw=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n except AssertionError:\n # Sanity check that infer_exact failed since this ensemble is provided\n # with unexpected labels\n pass\n\n if \"graphdef\" in BACKENDS:\n for bs in (1, 8):\n iu.infer_exact(\n self,\n \"label_override\", (bs, 16),\n bs,\n np.int32,\n np.float32,\n np.float32,\n output0_raw=False,\n 
output1_raw=False,\n use_http=USE_HTTP,\n use_grpc=USE_GRPC,\n use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,\n use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"numpy.dtype"
]
]
|
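The Triton infer_test.py excerpt above drives all of its test variants from environment variables (shared-memory modes, protocols, enabled backends). A small sketch of that flag-parsing pattern in isolation, with the variable names and defaults copied from the excerpt; note the excerpt keeps BACKENDS as a single space-separated string and uses plain substring membership checks:

import os

# Integer-valued flags are coerced via bool(int(...)).
TEST_SYSTEM_SHARED_MEMORY = bool(int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))
ENSEMBLES = bool(int(os.environ.get('ENSEMBLES', 1)))

# Protocol switches treat anything other than the string "0" as enabled.
USE_GRPC = (os.environ.get('USE_GRPC', 1) != "0")
USE_HTTP = (os.environ.get('USE_HTTP', 1) != "0")

BACKENDS = os.environ.get('BACKENDS', "graphdef savedmodel onnx libtorch plan python")
print('onnx' in BACKENDS, USE_GRPC, USE_HTTP, TEST_SYSTEM_SHARED_MEMORY, ENSEMBLES)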
R1704/SpeechRecognitionSNN | [
"4b788d1bd20d8ce201da6da8b200b3ca722c7efa"
]
| [
"SpykeTorch/utils.py"
]
| [
"import torch\nimport torch.nn.functional as fn\nimport numpy as np\nimport math\nfrom torchvision import transforms\nfrom torchvision import datasets\nimport os\n\ndef to_pair(data):\n\tr\"\"\"Converts a single or a tuple of data into a pair. If the data is a tuple with more than two elements, it selects\n\tthe first two of them. In case of single data, it duplicates that data into a pair.\n\n\tArgs:\n\t\tdata (object or tuple): The input data.\n\n\tReturns:\n\t\tTuple: A pair of data.\n\t\"\"\"\n\tif isinstance(data, tuple):\n\t\treturn data[0:2]\n\treturn (data, data)\n\ndef generate_inhibition_kernel(inhibition_percents):\n\tr\"\"\"Generates an inhibition kernel suitable to be used by :func:`~functional.intensity_lateral_inhibition`.\n\n\tArgs:\n\t\tinhibition_percents (sequence): The sequence of inhibition factors (in range [0,1]).\n\n\tReturns:\n\t\tTensor: Inhibition kernel.\n\t\"\"\"\n\tinhibition_kernel = torch.zeros(2*len(inhibition_percents)+1, 2*len(inhibition_percents)+1).float()\n\tcenter = len(inhibition_percents)\n\tfor i in range(2*len(inhibition_percents)+1):\n\t\tfor j in range(2*len(inhibition_percents)+1):\n\t\t\tdist = int(max(math.fabs(i - center), math.fabs(j - center)))\n\t\t\tif dist != 0:\n\t\t\t\tinhibition_kernel[i,j] = inhibition_percents[dist - 1]\n\treturn inhibition_kernel\n\ndef tensor_to_text(data, address):\n\tr\"\"\"Saves a tensor into a text file in row-major format. The first line of the file contains comma-separated integers denoting\n\tthe size of each dimension. The second line contains comma-separated values indicating all the tensor's data.\n\n\tArgs:\n\t\tdata (Tensor): The tensor to be saved.\n\t\taddress (str): The saving address.\n\t\"\"\"\n\tf = open(address, \"w\")\n\tdata_cpu = data.cpu()\n\tshape = data.shape\n\tprint(\",\".join(map(str, shape)), file=f)\n\tdata_flat = data_cpu.view(-1).numpy()\n\tprint(\",\".join(data_flat.astype(np.str)), file=f)\n\tf.close()\n\ndef text_to_tensor(address, type='float'):\n\tr\"\"\"Loads a tensor from a text file. Format of the text file is as follows: The first line of the file contains comma-separated integers denoting\n\tthe size of each dimension. The second line contains comma-separated values indicating all the tensor's data.\n\n\tArgs:\n\t\taddress (str): Address of the text file.\n\t\ttype (float or int, optional): The type of the tensor's data ('float' or 'int'). Default: 'float'\n\n\tReturns:\n\t\tTensor: The loaded tensor.\n\t\"\"\"\n\tf = open(address, \"r\")\n\tshape = tuple(map(int, f.readline().split(\",\")))\n\tdata = np.array(f.readline().split(\",\"))\n\tif type == 'float':\n\t\tdata = data.astype(np.float32)\n\telif type == 'int':\n\t\tdata = data.astype(np.int32)\n\telse:\n\t\traise ValueError(\"type must be 'int' or 'float'\")\n\tdata = torch.from_numpy(data)\n\tdata = data.reshape(shape)\n\tf.close()\n\treturn data\n\nclass LateralIntencityInhibition:\n\tr\"\"\"Applies lateral inhibition on intensities. For each location, this inhibition decreases the intensity of the\n\tsurrounding cells that has lower intensities by a specific factor. 
This factor is relative to the distance of the\n\tneighbors and are put in the :attr:`inhibition_percents`.\n\n\tArgs:\n\t\tinhibition_percents (sequence): The sequence of inhibition factors (in range [0,1]).\n\t\"\"\"\n\tdef __init__(self, inhibition_percents):\n\t\tself.inhibition_kernel = generate_inhibition_kernel(inhibition_percents)\n\t\tself.inhibition_kernel.unsqueeze_(0).unsqueeze_(0)\n\n\t# decrease lateral intencities by factors given in the inhibition_kernel\n\tdef intensity_lateral_inhibition(self, intencities):\n\t\tintencities.squeeze_(0)\n\t\tintencities.unsqueeze_(1)\n\n\t\tinh_win_size = self.inhibition_kernel.size(-1)\n\t\trad = inh_win_size//2\n\t\t# repeat each value\n\t\tvalues = intencities.reshape(intencities.size(0),intencities.size(1),-1,1)\n\t\tvalues = values.repeat(1,1,1,inh_win_size)\n\t\tvalues = values.reshape(intencities.size(0),intencities.size(1),-1,intencities.size(-1)*inh_win_size)\n\t\tvalues = values.repeat(1,1,1,inh_win_size)\n\t\tvalues = values.reshape(intencities.size(0),intencities.size(1),-1,intencities.size(-1)*inh_win_size)\n\t\t# extend patches\n\t\tpadded = fn.pad(intencities,(rad,rad,rad,rad))\n\t\t# column-wise\n\t\tpatches = padded.unfold(-1,inh_win_size,1)\n\t\tpatches = patches.reshape(patches.size(0),patches.size(1),patches.size(2),-1,patches.size(3)*patches.size(4))\n\t\tpatches.squeeze_(-2)\n\t\t# row-wise\n\t\tpatches = patches.unfold(-2,inh_win_size,1).transpose(-1,-2)\n\t\tpatches = patches.reshape(patches.size(0),patches.size(1),1,-1,patches.size(-1))\n\t\tpatches.squeeze_(-3)\n\t\t# compare each element by its neighbors\n\t\tcoef = values - patches\n\t\tcoef.clamp_(min=0).sign_() # \"ones\" are neighbors greater than center\n\t\t# convolution with full stride to get accumulative inhibiiton factor\n\t\tfactors = fn.conv2d(coef, self.inhibition_kernel, stride=inh_win_size)\n\t\tresult = intencities + intencities * factors\n\n\t\tintencities.squeeze_(1)\n\t\tintencities.unsqueeze_(0)\n\t\tresult.squeeze_(1)\n\t\tresult.unsqueeze_(0)\n\t\treturn result\n\n\tdef __call__(self,input):\n\t\treturn self.intensity_lateral_inhibition(input)\n\nclass FilterKernel:\n\tr\"\"\"Base class for generating image filter kernels such as Gabor, DoG, etc. 
Each subclass should override :attr:`__call__` function.\n\t\"\"\"\n\tdef __init__(self, window_size):\n\t\tself.window_size = window_size\n\n\tdef __call__(self):\n\t\tpass\n\nclass DoGKernel(FilterKernel):\n\tr\"\"\"Generates DoG filter kernel.\n\n\tArgs:\n\t\twindow_size (int): The size of the window (square window).\n\t\tsigma1 (float): The sigma for the first Gaussian function.\n\t\tsigma2 (float): The sigma for the second Gaussian function.\n\t\"\"\"\n\tdef __init__(self, window_size, sigma1, sigma2):\n\t\tsuper(DoGKernel, self).__init__(window_size)\n\t\tself.sigma1 = sigma1\n\t\tself.sigma2 = sigma2\n\n\t# returns a 2d tensor corresponding to the requested DoG filter\n\tdef __call__(self):\n\t\tw = self.window_size//2\n\t\tx, y = np.mgrid[-w:w+1:1, -w:w+1:1]\n\t\ta = 1.0 / (2 * math.pi)\n\t\tprod = x*x + y*y\n\t\tf1 = (1/(self.sigma1*self.sigma1)) * np.exp(-0.5 * (1/(self.sigma1*self.sigma1)) * (prod))\n\t\tf2 = (1/(self.sigma2*self.sigma2)) * np.exp(-0.5 * (1/(self.sigma2*self.sigma2)) * (prod))\n\t\tdog = a * (f1-f2)\n\t\tdog_mean = np.mean(dog)\n\t\tdog = dog - dog_mean\n\t\tdog_max = np.max(dog)\n\t\tdog = dog / dog_max\n\t\tdog_tensor = torch.from_numpy(dog)\n\t\treturn dog_tensor.float()\n\nclass GaborKernel(FilterKernel):\n\tr\"\"\"Generates Gabor filter kernel.\n\n\tArgs:\n\t\twindow_size (int): The size of the window (square window).\n\t\torientation (float): The orientation of the Gabor filter (in degrees).\n\t\tdiv (float, optional): The divisor of the lambda equation. Default: 4.0\n\t\"\"\"\n\tdef __init__(self, window_size, orientation, div=4.0):\n\t\tsuper(GaborKernel, self).__init__(window_size)\n\t\tself.orientation = orientation\n\t\tself.div = div\n\n\t# returns a 2d tensor corresponding to the requested Gabor filter\n\tdef __call__(self):\n\t\tw = self.window_size//2\n\t\tx, y = np.mgrid[-w:w+1:1, -w:w+1:1]\n\t\tlamda = self.window_size * 2 / self.div\n\t\tsigma = lamda * 0.8\n\t\tsigmaSq = sigma * sigma\n\t\tg = 0.3;\n\t\ttheta = (self.orientation * np.pi) / 180;\n\t\tY = y*np.cos(theta) - x*np.sin(theta)\n\t\tX = y*np.sin(theta) + x*np.cos(theta)\n\t\tgabor = np.exp(-(X * X + g * g * Y * Y) / (2 * sigmaSq)) * np.cos(2 * np.pi * X / lamda);\n\t\tgabor_mean = np.mean(gabor)\n\t\tgabor = gabor - gabor_mean\n\t\tgabor_max = np.max(gabor)\n\t\tgabor = gabor / gabor_max\n\t\tgabor_tensor = torch.from_numpy(gabor)\n\t\treturn gabor_tensor.float()\n\nclass Filter:\n\tr\"\"\"Applies a filter transform. Each filter contains a sequence of :attr:`FilterKernel` objects.\n\tThe result of each filter kernel will be passed through a given threshold (if not :attr:`None`).\n\n\tArgs:\n\t\tfilter_kernels (sequence of FilterKernels): The sequence of filter kernels.\n\t\tpadding (int, optional): The size of the padding for the convolution of filter kernels. Default: 0\n\t\tthresholds (sequence of floats, optional): The threshold for each filter kernel. Default: None\n\t\tuse_abs (boolean, optional): To compute the absolute value of the outputs or not. Default: False\n\n\t.. note::\n\n\t\tThe size of the compund filter kernel tensor (stack of individual filter kernels) will be equal to the \n\t\tgreatest window size among kernels. 
All other smaller kernels will be zero-padded with an appropriate \n\t\tamount.\n\t\"\"\"\n\t# filter_kernels must be a list of filter kernels\n\t# thresholds must be a list of thresholds for each kernel\n\tdef __init__(self, filter_kernels, padding=0, thresholds=None, use_abs=False):\n\t\ttensor_list = []\n\t\tself.max_window_size = 0\n\t\tfor kernel in filter_kernels:\n\t\t\tif isinstance(kernel, torch.Tensor):\n\t\t\t\ttensor_list.append(kernel)\n\t\t\t\tself.max_window_size = max(self.max_window_size, kernel.size(-1))\n\t\t\telse:\n\t\t\t\ttensor_list.append(kernel().unsqueeze(0))\n\t\t\t\tself.max_window_size = max(self.max_window_size, kernel.window_size)\n\t\tfor i in range(len(tensor_list)):\n\t\t\tp = (self.max_window_size - filter_kernels[i].window_size)//2\n\t\t\ttensor_list[i] = fn.pad(tensor_list[i], (p,p,p,p))\n\n\t\tself.kernels = torch.stack(tensor_list)\n\t\tself.number_of_kernels = len(filter_kernels)\n\t\tself.padding = padding\n\t\tif isinstance(thresholds, list):\n\t\t\tself.thresholds = thresholds.clone().detach()\n\t\t\tself.thresholds.unsqueeze_(0).unsqueeze_(2).unsqueeze_(3)\n\t\telse:\n\t\t\tself.thresholds = thresholds\n\t\tself.use_abs = use_abs\n\n\t# returns a 4d tensor containing the flitered versions of the input image\n\t# input is a 4d tensor. dim: (minibatch=1, filter_kernels, height, width)\n\tdef __call__(self, input):\n\t\toutput = fn.conv2d(input, self.kernels, padding = self.padding).float()\n\t\tif not(self.thresholds is None):\n\t\t\toutput = torch.where(output < self.thresholds, torch.tensor(0.0, device=output.device), output)\n\t\tif self.use_abs:\n\t\t\ttorch.abs_(output)\n\t\treturn output\n\nclass Intensity2Latency:\n\tr\"\"\"Applies intensity to latency transform. Spike waves are generated in the form of\n\tspike bins with almost equal number of spikes.\n\n\tArgs:\n\t\tnumber_of_spike_bins (int): Number of spike bins (time steps).\n\t\tto_spike (boolean, optional): To generate spike-wave tensor or not. Default: False\n\n\t.. note::\n\n\t\tIf :attr:`to_spike` is :attr:`False`, then the result is intesities that are ordered and packed into bins.\n\t\"\"\"\n\tdef __init__(self, number_of_spike_bins, to_spike=False):\n\t\tself.time_steps = number_of_spike_bins\n\t\tself.to_spike = to_spike\n\t\n\t# intencities is a tensor of input intencities (1, input_channels, height, width)\n\t# returns a tensor of tensors containing spikes in each timestep (considers minibatch for timesteps)\n\t# spikes are accumulative, i.e. 
spikes in timestep i are also presented in i+1, i+2, ...\n\tdef intensity_to_latency(self, intencities):\n\t\t#bins = []\n\t\tbins_intencities = []\n\t\tnonzero_cnt = torch.nonzero(intencities).size()[0]\n\n\t\t#check for empty bins\n\t\tbin_size = nonzero_cnt//self.time_steps\n\n\t\t#sort\n\t\tintencities_flattened = torch.reshape(intencities, (-1,))\n\t\tintencities_flattened_sorted = torch.sort(intencities_flattened, descending=True)\n\n\t\t#bin packing\n\t\tsorted_bins_value, sorted_bins_idx = torch.split(intencities_flattened_sorted[0], bin_size), torch.split(intencities_flattened_sorted[1], bin_size)\n\n\t\t#add to the list of timesteps\n\t\tspike_map = torch.zeros_like(intencities_flattened_sorted[0])\n\t\n\t\tfor i in range(self.time_steps):\n\t\t\tspike_map.scatter_(0, sorted_bins_idx[i], sorted_bins_value[i])\n\t\t\tspike_map_copy = spike_map.clone().detach()\n\t\t\tspike_map_copy = spike_map_copy.reshape(tuple(intencities.shape))\n\t\t\tbins_intencities.append(spike_map_copy.squeeze(0).float())\n\t\t\t#bins.append(spike_map_copy.sign().squeeze_(0).float())\n\t\n\t\treturn torch.stack(bins_intencities)#, torch.stack(bins)\n\t\t#return torch.stack(bins)\n\n\tdef __call__(self, image):\n\t\tif self.to_spike:\n\t\t\treturn self.intensity_to_latency(image).sign()\n\t\treturn self.intensity_to_latency(image)\n\n#class ImageFolderCache(datasets.ImageFolder):\n#\tdef __init__(self, root, transform=None, target_transform=None,\n# loader=datasets.folder.default_loader, cache_address=None):\n#\t\tsuper(ImageFolderCache, self).__init__(root, transform=transform, target_transform=target_transform, loader=loader)\n#\t\tself.imgs = self.samples\n#\t\tself.cache_address = cache_address\n#\t\tself.cache = [None] * len(self)\n\n#\tdef __getitem__(self, index):\n#\t\tpath, target = self.samples[index]\n#\t\tif self.cache[index] is None:\n#\t\t\tsample = self.loader(path)\n#\t\t\tif self.transform is not None:\n#\t\t\t\tsample = self.transform(sample)\n#\t\t\tif self.target_transform is not None:\n#\t\t\t\ttarget = self.target_transform(target)\n\n#\t\t\t#cache it\n#\t\t\tif self.cache_address is None:\n#\t\t\t\tself.cache[index] = sample\n#\t\t\telse:\n#\t\t\t\tsave_path = os.path.join(self.cache_address, str(index)+'.c')\n#\t\t\t\ttorch.save(sample, save_path)\n#\t\t\t\tself.cache[index] = save_path\n#\t\telse:\n#\t\t\tif self.cache_address is None:\n#\t\t\t\tsample = self.cache[index]\n#\t\t\telse:\n#\t\t\t\tsample = torch.load(self.cache[index])\n#\t\treturn sample, target\n\n#\tdef reset_cache(self):\n#\t\tself.cache = [None] * len(self)\n\nclass CacheDataset(torch.utils.data.Dataset):\n\tr\"\"\"A wrapper dataset to cache pre-processed data. It can cache data on RAM or a secondary memory.\n\n\t.. note::\n\n\t\tSince converting image into spike-wave can be time consuming, we recommend to wrap your dataset into a :attr:`CacheDataset`\n\t\tobject.\n\n\tArgs:\n\t\tdataset (torch.utils.data.Dataset): The reference dataset object.\n\t\tcache_address (str, optional): The location of cache in the secondary memory. Use :attr:`None` to cache on RAM. 
Default: None\n\t\"\"\"\n\tdef __init__(self, dataset, cache_address=None):\n\t\tself.dataset = dataset\n\t\tself.cache_address = cache_address\n\t\tself.cache = [None] * len(self.dataset)\n\n\tdef __getitem__(self, index):\n\t\tif self.cache[index] is None:\n\t\t\t#cache it\n\t\t\tsample, target = self.dataset[index]\n\t\t\tif self.cache_address is None:\n\t\t\t\tself.cache[index] = sample, target\n\t\t\telse:\n\t\t\t\tsave_path = os.path.join(self.cache_address, str(index))\n\t\t\t\ttorch.save(sample, save_path + \".cd\")\n\t\t\t\ttorch.save(target, save_path + \".cl\")\n\t\t\t\tself.cache[index] = save_path\n\t\telse:\n\t\t\tif self.cache_address is None:\n\t\t\t\tsample, target = self.cache[index]\n\t\t\telse:\n\t\t\t\tsample = torch.load(self.cache[index] + \".cd\")\n\t\t\t\ttarget = torch.load(self.cache[index] + \".cl\")\n\t\treturn sample, target\n\n\tdef reset_cache(self):\n\t\tr\"\"\"Clears the cached data. It is useful when you want to change a pre-processing parameter during\n\t\tthe training process.\n\t\t\"\"\"\n\t\tif self.cache_address is not None:\n\t\t\tfor add in self.cache:\n\t\t\t\tos.remove(add + \".cd\")\n\t\t\t\tos.remove(add + \".cl\")\n\t\tself.cache = [None] * len(self)\n\n\tdef __len__(self):\n\t\treturn len(self.dataset)"
]
| [
[
"numpy.max",
"torch.nonzero",
"numpy.sin",
"torch.stack",
"torch.split",
"torch.save",
"numpy.exp",
"numpy.mean",
"torch.from_numpy",
"torch.tensor",
"numpy.cos",
"torch.load",
"torch.zeros_like",
"torch.nn.functional.pad",
"torch.abs_",
"torch.nn.functional.conv2d",
"torch.sort",
"torch.reshape"
]
]
|
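`DoGKernel.__call__` in SpykeTorch/utils.py above builds a difference-of-Gaussians filter on an integer grid, removes its mean, and scales it by its maximum. A compact numpy/torch sketch of that same computation (the window size and sigmas here are arbitrary illustration values):

import math
import numpy as np
import torch

window_size, sigma1, sigma2 = 7, 1.0, 2.0
w = window_size // 2
x, y = np.mgrid[-w:w + 1:1, -w:w + 1:1]

a = 1.0 / (2 * math.pi)
prod = x * x + y * y
f1 = (1 / sigma1**2) * np.exp(-0.5 * prod / sigma1**2)
f2 = (1 / sigma2**2) * np.exp(-0.5 * prod / sigma2**2)

dog = a * (f1 - f2)
dog = dog - dog.mean()   # zero-mean, as in DoGKernel.__call__
dog = dog / dog.max()    # scale so the peak response is 1
kernel = torch.from_numpy(dog).float()
print(kernel.shape)      # torch.Size([7, 7])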
tusharsarkar3/TLA | [
"a898617765e2af8ce4f416d8430a8ee9c92aba94"
]
| [
"build/lib/TLA/Lang_Classify/predict.py"
]
| [
"from TLA.Lang_Classify.model import get_model, BERT_Arch\nimport argparse\nimport torch\nimport pandas as pd\nimport numpy as np\nfrom transformers import AutoModel, BertTokenizerFast\nimport pickle\nfrom distutils.sysconfig import get_python_lib\n\ndef predict(val_text,model):\n try:\n if isinstance(pd.read_csv(val_text),pd.DataFrame) == True:\n val_text = np.array(pd.read_csv(val_text))\n except:\n if isinstance(val_text,str) == True:\n val_text = np.array([val_text])\n else:\n return \"First Argument must be of string or numpy array DataType\"\n\n tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n tokens_val = tokenizer.batch_encode_plus(\n val_text.tolist(),\n max_length = 512,\n padding='max_length',\n truncation=True\n )\n\n val_seq = torch.tensor(tokens_val['input_ids'])\n val_mask = torch.tensor(tokens_val['attention_mask'])\n le = pickle.load(open(get_python_lib() + \"/TLA/ang_Classify/models/encoder.pkl\",\"rb\"))\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n with torch.no_grad():\n model.to(device)\n preds = model(val_seq.to(device), val_mask.to(device))\n preds = preds.detach().cpu().numpy()\n preds = np.argmax(preds, axis=1)\n preds = le.inverse_transform(preds)\n return preds[0]\n\nif __name__ == \"__main__\":\n my_parser = argparse.ArgumentParser()\n my_parser.add_argument('--predict', action='store', type=str,required=True)\n my_parser.add_argument('--weights', action='store', type=str)\n args = my_parser.parse_args()\n model = get_model(args.weights)\n prediction = predict(args.predict,model)\n print(prediction)"
]
| [
[
"numpy.array",
"torch.no_grad",
"torch.cuda.is_available",
"torch.tensor",
"numpy.argmax",
"pandas.read_csv"
]
]
|
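
`predict` in the row above argmaxes the BERT logits and maps the winning index back to a language tag through a pickled encoder. The encoder's type is not visible in the file, so the sketch below assumes a scikit-learn `LabelEncoder` and fabricates the logits and label set; only the argmax / `inverse_transform` step is being shown.

```python
import numpy as np
from sklearn.preprocessing import LabelEncoder    # assumed type of the pickled `le`

le = LabelEncoder().fit(["en", "hi", "ta"])        # hypothetical label set
logits = np.array([[0.1, 2.3, -0.7]])              # fabricated model output for one input
pred_idx = np.argmax(logits, axis=1)               # winning class index per row
print(le.inverse_transform(pred_idx)[0])           # -> "hi"
```
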
hhelm10/graspy | [
"bbf93b069af426885261d64a6225228ff5aa049b"
]
| [
"graspy/utils/utils.py"
]
| [
"#!/usr/bin/env python\n\n# utils.py\n# Created by Eric Bridgeford on 2018-09-07.\n# Email: [email protected]\n# Copyright (c) 2018. All rights reserved.\n\nimport warnings\nfrom collections import Iterable\nfrom functools import reduce\nfrom pathlib import Path\n\nimport networkx as nx\nimport numpy as np\nfrom sklearn.utils import check_array\n\n\ndef import_graph(graph):\n \"\"\"\n\tA function for reading a graph and returning a shared\n\tdata type. Makes IO cleaner and easier.\n\n\tParameters\n\t----------\n graph: object\n Either array-like, shape (n_vertices, n_vertices) numpy array,\n or an object of type networkx.Graph.\n\n\tReturns\n\t-------\n out: array-like, shape (n_vertices, n_vertices)\n A graph.\n\t\t \n\tSee Also\n\t--------\n networkx.Graph, numpy.array\n\t\"\"\"\n if isinstance(graph, (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)):\n out = nx.to_numpy_array(graph, nodelist=sorted(graph.nodes), dtype=np.float)\n elif isinstance(graph, (np.ndarray, np.memmap)):\n maximum = np.max(graph.shape)\n out = check_array(\n graph,\n dtype=[np.float64, np.float32],\n ensure_2d=True,\n allow_nd=True, # For omni tensor input\n ensure_min_features=maximum,\n ensure_min_samples=maximum,\n copy=True,\n )\n else:\n msg = \"Input must be networkx.Graph or np.array, not {}.\".format(type(graph))\n raise TypeError(msg)\n return out\n\n\ndef import_edgelist(\n path, extension=\"edgelist\", delimiter=None, nodetype=int, return_vertices=False\n):\n \"\"\"\n Function for reading a single or multiple edgelists. When importing multiple \n edgelists, the union of vertices from all graphs is computed so that each output\n graph have matched vertex set. The order of nodes are sorted by node values.\n\n Parameters\n ----------\n path : str, Path object, or iterable\n If ``path`` is a directory, then the importing order will be sorted in \n alphabetical order.\n\n extension : str, optional\n If ``path`` is a directory, then the function will convert all files\n with matching extension. \n\n delimiter : str or None, default=None, optional\n Delimiter of edgelist. If None, the delimiter is whitespace.\n\n nodetype : int (default), float, str, Python type, optional\n Convert node data from strings to specified type.\n\n return_vertices : bool, default=False, optional\n Returns the union of all ind\n\n Returns\n -------\n out : list of array-like, or array-like, shape (n_vertices, n_vertices)\n If ``path`` is a directory, a list of arrays is returned. If ``path`` is a file,\n an array is returned.\n\n vertices : array-like, shape (n_vertices, )\n If ``return_vertices`` == True, then returns an array of all vertices that were \n included in the output graphs. \n \"\"\"\n # p = Path(path)\n if not isinstance(path, (str, Path, Iterable)):\n msg = \"path must be a string or Iterable, not {}\".format(type(path))\n raise TypeError(msg)\n\n # get a list of files to import\n if isinstance(path, (str, Path)):\n p = Path(path)\n if p.is_dir():\n files = sorted(p.glob(\"*\" + extension))\n elif p.is_file():\n files = [p]\n else:\n raise ValueError(\"No graphs founds to import.\")\n else: # path is an iterable\n files = [Path(f) for f in path]\n\n if len(files) == 0:\n msg = \"No files found with '{}' extension found.\".format(extension)\n raise ValueError(msg)\n\n graphs = [\n nx.read_weighted_edgelist(f, nodetype=nodetype, delimiter=delimiter)\n for f in files\n ]\n\n if all(len(G.nodes) == 0 for G in graphs):\n msg = (\n \"All graphs have 0 vertices. 
Please double check if proper \"\n + \"'delimiter' is given.\"\n )\n warnings.warn(msg, UserWarning)\n\n # Compute union of all vertices\n vertices = np.sort(reduce(np.union1d, [G.nodes for G in graphs]))\n out = [nx.to_numpy_array(G, nodelist=vertices, dtype=np.float) for G in graphs]\n\n # only return adjacency matrix if input is only 1 graph\n if len(out) == 1:\n out = out[0]\n\n if return_vertices:\n return out, vertices\n else:\n return out\n\n\ndef is_symmetric(X):\n return np.array_equal(X, X.T)\n\n\ndef is_loopless(X):\n return not np.any(np.diag(X) != 0)\n\n\ndef is_unweighted(X):\n return ((X == 0) | (X == 1)).all()\n\n\ndef is_almost_symmetric(X, atol=1e-15):\n return np.allclose(X, X.T, atol=atol)\n\n\ndef symmetrize(graph, method=\"triu\"):\n \"\"\"\n A function for forcing symmetry upon a graph.\n\n Parameters\n ----------\n graph: object\n Either array-like, (n_vertices, n_vertices) numpy matrix,\n or an object of type networkx.Graph.\n\n method: {'triu' (default), 'tril', 'avg'}, optional\n An option indicating which half of the edges to\n retain when symmetrizing. \n\n - 'triu'\n Retain the upper right triangle.\n - 'tril'\n Retain the lower left triangle.\n - 'avg'\n Retain the average weight between the upper and lower \n right triangle, of the adjacency matrix.\n\n Returns\n -------\n graph: array-like, shape (n_vertices, n_vertices)\n the graph with asymmetries removed.\n \"\"\"\n # graph = import_graph(graph)\n if method is \"triu\":\n graph = np.triu(graph)\n elif method is \"tril\":\n graph = np.tril(graph)\n elif method is \"avg\":\n graph = (np.triu(graph) + np.tril(graph)) / 2\n else:\n msg = \"You have not passed a valid parameter for the method.\"\n raise ValueError(msg)\n # A = A + A' - diag(A)\n graph = graph + graph.T - np.diag(np.diag(graph))\n return graph\n\n\ndef remove_loops(graph):\n \"\"\"\n A function to remove loops from a graph.\n\n Parameters\n ----------\n graph: object\n Either array-like, (n_vertices, n_vertices) numpy matrix,\n or an object of type networkx.Graph.\n\n Returns\n -------\n graph: array-like, shape(n_vertices, n_vertices)\n the graph with self-loops (edges between the same node) removed.\n \"\"\"\n graph = import_graph(graph)\n graph = graph - np.diag(np.diag(graph))\n\n return graph\n\n\ndef to_laplace(graph, form=\"DAD\", regularizer=None):\n r\"\"\"\n A function to convert graph adjacency matrix to graph laplacian. \n\n Currently supports I-DAD, DAD, and R-DAD laplacians, where D is the diagonal\n matrix of degrees of each node raised to the -1/2 power, I is the \n identity matrix, and A is the adjacency matrix.\n \n R-DAD is regularized laplacian: where :math:`D_t = D + regularizer*I`.\n\n Parameters\n ----------\n graph: object\n Either array-like, (n_vertices, n_vertices) numpy array,\n or an object of type networkx.Graph.\n\n form: {'I-DAD' (default), 'DAD', 'R-DAD'}, string, optional\n \n - 'I-DAD'\n Computes :math:`L = I - D*A*D`\n - 'DAD'\n Computes :math:`L = D*A*D`\n - 'R-DAD'\n Computes :math:`L = D_t*A*D_t` where :math:`D_t = D + regularizer*I`\n\n regularizer: int, float or None, optional (default=None)\n Constant to be added to the diagonal of degree matrix. If None, average \n node degree is added. If int or float, must be >= 0. Only used when \n ``form`` == 'R-DAD'.\n\n Returns\n -------\n L: numpy.ndarray\n 2D (n_vertices, n_vertices) array representing graph \n laplacian of specified form\n\t\n References\n ----------\n .. [1] Qin, Tai, and Karl Rohe. 
\"Regularized spectral clustering\n under the degree-corrected stochastic blockmodel.\" In Advances\n in Neural Information Processing Systems, pp. 3120-3128. 2013\n \"\"\"\n valid_inputs = [\"I-DAD\", \"DAD\", \"R-DAD\"]\n if form not in valid_inputs:\n raise TypeError(\"Unsuported Laplacian normalization\")\n\n A = import_graph(graph)\n\n if not is_almost_symmetric(A):\n raise ValueError(\"Laplacian not implemented/defined for directed graphs\")\n\n D_vec = np.sum(A, axis=0)\n # regularize laplacian with parameter\n # set to average degree\n if form == \"R-DAD\":\n if regularizer == None:\n regularizer = np.mean(D_vec)\n elif not isinstance(regularizer, (int, float)):\n raise TypeError(\n \"Regularizer must be a int or float, not {}\".format(type(regularizer))\n )\n elif regularizer < 0:\n raise ValueError(\"Regularizer must be greater than or equal to 0\")\n D_vec += regularizer\n\n with np.errstate(divide=\"ignore\"):\n D_root = 1 / np.sqrt(D_vec) # this is 10x faster than ** -0.5\n D_root[np.isinf(D_root)] = 0\n D_root = np.diag(D_root) # just change to sparse diag for sparse support\n\n if form == \"I-DAD\":\n L = np.diag(D_vec) - A\n L = D_root @ L @ D_root\n elif form == \"DAD\" or form == \"R-DAD\":\n L = D_root @ A @ D_root\n return symmetrize(L, method=\"avg\") # sometimes machine prec. makes this necessary\n\n\ndef is_fully_connected(graph):\n \"\"\"\n Checks whether the input graph is fully connected in the undirected case\n or weakly connected in the directed case. \n\n Connected means one can get from any vertex u to vertex v by traversing\n the graph. For a directed graph, weakly connected means that the graph \n is connected after it is converted to an unweighted graph (ignore the \n direction of each edge)\n\n Parameters\n ----------\n graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray\n Input graph in any of the above specified formats. If np.ndarray, \n interpreted as an n x n adjacency matrix\n\n Returns\n -------\n boolean: True if the entire input graph is connected\n\n References\n ----------\n http://mathworld.wolfram.com/ConnectedGraph.html\n http://mathworld.wolfram.com/WeaklyConnectedDigraph.html\n\n \"\"\"\n if type(graph) is np.ndarray:\n if is_symmetric(graph):\n g_object = nx.Graph()\n else:\n g_object = nx.DiGraph()\n graph = nx.from_numpy_array(graph, create_using=g_object)\n if type(graph) in [nx.Graph, nx.MultiGraph]:\n return nx.is_connected(graph)\n elif type(graph) in [nx.DiGraph, nx.MultiDiGraph]:\n return nx.is_weakly_connected(graph)\n\n\ndef get_lcc(graph, return_inds=False):\n \"\"\"\n Finds the largest connected component for the input graph. \n\n The largest connected component is the fully connected subgraph\n which has the most nodes. \n\n Parameters\n ----------\n graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray\n Input graph in any of the above specified formats. If np.ndarray, \n interpreted as an n x n adjacency matrix\n \n return_inds: boolean, default: False\n Whether to return a np.ndarray containing the indices in the original\n adjacency matrix that were kept and are now in the returned graph.\n Ignored when input is networkx object\n\n Returns\n -------\n graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray\n New graph of the largest connected component of the input parameter. 
\n\n inds: (optional)\n Indices from the original adjacency matrix that were kept after taking\n the largest connected component \n \"\"\"\n\n input_ndarray = False\n if type(graph) is np.ndarray:\n input_ndarray = True\n if is_symmetric(graph):\n g_object = nx.Graph()\n else:\n g_object = nx.DiGraph()\n graph = nx.from_numpy_array(graph, create_using=g_object)\n if type(graph) in [nx.Graph, nx.MultiGraph]:\n lcc_nodes = max(nx.connected_components(graph), key=len)\n elif type(graph) in [nx.DiGraph, nx.MultiDiGraph]:\n lcc_nodes = max(nx.weakly_connected_components(graph), key=len)\n lcc = graph.subgraph(lcc_nodes).copy()\n lcc.remove_nodes_from([n for n in lcc if n not in lcc_nodes])\n if return_inds:\n nodelist = np.array(list(lcc_nodes))\n if input_ndarray:\n lcc = nx.to_numpy_array(lcc)\n if return_inds:\n return lcc, nodelist\n return lcc\n\n\ndef get_multigraph_union_lcc(graphs, return_inds=False):\n \"\"\"\n Finds the union of all multiple graphs, then compute the largest connected\n component.\n\n Parameters\n ----------\n graphs: list or np.ndarray\n List of array-like, (n_vertices, n_vertices), or list of np.ndarray\n nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph.\n \n return_inds: boolean, default: False\n Whether to return a np.ndarray containing the indices in the original\n adjacency matrix that were kept and are now in the returned graph.\n Ignored when input is networkx object\n \n Returns\n -------\n out : list or np.ndarray\n If input was a list\n \"\"\"\n if isinstance(graphs, list):\n if not isinstance(graphs[0], np.ndarray):\n raise NotImplementedError\n\n out = [import_graph(g) for g in graphs]\n if len(set(map(np.shape, out))) != 1:\n msg = \"All input graphs must have the same size\"\n raise ValueError(msg)\n bar = np.stack(out).mean(axis=0)\n elif isinstance(graphs, np.ndarray):\n shape = graphs.shape\n if shape[1] != shape[2]:\n msg = \"Input graphs must be square\"\n raise ValueError(msg)\n bar = graphs.mean(axis=0)\n else:\n msg = \"Expected list or np.ndarray, but got {} instead.\".format(type(graphs))\n raise ValueError(msg)\n\n _, idx = get_lcc(bar, return_inds=True)\n idx = np.array(idx)\n\n if isinstance(graphs, np.ndarray):\n graphs[:, idx[:, None], idx]\n elif isinstance(graphs, list):\n if isinstance(graphs[0], np.ndarray):\n graphs = [g[idx[:, None], idx] for g in graphs]\n if return_inds:\n return graphs, idx\n return graphs\n\n\ndef get_multigraph_intersect_lcc(graphs, return_inds=False):\n \"\"\"\n Finds the intersection of multiple graphs's largest connected components. \n\n Computes the largest connected component for each graph that was input, and \n takes the intersection over all of these resulting graphs. Note that this \n does not guarantee finding the largest graph where every node is shared among\n all of the input graphs.\n\n Parameters\n ----------\n graphs: list or np.ndarray\n if list, each element must be an n x n np.ndarray adjacency matrix\n \n return_inds: boolean, default: False\n Whether to return a np.ndarray containing the indices in the original\n adjacency matrix that were kept and are now in the returned graph.\n Ignored when input is networkx object\n \n Returns\n -------\n graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray\n New graph of the largest connected component of the input parameter. 
\n\n inds: (optional)\n Indices from the original adjacency matrix that were kept after taking\n the largest connected component \n \"\"\"\n lcc_by_graph = []\n inds_by_graph = []\n for graph in graphs:\n lcc, inds = get_lcc(graph, return_inds=True)\n lcc_by_graph.append(lcc)\n inds_by_graph.append(inds)\n inds_intersection = reduce(np.intersect1d, inds_by_graph)\n new_graphs = []\n for graph in graphs:\n if type(graph) is np.ndarray:\n lcc = graph[inds_intersection, :][:, inds_intersection]\n else:\n lcc = graph.subgraph(inds_intersection).copy()\n lcc.remove_nodes_from([n for n in lcc if n not in inds_intersection])\n new_graphs.append(lcc)\n # this is not guaranteed be connected after one iteration because taking the\n # intersection of nodes among graphs can cause some components to become\n # disconnected, so, we check for this and run again if necessary\n recurse = False\n for new_graph in new_graphs:\n if not is_fully_connected(new_graph):\n recurse = True\n break\n if recurse:\n new_graphs, inds_intersection = get_multigraph_intersect_lcc(\n new_graphs, return_inds=True\n )\n if type(graphs) != list:\n new_graphs = np.stack(new_graphs)\n if return_inds:\n return new_graphs, inds_intersection\n else:\n return new_graphs\n\n\ndef augment_diagonal(graph, weight=1):\n \"\"\"\n Replaces the diagonal of adjacency matrix with \n :math: \\frac{degree}{num_verts - 1} for the degree associated\n with each node. \n\n For directed graphs, the degree used is the out degree (number) of \n edges leaving the vertex. Ignores self-loops when calculating degree\n\n Parameters\n ----------\n graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray\n Input graph in any of the above specified formats. If np.ndarray, \n interpreted as an n x n adjacency matrix \n \"\"\"\n graph = import_graph(graph)\n graph = remove_loops(graph)\n divisor = graph.shape[0] - 1\n # use out degree for directed graph\n # ignore self loops in either case\n degrees = np.count_nonzero(graph, axis=1)\n diag = weight * degrees / divisor\n graph += np.diag(diag)\n return graph\n"
]
| [
[
"numpy.max",
"numpy.isinf",
"numpy.array",
"numpy.count_nonzero",
"numpy.array_equal",
"numpy.errstate",
"numpy.sum",
"numpy.triu",
"numpy.mean",
"numpy.allclose",
"numpy.stack",
"numpy.sqrt",
"sklearn.utils.check_array",
"numpy.diag",
"numpy.tril"
]
]
|
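
`to_laplace` in the row above normalizes the adjacency matrix by the degree vector: the 'DAD' form is L = D^{-1/2} A D^{-1/2}, and 'I-DAD' is the identity minus that. A minimal numpy check on a three-node path graph (the toy matrix is not from the repo):

```python
import numpy as np

A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])                       # path graph on 3 nodes

D_vec = A.sum(axis=0)                              # node degrees
with np.errstate(divide="ignore"):
    D_root = 1.0 / np.sqrt(D_vec)                  # D^{-1/2} on the diagonal
D_root[np.isinf(D_root)] = 0                       # guard isolated (degree-0) nodes
D_root = np.diag(D_root)

L_dad = D_root @ A @ D_root                        # 'DAD' form
L_idad = D_root @ (np.diag(D_vec) - A) @ D_root    # 'I-DAD' form
print(np.round(L_dad, 3))
print(np.allclose(L_idad, np.eye(3) - L_dad))      # True: I-DAD == I - DAD
```
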
zongdaoming/TinyTransformer | [
"8e64f8816117048c388b4b20e3a56760ce149fe3"
]
| [
"unn/models/heads/pair_head_clean/pair.py"
]
| [
"#coding: utf-8\nfrom ..utils import bbox_helper\nfrom ..utils.pair_helper import pair_nms\nfrom ..utils.pair_helper import pair_box_transform\nfrom ..utils.pair_helper import pair_pos_score\nimport pdb\nimport ctypes\n\nimport math\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport logging\nimport json\nlogger = logging.getLogger('global')\nhistory = [0, 0]\n\ndef to_np_array(x):\n if x is None:\n return x\n if isinstance(x, Variable): x = x.data\n return x.cpu().float().numpy() if torch.is_tensor(x) else x\n\ndef compute_proposal_targets2(all_rois, pair_rois, num_classes, cfg, gt_bboxes, gt_assos, roiTable, image_info, predict_type='rcnn', ignore_regions=None):\n B = len(image_info)\n C = num_classes\n N = pair_rois.shape[0] # 267,前一步makepair生成了 267 个pair\n gt_cls = np.array([0 for _ in range(N)]).astype(np.int32)\n all_rois, gt_assos, image_info = map(to_np_array, [all_rois, gt_assos, image_info])\n pos_ix = []\n neg_ix = []\n gt_cls = [0 for __ in range(N)]\n gt_binary_cls = [[0 for __ in range(C)] for _ in range(N)]\n for b_ix in range(B):\n idx = np.where(all_rois[:,0] == b_ix)[0]\n rois = all_rois[idx, 1: 5] # (100, 4) 该图片下的检测bbox\n gts = gt_bboxes[b_ix] # (16, 5)\n gts = to_np_array(gts) # (1, 13) 该 图片 下的 gt 关联\n gtasso = gt_assos[b_ix].cpu().numpy() # [idx1, idx2, item['label']] + item['bbox1'] + [item['label1']] + item['bbox2'] + [item['label2']]\n dt_pairs = pair_rois[pair_rois[:, 0] == b_ix]\n R = rois.shape[0] \n G = gts.shape[0]\n if R == 0:\n continue\n if G > 0:\n overlaps = bbox_helper.bbox_iou_overlaps(torch.from_numpy(rois), torch.from_numpy(gts)).numpy() # (100, 16)\n argmax_overlaps = overlaps.argmax(axis=1) # 每一个检测框与gt iou重叠最大的 bbox 的索引 (100,)\n max_overlaps = overlaps.max(axis=1) # 每一个检测框与gt iou重叠最大的值 (100, )\n if cfg.get('allow_low_quality_match', False):\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = overlaps.max(axis=0)\n gt_pos_g_ix = np.where(gt_max_overlaps > cfg['negative_iou_thresh'])[0]\n gt_pos_r_ix = gt_argmax_overlaps[gt_pos_g_ix]\n gt_pos_g_ix, return_index = np.unique(gt_pos_g_ix, return_index=True)\n gt_pos_r_ix = gt_pos_r_ix[return_index]\n max_overlaps[gt_pos_r_ix] = 1.0\n\n pos_r_ix = np.where(max_overlaps > cfg['positive_iou_thresh'])[0] # (91, ) 选择该图下的检测的正例的索引\n pos_g_ix = argmax_overlaps[pos_r_ix] # (91, ) 改图下的检测的正例的匹配到的gt的索引\n pos_r_ix, return_index = np.unique(pos_r_ix, return_index=True)\n pos_g_ix = pos_g_ix[return_index]\n GG = gtasso.shape[0]\n #for m in range(dt_pairs.shape[0]):\n # x = dt_pairs[m, 1]\n # y = dt_pairs[m, 2]\n # if x in pos_r_ix and y in pos_r_ix:\n # continue\n # else:\n # id = roiTable[(x,y)]\n # neg_ix.append(id)\n # gt_cls[id] = 0\n \n for k in range(GG): # 对于每一个 gt\n if gtasso[k][2] == 0: # 如果是背景,则略过\n continue\n x = gtasso[k][0] # pair id 1\n y = gtasso[k][1] # pair id 2\n for i in range(len(pos_r_ix)):\n for j in range(len(pos_r_ix)):\n if x == pos_g_ix[i] and y == pos_g_ix[j]: # 只有都匹配到了,才算 pos\n rx = idx[pos_r_ix[i]]\n ry = idx[pos_r_ix[j]]\n if not (rx, ry) in roiTable:\n continue\n assert((rx,ry) in roiTable)\n id = roiTable[(rx,ry)]\n pos_ix.append(id)\n if predict_type == 'rcnn':\n gt_cls[id] = gtasso[k][2]\n gt_binary_cls[id][gtasso[k][2].astype(np.int32)] = 1\n else:\n gt_cls[id] = 1\n gt_binary_cls[id][0] = 1\n \n neg_ix = np.array(list(set([_ for _ in range(N)]) - set(pos_ix))) # 实际本batch中得到的负例的个数,237\n batch_size = cfg['batch_size']\n all_num = batch_size * B # 128 * 4 = 512\n #pos_num = int(all_num * cfg['positive_percent']) # 128\n pos_ix = np.array(pos_ix) 
# 实际本batch中得到的正例的个数,根绝某规则选择出的正例(iou。。。。) (30, )\n #if pos_ix.shape[0] > pos_num: # 如果实际的正例比我们想要的正例数目还要多,则进行随机算则\n # keep_ix = np.random.choice(len(pos_ix), size = pos_num, replace = True) # 随机选择128个正例,降采样\n # pos_ix = pos_ix[keep_ix]\n # if cfg.get('dynamic_batch_size', False): # 动态batch size\n # all_num = int(len(pos_ix) / cfg['positive_percent']) # 增大 batch size,并不一定是 512 了\n\n #neg_num = all_num - len(pos_ix) # 我们想要的负例的个数 (482, )\n #if len(neg_ix) == 0:\n # neg_ix = np.array([0])\n #keep_ix = np.random.choice(len(neg_ix), size = neg_num, replace = True)\n #neg_ix = neg_ix[keep_ix] # (482, )\n neg_cls = [0 for __ in range(len(neg_ix))]\n neg_binary_cls = [[0 for __ in range(C)] for _ in range(len(neg_ix))]\n gt_cls = np.array(gt_cls)\n gt_cls = gt_cls[pos_ix.astype(np.int32)]\n gt_binary_cls = np.array(gt_binary_cls)\n gt_binary_cls = gt_binary_cls[pos_ix.astype(np.int32)]\n neg_ix = np.array(neg_ix)\n neg_cls = np.array(neg_cls)\n neg_binary_cls = np.array(neg_binary_cls)\n\n if len(pos_ix) == 0:\n dt = neg_ix\n gt_cls = neg_cls\n gt_binary_cls = neg_binary_cls\n else:\n dt = np.hstack([pos_ix, neg_ix]) # 正负例的索引 stack 起来\n gt_cls = np.hstack([gt_cls, neg_cls])\n gt_binary_cls = np.vstack([gt_binary_cls, neg_binary_cls])\n \n pdb.set_trace()\n if cfg.get('cls_type', 'softmax') == 'softmax':\n return dt, gt_cls, gt_cls.shape[0]\n else:\n return dt, gt_binary_cls, gt_binary_cls.shape[0]\n\ndef compute_proposal_targets_gt(all_rois, pair_rois, num_classes, cfg, gt_bboxes, gt_assos, roiTable, image_info, predict_type='rcnn', ignore_regions=None):\n '''\n gt_assos: [N, 3] rois1_ix, rois2_ix, cls\n '''\n #pdb.set_trace()\n B = len(image_info)\n C = num_classes\n N = pair_rois.shape[0]\n gt_cls = np.array([0 for _ in range(N)]).astype(np.int32)\n all_rois, gt_assos, image_info = map(to_np_array, [all_rois, gt_assos, image_info])\n pos_ix = []\n gt_cls = [0 for __ in range(N)]\n gt_binary_cls = [[0 for __ in range(C)] for _ in range(N)]\n for b_ix in range(B):\n idx = np.where(all_rois[:,0] == b_ix)[0]\n rois = all_rois[idx, 1: 5]\n gts = gt_bboxes[b_ix]\n gts = to_np_array(gts)\n gtasso = gt_assos[b_ix].cpu().numpy()\n R = rois.shape[0]\n G = gtasso.shape[0]\n if R == 0:\n continue\n if G > 0:\n overlaps = bbox_helper.bbox_iou_overlaps(torch.from_numpy(rois), torch.from_numpy(gts)).numpy()\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n if cfg.get('allow_low_quality_match', False):\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = overlaps.max(axis=0)\n gt_pos_g_ix = np.where(gt_max_overlaps > cfg['negative_iou_thresh'])[0]\n gt_pos_r_ix = gt_argmax_overlaps[gt_pos_g_ix]\n gt_pos_g_ix, return_index = np.unique(gt_pos_g_ix, return_index=True)\n gt_pos_r_ix = gt_pos_r_ix[return_index]\n max_overlaps[gt_pos_r_ix] = 1.0\n\n pos_r_ix = np.where(max_overlaps > cfg['positive_iou_thresh'])[0]\n pos_g_ix = argmax_overlaps[pos_r_ix]\n pos_r_ix, return_index = np.unique(pos_r_ix, return_index=True)\n pos_g_ix = pos_g_ix[return_index]\n GG = gtasso.shape[0]\n added_gt = set()\n for k in range(GG):\n if gtasso[k][2] == 0:\n continue\n x = gtasso[k][0]\n y = gtasso[k][1]\n for i in range(len(pos_r_ix)):\n for j in range(len(pos_r_ix)):\n if x == pos_g_ix[i] and y == pos_g_ix[j]:\n rx = idx[pos_r_ix[i]]\n ry = idx[pos_r_ix[j]]\n if not (rx, ry) in roiTable:\n continue\n assert((rx,ry) in roiTable)\n id = roiTable[(rx,ry)]\n pos_ix.append(id)\n added_gt.add(k)\n if predict_type == 'rcnn':\n gt_cls[id] = gtasso[k][2]\n 
gt_binary_cls[id][gtasso[k][2].astype(np.int32)] = 1\n else:\n gt_cls[id] = 1\n gt_binary_cls[id][0] = 1\n overlaps = overlaps.T\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n\n no_added_gt = set(list(range(GG))) - added_gt\n if len(no_added_gt) <= 1:\n continue\n for k in no_added_gt: # for not added gt\n #pdb.set_trace()\n x = int(gtasso[k][0])\n y = int (gtasso[k][1])\n if max_overlaps[x] == 0 or max_overlaps[y] == 0:\n continue\n rx = argmax_overlaps[x]\n ry = argmax_overlaps[y]\n rx = idx[rx]\n ry = idx[ry]\n if not (rx, ry) in roiTable:\n continue\n assert((rx,ry) in roiTable)\n id = roiTable[(rx,ry)]\n pos_ix.append(id)\n if predict_type == 'rcnn':\n gt_cls[id] = gtasso[k][2] # todo 类别没有对应好,如果要对应类别,就不能去max\n gt_binary_cls[id][gtasso[k][2].astype(np.int32)] = 1\n else:\n gt_cls[id] = 1\n gt_binary_cls[id][0] = 1\n\n neg_ix = np.array(list(set([_ for _ in range(N)]) - set(pos_ix)))\n #pdb.set_trace()\n batch_size = cfg['batch_size']\n all_num = batch_size * B\n pos_num = int(all_num * cfg['positive_percent'])\n pos_ix = np.array(pos_ix)\n if pos_ix.shape[0] > pos_num:\n keep_ix = np.random.choice(len(pos_ix), size = pos_num, replace = True)\n pos_ix = pos_ix[keep_ix]\n if cfg.get('dynamic_batch_size', False):\n all_num = int(len(pos_ix) / cfg['positive_percent'])\n\n neg_num = all_num - len(pos_ix)\n if len(neg_ix) == 0:\n neg_ix = np.array([0])\n keep_ix = np.random.choice(len(neg_ix), size = neg_num, replace = True)\n neg_ix = neg_ix[keep_ix]\n neg_cls = [0 for __ in range(len(keep_ix))]\n neg_binary_cls = [[0 for __ in range(C)] for _ in range(len(keep_ix))]\n gt_cls = np.array(gt_cls)\n gt_cls = gt_cls[pos_ix.astype(np.int32)]\n gt_binary_cls = np.array(gt_binary_cls)\n gt_binary_cls = gt_binary_cls[pos_ix.astype(np.int32)]\n neg_ix = np.array(neg_ix)\n neg_cls = np.array(neg_cls)\n neg_binary_cls = np.array(neg_binary_cls)\n\n if len(pos_ix) == 0:\n dt = neg_ix\n gt_cls = neg_cls\n gt_binary_cls = neg_binary_cls\n else:\n dt = np.hstack([pos_ix, neg_ix])\n gt_cls = np.hstack([gt_cls, neg_cls])\n gt_binary_cls = np.vstack([gt_binary_cls, neg_binary_cls])\n idx = np.arange(len(dt))\n np.random.shuffle(idx)\n dt = dt[idx]\n gt_cls = gt_cls[idx]\n gt_binary_cls = gt_binary_cls[idx]\n #pdb.set_trace()\n if cfg.get('cls_type', 'softmax') == 'softmax':\n return dt, gt_cls, gt_cls.shape[0]\n else:\n return dt, gt_binary_cls, gt_binary_cls.shape[0]\n\ndef compute_proposal_targets(all_rois, pair_rois, num_classes, cfg, gt_bboxes, gt_assos, roiTable, image_info, predict_type='rcnn', ignore_regions=None):\n '''\n gt_assos: [N, 3] rois1_ix, rois2_ix, cls\n '''\n #pdb.set_trace()\n B = len(image_info)\n C = num_classes\n N = pair_rois.shape[0]\n gt_cls = np.array([0 for _ in range(N)]).astype(np.int32)\n all_rois, gt_assos, image_info = map(to_np_array, [all_rois, gt_assos, image_info])\n pos_ix = []\n gt_cls = [0 for __ in range(N)]\n gt_binary_cls = [[0 for __ in range(C)] for _ in range(N)]\n for b_ix in range(B):\n idx = np.where(all_rois[:,0] == b_ix)[0]\n rois = all_rois[idx, 1: 5]\n gts = gt_bboxes[b_ix]\n gts = to_np_array(gts)\n gtasso = gt_assos[b_ix].cpu().numpy()\n R = rois.shape[0]\n G = gts.shape[0]\n if R == 0:\n continue\n if G > 0:\n overlaps = bbox_helper.bbox_iou_overlaps(torch.from_numpy(rois), torch.from_numpy(gts)).numpy()\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n if cfg.get('allow_low_quality_match', False):\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = 
overlaps.max(axis=0)\n gt_pos_g_ix = np.where(gt_max_overlaps > cfg['negative_iou_thresh'])[0]\n gt_pos_r_ix = gt_argmax_overlaps[gt_pos_g_ix]\n gt_pos_g_ix, return_index = np.unique(gt_pos_g_ix, return_index=True)\n gt_pos_r_ix = gt_pos_r_ix[return_index]\n max_overlaps[gt_pos_r_ix] = 1.0\n\n pos_r_ix = np.where(max_overlaps > cfg['positive_iou_thresh'])[0]\n pos_g_ix = argmax_overlaps[pos_r_ix]\n pos_r_ix, return_index = np.unique(pos_r_ix, return_index=True)\n pos_g_ix = pos_g_ix[return_index]\n GG = gtasso.shape[0]\n for k in range(GG):\n if gtasso[k][2] == 0:\n continue\n x = gtasso[k][0]\n y = gtasso[k][1]\n for i in range(len(pos_r_ix)):\n for j in range(len(pos_r_ix)):\n if x == pos_g_ix[i] and y == pos_g_ix[j]:\n rx = idx[pos_r_ix[i]]\n ry = idx[pos_r_ix[j]]\n if not (rx, ry) in roiTable:\n continue\n assert((rx,ry) in roiTable)\n id = roiTable[(rx,ry)]\n pos_ix.append(id)\n if predict_type == 'rcnn':\n gt_cls[id] = gtasso[k][2]\n gt_binary_cls[id][gtasso[k][2].astype(np.int32)] = 1\n else:\n gt_cls[id] = 1\n gt_binary_cls[id][0] = 1\n neg_ix = np.array(list(set([_ for _ in range(N)]) - set(pos_ix)))\n #pdb.set_trace()\n batch_size = cfg['batch_size']\n all_num = batch_size * B\n pos_num = int(all_num * cfg['positive_percent'])\n pos_ix = np.array(pos_ix)\n if pos_ix.shape[0] > pos_num:\n keep_ix = np.random.choice(len(pos_ix), size = pos_num, replace = True)\n pos_ix = pos_ix[keep_ix]\n if cfg.get('dynamic_batch_size', False):\n all_num = int(len(pos_ix) / cfg['positive_percent'])\n\n neg_num = all_num - len(pos_ix)\n if len(neg_ix) == 0:\n neg_ix = np.array([0])\n keep_ix = np.random.choice(len(neg_ix), size = neg_num, replace = True)\n neg_ix = neg_ix[keep_ix]\n neg_cls = [0 for __ in range(len(keep_ix))]\n neg_binary_cls = [[0 for __ in range(C)] for _ in range(len(keep_ix))]\n gt_cls = np.array(gt_cls)\n gt_cls = gt_cls[pos_ix.astype(np.int32)]\n gt_binary_cls = np.array(gt_binary_cls)\n gt_binary_cls = gt_binary_cls[pos_ix.astype(np.int32)]\n neg_ix = np.array(neg_ix)\n neg_cls = np.array(neg_cls)\n neg_binary_cls = np.array(neg_binary_cls)\n\n if len(pos_ix) == 0:\n dt = neg_ix\n gt_cls = neg_cls\n gt_binary_cls = neg_binary_cls\n else:\n dt = np.hstack([pos_ix, neg_ix])\n gt_cls = np.hstack([gt_cls, neg_cls])\n gt_binary_cls = np.vstack([gt_binary_cls, neg_binary_cls])\n idx = np.arange(len(dt))\n np.random.shuffle(idx)\n dt = dt[idx]\n gt_cls = gt_cls[idx]\n gt_binary_cls = gt_binary_cls[idx]\n #pdb.set_trace()\n if cfg.get('cls_type', 'softmax') == 'softmax':\n return dt, gt_cls, gt_cls.shape[0]\n else:\n return dt, gt_binary_cls, gt_binary_cls.shape[0]\n\ndef predict_assos(rois, pair_rois, pred_cls, image_info, cfg, tocaffe):\n '''\n :param cfg: config\n :param rois: [N, k] k>=7, batch_ix, x1, y1, x2, y2, score, cls\n :param pred_assos:[N, num_classes * 4, 1, 1]\n :param image_info:[N, 3]\n :return: assos: [M, 13], batch_ix, ax1, ay1, ax2, ay2, acls, bx1, by1, bx2, by2, bcls, cls, score\n '''\n rois, pair_rois, pred_cls = map(to_np_array, [rois, pair_rois, pred_cls])\n N, num_classes = pred_cls.shape[0:2]\n B = max(rois[:, 0].astype(np.int32))+1\n nmsed_assos = [np.zeros((1,13))]\n rois1 = rois[pair_rois[:, 1].astype(np.int32)]\n rois2 = rois[pair_rois[:, 2].astype(np.int32)]\n #pdb.set_trace()\n for cls in range(1, num_classes):\n #if cls == 1:\n # in body-face pair, the cls of object must be face\n # idx = np.where(rois2[:,6] == 2)\n #else:\n # in body-hand pair, the cls of object must be hand\n # idx = np.where(rois2[:,6] == 3)\n #idx = np.where(pred_cls[:, cls] == 
pred_cls.max(axis=0))\n #pdb.set_trace()\n idx = np.where(rois2[:, 6] > 0)\n tmp_pred_cls = pred_cls[idx]\n tmp_rois1 = rois1[idx]\n tmp_rois2 = rois2[idx]\n tmp_pair = pair_rois[idx]\n #scores = (tmp_pred_cls[:,cls] * tmp_rois1[:, 5] * tmp_rois2[:, 5]).reshape((-1,1))\n scores = tmp_pred_cls[:, cls].reshape((-1,1))\n pair_cls = np.array([cls for _ in range(scores.shape[0])]).reshape((-1,1))\n batch = tmp_pair[:,0]\n batch = batch.reshape((-1,1))\n #nmsed_assos.append(np.hstack([batch, tmp_rois1[:,1:5], tmp_rois1[:,6].reshape((-1,1)), tmp_rois2[:, 1:5], tmp_rois2[:,6].reshape((-1,1)), pair_cls, scores]))\n batch_asso = np.hstack([batch, tmp_rois1[:,1:5], tmp_rois1[:,6].reshape((-1,1)), tmp_rois2[:, 1:5], tmp_rois2[:,6].reshape((-1,1)), pair_cls, scores])\n if cfg.get('cls_top_n', 0) > 0:# no run\n for b_ix in range(B):\n assos = batch_asso[batch_asso[:, 0] == b_ix]\n if assos.size == 0: continue\n scores = assos[:, -1]\n order = scores.argsort()[::-1][:cfg['cls_top_n']]\n assos = assos[order]\n if cfg.get('use_filter', None) is not None:\n cls_triplet = cfg['asso_triplet'][cls - 1]\n tmp = []\n for line in assos:\n acls = line[5].astype(int)\n bcls = line[10].astype(int)\n if (str(acls) in cls_triplet.keys() and bcls in cls_triplet[str(acls)]):\n tmp.append(line)\n if len(tmp) > 0:\n assos = np.vstack(tmp)\n else:\n assos = []\n if len(assos) > 0:\n nmsed_assos.append(assos)\n else:\n nmsed_assos.append(batch_asso)\n \n\n if tocaffe:\n nmsed_assos = np.zeros((1,13))\n else:\n nmsed_assos = np.vstack(nmsed_assos)\n if cfg.get('pair_nms', None):# no run\n if cfg['pre_top_n'] > 0:\n top_n_assos = []\n for b_ix in range(B):\n assos = nmsed_assos[nmsed_assos[:, 0] == b_ix]\n if assos.size == 0: continue\n scores = assos[:, -1]\n order = scores.argsort()[::-1][:cfg['pre_top_n']]\n assos = assos[order]\n if cfg.get('pre_score_thresh', None) is not None:\n keep = assos[:, -1] > cfg['pre_score_thresh']\n assos = assos[keep]\n top_n_assos.append(assos)\n nmsed_assos = np.vstack(top_n_assos)\n nmsed_assos = pair_nms(nmsed_assos, cfg['pair_nms'])\n if cfg['top_n'] > 0:\n top_n_assos = [[0,0.0,0.0,0.0,0.0,0,0.0,0.0,0.0,0.0,0,0,0]]\n for b_ix in range(B):\n assos = nmsed_assos[nmsed_assos[:, 0] == b_ix]\n if assos.size == 0: continue\n scores = assos[:, -1]\n order = scores.argsort()[::-1][:cfg['top_n']]\n assos = assos[order]\n if cfg.get('score_thresh', None) is not None:\n keep = assos[:, -1] > cfg['score_thresh']\n #keep = np.bitwise_and(0.5 > assos[:, -1], assos[:, -1] > cfg['score_thresh'])\n assos = assos[keep]\n top_n_assos.append(assos)\n nmsed_assos = np.vstack(top_n_assos)\n return nmsed_assos\n\n\ndef predict_assos_2(rois, pair_rois, pred_cls, image_info, cfg, tocaffe):\n '''\n :param cfg: config\n :param rois: [N, k] k>=7, batch_ix, x1, y1, x2, y2, score, cls\n :param pred_assos:[N, num_classes * 4, 1, 1]\n :param image_info:[N, 3]\n :return: assos: [M, 13], batch_ix, ax1, ay1, ax2, ay2, acls, bx1, by1, bx2, by2, bcls, cls, score\n '''\n rois, pair_rois, pred_cls = map(to_np_array, [rois, pair_rois, pred_cls])\n N, num_classes = pred_cls.shape[0:2]\n B = max(rois[:, 0].astype(np.int32))+1\n nmsed_assos = [np.zeros((1,13))]\n #pdb.set_trace()\n rois1 = rois[pair_rois[:, 1].astype(np.int32)]\n rois2 = rois[pair_rois[:, 2].astype(np.int32)]\n for cls in range(1, num_classes):\n #if cls == 1:\n # in body-face pair, the cls of object must be face\n # idx = np.where(rois2[:,6] == 2)\n #else:\n # in body-hand pair, the cls of object must be hand\n # idx = np.where(rois2[:,6] == 3)\n idx = 
np.where(rois2[:, 6] > 0)\n tmp_pred_cls = pred_cls[idx]\n tmp_rois1 = rois1[idx]\n tmp_rois2 = rois2[idx]\n tmp_pair = pair_rois[idx]\n scores = (tmp_pred_cls[:,cls] * tmp_rois1[:, 5] * tmp_rois2[:, 5]).reshape((-1,1))\n #scores = tmp_pred_cls[:, cls].reshape((-1,1))\n # pair_cls = np.array([cls for _ in range(scores.shape[0])]).reshape((-1,1))\n pair_cls = tmp_rois2[:, 6].copy()\n pair_cls[pair_cls == 2] = 1\n pair_cls[pair_cls == 3] = 2\n pair_cls = pair_cls.reshape((-1, 1))\n batch = tmp_pair[:,0]\n batch = batch.reshape((-1,1))\n #nmsed_assos.append(np.hstack([batch, tmp_rois1[:,1:5], tmp_rois1[:,6].reshape((-1,1)), tmp_rois2[:, 1:5], tmp_rois2[:,6].reshape((-1,1)), pair_cls, scores]))\n batch_asso = np.hstack([batch, tmp_rois1[:,1:5], tmp_rois1[:,6].reshape((-1,1)), \n tmp_rois2[:, 1:5], tmp_rois2[:,6].reshape((-1,1)), pair_cls, scores])\n if cfg.get('cls_top_n', 0) > 0:\n for b_ix in range(B):\n assos = batch_asso[batch_asso[:, 0] == b_ix]\n if assos.size == 0: continue\n scores = assos[:, -1]\n order = scores.argsort()[::-1][:cfg['cls_top_n']]\n assos = assos[order]\n if cfg.get('use_filter', None) is not None:\n cls_triplet = cfg['asso_triplet'][cls - 1]\n tmp = []\n for line in assos:\n acls = line[5].astype(int)\n bcls = line[10].astype(int)\n if (str(acls) in cls_triplet.keys() and bcls in cls_triplet[str(acls)]):\n tmp.append(line)\n if len(tmp) > 0:\n assos = np.vstack(tmp)\n else:\n assos = []\n if len(assos) > 0:\n nmsed_assos.append(assos)\n else:\n nmsed_assos.append(batch_asso)\n \n\n if tocaffe:\n nmsed_assos = np.zeros((1,13))\n else:\n nmsed_assos = np.vstack(nmsed_assos)\n if cfg.get('pair_nms', None):\n if cfg['pre_top_n'] > 0:\n top_n_assos = []\n for b_ix in range(B):\n assos = nmsed_assos[nmsed_assos[:, 0] == b_ix]\n if assos.size == 0: continue\n scores = assos[:, -1]\n order = scores.argsort()[::-1][:cfg['pre_top_n']]\n assos = assos[order]\n if cfg.get('pre_score_thresh', None) is not None:\n keep = assos[:, -1] > cfg['pre_score_thresh']\n assos = assos[keep]\n top_n_assos.append(assos)\n nmsed_assos = np.vstack(top_n_assos)\n nmsed_assos = pair_nms(nmsed_assos, cfg['pair_nms'])\n if cfg['top_n'] > 0:\n top_n_assos = [[0,0.0,0.0,0.0,0.0,0,0.0,0.0,0.0,0.0,0,0,0]]\n for b_ix in range(B):\n assos = nmsed_assos[nmsed_assos[:, 0] == b_ix]\n if assos.size == 0: continue\n scores = assos[:, -1]\n order = scores.argsort()[::-1][:cfg['top_n']]\n assos = assos[order]\n if cfg.get('score_thresh', None) is not None:\n keep = assos[:, -1] > cfg['score_thresh']\n assos = assos[keep]\n top_n_assos.append(assos)\n nmsed_assos = np.vstack(top_n_assos)\n return nmsed_assos\n\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"torch.is_tensor",
"numpy.random.shuffle",
"torch.from_numpy",
"numpy.where",
"numpy.unique",
"numpy.hstack",
"numpy.vstack"
]
]
|
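
`compute_proposal_targets` in the row above marks a RoI pair as positive only when both of its boxes clear `positive_iou_thresh` against some ground-truth box. `bbox_helper.bbox_iou_overlaps` is not included in this row, so the sketch below substitutes a plain numpy IoU to illustrate the per-box matching step; all boxes and the 0.5 threshold are made up.

```python
import numpy as np

def iou_matrix(rois, gts):
    """Pairwise IoU between [N, 4] and [M, 4] boxes given as (x1, y1, x2, y2)."""
    x1 = np.maximum(rois[:, None, 0], gts[None, :, 0])
    y1 = np.maximum(rois[:, None, 1], gts[None, :, 1])
    x2 = np.minimum(rois[:, None, 2], gts[None, :, 2])
    y2 = np.minimum(rois[:, None, 3], gts[None, :, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_r = (rois[:, 2] - rois[:, 0]) * (rois[:, 3] - rois[:, 1])
    area_g = (gts[:, 2] - gts[:, 0]) * (gts[:, 3] - gts[:, 1])
    return inter / (area_r[:, None] + area_g[None, :] - inter)

rois = np.array([[0, 0, 10, 10], [20, 20, 30, 30], [100, 100, 110, 110]], dtype=float)
gts = np.array([[1, 1, 11, 11], [19, 19, 31, 31]], dtype=float)

overlaps = iou_matrix(rois, gts)
max_overlaps = overlaps.max(axis=1)
argmax_overlaps = overlaps.argmax(axis=1)
pos_r_ix = np.where(max_overlaps > 0.5)[0]     # RoIs matched to some ground truth
pos_g_ix = argmax_overlaps[pos_r_ix]           # ...and which ground truth they matched
print(pos_r_ix, pos_g_ix)                      # -> [0 1] [0 1]; RoI 2 stays negative
```
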
rystylee/pytorch-3DGAN | [
"768f53182183c123a7cbac16581fb777fcf8f726"
]
| [
"trainer.py"
]
| [
"import os\n\nfrom tqdm import tqdm\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchsummary import summary\n\nfrom model import Discriminator\nfrom model import Generator\nfrom losses import GANLoss\nfrom data_loader import load_dataloader\nfrom utils import sample_z, save_voxel\n\n\nclass Trainer(object):\n def __init__(self, config):\n self.config = config\n self.device = config.device\n\n self.dataloader = load_dataloader(\n data_root=config.data_root,\n dataset_name=config.dataset_name,\n dataset_type=config.dataset_type,\n batch_size=config.batch_size,\n dim_voxel=config.dim_voxel\n )\n\n self.start_itr = 1\n self.dim_z = config.dim_z\n self.dis_z = config.dis_z\n self.d_thresh = config.d_thresh\n\n self.generator = Generator(dim_z=config.dim_z, ch=config.ch_g, out_ch=1, bias=config.bias).to(self.device)\n self.discriminator = Discriminator(ch=config.ch_d, out_ch=1, bias=config.bias, dim_voxel=config.dim_voxel).to(self.device)\n print('')\n print('Generator summary')\n summary(self.generator, (config.batch_size, self.dim_z))\n print('')\n print('')\n print('Discriminator summary')\n summary(self.discriminator, (config.batch_size, config.dim_voxel, config.dim_voxel, config.dim_voxel))\n print('')\n\n self.optim_g = optim.Adam(self.generator.parameters(), lr=config.lr_g, betas=(config.beta1, config.beta2))\n self.optim_d = optim.Adam(self.discriminator.parameters(), lr=config.lr_d, betas=(config.beta1, config.beta2))\n self.criterion = GANLoss()\n\n if not self.config.checkpoint_path == '':\n self._load_models(self.config.checkpoint_path)\n\n self.writer = SummaryWriter(log_dir=config.log_dir)\n\n def train(self):\n print('Start training!\\n')\n with tqdm(total=self.config.max_itr + 1 - self.start_itr) as pbar:\n for n_itr in range(self.start_itr, self.config.max_itr + 1):\n pbar.set_description(f'iteration [{n_itr}]')\n\n img = next(self.dataloader)\n real_img = img.to(self.device)\n\n batch_size = len(real_img)\n\n # ------------------------------------------------\n # Train D\n # ------------------------------------------------\n z = sample_z(batch_size, self.dim_z, self.dis_z, self.device)\n\n with torch.no_grad():\n fake_img = self.generator(z)\n\n d_real = self.discriminator(real_img)\n d_fake = self.discriminator(fake_img)\n\n loss_d_real = self.criterion(d_real, 'd_real')\n loss_d_fake = self.criterion(d_fake, 'd_fake')\n loss_d = loss_d_real + loss_d_fake\n\n acc_d_real = torch.ge(d_real.squeeze(), 0.5).float()\n acc_d_fake = torch.le(d_fake.squeeze(), 0.5).float()\n acc_d = torch.mean(torch.cat((acc_d_real, acc_d_fake), 0))\n\n if acc_d < self.d_thresh:\n self.optim_d.zero_grad()\n loss_d.backward()\n self.optim_d.step()\n\n # ------------------------------------------------\n # Train G\n # ------------------------------------------------\n z = sample_z(batch_size, self.dim_z, self.dis_z, self.device)\n\n fake_img = self.generator(z)\n d_fake = self.discriminator(fake_img)\n\n loss_g = self.criterion(d_fake, 'g')\n\n self.optim_g.zero_grad()\n loss_g.backward()\n self.optim_g.step()\n\n # ------------------------------------------------\n # Logging\n # ------------------------------------------------\n if n_itr % self.config.log_interval == 0:\n tqdm.write('iteration: {}/{}, loss_g: {}, loss_d: {}, loss_d_real: {}, loss_d_fake: {}'.format(\n n_itr, self.config.max_itr, loss_g.item(), loss_d.item(), loss_d_real.item(), loss_d_fake.item()\n ))\n self.writer.add_scalar('loss/loss_g', loss_g.item(), n_itr)\n 
self.writer.add_scalar('loss/loss_d', loss_d.item(), n_itr)\n self.writer.add_scalar('loss/loss_d_real', loss_d_real.item(), n_itr)\n self.writer.add_scalar('loss/loss_d_fake', loss_d_fake.item(), n_itr)\n self.writer.add_scalar('loss/acc_d', acc_d.item(), n_itr)\n\n # ------------------------------------------------\n # Sampling\n # ------------------------------------------------\n if n_itr % self.config.sample_interval == 0:\n img_path = os.path.join(self.config.sample_dir, f'fake_{n_itr}.jpg')\n samples = fake_img.detach().cpu().numpy()\n save_voxel(samples, img_path)\n\n if n_itr % self.config.sample_interval == 0:\n img_path = os.path.join(self.config.sample_dir, f'real_{n_itr}.jpg')\n samples = real_img.detach().cpu().numpy()\n save_voxel(samples, img_path)\n\n # ------------------------------------------------\n # Save model\n # ------------------------------------------------\n if n_itr % self.config.checkpoint_interval == 0:\n self._save_models(n_itr)\n\n pbar.update()\n\n self.writer.close()\n\n def _save_models(self, n_itr):\n checkpoint_name = f'{self.config.dataset_name}-{self.config.dim_voxel}_model_ckpt_{n_itr}.pth'\n checkpoint_path = os.path.join(self.config.checkpoint_dir, checkpoint_name)\n torch.save({\n 'n_itr': n_itr,\n 'generator': self.generator.state_dict(),\n 'optim_g': self.optim_g.state_dict(),\n 'discriminator': self.discriminator.state_dict(),\n 'optim_d': self.optim_d.state_dict(),\n }, checkpoint_path)\n tqdm.write(f'Saved checkpoint: n_itr_{n_itr}')\n\n def _load_models(self, model_state_path):\n checkpoint = torch.load(model_state_path)\n self.start_itr = checkpoint['n_itr'] + 1\n self.generator.load_state_dict(checkpoint['generator'])\n self.optim_g.load_state_dict(checkpoint['optim_g'])\n self.discriminator.load_state_dict(checkpoint['discriminator'])\n self.optim_d.load_state_dict(checkpoint['optim_d'])\n print(f'start_itr: {self.start_itr}')\n print('Loaded pretrained models...\\n')\n"
]
| [
[
"torch.no_grad",
"torch.cat",
"torch.utils.tensorboard.SummaryWriter",
"torch.load"
]
]
|
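
The trainer in the row above only steps the discriminator while its batch accuracy stays below `d_thresh`, a common 3D-GAN trick to keep the discriminator from overpowering the generator. Stripped down to the gate itself, with fabricated discriminator outputs:

```python
import torch

d_real = torch.tensor([0.9, 0.6, 0.4])             # fake outputs on real voxels
d_fake = torch.tensor([0.2, 0.7, 0.1])             # fake outputs on generated voxels
d_thresh = 0.8

acc_d_real = torch.ge(d_real, 0.5).float()         # real samples classified as real
acc_d_fake = torch.le(d_fake, 0.5).float()         # fake samples classified as fake
acc_d = torch.mean(torch.cat((acc_d_real, acc_d_fake), 0))

update_d = acc_d < d_thresh                        # step D only while it is "weak enough"
print(acc_d.item(), bool(update_d))                # 0.666..., True
```
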
sms1097/imodels | [
"b2b062c8ff1b12c02271f041674a11af85fcfea6"
]
| [
"experiments/combine.py"
]
| [
"import argparse\nimport glob\nimport os\nimport pickle as pkl\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom experiments.compare_models import compute_meta_auc, MODEL_COMPARISON_PATH\n\n\ndef combine_comparisons(path, model, test):\n all_files = glob.glob(path + '*')\n model_files = list(filter(lambda x: (model in x) and ('comparisons_' in x), all_files))\n model_files_sorted = sorted(model_files, key=lambda x: int(x.split('_')[-1][:-4]))\n results_sorted = [pkl.load(open(f, 'rb')) for f in model_files_sorted]\n\n df = pd.concat([r['df'] for r in results_sorted])\n estimators = []\n for r in results_sorted:\n estimators += np.unique(r['estimators']).tolist()\n\n output_dict = {\n 'estimators': estimators,\n 'comparison_datasets': results_sorted[0]['comparison_datasets'],\n 'metrics': results_sorted[0]['metrics'],\n 'df': df,\n }\n\n if 'rule_df' in results_sorted[0]:\n rule_df = pd.concat([r['rule_df'] for r in results_sorted])\n output_dict['rule_df'] = rule_df\n\n if not test:\n # easy_df = df.loc[:, ['easy' in col for col in df.columns]].copy()\n # med_df = df.loc[:, ['med' in col for col in df.columns]].copy()\n # hard_df = df.loc[:, ['hard' in col for col in df.columns]].copy()\n # all_df = df.loc[:, ['all' in col for col in df.columns]].copy()\n # level_dfs = (med_df, 'med'), (hard_df, 'hard'), (all_df, 'all')\n\n # for curr_df, prefix in level_dfs:\n try:\n meta_auc_df = compute_meta_auc(df)\n except ValueError as e:\n warnings.warn(f'bad complexity range')\n warnings.warn(e)\n meta_auc_df = None\n \n output_dict['meta_auc_df'] = meta_auc_df\n\n\n combined_filename = '.'.join(model_files_sorted[0].split('_0.'))\n pkl.dump(output_dict, open(combined_filename, 'wb'))\n\n # for f in model_files_sorted:\n # os.remove(f)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=str, default=None)\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--test', action='store_true')\n parser.add_argument('--cv', action='store_true')\n parser.add_argument('--low_data', action='store_true')\n args = parser.parse_args()\n\n path = MODEL_COMPARISON_PATH\n path += 'low_data/' if args.low_data else 'reg_data/'\n path += f'{args.dataset}/'\n\n if args.test:\n path += 'test/'\n elif args.cv:\n path += 'cv/'\n else:\n path += 'val/'\n\n combine_comparisons(path, args.model, args.test)\n"
]
| [
[
"numpy.unique",
"pandas.concat"
]
]
|
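
`combine_comparisons` in the row above sorts the shard files by their integer suffix before concatenating, so `_10` does not land between `_1` and `_2` the way a plain string sort would place it. A small reproduction with fabricated shard names and DataFrames:

```python
import pandas as pd

files = ["comparisons_rf_10.pkl", "comparisons_rf_2.pkl", "comparisons_rf_1.pkl"]

# Sort by the integer between the last "_" and ".pkl"
files_sorted = sorted(files, key=lambda x: int(x.split("_")[-1][:-4]))
print(files_sorted)   # ['comparisons_rf_1.pkl', 'comparisons_rf_2.pkl', 'comparisons_rf_10.pkl']

# Each real shard is a pickled dict with a 'df' entry; fake that structure here.
shards = [{"df": pd.DataFrame({"auc": [0.70 + 0.01 * i]})} for i in range(len(files_sorted))]
df = pd.concat([s["df"] for s in shards])
print(df)
```
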
amandanic11/shopping-cart | [
"a7a4a11ba452582a83e5a01bc69ec8834edbad5b"
]
| [
"shopping-cart-pandas.py"
]
| [
"# shopping_cart_pandas.py\nfrom __future__ import print_function\nimport datetime\nimport os\nimport pandas as pd \nimport csv\nfrom dotenv import load_dotenv\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\nimport functools\n\n\nnow = datetime.datetime.now()\npd.options.display.float_format = '${:,.2f}'.format\n\n# load_dotenv()\n# api_key = os.getenv(\"SENDGRID_API_KEY\", \"Oops, please set env var called SENDGRID_API_KEY\")\n# sendgrid_template = os.getenv(\"SENDGRID_TEMPLATE\", \"Oops, please set env var called SENDGRID_TEMPLATE\" )\n# my_address = os.getenv(\"MY_EMAIL_ADDRESS\", \"Oops, please set env var called MY_EMAIL_ADDRESS\")\n\n# products = [\n# {\"id\":1, \"name\": \"Chocolate Sandwich Cookies\", \"department\": \"snacks\", \"aisle\": \"cookies cakes\", \"price\": 3.50},\n# {\"id\":2, \"name\": \"All-Seasons Salt\", \"department\": \"pantry\", \"aisle\": \"spices seasonings\", \"price\": 4.99},\n# {\"id\":3, \"name\": \"Robust Golden Unsweetened Oolong Tea\", \"department\": \"beverages\", \"aisle\": \"tea\", \"price\": 2.49},\n# {\"id\":4, \"name\": \"Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce\", \"department\": \"frozen\", \"aisle\": \"frozen meals\", \"price\": 6.99},\n# {\"id\":5, \"name\": \"Green Chile Anytime Sauce\", \"department\": \"pantry\", \"aisle\": \"marinades meat preparation\", \"price\": 7.99},\n# {\"id\":6, \"name\": \"Dry Nose Oil\", \"department\": \"personal care\", \"aisle\": \"cold flu allergy\", \"price\": 21.99},\n# {\"id\":7, \"name\": \"Pure Coconut Water With Orange\", \"department\": \"beverages\", \"aisle\": \"juice nectars\", \"price\": 3.50},\n# {\"id\":8, \"name\": \"Cut Russet Potatoes Steam N' Mash\", \"department\": \"frozen\", \"aisle\": \"frozen produce\", \"price\": 4.25},\n# {\"id\":9, \"name\": \"Light Strawberry Blueberry Yogurt\", \"department\": \"dairy eggs\", \"aisle\": \"yogurt\", \"price\": 6.50},\n# {\"id\":10, \"name\": \"Sparkling Orange Juice & Prickly Pear Beverage\", \"department\": \"beverages\", \"aisle\": \"water seltzer sparkling water\", \"price\": 2.99},\n# {\"id\":11, \"name\": \"Peach Mango Juice\", \"department\": \"beverages\", \"aisle\": \"refrigerated\", \"price\": 1.99},\n# {\"id\":12, \"name\": \"Chocolate Fudge Layer Cake\", \"department\": \"frozen\", \"aisle\": \"frozen dessert\", \"price\": 18.50},\n# {\"id\":13, \"name\": \"Saline Nasal Mist\", \"department\": \"personal care\", \"aisle\": \"cold flu allergy\", \"price\": 16.00},\n# {\"id\":14, \"name\": \"Fresh Scent Dishwasher Cleaner\", \"department\": \"household\", \"aisle\": \"dish detergents\", \"price\": 4.99},\n# {\"id\":15, \"name\": \"Overnight Diapers Size 6\", \"department\": \"babies\", \"aisle\": \"diapers wipes\", \"price\": 25.50},\n# {\"id\":16, \"name\": \"Mint Chocolate Flavored Syrup\", \"department\": \"snacks\", \"aisle\": \"ice cream toppings\", \"price\": 4.50},\n# {\"id\":17, \"name\": \"Rendered Duck Fat\", \"department\": \"meat seafood\", \"aisle\": \"poultry counter\", \"price\": 9.99},\n# {\"id\":18, \"name\": \"Pizza for One Suprema Frozen Pizza\", \"department\": \"frozen\", \"aisle\": \"frozen pizza\", \"price\": 12.50},\n# {\"id\":19, \"name\": \"Gluten Free Quinoa Three Cheese & Mushroom Blend\", \"department\": \"dry goods pasta\", \"aisle\": \"grains rice dried goods\", \"price\": 3.99},\n# {\"id\":20, \"name\": \"Pomegranate Cranberry & Aloe Vera Enrich Drink\", \"department\": \"beverages\", \"aisle\": \"juice nectars\", \"price\": 4.25}\n# ] # based on data 
from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017\n\n# df = pd.DataFrame(products)\n# df.to_csv('product_list.csv')\nproduct_filepath = os.path.join(os.path.dirname(__file__), \"product_list.csv\")\nproduct_filename = \"product_list.csv\"\nproducts = pd.read_csv(product_filename)\n\ndef to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71\n\n#User inputs\ntotal_price = 0\nproduct_ids = []\n\nvalid_ids = products[\"id\"]\n# print(valid_ids)\n\nwhile True:\n product_id = input(\"Please input a product identifier, or enter DONE when finished: \")\n if product_id == \"DONE\":\n break\n elif int(product_id) in valid_ids:\n product_ids.append(int(product_id))\n else: \n print(\"Identifier not recognized, please try again.\")\n \nidx = []\nfor i in product_ids:\n idx.append(i - 1)\np2 = products.iloc[idx].rename(columns={'id': 'id','name': 'Name','department': 'department','aisle': 'aisle','price': 'Price'}).reset_index()\n\n#Program Outputs\n\nprint(\"---------------------------------\")\nprint(\"THANK YOU FOR SHOPPING AT NOOK'S CRANNY\")\nprint(\"www.nooks.com\")\nprint(\"---------------------------------\")\nprint(\"CHECKOUT AT: \" + str(now))\nprint(\"---------------------------------\")\nprint(\"SELECTED PRODUCTS:\")\n\nprint((p2[['Name', 'Price']]).to_string(index=False, header=True, justify={'left'}))\n\nprint(\"---------------------------------\")\nsubtotal_p = p2['Price'].sum()\nstotal = to_usd(subtotal_p)\ntax = 0.0875\ntax_price = float(subtotal_p) * tax\ntprice = to_usd(tax_price)\ntotal_price = (float(subtotal_p) * tax) + float(subtotal_p)\nprint(\"SUBTOTAL: \" + str(stotal))\nprint(\"TAX (8.75%):\" + str(tprice))\ndef final_total(total_price, tax):\n return (total_price * tax) + total_price\nf_total = to_usd(total_price)\nprint(f\"TOTAL: {f_total}\")\nprint(\"---------------------------------\")\nprint(\"THANK YOU, PLEASE COME AGAIN\")\nprint(\"---------------------------------\")"
]
| [
[
"pandas.read_csv"
]
]
|
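
The checkout script above keeps the running totals as floats and only formats to dollars when printing, so each displayed figure is rounded independently. Condensed, using the first two prices from its commented-out product list:

```python
def to_usd(my_price):
    return f"${my_price:,.2f}"

subtotal = 3.50 + 4.99          # Chocolate Sandwich Cookies + All-Seasons Salt
tax_rate = 0.0875               # the 8.75% rate used above
tax = subtotal * tax_rate
total = subtotal + tax
print(to_usd(subtotal), to_usd(tax), to_usd(total))   # $8.49 $0.74 $9.23
```
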
chrisdxie/rice | [
"c3e42822226af9ac28d95d434cd582386122b679"
]
| [
"src/data_augmentation.py"
]
| [
"import torch\nimport random\nimport numpy as np\nimport cv2\n\nfrom .util import utilities as util_\n\n\n##### Useful Utilities #####\n\ndef array_to_tensor(array):\n \"\"\"Convert a numpy.ndarray to torch.FloatTensor.\n\n numpy.ndarray [N, H, W, C] -> torch.FloatTensor [N, C, H, W]\n OR\n numpy.ndarray [H, W, C] -> torch.FloatTensor [C, H, W]\n \"\"\"\n\n if array.ndim == 4: # NHWC\n tensor = torch.from_numpy(array).permute(0,3,1,2).float()\n elif array.ndim == 3: # HWC\n tensor = torch.from_numpy(array).permute(2,0,1).float()\n else: # everything else\n tensor = torch.from_numpy(array).float()\n\n return tensor\n\n\n##### Depth augmentations #####\n\ndef add_noise_to_depth(depth_img, noise_params):\n \"\"\"Add noise to depth image. \n \n This is adapted from the DexNet 2.0 code.\n Their code: https://github.com/BerkeleyAutomation/gqcnn/blob/75040b552f6f7fb264c27d427b404756729b5e88/gqcnn/sgd_optimizer.py\n\n Args:\n depth_img: a [H, W] numpy.ndarray set of depth z values in meters.\n\n Returns:\n a [H, W] numpy.ndarray.\n \"\"\"\n depth_img = depth_img.copy()\n\n # Multiplicative noise: Gamma random variable\n multiplicative_noise = np.random.gamma(noise_params['gamma_shape'], noise_params['gamma_scale'])\n depth_img = multiplicative_noise * depth_img\n\n return depth_img\n\ndef add_noise_to_xyz(xyz_img, depth_img, noise_params):\n \"\"\"Add (approximate) Gaussian Process noise to ordered point cloud.\n\n Args:\n xyz_img: a [H, W, 3] ordered point cloud.\n\n Returns:\n a [H, W, 3] ordered point cloud.\n \"\"\"\n xyz_img = xyz_img.copy()\n\n H, W, C = xyz_img.shape\n\n # Additive noise: Gaussian process, approximated by zero-mean anisotropic Gaussian random variable,\n # which is rescaled with bicubic interpolation.\n gp_rescale_factor = np.random.randint(noise_params['gp_rescale_factor_range'][0],\n noise_params['gp_rescale_factor_range'][1])\n gp_scale = np.random.uniform(noise_params['gaussian_scale_range'][0],\n noise_params['gaussian_scale_range'][1])\n\n small_H, small_W = (np.array([H, W]) / gp_rescale_factor).astype(int)\n additive_noise = np.random.normal(loc=0.0, scale=gp_scale, size=(small_H, small_W, C))\n additive_noise = cv2.resize(additive_noise, (W, H), interpolation=cv2.INTER_CUBIC)\n xyz_img[depth_img > 0, :] += additive_noise[depth_img > 0, :]\n\n return xyz_img\n\n\n##### RGB Augmentations #####\n\ndef standardize_image(image):\n \"\"\"Standardize RGB image.\n\n Subtract ImageNet mean and divide by ImageNet stddev.\n Convert a numpy.ndarray [H, W, 3] RGB image to [0,1] range, and then standardizes.\n\n Args:\n image: a [H, W, 3] np.ndarray RGB image.\n\n Returns:\n a [H, W, 3] numpy array of np.float32.\n \"\"\"\n image_standardized = np.zeros_like(image).astype(np.float32)\n\n mean=[0.485, 0.456, 0.406]\n std=[0.229, 0.224, 0.225]\n for i in range(3):\n image_standardized[...,i] = (image[...,i]/255. 
- mean[i]) / std[i]\n\n return image_standardized\n\ndef unstandardize_image(image):\n \"\"\"Convert standardized image back to RGB.\n\n Inverse of standardize_image()\n\n Args:\n image: a [H, W, 3] np.ndarray RGB image.\n\n Returns:\n a [H, W, 3] numpy array of type np.uint8.\n \"\"\"\n\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n orig_img = (image * std[None,None,:] + mean[None,None,:]) * 255.\n return orig_img.round().astype(np.uint8)\n\n\n##### Label transformations #####\n\ndef random_rotation(label, noise_params):\n \"\"\" Randomly rotate mask\n\n @param label: a [H, W] numpy array of {0, 1}\n \"\"\"\n H, W = label.shape\n\n num_tries = 0\n valid_transform = False\n while not valid_transform:\n\n if num_tries >= noise_params['max_augmentation_tries']:\n print('Rotate: Exhausted number of augmentation tries...')\n return label\n\n # Rotate about center of box\n pixel_indices = util_.build_matrix_of_indices(H, W)\n h_idx, w_idx = np.where(label)\n mean = np.mean(pixel_indices[h_idx, w_idx, :], axis=0) # Shape: [2]. y_center, x_center\n\n # Sample an angle\n applied_angle = np.random.uniform(-noise_params['rotation_angle_max'], \n noise_params['rotation_angle_max'])\n\n rotated_label = rotate(label, applied_angle, center=tuple(mean[::-1]), interpolation=cv2.INTER_NEAREST)\n\n # Make sure the mass is reasonable\n if (np.count_nonzero(rotated_label) / rotated_label.size > 0.001) and \\\n (np.count_nonzero(rotated_label) / rotated_label.size < 0.98):\n valid_transform = True\n\n num_tries += 1\n\n return rotated_label\n\ndef random_cut(label, noise_params):\n \"\"\"Randomly cut part of mask.\n\n Args:\n label: a [H, W] numpy array of {0, 1}\n noise_params: a Python dictionary.\n \"\"\"\n\n H, W = label.shape\n\n num_tries = 0\n valid_transform = False\n while not valid_transform:\n\n if num_tries >= noise_params['max_augmentation_tries']:\n print('Cut: Exhausted number of augmentation tries...')\n return label\n\n cut_label = label.copy()\n\n # Sample cut percentage\n cut_percentage = np.random.uniform(noise_params['cut_percentage_min'],\n noise_params['cut_percentage_max'])\n\n x_min, y_min, x_max, y_max = util_.mask_to_tight_box(label)\n if np.random.rand() < 0.5: # choose width\n \n sidelength = x_max - x_min\n if np.random.rand() < 0.5: # from the left\n x = int(round(cut_percentage * sidelength)) + x_min\n cut_label[y_min:y_max+1, x_min:x] = 0\n else: # from the right\n x = x_max - int(round(cut_percentage * sidelength))\n cut_label[y_min:y_max+1, x:x_max+1] = 0\n\n else: # choose height\n \n sidelength = y_max - y_min\n if np.random.rand() < 0.5: # from the top\n y = int(round(cut_percentage * sidelength)) + y_min\n cut_label[y_min:y, x_min:x_max+1] = 0\n else: # from the bottom\n y = y_max - int(round(cut_percentage * sidelength))\n cut_label[y:y_max+1, x_min:x_max+1] = 0\n\n # Make sure the mass is reasonable\n if (np.count_nonzero(cut_label) / cut_label.size > 0.001) and \\\n (np.count_nonzero(cut_label) / cut_label.size < 0.98):\n valid_transform = True\n\n num_tries += 1\n\n return cut_label\n\n\ndef random_add(label, noise_params):\n \"\"\"Randomly add part of mask .\n\n Args:\n label: a [H, W] numpy array of {0, 1}\n noise_params: a Python dictionary.\n \"\"\"\n H, W = label.shape\n\n num_tries = 0\n valid_transform = False\n while not valid_transform:\n if num_tries >= noise_params['max_augmentation_tries']:\n print('Add: Exhausted number of augmentation tries...')\n return label\n\n added_label = label.copy()\n\n # Sample add 
percentage\n add_percentage = np.random.uniform(noise_params['add_percentage_min'],\n noise_params['add_percentage_max'])\n\n x_min, y_min, x_max, y_max = util_.mask_to_tight_box(label)\n\n # Sample translation from center\n translation_percentage_x = np.random.uniform(0, 2*add_percentage)\n tx = int(round( (x_max - x_min) * translation_percentage_x ))\n translation_percentage_y = np.random.uniform(0, 2*add_percentage)\n ty = int(round( (y_max - y_min) * translation_percentage_y ))\n\n if np.random.rand() < 0.5: # choose x direction\n\n sidelength = x_max - x_min\n ty = np.random.choice([-1, 1]) * ty # mask will be moved to the left/right. up/down doesn't matter\n\n if np.random.rand() < 0.5: # mask copied from the left. \n x = int(round(add_percentage * sidelength)) + x_min\n try:\n temp = added_label[y_min+ty : y_max+1+ty, x_min-tx : x-tx]\n added_label[y_min+ty : y_max+1+ty, x_min-tx : x-tx] = np.logical_or(temp, added_label[y_min : y_max+1, x_min : x])\n except ValueError as e: # indices were out of bounds\n num_tries += 1\n continue\n else: # mask copied from the right\n x = x_max - int(round(add_percentage * sidelength))\n try:\n temp = added_label[y_min+ty : y_max+1+ty, x+tx : x_max+1+tx]\n added_label[y_min+ty : y_max+1+ty, x+tx : x_max+1+tx] = np.logical_or(temp, added_label[y_min : y_max+1, x : x_max+1])\n except ValueError as e: # indices were out of bounds\n num_tries += 1\n continue\n\n else: # choose y direction\n\n sidelength = y_max - y_min\n tx = np.random.choice([-1, 1]) * tx # mask will be moved up/down. lef/right doesn't matter\n\n if np.random.rand() < 0.5: # from the top\n y = int(round(add_percentage * sidelength)) + y_min\n try:\n temp = added_label[y_min-ty : y-ty, x_min+tx : x_max+1+tx]\n added_label[y_min-ty : y-ty, x_min+tx : x_max+1+tx] = np.logical_or(temp, added_label[y_min : y, x_min : x_max+1])\n except ValueError as e: # indices were out of bounds\n num_tries += 1\n continue\n else: # from the bottom\n y = y_max - int(round(add_percentage * sidelength))\n try:\n temp = added_label[y+ty : y_max+1+ty, x_min+tx : x_max+1+tx]\n added_label[y+ty : y_max+1+ty, x_min+tx : x_max+1+tx] = np.logical_or(temp, added_label[y : y_max+1, x_min : x_max+1])\n except ValueError as e: # indices were out of bounds\n num_tries += 1\n continue\n\n # Make sure the mass is reasonable\n if (np.count_nonzero(added_label) / added_label.size > 0.001) and \\\n (np.count_nonzero(added_label) / added_label.size < 0.98):\n valid_transform = True\n\n num_tries += 1\n\n return added_label\n\n\n"
]
| [
[
"numpy.random.normal",
"numpy.array",
"numpy.zeros_like",
"numpy.random.rand",
"numpy.random.choice",
"numpy.count_nonzero",
"numpy.logical_or",
"numpy.random.gamma",
"numpy.mean",
"torch.from_numpy",
"numpy.where",
"numpy.random.uniform",
"numpy.random.randint"
]
]
|
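The row above bundles segmentation data-augmentation helpers (tensor conversion, depth and point-cloud noise, ImageNet standardization, random mask rotation/cut/add). Note that its `random_rotation` calls a `rotate(...)` helper that is not imported in the snippet shown, so the sketch below sticks to the dependency-free `array_to_tensor` conversion; it is a minimal, self-contained illustration of the NHWC-to-NCHW layout change, assuming only `numpy` and `torch` are installed, and the batch shape is made up.

```python
import numpy as np
import torch

def array_to_tensor(array: np.ndarray) -> torch.Tensor:
    """numpy [N, H, W, C] or [H, W, C] -> torch.FloatTensor [N, C, H, W] or [C, H, W]."""
    if array.ndim == 4:                                    # NHWC batch
        return torch.from_numpy(array).permute(0, 3, 1, 2).float()
    if array.ndim == 3:                                    # single HWC image
        return torch.from_numpy(array).permute(2, 0, 1).float()
    return torch.from_numpy(array).float()                 # anything else: just cast

batch = np.random.rand(2, 480, 640, 3).astype(np.float32)  # hypothetical NHWC batch
print(array_to_tensor(batch).shape)                        # torch.Size([2, 3, 480, 640])
```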
Mushroomcat9998/U-2-Net | [
"290d82b087b5eb6e7b781cacea18f270badc51e3"
]
| [
"u2net_test.py"
]
| [
"import os\nimport glob\nimport torch\nimport argparse\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom skimage import io\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom model import U2NET\nfrom model import U2NETP\n\nfrom data_loader import RescaleT\nfrom data_loader import ToTensorLab\nfrom data_loader import SalObjDataset\n\nimport numpy as np\n# normalize the predicted SOD probability map\ndef norm_pred(d):\n ma = torch.max(d)\n mi = torch.min(d)\n\n dn = (d-mi) / (ma-mi)\n\n return dn\n\n\ndef save_output(image_name, predict, d_dir):\n predict = predict.squeeze()\n predict_np = predict.cpu().data.numpy()\n\n im = Image.fromarray(predict_np * 255).convert('RGB')\n img_name = image_name.split(os.sep)[-1]\n image = io.imread(image_name)\n imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)\n\n aaa = img_name.split(\".\")\n bbb = aaa[0:-1]\n imidx = bbb[0]\n for i in range(1, len(bbb)):\n imidx = imidx + \".\" + bbb[i]\n imo.save(os.path.join(d_dir, imidx+'.jpg'))\n\n\ndef get_args_parser(add_help=True):\n parser = argparse.ArgumentParser(description=\"U2NET Segmentation Test\", add_help=add_help)\n\n # File parameters\n parser.add_argument(\"--data-path\", default='/content/data', type=str, help=\"path to input images folder\")\n parser.add_argument(\"--save-path\", default='output', type=str, help=\"path for output masks folder\")\n parser.add_argument(\"--model-path\", default='weights/u2net_full_model.pth', type=str, help=\"path to models\")\n\n parser.add_argument(\"--model\", default='u2net', type=str, help=\"model name: u2net or u2netp\")\n parser.add_argument(\"--worker\", default=2, type=int, help=\"number of workers\")\n\n # Pre-processing parameters\n parser.add_argument(\"--resize\", default=320, type=int, help=\"rescale size (int or tuple (h, w))\")\n\n return parser\n\n\ndef main():\n args = get_args_parser().parse_args()\n\n # --------- 1. get image path and name ---------\n model_name = args.model\n\n image_dir = args.data_path\n prediction_dir = args.save_path\n model_path = args.model_path\n\n img_name_list = sorted(glob.glob(os.path.join(image_dir, '*.*')))[:]\n # print(img_name_list)\n\n # --------- 2. dataloader ---------\n # 1. dataloader\n test_salobj_dataset = SalObjDataset(img_name_list=img_name_list,\n lbl_name_list=[],\n transform=transforms.Compose([RescaleT(args.resize),\n ToTensorLab(flag=0)]))\n test_salobj_dataloader = DataLoader(test_salobj_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=args.worker)\n\n # --------- 3. model define ---------\n if model_name == 'u2netp':\n print(\"...load U2NETP---4.7 MB\")\n net = U2NETP(3, 1)\n else:\n print(\"...load U2NET---173.6 MB\")\n net = U2NET(3, 1)\n\n if torch.cuda.is_available():\n net.load_state_dict(torch.load(model_path)['model'])\n net.cuda()\n else:\n net.load_state_dict(torch.load(model_path, map_location='cpu')['model'])\n net.eval()\n\n # --------- 4. 
inference for each image ---------\n for i_test, data_test in tqdm(enumerate(test_salobj_dataloader)):\n\n # print(\"inferring:\", img_name_list[i_test].split(os.sep)[-1])\n\n inputs_test = data_test['image']\n inputs_test = inputs_test.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n inputs_test = Variable(inputs_test.cuda())\n else:\n inputs_test = Variable(inputs_test)\n\n d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)\n\n # normalization\n pred = d1[:, 0, :, :]\n pred = norm_pred(pred)\n\n # save results to test_results folder\n if not os.path.exists(prediction_dir):\n os.makedirs(prediction_dir, exist_ok=True)\n save_output(img_name_list[i_test], pred, prediction_dir)\n\n del d1, d2, d3, d4, d5, d6, d7\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"torch.min",
"torch.max",
"torch.autograd.Variable",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
]
]
|
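For the `u2net_test.py` row above, the central numeric step is the min-max rescaling in `norm_pred`, which maps a saliency map into [0, 1] before it is saved as an image. A small sketch of just that function with made-up values; as in the original, a constant input would divide by zero.

```python
import torch

def norm_pred(d: torch.Tensor) -> torch.Tensor:
    # Min-max normalise the predicted saliency map to the [0, 1] range.
    ma, mi = torch.max(d), torch.min(d)
    return (d - mi) / (ma - mi)

pred = torch.tensor([[0.2, 0.8], [0.5, 1.4]])  # toy predictions
print(norm_pred(pred))  # the minimum maps to 0.0, the maximum to 1.0
```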
msAlcantara/tsgen | [
"b8b5e1d5bf99997135b696284261b9fe41a2a614"
]
| [
"tests/gen_test.py"
]
| [
"import pandas as pd\nimport numpy as np\nimport os.path\nfrom unittest import TestCase\nfrom tsgen.gen import TimeSerieGenerator\n\n\nclass TimeSerieGeneratorTestCase(TestCase):\n def test_generate_df_freq(self):\n ts_gen = TimeSerieGenerator(\n date_start=\"1990-01-01\",\n date_end=\"1990-01-02\",\n freq=\"D\",\n ts_name=\"ts.test\",\n )\n df = ts_gen.generate_df()\n self.assertEqual(len(df), 2)\n\n def test_generate_df_name(self):\n ts_gen = TimeSerieGenerator(\n date_start=\"1990-01-01\",\n ts_name=\"ts.test_generate_df_name\",\n )\n df = ts_gen.generate_df()\n self.assertEqual(df[\"ts_name\"][0], ts_gen.ts_name)\n\n def test_export_df(self):\n ts_gen = TimeSerieGenerator(\n date_start=\"1990-01-01\",\n date_end=\"1990-01-02\",\n freq=\"D\",\n tz=\"UTC\",\n low=0,\n high=100,\n ts_name=\"ts.test_export_df\",\n )\n df = ts_gen.generate_df()\n ts_gen.export_df(df)\n self.assertTrue(\n os.path.exists(\n f\"{ts_gen.ts_name}_{pd.datetime.now().strftime('%y-%m-%d')}.csv\"\n )\n )\n\n def test_export(self):\n ts_gen = TimeSerieGenerator(\n date_start=\"1990-01-01\",\n date_end=\"1990-01-02\",\n freq=\"D\",\n tz=\"UTC\",\n low=0,\n high=100,\n ts_name=\"ts.test_export\",\n )\n ts_gen.generate()\n self.assertTrue(\n os.path.exists(\n f\"{ts_gen.ts_name}_{pd.datetime.now().strftime('%y-%m-%d')}.csv\"\n )\n )\n\n"
]
| [
[
"pandas.datetime.now"
]
]
|
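The `tests/gen_test.py` row above builds its expected CSV filename with `pd.datetime.now()`. `pandas.datetime` was deprecated in pandas 1.0 and removed in later releases, so on recent pandas versions that attribute is gone; the standard-library `datetime` module produces the same date string. A sketch of the equivalent filename construction (the `ts.test_export` prefix is simply the name used in the test):

```python
from datetime import datetime

ts_name = "ts.test_export"
filename = f"{ts_name}_{datetime.now().strftime('%y-%m-%d')}.csv"
print(filename)  # e.g. ts.test_export_24-06-01.csv
```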
jamesthesnake/bonvoyage | [
"60c4d442138a65262496fd7dea0c8c8837b6c5a7"
]
| [
"bonvoyage/tests/test_visualize.py"
]
| [
"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pytest\n\n\[email protected](params=['hexbin', 'scatter'])\ndef kind(request):\n return request.param\n\n\ndef test_waypointplot(waypoints, kind):\n from bonvoyage import waypointplot\n\n fig, ax = plt.subplots()\n waypointplot(waypoints, kind, ax=ax)\n\n if kind == 'hexbin':\n assert isinstance(ax.collections[0], mpl.collections.PolyCollection)\n if kind == 'scatter':\n assert isinstance(ax.collections[0], mpl.collections.PathCollection)\n"
]
| [
[
"matplotlib.pyplot.subplots"
]
]
|
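The `test_visualize.py` row above asserts on the Matplotlib collection types produced by `waypointplot` for its two `kind` values. The same type check can be reproduced without `bonvoyage`, using plain `scatter`/`hexbin` calls on random data; the Agg backend is assumed so the sketch runs headless.

```python
import matplotlib as mpl
mpl.use("Agg")  # headless backend for test-style runs
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
x, y = np.random.rand(100), np.random.rand(100)
ax.scatter(x, y)              # adds a PathCollection
ax.hexbin(x, y, gridsize=10)  # adds a PolyCollection
print(isinstance(ax.collections[0], mpl.collections.PathCollection))  # True
print(isinstance(ax.collections[1], mpl.collections.PolyCollection))  # True
```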
RK900/learna | [
"c61f88fff5275fb627ab16d539ccc9c81e5a4b46"
]
| [
"src/analyse/analyse_experiment_group.py"
]
| [
"from pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport scikits.bootstrap as sci\n\nfrom .read_data import read_data_from_method_path, read_sequence_lengths\nfrom .process_data import (\n solved_across_time_per_run,\n solved_across_time_min,\n runs_solve_instance,\n solved_per_time_limit,\n solved_per_run_quantile,\n time_across_length,\n)\n\n\n_datasets = {\"eterna\", \"rfam_taneda\", \"rfam_learn_validation\", \"rfam_learn_test\"}\n_dataset_sizes = {\n \"eterna\": 100,\n \"rfam_taneda\": 29,\n \"rfam_learn_validation\": 100,\n \"rfam_learn_test\": 100,\n}\n_runs_per_dataset = {\n \"eterna\": 5,\n \"rfam_taneda\": 50,\n \"rfam_learn_validation\": 5,\n \"rfam_learn_test\": 5,\n}\n_timeout_per_dataset = {\n \"eterna\": 86400,\n \"rfam_taneda\": 600,\n \"rfam_learn_validation\": 600,\n \"rfam_learn_test\": 3600,\n}\n_time_limits_per_dataset = {\n \"eterna\": (10, 60, 1800, 3600, 14400, 43200, 86400),\n \"rfam_taneda\": (10, 30, 60, 300, 600),\n \"rfam_learn_validation\": (10, 30, 60, 300, 600, 1800),\n \"rfam_learn_test\": (10, 30, 60, 300, 600, 1200, 1800, 3600),\n}\n_run_quantiles_per_dataset = {\n \"eterna\": (0, 20, 40, 60, 80, 100),\n \"rfam_taneda\": (0, 10, 20, 50, 100),\n \"rfam_learn_validation\": (0, 20, 40, 60, 80, 100),\n \"rfam_learn_test\": (0, 20, 40, 60, 80, 100),\n}\n\n\ndef pd_to_tsv(output_dir, filename, pd, index_label=None):\n output_dir.mkdir(parents=True, exist_ok=True)\n with output_dir.joinpath(filename).open(\"w\") as output_file:\n pd.to_csv(output_file, index_label=index_label, sep=\"\\t\")\n\n\ndef analyse_method(method_path, dataset_name, output_dir, sequences_dir, ci_alpha):\n runs, ids, times = read_data_from_method_path(\n method_path, _timeout_per_dataset[dataset_name]\n )\n\n sequence_lengths = read_sequence_lengths(sequences_dir)\n length_analysis = time_across_length(runs, ids, times, sequence_lengths)\n length_analysis[\"method\"] = method_path.name\n pd_to_tsv(\n output_dir.joinpath(f\"{dataset_name}/{method_path.name}/\"),\n \"length_to_time.tsv\",\n length_analysis,\n index_label=\"length\",\n )\n try:\n per_run_analysis = solved_across_time_per_run(\n runs,\n times,\n ci_alpha,\n _dataset_sizes[dataset_name],\n _timeout_per_dataset[dataset_name],\n )\n pd_to_tsv(\n output_dir.joinpath(f\"{dataset_name}/{method_path.name}/\"),\n f\"ci.tsv\",\n per_run_analysis,\n index_label=\"time\",\n )\n except:\n pd_to_tsv(\n output_dir.joinpath(f\"{dataset_name}/{method_path.name}/\"),\n f\"ci.tsv\",\n pd.DataFrame({'time' : 1e-10, 'high_ci_0.05': 0.0, 'low_ci_0.05' : 0.0, 'mean' : 0.0}, index = [1e-10]),\n index_label=\"time\",\n )\n\n min_analysis = solved_across_time_min(\n runs, ids, times, _dataset_sizes[dataset_name], _timeout_per_dataset[dataset_name]\n )\n pd_to_tsv(\n output_dir.joinpath(f\"{dataset_name}/{method_path.name}/\"),\n f\"min.tsv\",\n min_analysis,\n index_label=\"time\",\n )\n\n # number of runs solving individual instances\n runs_solve_instance_analysis = runs_solve_instance(runs, ids)\n pd_to_tsv(\n output_dir.joinpath(f\"{dataset_name}/{method_path.name}/\"),\n f\"runs_solve_instance.tsv\",\n runs_solve_instance_analysis,\n index_label=\"id\",\n )\n\n # number of structures solved within a given time limit by at least one run\n # TODO(Frederic): get time_limits from command line\n time_limit_analysis = solved_per_time_limit(\n runs, ids, times, time_limits=_time_limits_per_dataset[dataset_name]\n )\n pd_to_tsv(\n output_dir.joinpath(f\"{dataset_name}/{method_path.name}/\"),\n f\"time_limit.tsv\",\n 
time_limit_analysis,\n index_label=\"time\",\n )\n\n # Number of structures solved in X% of the runs\n # TODO(frederic): get quantiles via command line\n solved_per_runs_quantile = solved_per_run_quantile(\n runs,\n ids,\n _runs_per_dataset[dataset_name],\n quantiles=_run_quantiles_per_dataset[dataset_name],\n )\n pd_to_tsv(\n output_dir.joinpath(f\"{dataset_name}/{method_path.name}/\"),\n f\"run_quantiles.tsv\",\n solved_per_runs_quantile,\n index_label=\"quantile\",\n )\n\n\ndef analyse_dataset(dataset_path, output_dir, root_sequences_dir, ci_alpha):\n for method_path in dataset_path.iterdir():\n analyse_method(\n method_path,\n dataset_path.name,\n output_dir,\n root_sequences_dir.joinpath(dataset_path.name),\n ci_alpha,\n )\n\n\ndef analyse_experiment_group(\n experiment_group, analysis_dir, root_sequences_dir, ci_alpha\n):\n for path in experiment_group.iterdir():\n if path.name in _datasets:\n analyse_dataset(\n path, analysis_dir or experiment_group, root_sequences_dir, ci_alpha\n )\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--experiment_group\", required=True, type=Path, help=\"Experiment group to analyse\"\n )\n parser.add_argument(\n \"--analysis_dir\", type=Path, help=\"Root folder for analysis results\"\n )\n parser.add_argument(\n \"--root_sequences_dir\", type=Path, help=\"Root folder for datasets\"\n )\n parser.add_argument(\n \"--ci_alpha\", default=0.05, type=float, help=\"Alpha for confidence intervalls\"\n )\n args = parser.parse_args()\n\n analyse_experiment_group(\n args.experiment_group, args.analysis_dir, args.root_sequences_dir, args.ci_alpha\n )\n"
]
| [
[
"pandas.DataFrame",
"pandas.to_csv"
]
]
|
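In the `analyse_experiment_group.py` row above, every analysis table is written through the small `pd_to_tsv` helper; its third parameter is named `pd`, shadowing the pandas import inside the function body. Below is a standalone sketch of the same helper with that parameter renamed to `df` for clarity, fed with a fallback frame like the one used in the `except` branch (the output path is made up).

```python
from pathlib import Path
import pandas as pd

def pd_to_tsv(output_dir: Path, filename: str, df: pd.DataFrame, index_label=None):
    # Create the directory on demand and write the frame as a tab-separated file.
    output_dir.mkdir(parents=True, exist_ok=True)
    with output_dir.joinpath(filename).open("w") as output_file:
        df.to_csv(output_file, index_label=index_label, sep="\t")

fallback = pd.DataFrame(
    {"high_ci_0.05": 0.0, "low_ci_0.05": 0.0, "mean": 0.0}, index=[1e-10]
)
pd_to_tsv(Path("analysis/eterna/example_method"), "ci.tsv", fallback, index_label="time")
```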
Munyola/dnc | [
"d3d94b3b1f1efc282481910054f82047caf37f65"
]
| [
"train.py"
]
| [
"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Example script to train the DNC on a repeated copy task.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport sonnet as snt\n\nfrom dnc import dnc\nfrom dnc import repeat_copy\n\nFLAGS = tf.flags.FLAGS\n\n# Model parameters\ntf.flags.DEFINE_integer(\"hidden_size\", 64, \"Size of LSTM hidden layer.\")\ntf.flags.DEFINE_integer(\"memory_size\", 16, \"The number of memory slots.\")\ntf.flags.DEFINE_integer(\"word_size\", 16, \"The width of each memory slot.\")\ntf.flags.DEFINE_integer(\"num_write_heads\", 1, \"Number of memory write heads.\")\ntf.flags.DEFINE_integer(\"num_read_heads\", 4, \"Number of memory read heads.\")\ntf.flags.DEFINE_integer(\"clip_value\", 20,\n \"Maximum absolute value of controller and dnc outputs.\")\n\n# Optimizer parameters.\ntf.flags.DEFINE_float(\"max_grad_norm\", 50, \"Gradient clipping norm limit.\")\ntf.flags.DEFINE_float(\"learning_rate\", 1e-4, \"Optimizer learning rate.\")\ntf.flags.DEFINE_float(\"optimizer_epsilon\", 1e-10,\n \"Epsilon used for RMSProp optimizer.\")\n\n# Task parameters\ntf.flags.DEFINE_integer(\"batch_size\", 16, \"Batch size for training.\")\ntf.flags.DEFINE_integer(\"num_bits\", 4, \"Dimensionality of each vector to copy\")\ntf.flags.DEFINE_integer(\n \"min_length\", 1,\n \"Lower limit on number of vectors in the observation pattern to copy\")\ntf.flags.DEFINE_integer(\n \"max_length\", 2,\n \"Upper limit on number of vectors in the observation pattern to copy\")\ntf.flags.DEFINE_integer(\"min_repeats\", 1,\n \"Lower limit on number of copy repeats.\")\ntf.flags.DEFINE_integer(\"max_repeats\", 2,\n \"Upper limit on number of copy repeats.\")\n\n# Training options.\ntf.flags.DEFINE_integer(\"num_training_iterations\", 100000,\n \"Number of iterations to train for.\")\ntf.flags.DEFINE_integer(\"report_interval\", 100,\n \"Iterations between reports (samples, valid loss).\")\ntf.flags.DEFINE_string(\"checkpoint_dir\", \"/tmp/tf/dnc\",\n \"Checkpointing directory.\")\ntf.flags.DEFINE_integer(\"checkpoint_interval\", -1,\n \"Checkpointing step interval.\")\n\n\ndef run_model(input_sequence, output_size):\n \"\"\"Runs model on input sequence.\"\"\"\n\n access_config = {\n \"memory_size\": FLAGS.memory_size,\n \"word_size\": FLAGS.word_size,\n \"num_reads\": FLAGS.num_read_heads,\n \"num_writes\": FLAGS.num_write_heads,\n }\n controller_config = {\n \"hidden_size\": FLAGS.hidden_size,\n }\n clip_value = FLAGS.clip_value\n\n dnc_core = dnc.DNC(access_config, controller_config, output_size, clip_value)\n initial_state = dnc_core.initial_state(FLAGS.batch_size)\n output_sequence, _ = tf.nn.dynamic_rnn(\n cell=dnc_core,\n inputs=input_sequence,\n time_major=True,\n initial_state=initial_state)\n\n return output_sequence\n\n\ndef train(num_training_iterations, report_interval):\n 
\"\"\"Trains the DNC and periodically reports the loss.\"\"\"\n\n dataset = repeat_copy.RepeatCopy(FLAGS.num_bits, FLAGS.batch_size,\n FLAGS.min_length, FLAGS.max_length,\n FLAGS.min_repeats, FLAGS.max_repeats)\n dataset_tensors = dataset()\n\n output_logits = run_model(dataset_tensors.observations, dataset.target_size)\n # Used for visualization.\n output = tf.round(\n tf.expand_dims(dataset_tensors.mask, -1) * tf.sigmoid(output_logits))\n\n train_loss = dataset.cost(output_logits, dataset_tensors.target,\n dataset_tensors.mask)\n\n # Set up optimizer with global norm clipping.\n trainable_variables = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(\n tf.gradients(train_loss, trainable_variables), FLAGS.max_grad_norm)\n\n global_step = tf.get_variable(\n name=\"global_step\",\n shape=[],\n dtype=tf.int64,\n initializer=tf.zeros_initializer(),\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])\n\n optimizer = tf.train.RMSPropOptimizer(\n FLAGS.learning_rate, epsilon=FLAGS.optimizer_epsilon)\n train_step = optimizer.apply_gradients(\n zip(grads, trainable_variables), global_step=global_step)\n\n saver = tf.train.Saver()\n\n if FLAGS.checkpoint_interval > 0:\n hooks = [\n tf.train.CheckpointSaverHook(\n checkpoint_dir=FLAGS.checkpoint_dir,\n save_steps=FLAGS.checkpoint_interval,\n saver=saver)\n ]\n else:\n hooks = []\n\n # Train.\n with tf.train.SingularMonitoredSession(\n hooks=hooks, checkpoint_dir=FLAGS.checkpoint_dir) as sess:\n\n start_iteration = sess.run(global_step)\n total_loss = 0\n\n for train_iteration in range(start_iteration, num_training_iterations):\n _, loss = sess.run([train_step, train_loss])\n total_loss += loss\n\n if (train_iteration + 1) % report_interval == 0:\n dataset_tensors_np, output_np = sess.run([dataset_tensors, output])\n dataset_string = dataset.to_human_readable(dataset_tensors_np,\n output_np)\n tf.logging.info(\"%d: Avg training loss %f.\\n%s\",\n train_iteration, total_loss / report_interval,\n dataset_string)\n total_loss = 0\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(3) # Print INFO log messages.\n train(FLAGS.num_training_iterations, FLAGS.report_interval)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
]
| [
[
"tensorflow.train.SingularMonitoredSession",
"tensorflow.trainable_variables",
"tensorflow.logging.set_verbosity",
"tensorflow.zeros_initializer",
"tensorflow.flags.DEFINE_string",
"tensorflow.train.CheckpointSaverHook",
"tensorflow.expand_dims",
"tensorflow.sigmoid",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.train.Saver",
"tensorflow.logging.info",
"tensorflow.gradients",
"tensorflow.flags.DEFINE_float",
"tensorflow.flags.DEFINE_integer",
"tensorflow.nn.dynamic_rnn",
"tensorflow.app.run"
]
]
|
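The DNC `train.py` row above is TensorFlow 1.x graph code; the part most worth isolating is its optimizer setup (gradients, global-norm clipping, RMSProp). Below is a minimal sketch of that pattern on a toy scalar loss, assuming TensorFlow 2.x with the `tf.compat.v1` shim; under genuine TF 1.x the `compat.v1` prefix and the eager-execution switch are unnecessary.

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # run in graph mode, as the original script does

x = tf.get_variable("x", shape=[], initializer=tf.zeros_initializer())
loss = tf.square(x - 3.0)  # toy loss with its minimum at x = 3

grads, _ = tf.clip_by_global_norm(tf.gradients(loss, [x]), clip_norm=50)
optimizer = tf.train.RMSPropOptimizer(1e-2, epsilon=1e-10)
train_step = optimizer.apply_gradients(zip(grads, [x]))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_step)
    print(sess.run(x))  # x has moved from 0.0 toward the minimum at 3.0
```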
feifzhou/deepmind-research | [
"769bfdbeafbcb472cb8e2c6cfa746b53ac82efc2"
]
| [
"hierarchical_transformer_memory/pycolab_ballet/ballet_environment.py"
]
| [
"# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A pycolab environment for going to the ballet.\n\nA pycolab-based environment for testing memory for sequences of events. The\nenvironment contains some number of \"dancer\" characters in (implicit) 3 x 3\nsquares within a larger 9 x 9 room. The agent starts in the center of the room.\nAt the beginning of an episode, the dancers each do a dance solo of a fixed\nlength, separated by empty time of a fixed length. The agent's actions do\nnothing during the dances. After the last dance ends, the agent must go up to a\ndancer, identified using language describing the dance. The agent is rewarded +1\nfor approaching the correct dancer, 0 otherwise.\n\nThe room is upsampled at a size of 9 pixels per square to render a view for the\nagent, which is cropped in egocentric perspective, i.e. the agent is always in\nthe center of its view (see https://arxiv.org/abs/1910.00571).\n\"\"\"\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport dm_env\n\nimport numpy as np\nfrom pycolab import cropping\n\nfrom hierarchical_transformer_memory.pycolab_ballet import ballet_environment_core as ballet_core\n\nFLAGS = flags.FLAGS\n\nUPSAMPLE_SIZE = 9 # pixels per game square\nSCROLL_CROP_SIZE = 11 # in game squares\n\nDANCER_SHAPES = [\n \"triangle\", \"empty_square\", \"plus\", \"inverse_plus\", \"ex\", \"inverse_ex\",\n \"circle\", \"empty_circle\", \"tee\", \"upside_down_tee\",\n \"h\", \"u\", \"upside_down_u\", \"vertical_stripes\", \"horizontal_stripes\"\n]\n\nCOLORS = {\n \"red\": np.array([255, 0, 0]),\n \"green\": np.array([0, 255, 0]),\n \"blue\": np.array([0, 0, 255]),\n \"purple\": np.array([128, 0, 128]),\n \"orange\": np.array([255, 165, 0]),\n \"yellow\": np.array([255, 255, 0]),\n \"brown\": np.array([128, 64, 0]),\n \"pink\": np.array([255, 64, 255]),\n \"cyan\": np.array([0, 255, 255]),\n \"dark_green\": np.array([0, 100, 0]),\n \"dark_red\": np.array([100, 0, 0]),\n \"dark_blue\": np.array([0, 0, 100]),\n \"olive\": np.array([100, 100, 0]),\n \"teal\": np.array([0, 100, 100]),\n \"lavender\": np.array([215, 200, 255]),\n \"peach\": np.array([255, 210, 170]),\n \"rose\": np.array([255, 205, 230]),\n \"light_green\": np.array([200, 255, 200]),\n \"light_yellow\": np.array([255, 255, 200]),\n}\n\n\ndef _generate_template(object_name):\n \"\"\"Generates a template object image, given a name with color and shape.\"\"\"\n object_color, object_type = object_name.split()\n template = np.zeros((UPSAMPLE_SIZE, UPSAMPLE_SIZE))\n half = UPSAMPLE_SIZE // 2\n if object_type == \"triangle\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if (j <= half and i >= 2 * (half - j)) or (j > half and i >= 2 *\n (j - half)):\n template[i, j] = 1.\n elif object_type == \"square\":\n template[:, :] = 1.\n elif object_type == \"empty_square\":\n template[:2, :] = 1.\n template[-2:, :] = 1.\n template[:, :2] = 1.\n template[:, -2:] = 1.\n elif 
object_type == \"plus\":\n template[:, half - 1:half + 2] = 1.\n template[half - 1:half + 2, :] = 1.\n elif object_type == \"inverse_plus\":\n template[:, :] = 1.\n template[:, half - 1:half + 2] = 0.\n template[half - 1:half + 2, :] = 0.\n elif object_type == \"ex\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1:\n template[i, j] = 1.\n elif object_type == \"inverse_ex\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if not (abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1):\n template[i, j] = 1.\n elif object_type == \"circle\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if (i - half)**2 + (j - half)**2 <= half**2:\n template[i, j] = 1.\n elif object_type == \"empty_circle\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if abs((i - half)**2 + (j - half)**2 - half**2) < 6:\n template[i, j] = 1.\n elif object_type == \"tee\":\n template[:, half - 1:half + 2] = 1.\n template[:3, :] = 1.\n elif object_type == \"upside_down_tee\":\n template[:, half - 1:half + 2] = 1.\n template[-3:, :] = 1.\n elif object_type == \"h\":\n template[:, :3] = 1.\n template[:, -3:] = 1.\n template[half - 1:half + 2, :] = 1.\n elif object_type == \"u\":\n template[:, :3] = 1.\n template[:, -3:] = 1.\n template[-3:, :] = 1.\n elif object_type == \"upside_down_u\":\n template[:, :3] = 1.\n template[:, -3:] = 1.\n template[:3, :] = 1.\n elif object_type == \"vertical_stripes\":\n for j in range(half + UPSAMPLE_SIZE % 2):\n template[:, 2*j] = 1.\n elif object_type == \"horizontal_stripes\":\n for i in range(half + UPSAMPLE_SIZE % 2):\n template[2*i, :] = 1.\n else:\n raise ValueError(\"Unknown object: {}\".format(object_type))\n\n if object_color not in COLORS:\n raise ValueError(\"Unknown color: {}\".format(object_color))\n\n template = np.tensordot(template, COLORS[object_color], axes=0)\n\n return template\n\n\n# Agent and wall templates\n_CHAR_TO_TEMPLATE_BASE = {\n ballet_core.AGENT_CHAR:\n np.tensordot(\n np.ones([UPSAMPLE_SIZE, UPSAMPLE_SIZE]),\n np.array([255, 255, 255]),\n axes=0),\n ballet_core.WALL_CHAR:\n np.tensordot(\n np.ones([UPSAMPLE_SIZE, UPSAMPLE_SIZE]),\n np.array([40, 40, 40]),\n axes=0),\n}\n\n\ndef get_scrolling_cropper(rows=9, cols=9, crop_pad_char=\" \"):\n return cropping.ScrollingCropper(rows=rows, cols=cols,\n to_track=[ballet_core.AGENT_CHAR],\n pad_char=crop_pad_char,\n scroll_margins=(None, None))\n\n\nclass BalletEnvironment(dm_env.Environment):\n \"\"\"A Python environment API for pycolab ballet tasks.\"\"\"\n\n def __init__(self, num_dancers, dance_delay, max_steps, rng=None):\n \"\"\"Construct a BalletEnvironment that wraps pycolab games for agent use.\n\n This class inherits from dm_env and has all the expected methods and specs.\n\n Args:\n num_dancers: The number of dancers to use, between 1 and 8 (inclusive).\n dance_delay: How long to delay between the dances.\n max_steps: The maximum number of steps to allow in an episode, after which\n it will terminate.\n rng: An optional numpy Random Generator, to set a fixed seed use e.g.\n `rng=np.random.default_rng(seed=...)`\n \"\"\"\n self._num_dancers = num_dancers\n self._dance_delay = dance_delay\n self._max_steps = max_steps\n\n # internal state\n if rng is None:\n rng = np.random.default_rng()\n self._rng = rng\n self._current_game = None # Current pycolab game instance.\n self._state = None # Current game step state.\n self._game_over = None # Whether the game has ended.\n 
self._char_to_template = None # Mapping of chars to sprite images.\n\n # rendering tools\n self._cropper = get_scrolling_cropper(SCROLL_CROP_SIZE, SCROLL_CROP_SIZE,\n \" \")\n\n def _game_factory(self):\n \"\"\"Samples dancers and positions, returns a pycolab core game engine.\"\"\"\n target_dancer_index = self._rng.integers(self._num_dancers)\n motions = list(ballet_core.DANCE_SEQUENCES.keys())\n positions = ballet_core.DANCER_POSITIONS.copy()\n colors = list(COLORS.keys())\n shapes = DANCER_SHAPES.copy()\n self._rng.shuffle(positions)\n self._rng.shuffle(motions)\n self._rng.shuffle(colors)\n self._rng.shuffle(shapes)\n dancers_and_properties = []\n for dancer_i in range(self._num_dancers):\n if dancer_i == target_dancer_index:\n value = 1.\n else:\n value = 0.\n dancers_and_properties.append(\n (ballet_core.POSSIBLE_DANCER_CHARS[dancer_i],\n positions[dancer_i],\n motions[dancer_i],\n shapes[dancer_i],\n colors[dancer_i],\n value))\n\n logging.info(\"Making level with dancers_and_properties: %s\",\n dancers_and_properties)\n\n return ballet_core.make_game(\n dancers_and_properties=dancers_and_properties,\n dance_delay=self._dance_delay)\n\n def _render_observation(self, observation):\n \"\"\"Renders from raw pycolab image observation to agent-usable ones.\"\"\"\n observation = self._cropper.crop(observation)\n obs_rows, obs_cols = observation.board.shape\n image = np.zeros([obs_rows * UPSAMPLE_SIZE, obs_cols * UPSAMPLE_SIZE, 3],\n dtype=np.float32)\n for i in range(obs_rows):\n for j in range(obs_cols):\n this_char = chr(observation.board[i, j])\n if this_char != ballet_core.FLOOR_CHAR:\n image[\n i * UPSAMPLE_SIZE:(i + 1) * UPSAMPLE_SIZE, j *\n UPSAMPLE_SIZE:(j + 1) * UPSAMPLE_SIZE] = self._char_to_template[\n this_char]\n image /= 255.\n language = np.array(self._current_game.the_plot[\"instruction_string\"])\n full_observation = (image, language)\n return full_observation\n\n def reset(self):\n \"\"\"Start a new episode.\"\"\"\n # Build a new game and retrieve its first set of state/reward/discount.\n self._current_game = self._game_factory()\n # set up rendering, cropping, and state for current game\n self._char_to_template = {\n k: _generate_template(v) for k, v in self._current_game.the_plot[\n \"char_to_color_shape\"]}\n self._char_to_template.update(_CHAR_TO_TEMPLATE_BASE)\n self._cropper.set_engine(self._current_game)\n self._state = dm_env.StepType.FIRST\n # let's go!\n observation, _, _ = self._current_game.its_showtime()\n observation = self._render_observation(observation)\n return dm_env.TimeStep(\n step_type=self._state,\n reward=None,\n discount=None,\n observation=observation)\n\n def step(self, action):\n \"\"\"Apply action, step the world forward, and return observations.\"\"\"\n # If needed, reset and start new episode.\n if self._state == dm_env.StepType.LAST:\n self._clear_state()\n if self._current_game is None:\n return self.reset()\n\n # Execute the action in pycolab.\n observation, reward, discount = self._current_game.play(action)\n\n self._game_over = self._is_game_over()\n reward = reward if reward is not None else 0.\n observation = self._render_observation(observation)\n\n # Check the current status of the game.\n if self._game_over:\n self._state = dm_env.StepType.LAST\n else:\n self._state = dm_env.StepType.MID\n\n return dm_env.TimeStep(\n step_type=self._state,\n reward=reward,\n discount=discount,\n observation=observation)\n\n @property\n def observation_spec(self):\n image_shape = (SCROLL_CROP_SIZE * UPSAMPLE_SIZE,\n SCROLL_CROP_SIZE * 
UPSAMPLE_SIZE,\n 3)\n return (\n # vision\n dm_env.specs.Array(\n shape=image_shape, dtype=np.float32, name=\"image\"),\n # language\n dm_env.specs.Array(\n shape=[], dtype=str, name=\"language\"),\n )\n\n @property\n def action_spec(self):\n return dm_env.specs.BoundedArray(\n shape=[], dtype=\"int32\",\n minimum=0, maximum=7,\n name=\"grid_actions\")\n\n def _is_game_over(self):\n \"\"\"Returns whether it is game over, either from the engine or timeout.\"\"\"\n return (self._current_game.game_over or\n (self._current_game.the_plot.frame >= self._max_steps))\n\n def _clear_state(self):\n \"\"\"Clear all the internal information about the game.\"\"\"\n self._state = None\n self._current_game = None\n self._char_to_template = None\n self._game_over = None\n\n\ndef simple_builder(level_name):\n \"\"\"Simplifies building from fixed defs.\n\n Args:\n level_name: '{num_dancers}_delay{delay_length}', where each variable is an\n integer. The levels used in the paper were:\n ['2_delay16', '4_delay16', '8_delay16',\n '2_delay48', '4_delay48', '8_delay48']\n\n Returns:\n A BalletEnvironment with the requested settings.\n \"\"\"\n num_dancers, dance_delay = level_name.split(\"_\")\n num_dancers = int(num_dancers)\n dance_delay = int(dance_delay[5:])\n max_steps = 320 if dance_delay == 16 else 1024\n level_args = dict(\n num_dancers=num_dancers,\n dance_delay=dance_delay,\n max_steps=max_steps)\n return BalletEnvironment(**level_args)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError(\"Too many command-line arguments.\")\n env = simple_builder(\"4_delay16\")\n for _ in range(3):\n obs = env.reset().observation\n for _ in range(300):\n obs = env.step(0).observation\n print(obs)\n\nif __name__ == \"__main__\":\n app.run(main)\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"numpy.ones",
"numpy.random.default_rng",
"numpy.tensordot"
]
]
|
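The ballet-environment row above renders each dancer by building a binary shape template and colouring it with an outer product (`np.tensordot(..., axes=0)`), as in `_generate_template`. A numpy-only sketch of that rendering trick for the "plus" shape, with the red colour value copied from the row's `COLORS` table:

```python
import numpy as np

UPSAMPLE_SIZE = 9
half = UPSAMPLE_SIZE // 2

template = np.zeros((UPSAMPLE_SIZE, UPSAMPLE_SIZE))
template[:, half - 1:half + 2] = 1.0   # vertical bar of the plus
template[half - 1:half + 2, :] = 1.0   # horizontal bar of the plus

red = np.array([255, 0, 0])
image = np.tensordot(template, red, axes=0)  # outer product -> (9, 9, 3) RGB sprite
print(image.shape, image[half, half])        # (9, 9, 3) [255.   0.   0.]
```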
yzh119/tvm | [
"19400c9967020ca822399f57de0253c3dc98845b"
]
| [
"tests/python/unittest/test_tir_intrin.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nimport tvm.testing\nfrom tvm import te, tir\nfrom tvm import topi\nfrom tvm.contrib import utils, clang\nfrom tvm.script import tir as T\nimport numpy as np\nimport ctypes\nimport math\n\n\ndef test_nearbyint():\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n A_rounded = te.compute((m,), lambda *i: tvm.tir.nearbyint(A(*i)), name=\"A\")\n s = te.create_schedule(A_rounded.op)\n f = tvm.build(s, [A, A_rounded], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(high=100, size=n).astype(A.dtype), dev)\n a_rounded = tvm.nd.array(np.random.uniform(size=n).astype(A_rounded.dtype), dev)\n f(a, a_rounded)\n # Note that numpys rint rounds to nearest integer with\n # ties to halfway is broken by rounding to even.\n # So that 1.5 and 2.5 will round 2.\n # This is the default rounding mode with libc as well.\n # However one can set a different rounding mode and in that\n # case numpy result might differ.\n tvm.testing.assert_allclose(a_rounded.numpy(), np.rint(a.numpy()))\n\n\ndef test_round_intrinsics_on_int():\n i = tvm.te.var(\"i\", \"int32\")\n for op in [tvm.tir.round, tvm.tir.trunc, tvm.tir.ceil, tvm.tir.floor, tvm.tir.nearbyint]:\n assert op(tvm.tir.const(10, \"int32\")).value == 10\n assert op(tvm.tir.const(True, \"bool\")).value == True\n assert op(i).same_as(i)\n\n assert tvm.tir.isnan(tvm.tir.const(10, \"int32\")).value == False\n\n\ndef test_unary_intrin():\n test_funcs = [\n (tvm.tir.exp10, lambda x: np.power(10, x)),\n (tvm.tir.log2, lambda x: np.log2(x)),\n (tvm.tir.log10, lambda x: np.log10(x)),\n (tvm.tir.sinh, lambda x: np.sinh(x)),\n (tvm.tir.cosh, lambda x: np.cosh(x)),\n (tvm.tir.log1p, lambda x: np.log1p(x)),\n (tvm.tir.asin, lambda x: np.arcsin(x)),\n (tvm.tir.acos, lambda x: np.arccos(x)),\n (tvm.tir.atan, lambda x: np.arctan(x)),\n (tvm.tir.asinh, lambda x: np.arcsinh(x)),\n (tvm.tir.acosh, lambda x: np.arccosh(x)),\n (tvm.tir.atanh, lambda x: np.arctanh(x)),\n ]\n\n def run_test(tvm_intrin, np_func):\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n B = te.compute((m,), lambda *i: tvm_intrin(A(*i)), name=\"B\")\n s = te.create_schedule(B.op)\n f = tvm.build(s, [A, B], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(0.1, 0.5, size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n f(a, b)\n tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-5, rtol=1e-5)\n\n for func in test_funcs:\n run_test(*func)\n\n\ndef test_binary_intrin():\n test_funcs = [\n (tvm.tir.atan2, lambda x1, x2: np.arctan2(x1, x2)),\n (tvm.tir.nextafter, lambda x1, x2: np.nextafter(x1, x2)),\n (tvm.tir.copysign, lambda x1, x2: np.copysign(x1, x2)),\n (tvm.tir.hypot, 
lambda x1, x2: np.hypot(x1, x2)),\n ]\n\n def run_test(tvm_intrin, np_func):\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n B = te.placeholder((m,), name=\"B\")\n C = te.compute((m,), lambda *i: tvm_intrin(A(*i), B(*i)), name=\"C\")\n s = te.create_schedule(C.op)\n f = tvm.build(s, [A, B, C], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(B.dtype), dev)\n c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n f(a, b, c)\n tvm.testing.assert_allclose(c.numpy(), np_func(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5)\n\n for func in test_funcs:\n run_test(*func)\n\n\ndef test_ldexp():\n m = te.var(\n \"m\",\n )\n A = te.placeholder((m,), name=\"A\")\n B = te.placeholder((m,), name=\"B\", dtype=\"int32\")\n C = te.compute((m,), lambda *i: tvm.tir.ldexp(A(*i), B(*i)), name=\"C\")\n s = te.create_schedule(C.op)\n f = tvm.build(s, [A, B, C], \"llvm\")\n dev = tvm.cpu(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.randint(0, 5, size=n).astype(B.dtype), dev)\n c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n f(a, b, c)\n tvm.testing.assert_allclose(c.numpy(), np.ldexp(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5)\n\n\ndtype = tvm.testing.parameter(\"int32\", \"int64\")\n\n\[email protected]_targets(\"llvm\", \"vulkan -from_device=0\")\ndef test_clz(target, dev, dtype):\n target = tvm.target.Target(target)\n if (\n target.kind.name == \"vulkan\"\n and dtype == \"int64\"\n and not target.attrs.get(\"supports_int64\", False)\n ):\n pytest.xfail(\"Vulkan target does not support Int64 types\")\n\n def clz_np(x, dtype):\n ceil_log2 = np.ceil(np.log2(x)).astype(dtype)\n bits = int(dtype[-2:])\n clz = bits - ceil_log2\n clz[np.bitwise_and(x, x - 1) == 0] -= 1\n return clz\n\n m = te.var(\"m\")\n A = te.placeholder((m,), name=\"A\", dtype=dtype)\n B = te.compute((m,), lambda *i: tvm.tir.clz(A(*i)), name=\"B\")\n s = te.create_schedule(B.op)\n\n if target.kind.name == \"vulkan\":\n bx, tx = s[B].split(B.op.axis[0], factor=64)\n\n s[B].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[B].bind(tx, te.thread_axis(\"threadIdx.x\"))\n\n f = tvm.build(s, [A, B], target)\n n = 10\n\n highs = [10, 100, 1000, 10000, 100000, 1000000]\n\n if dtype == \"int64\":\n highs.append((1 << 63) - 1)\n\n for high in highs:\n a_np = np.random.randint(1, high=high, size=(n,), dtype=dtype)\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(np.zeros((n,)).astype(\"int32\"), dev)\n f(a, b)\n ref = clz_np(a_np, dtype)\n np.testing.assert_equal(b.numpy(), ref)\n\n\[email protected]_module\nclass Module:\n @T.prim_func\n def test_tir_fma(A: T.handle, B: T.handle, C: T.handle, d: T.handle) -> None:\n # function attr dict\n T.func_attr({\"global_symbol\": \"test_fma\", \"tir.noalias\": True})\n n = T.var(\"int32\")\n stride = T.var(\"int32\")\n stride_1 = T.var(\"int32\")\n stride_2 = T.var(\"int32\")\n stride_3 = T.var(\"int32\")\n A_1 = T.match_buffer(\n A,\n [n],\n strides=[stride],\n elem_offset=0,\n align=128,\n offset_factor=1,\n type=\"auto\",\n )\n B_1 = T.match_buffer(\n B,\n [n],\n strides=[stride_1],\n elem_offset=0,\n align=128,\n offset_factor=1,\n type=\"auto\",\n )\n C_1 = T.match_buffer(\n C,\n [n],\n strides=[stride_2],\n elem_offset=0,\n align=128,\n offset_factor=1,\n type=\"auto\",\n )\n d_1 = T.match_buffer(\n d,\n [n],\n strides=[stride_3],\n elem_offset=0,\n align=128,\n offset_factor=1,\n 
type=\"auto\",\n )\n # body\n for i in T.serial(0, n):\n d_1.data[(i * stride_3)] = (\n T.load(\"float32\", A_1.data, (i * stride))\n * T.load(\"float32\", B_1.data, (i * stride_1))\n ) + T.load(\"float32\", C_1.data, (i * stride_2))\n\n\ndef test_fma():\n opt = tvm.transform.Sequential(\n [\n tvm.tir.transform.Apply(lambda f: f.with_attr(\"target\", tvm.target.Target(\"llvm\"))),\n tvm.tir.transform.LowerIntrin(),\n ]\n )\n mod = opt(Module)\n assert mod[\"test_tir_fma\"].body.body.value.op.name == \"tir.call_llvm_pure_intrin\"\n\n\[email protected]\ndef binary_search(a: ty.handle, b: ty.handle, c: ty.handle, d: ty.handle) -> None:\n n = tir.var('int32')\n m = tir.var('int32')\n A = tir.match_buffer(a, (n,), dtype='int32')\n B = tir.match_buffer(b, (m,), dtype='int32')\n C = tir.match_buffer(c, (m,), dtype='int32')\n D = tir.match_buffer(d, (m,), dtype='int32')\n with tir.block([m], 'search') as [vi]:\n tir.reads([A[0:n], B[vi]])\n tir.writes([C[vi], D[vi]])\n C[vi] = tir.lower_bound(A.data, B[vi], 0, n)\n D[vi] = tir.upper_bound(A.data, B[vi], 0, n)\n\n\ndef test_binary_search():\n sch = tir.Schedule(binary_search)\n b = sch.get_block('search')\n i, = sch.get_loops(b)\n io, ii = sch.split(i, [1, None])\n sch.bind(io, 'threadIdx.x')\n sch.bind(ii, 'blockIdx.x')\n f = tvm.build(sch.mod['main'], target='cuda')\n # print(f.imported_modules[0].get_source())\n\n x = np.arange(-128, 128).astype(np.int32)\n y = np.random.randint(-200, 200, size=1024).astype(np.int32) \n a = np.zeros((1024,)).astype(np.int32)\n b = np.zeros((1024,)).astype(np.int32)\n\n # numpy results\n np_a = np.searchsorted(x, y, side='left').astype(np.int32)\n np_b = np.searchsorted(x, y, side='right').astype(np.int32)\n\n # tvm results\n dev = tvm.cuda(0)\n x_array = tvm.nd.array(x, device=dev)\n y_array = tvm.nd.array(y, device=dev)\n a_array = tvm.nd.array(a, device=dev) \n b_array = tvm.nd.array(b, device=dev)\n f(x_array, y_array, a_array, b_array)\n tvm_a = a_array.numpy()\n tvm_b = b_array.numpy()\n\n # verify result\n tvm.testing.assert_allclose(np_a, tvm_a)\n tvm.testing.assert_allclose(np_b, tvm_b)\n\n\nif __name__ == \"__main__\":\n test_nearbyint()\n test_unary_intrin()\n test_round_intrinsics_on_int()\n test_binary_intrin()\n test_ldexp()\n test_clz()\n test_fma()\n test_binary_search()\n"
]
| [
[
"numpy.nextafter",
"numpy.arccos",
"numpy.arcsin",
"numpy.log1p",
"numpy.arange",
"numpy.random.randint",
"numpy.sinh",
"numpy.hypot",
"numpy.log10",
"numpy.arctanh",
"numpy.zeros",
"numpy.arccosh",
"numpy.arctan",
"numpy.copysign",
"numpy.power",
"numpy.arctan2",
"numpy.bitwise_and",
"numpy.searchsorted",
"numpy.log2",
"numpy.cosh",
"numpy.random.uniform",
"numpy.arcsinh"
]
]
|
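Most of the TVM intrinsic tests above need a built TVM with LLVM or CUDA targets, but the numpy reference used by `test_clz` is self-contained: count leading zeros as `bits - ceil(log2(x))`, with an off-by-one correction for exact powers of two. A sketch of that reference on its own:

```python
import numpy as np

def clz_np(x: np.ndarray, dtype: str = "int32") -> np.ndarray:
    # Reference count-leading-zeros, as used to check tvm.tir.clz.
    ceil_log2 = np.ceil(np.log2(x)).astype(dtype)
    bits = int(dtype[-2:])                   # "int32" -> 32, "int64" -> 64
    clz = bits - ceil_log2
    clz[np.bitwise_and(x, x - 1) == 0] -= 1  # exact powers of two need one fewer
    return clz

x = np.array([1, 2, 3, 16, 1000], dtype="int32")
print(clz_np(x))  # [31 30 30 27 22]
```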
NunoEdgarGFlowHub/gandissect | [
"1a162a6bd3d4842139feb9f191aa1fad565dee4e"
]
| [
"netdissect/upsegmodel/prroi_pool/functional.py"
]
| [
"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File : functional.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email : [email protected], [email protected]\n# Date : 07/13/2018\n# \n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch\nimport torch.autograd as ag\n\ntry:\n from . import _prroi_pooling\nexcept ImportError:\n raise ImportError('Can not found the compiled Precise RoI Pooling library. Run ./travis.sh in the directory first.')\n\n__all__ = ['prroi_pool2d']\n\n\nclass PrRoIPool2DFunction(ag.Function):\n @staticmethod\n def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):\n features = features.contiguous()\n rois = rois.contiguous()\n pooled_height = int(pooled_height)\n pooled_width = int(pooled_width)\n spatial_scale = float(spatial_scale)\n\n params = (pooled_height, pooled_width, spatial_scale)\n batch_size, nr_channels, data_height, data_width = features.size()\n nr_rois = rois.size(0)\n output = torch.zeros(\n (nr_rois, nr_channels, pooled_height, pooled_width),\n dtype=features.dtype, device=features.device\n )\n\n if features.is_cuda:\n _prroi_pooling.prroi_pooling_forward_cuda(features, rois, output, *params)\n ctx.params = params\n # everything here is contiguous.\n ctx.save_for_backward(features, rois, output)\n else:\n raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.')\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n features, rois, output = ctx.saved_tensors\n grad_input = grad_coor = None\n\n if features.requires_grad:\n grad_output = grad_output.contiguous()\n grad_input = torch.zeros_like(features)\n _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, grad_input, *ctx.params)\n if rois.requires_grad:\n grad_output = grad_output.contiguous()\n grad_coor = torch.zeros_like(rois)\n _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, grad_coor, *ctx.params)\n\n return grad_input, grad_coor, None, None, None\n\n\nprroi_pool2d = PrRoIPool2DFunction.apply\n\n"
]
| [
[
"torch.zeros",
"torch.zeros_like"
]
]
|
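The Precise RoI Pooling row above cannot run without its compiled CUDA extension, but the `torch.autograd.Function` pattern it follows (save tensors in `forward`, return per-input gradients from `backward`) can be shown on a trivial op. This is a sketch of the pattern only, not the RoI pooling operator itself.

```python
import torch
import torch.autograd as ag

class Square(ag.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)    # stash inputs needed by backward
        return x * x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return 2 * x * grad_output  # d(x^2)/dx, chained with the upstream grad

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
Square.apply(x).sum().backward()
print(x.grad)  # tensor([2., 4., 6.])
```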
dongzhi0312/can | [
"067d19844f1bf0e058acd03c23f47449686570ac"
]
| [
"solver/can_solver.py"
]
| [
"import torch\nimport torch.nn as nn\nimport os\nfrom . import utils as solver_utils\nfrom utils.utils import to_cuda, to_onehot\nfrom torch import optim\nfrom . import clustering\nfrom discrepancy.cdd import CDD\nfrom math import ceil as ceil\nfrom .base_solver import BaseSolver\nfrom copy import deepcopy\n\n\nclass CANSolver(BaseSolver):\n def __init__(self, net, dataloader, bn_domain_map={}, resume=None, **kwargs):\n super(CANSolver, self).__init__(net, dataloader, \\\n bn_domain_map=bn_domain_map, resume=resume, **kwargs)\n\n if len(self.bn_domain_map) == 0:\n self.bn_domain_map = {self.source_name: 0, self.target_name: 1}\n\n self.clustering_source_name = 'clustering_' + self.source_name\n self.clustering_target_name = 'clustering_' + self.target_name\n\n assert ('categorical' in self.train_data)\n\n num_layers = len(self.net.module.FC) + 1\n self.cdd = CDD(kernel_num=self.opt.CDD.KERNEL_NUM, kernel_mul=self.opt.CDD.KERNEL_MUL,\n num_layers=num_layers, num_classes=self.opt.DATASET.NUM_CLASSES,\n intra_only=self.opt.CDD.INTRA_ONLY)\n\n self.discrepancy_key = 'intra' if self.opt.CDD.INTRA_ONLY else 'cdd'\n self.clustering = clustering.Clustering(self.opt.CLUSTERING.EPS,\n self.opt.CLUSTERING.FEAT_KEY,\n self.opt.CLUSTERING.BUDGET)\n\n self.clustered_target_samples = {}\n\n def complete_training(self):\n if self.loop >= self.opt.TRAIN.MAX_LOOP:\n return True\n\n if 'target_centers' not in self.history or \\\n 'ts_center_dist' not in self.history or \\\n 'target_labels' not in self.history:\n return False\n\n if len(self.history['target_centers']) < 2 or \\\n len(self.history['ts_center_dist']) < 1 or \\\n len(self.history['target_labels']) < 2:\n return False\n\n # target centers along training\n target_centers = self.history['target_centers']\n eval1 = torch.mean(self.clustering.Dist.get_dist(target_centers[-1],\n target_centers[-2])).item()\n\n # target-source center distances along training\n eval2 = self.history['ts_center_dist'][-1].item()\n\n # target labels along training\n path2label_hist = self.history['target_labels']\n paths = self.clustered_target_samples['data']\n num = 0\n for path in paths:\n pre_label = path2label_hist[-2][path]\n cur_label = path2label_hist[-1][path]\n if pre_label != cur_label:\n num += 1\n eval3 = 1.0 * num / len(paths)\n\n return (eval1 < self.opt.TRAIN.STOP_THRESHOLDS[0] and \\\n eval2 < self.opt.TRAIN.STOP_THRESHOLDS[1] and \\\n eval3 < self.opt.TRAIN.STOP_THRESHOLDS[2])\n\n def solve(self):\n stop = False\n if self.resume:\n self.iters += 1\n self.loop += 1\n\n while True:\n # updating the target label hypothesis through clustering\n target_hypt = {}\n filtered_classes = []\n with torch.no_grad():\n # self.update_ss_alignment_loss_weight()\n print('Clustering based on %s...' 
% self.source_name)\n # 1-3 生成目标域的伪标签\n self.update_labels()\n self.clustered_target_samples = self.clustering.samples\n target_centers = self.clustering.centers\n center_change = self.clustering.center_change\n path2label = self.clustering.path2label\n\n # updating the history\n self.register_history('target_centers', target_centers,\n self.opt.CLUSTERING.HISTORY_LEN)\n self.register_history('ts_center_dist', center_change,\n self.opt.CLUSTERING.HISTORY_LEN)\n self.register_history('target_labels', path2label,\n self.opt.CLUSTERING.HISTORY_LEN)\n\n if self.clustered_target_samples is not None and \\\n self.clustered_target_samples['gt'] is not None:\n preds = to_onehot(self.clustered_target_samples['label'],\n self.opt.DATASET.NUM_CLASSES)\n gts = self.clustered_target_samples['gt']\n res = self.model_eval(preds, gts)\n print('Clustering %s: %.4f' % (self.opt.EVAL_METRIC, res))\n\n # check if meet the stop condition\n stop = self.complete_training()\n if stop: break\n\n # 4.过滤掉模糊的样本和类别,filtering the clustering results\n target_hypt, filtered_classes = self.filtering()\n\n # update dataloaders\n self.construct_categorical_dataloader(target_hypt, filtered_classes)\n # update train data setting\n self.compute_iters_per_loop(filtered_classes)\n\n # 5.k步更新网络参数,k-step update of network parameters through forward-backward process\n self.update_network(filtered_classes)\n self.loop += 1\n\n print('Training Done!')\n\n def update_labels(self):\n net = self.net\n net.eval()\n opt = self.opt\n\n source_dataloader = self.train_data[self.clustering_source_name]['loader']\n net.module.set_bn_domain(self.bn_domain_map[self.source_name])\n\n # 1.用 resnet50 提取特征,聚类生成源域的聚类中心,源域类别数是聚类中心的个数\n source_centers = solver_utils.get_centers(net,\n source_dataloader, self.opt.DATASET.NUM_CLASSES,\n self.opt.CLUSTERING.FEAT_KEY)\n # 2.目标域的初始值赋值为源域的中心\n init_target_centers = source_centers\n\n target_dataloader = self.train_data[self.clustering_target_name]['loader']\n net.module.set_bn_domain(self.bn_domain_map[self.target_name])\n\n # 3.在目标域上执行聚类算法,生成伪标签,我要加的 MMT 就是在生成伪标签的时候\n self.clustering.set_init_centers(init_target_centers)\n self.clustering.feature_clustering(net, target_dataloader)\n\n def filtering(self):\n threshold = self.opt.CLUSTERING.FILTERING_THRESHOLD\n min_sn_cls = self.opt.TRAIN.MIN_SN_PER_CLASS\n target_samples = self.clustered_target_samples\n\n # filtering the samples\n chosen_samples = solver_utils.filter_samples(\n target_samples, threshold=threshold)\n\n # filtering the classes\n filtered_classes = solver_utils.filter_class(\n chosen_samples['label'], min_sn_cls, self.opt.DATASET.NUM_CLASSES)\n\n print('The number of filtered classes: %d.' 
% len(filtered_classes))\n return chosen_samples, filtered_classes\n\n def construct_categorical_dataloader(self, samples, filtered_classes):\n # update self.dataloader\n target_classwise = solver_utils.split_samples_classwise(\n samples, self.opt.DATASET.NUM_CLASSES)\n\n dataloader = self.train_data['categorical']['loader']\n classnames = dataloader.classnames\n dataloader.class_set = [classnames[c] for c in filtered_classes]\n dataloader.target_paths = {classnames[c]: target_classwise[c]['data'] \\\n for c in filtered_classes}\n dataloader.num_selected_classes = min(self.opt.TRAIN.NUM_SELECTED_CLASSES, len(filtered_classes))\n dataloader.construct()\n\n def CAS(self):\n samples = self.get_samples('categorical')\n\n source_samples = samples['Img_source']\n source_sample_paths = samples['Path_source']\n source_nums = [len(paths) for paths in source_sample_paths]\n\n target_samples = samples['Img_target']\n target_sample_paths = samples['Path_target']\n target_nums = [len(paths) for paths in target_sample_paths]\n\n source_sample_labels = samples['Label_source']\n self.selected_classes = [labels[0].item() for labels in source_sample_labels]\n assert (self.selected_classes ==\n [labels[0].item() for labels in samples['Label_target']])\n return source_samples, source_nums, target_samples, target_nums\n\n def prepare_feats(self, feats):\n return [feats[key] for key in feats if key in self.opt.CDD.ALIGNMENT_FEAT_KEYS]\n\n def compute_iters_per_loop(self, filtered_classes):\n self.iters_per_loop = int(\n len(self.train_data['categorical']['loader'])) * self.opt.TRAIN.UPDATE_EPOCH_PERCENTAGE\n print('Iterations in one loop: %d' % (self.iters_per_loop))\n\n def update_network(self, filtered_classes):\n # initial configuration\n stop = False\n update_iters = 0\n\n self.train_data[self.source_name]['iterator'] = \\\n iter(self.train_data[self.source_name]['loader'])\n self.train_data['categorical']['iterator'] = \\\n iter(self.train_data['categorical']['loader'])\n\n while not stop:\n # update learning rate\n self.update_lr()\n\n # set the status of network\n self.net.train()\n self.net.zero_grad()\n\n loss = 0\n ce_loss_iter = 0\n cdd_loss_iter = 0\n\n # coventional sampling for training on labeled source data\n source_sample = self.get_samples(self.source_name)\n source_data, source_gt = source_sample['Img'], \\\n source_sample['Label']\n\n source_data = to_cuda(source_data)\n source_gt = to_cuda(source_gt)\n self.net.module.set_bn_domain(self.bn_domain_map[self.source_name])\n source_preds = self.net(source_data)['logits']\n\n # compute the cross-entropy loss\n ce_loss = self.CELoss(source_preds, source_gt)\n ce_loss.backward()\n\n ce_loss_iter += ce_loss\n loss += ce_loss\n\n if len(filtered_classes) > 0:\n # update the network parameters\n # 1) class-aware sampling\n source_samples_cls, source_nums_cls, \\\n target_samples_cls, target_nums_cls = self.CAS()\n\n # 2) forward and compute the loss\n source_cls_concat = torch.cat([to_cuda(samples)\n for samples in source_samples_cls], dim=0)\n target_cls_concat = torch.cat([to_cuda(samples)\n for samples in target_samples_cls], dim=0)\n\n self.net.module.set_bn_domain(self.bn_domain_map[self.source_name])\n feats_source = self.net(source_cls_concat)\n self.net.module.set_bn_domain(self.bn_domain_map[self.target_name])\n feats_target = self.net(target_cls_concat)\n\n # prepare the features\n feats_toalign_S = self.prepare_feats(feats_source)\n feats_toalign_T = self.prepare_feats(feats_target)\n\n cdd_loss = self.cdd.forward(feats_toalign_S, 
feats_toalign_T,\n source_nums_cls, target_nums_cls)[self.discrepancy_key]\n\n cdd_loss *= self.opt.CDD.LOSS_WEIGHT\n cdd_loss.backward()\n\n cdd_loss_iter += cdd_loss\n loss += cdd_loss\n\n # update the network\n self.optimizer.step()\n\n if self.opt.TRAIN.LOGGING and (update_iters + 1) % \\\n (max(1, self.iters_per_loop // self.opt.TRAIN.NUM_LOGGING_PER_LOOP)) == 0:\n accu = self.model_eval(source_preds, source_gt)\n cur_loss = {'ce_loss': ce_loss_iter, 'cdd_loss': cdd_loss_iter,\n 'total_loss': loss}\n self.logging(cur_loss, accu)\n\n self.opt.TRAIN.TEST_INTERVAL = min(1.0, self.opt.TRAIN.TEST_INTERVAL)\n self.opt.TRAIN.SAVE_CKPT_INTERVAL = min(1.0, self.opt.TRAIN.SAVE_CKPT_INTERVAL)\n\n if self.opt.TRAIN.TEST_INTERVAL > 0 and \\\n (update_iters + 1) % int(self.opt.TRAIN.TEST_INTERVAL * self.iters_per_loop) == 0:\n with torch.no_grad():\n self.net.module.set_bn_domain(self.bn_domain_map[self.target_name])\n accu = self.test()\n print('Test at (loop %d, iters: %d) with %s: %.4f.' % (self.loop,\n self.iters, self.opt.EVAL_METRIC, accu))\n\n if self.opt.TRAIN.SAVE_CKPT_INTERVAL > 0 and \\\n (update_iters + 1) % int(self.opt.TRAIN.SAVE_CKPT_INTERVAL * self.iters_per_loop) == 0:\n self.save_ckpt()\n\n update_iters += 1\n self.iters += 1\n\n # update stop condition\n if update_iters >= self.iters_per_loop:\n stop = True\n else:\n stop = False\n"
]
| [
[
"torch.no_grad"
]
]
|
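In the CAN solver row above, training stops when three statistics all fall below `TRAIN.STOP_THRESHOLDS`; the third is the fraction of target samples whose pseudo-label changed between consecutive clustering loops. A tiny sketch of that check with made-up path-to-label maps:

```python
# Two consecutive snapshots of the target pseudo-labels (paths are hypothetical).
prev_labels = {"a.jpg": 0, "b.jpg": 1, "c.jpg": 2}
curr_labels = {"a.jpg": 0, "b.jpg": 2, "c.jpg": 2}

changed = sum(prev_labels[p] != curr_labels[p] for p in curr_labels)
eval3 = changed / len(curr_labels)
print(eval3)  # 0.333... -> compared against opt.TRAIN.STOP_THRESHOLDS[2]
```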
robert-mijakovic/hoomd-blue | [
"f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a"
]
| [
"hoomd/md/pytest/test_dihedral.py"
]
| [
"# Copyright (c) 2009-2022 The Regents of the University of Michigan.\n# Part of HOOMD-blue, released under the BSD 3-Clause License.\n\nimport hoomd\nimport pytest\nimport numpy\n\n# Test parameters include the class, class keyword arguments, bond params,\n# force, and energy.\ndihedral_test_parameters = [\n (\n hoomd.md.dihedral.Harmonic,\n dict(),\n dict(k=3.0, d=-1, n=2, phi0=numpy.pi / 2),\n 0,\n 3,\n ),\n (\n hoomd.md.dihedral.Harmonic,\n dict(),\n dict(k=10.0, d=1, n=1, phi0=numpy.pi / 4),\n 5.0,\n 5.0,\n ),\n (\n hoomd.md.dihedral.Harmonic,\n dict(),\n dict(k=5.0, d=1, n=3, phi0=numpy.pi / 6),\n 1.9411,\n 0.0852,\n ),\n (\n hoomd.md.dihedral.OPLS,\n dict(),\n dict(k1=1.0, k2=1.5, k3=0.5, k4=0.75),\n -0.616117,\n 2.42678,\n ),\n (\n hoomd.md.dihedral.OPLS,\n dict(),\n dict(k1=0.5, k2=2.5, k3=1.5, k4=1.0),\n -0.732233,\n 2.89645,\n ),\n (\n hoomd.md.dihedral.OPLS,\n dict(),\n dict(k1=2.0, k2=1.0, k3=0.25, k4=3.5),\n -0.0277282,\n 5.74372,\n ),\n (\n hoomd.md.dihedral.Table,\n dict(width=2),\n dict(V=[0, 10], tau=[0, 1]),\n -0.375,\n 3.75,\n ),\n]\n\n\[email protected](scope='session')\ndef dihedral_snapshot_factory(device):\n\n def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20):\n phi_rad = phi_deg * (numpy.pi / 180)\n # the central particles are along the x-axis, so phi is determined from\n # the angle in the yz plane.\n\n snapshot = hoomd.Snapshot(device.communicator)\n N = 4\n if snapshot.communicator.rank == 0:\n box = [L, L, L, 0, 0, 0]\n snapshot.configuration.box = box\n snapshot.particles.N = N\n snapshot.particles.types = particle_types\n # shift particle positions slightly in z so MPI tests pass\n snapshot.particles.position[:] = [\n [\n 0.0,\n d * numpy.cos(phi_rad / 2),\n d * numpy.sin(phi_rad / 2) + 0.1,\n ],\n [0.0, 0.0, 0.1],\n [d, 0.0, 0.1],\n [\n d,\n d * numpy.cos(phi_rad / 2),\n -d * numpy.sin(phi_rad / 2) + 0.1,\n ],\n ]\n\n snapshot.dihedrals.N = 1\n snapshot.dihedrals.types = ['A-A-A-A']\n snapshot.dihedrals.typeid[0] = 0\n snapshot.dihedrals.group[0] = (0, 1, 2, 3)\n\n return snapshot\n\n return make_snapshot\n\n\[email protected]('dihedral_cls, dihedral_args, params, force, energy',\n dihedral_test_parameters)\ndef test_before_attaching(dihedral_cls, dihedral_args, params, force, energy):\n potential = dihedral_cls(**dihedral_args)\n potential.params['A-A-A-A'] = params\n for key in params:\n potential.params['A-A-A-A'][key] == pytest.approx(params[key])\n\n\[email protected]('dihedral_cls, dihedral_args, params, force, energy',\n dihedral_test_parameters)\ndef test_after_attaching(dihedral_snapshot_factory, simulation_factory,\n dihedral_cls, dihedral_args, params, force, energy):\n snapshot = dihedral_snapshot_factory(d=0.969, L=5)\n sim = simulation_factory(snapshot)\n\n potential = dihedral_cls(**dihedral_args)\n potential.params['A-A-A-A'] = params\n\n sim.operations.integrator = hoomd.md.Integrator(dt=0.005,\n forces=[potential])\n\n sim.run(0)\n for key in params:\n assert potential.params['A-A-A-A'][key] == pytest.approx(params[key])\n\n\[email protected]('dihedral_cls, dihedral_args, params, force, energy',\n dihedral_test_parameters)\ndef test_forces_and_energies(dihedral_snapshot_factory, simulation_factory,\n dihedral_cls, dihedral_args, params, force,\n energy):\n phi_deg = 45\n phi_rad = phi_deg * (numpy.pi / 180)\n snapshot = dihedral_snapshot_factory(phi_deg=phi_deg)\n sim = simulation_factory(snapshot)\n\n # the dihedral angle is in yz plane, thus no force along x axis\n force_array = force * numpy.asarray(\n [0, 
numpy.sin(-phi_rad / 2),\n numpy.cos(-phi_rad / 2)])\n potential = dihedral_cls(**dihedral_args)\n potential.params['A-A-A-A'] = params\n\n sim.operations.integrator = hoomd.md.Integrator(dt=0.005,\n forces=[potential])\n\n sim.run(0)\n\n sim_energies = potential.energies\n sim_forces = potential.forces\n if sim.device.communicator.rank == 0:\n assert sum(sim_energies) == pytest.approx(energy, rel=1e-2, abs=1e-5)\n numpy.testing.assert_allclose(sim_forces[0],\n force_array,\n rtol=1e-2,\n atol=1e-5)\n numpy.testing.assert_allclose(sim_forces[1],\n -1 * force_array,\n rtol=1e-2,\n atol=1e-5)\n numpy.testing.assert_allclose(sim_forces[2],\n [0, -1 * force_array[1], force_array[2]],\n rtol=1e-2,\n atol=1e-5)\n numpy.testing.assert_allclose(sim_forces[3],\n [0, force_array[1], -1 * force_array[2]],\n rtol=1e-2,\n atol=1e-5)\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.sin",
"numpy.cos"
]
]
|
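As a quick sanity check of the geometry built by dihedral_snapshot_factory above, the four particle positions can be run through a standard dihedral-angle formula in plain numpy. This is an illustrative sketch, not taken from the repository; it only verifies the magnitude of the angle, since dihedral sign conventions differ between codes.

    import numpy as np

    d, phi_deg = 1.0, 45
    phi = np.deg2rad(phi_deg)
    # Same positions as in the snapshot factory (the constant +0.1 z-shift cancels out).
    pos = np.array([
        [0.0, d * np.cos(phi / 2), d * np.sin(phi / 2) + 0.1],
        [0.0, 0.0, 0.1],
        [d, 0.0, 0.1],
        [d, d * np.cos(phi / 2), -d * np.sin(phi / 2) + 0.1],
    ])

    b1, b2, b3 = pos[1] - pos[0], pos[2] - pos[1], pos[3] - pos[2]
    n1, n2 = np.cross(b1, b2), np.cross(b2, b3)
    cos_angle = np.dot(n1, n2) / (np.linalg.norm(n1) * np.linalg.norm(n2))
    print(round(np.degrees(np.arccos(cos_angle)), 6))  # 45.0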
petakajaib/dia-kata | [
"5e3f90498352eb4fbf8d7b95e807d4d31df139a2"
]
| [
"ml_pipeline/vectorization/relative_entity_position_vector.py"
]
| [
"import numpy as np\nfrom .entity_position_vector import get_entity_position_vector\n\ndef get_relative_entity_position_vector(entry, enriched_collection):\n entity_position = get_entity_position_vector(entry, enriched_collection)\n\n sorted_map = {}\n for idx, elem in enumerate(sorted(set(entity_position))):\n sorted_map[elem] = idx\n\n arr_relative = []\n for elem in entity_position:\n relative = sorted_map[elem]\n if relative == 0:\n arr_relative.append(1)\n else:\n arr_relative.append(1/relative)\n\n return np.array(arr_relative)\n"
]
| [
[
"numpy.array"
]
]
|
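The relative-position weighting in get_relative_entity_position_vector is easy to trace by hand. The entity_position list below is a hypothetical stand-in for the output of get_entity_position_vector (not shown in this row); the ranking and 1/rank weighting mirror the function above.

    import numpy as np

    entity_position = [7, 2, 9, 2]  # made-up positions for illustration

    # Rank the distinct positions, then weight each occurrence by 1/rank (rank 0 -> 1).
    sorted_map = {elem: idx for idx, elem in enumerate(sorted(set(entity_position)))}
    arr_relative = [1 if sorted_map[e] == 0 else 1 / sorted_map[e] for e in entity_position]

    print(np.array(arr_relative))  # values: 1.0, 1.0, 0.5, 1.0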
strates-git/mathematics | [
"1271033400cece3e987cd6e0d7a8db82abeea769"
]
| [
"CDCmodels.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 30 18:17:48 2020\n\n@author: Shane Strate\n\"\"\"\nimport pandas as pd\nimport glob, os\nfrom sklearn import preprocessing\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, confusion_matrix\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ncolumns = ['model', 'target', 'target_week_end_date', 'location_name', 'point', 'lower_bound', 'upper_bound', 'cumulative']\naccuracyList = []\nconfusion = [] #Not Used Yet\n#File operations\npath = r\"CDC Data\"\nactuals_path = r\"CDC Data\\Actuals\\2020-10-22-actuals.csv\"\nmodel_files = glob.glob(os.path.join(path, \"*.csv\"))\nmodelData = pd.DataFrame()\nmodelData = pd.concat((pd.read_csv(file) for file in model_files))\nactualData = pd.read_csv(actuals_path)\n#initial cleanup\nmodelData['target_week_end_date'] = pd.to_datetime(modelData.target_week_end_date)\nmodelData.rename(columns={'quantile_0.025':'lower_bound', 'quantile_0.975':'upper_bound'}, inplace=True)\nactualData['End Week'] = pd.to_datetime(actualData['End Week']) \n#mapping to rename 'target' field values\nforecastWindows = {'1 wk ahead cum death': '1wk',\n '2 wk ahead cum death': '2wk',\n '3 wk ahead cum death': '3wk',\n '4 wk ahead cum death': '4wk'}\nmodelData = modelData.replace('US', 'National')\nprint('Initial Model Count: ', len(pd.unique(modelData['model'])))\n\n#Determine if observed data is within predicted 95% CI\ndef calcHit(row):\n if (row.cumulative >= row.lower_bound) & (row.cumulative <= row.upper_bound):\n val = 1\n else:\n val = 0\n iRange = row.upper_bound - row.lower_bound\n return val, iRange\n\n#Filter data to National and Cumulative death counts. Filter out extreme outlier models\nnationalData = modelData[(modelData['location_name']=='National')\n & (modelData['target'].str.contains('cum'))\n & (modelData['model'] != 'Imperial')\n & (modelData['model'] != 'JHU-APL')\n & (modelData['model'] != 'JHU-IDD')]\n\nmergedData = pd.merge(nationalData, actualData, left_on=['target_week_end_date', 'location_name'], right_on=['End Week', 'State'])\nprint('Interim Model Count:', len(pd.unique(mergedData['model'])))\nmergedData.dropna(axis=0, subset=['lower_bound', 'upper_bound'], inplace=True)\n\nmergedData = mergedData[columns]\nmergedData['missedCount'] = mergedData.apply(lambda row: row.point-row.cumulative, axis=1)\nmergedData[['hit', 'iRange']] = mergedData.apply(lambda row: pd.Series(list(calcHit(row))), axis=1)\n#Group Data into (Model, Target) pairs and calculate aggregate stats (not all stats used)\nnewData = mergedData.groupby(['model', 'target']).agg({'hit': ['sum', 'mean'], \n 'missedCount': 'mean',\n 'iRange' : 'mean',\n 'model' : 'count'})\nnewData.columns=['hitSum', 'hitMean', 'missedCountMean', 'iRangeMean', 'modelObsCount']\nnewData.reset_index()\n#remerged to get back 'cumulative' and 'point' fields\nmergedData = pd.merge(mergedData, newData, on=['model', 'target'])\n#Filter out small models and outliers\nmergedData = mergedData[(mergedData.iRange <= 1000000) & (mergedData.modelObsCount >= 5)]\n#not used\nmergedData['iRangeScaled'] = preprocessing.MinMaxScaler().fit_transform(np.array(mergedData['iRangeMean']).reshape(-1,1))\n\n#Slice data by models and targets to get accuracy measures for each (Model, Target) pair. 
\nfor model in pd.unique(mergedData['model']):\n for target in pd.unique(mergedData['target']):\n slicedData = mergedData[(mergedData['model']== model) & (mergedData['target']==target)]\n if not slicedData.empty:\n MSE = mean_squared_error(slicedData['cumulative'], slicedData['point'])\n RMSE = np.sqrt(MSE)\n MAE = mean_absolute_error(slicedData['cumulative'], slicedData['point'])\n #conf = confusion_matrix(slicedData['cumulative'], slicedData['point'])\n accuracyList.append([model, target, MSE, RMSE, MAE])\n #confusion.append([model, target, conf])\n\n#create a dataframe of accuracy measures, and merge with the rest of the data. \naccuracyDF = pd.DataFrame(accuracyList, columns=['model', 'target', 'MSE', 'RMSE', 'MAE'])\nmergedData = mergedData.merge(accuracyDF, on=['model', 'target'])\n#rename 'target' field values for readability\nmergedData.target = mergedData.target.map(forecastWindows)\n\n#cleanup and sort merged data prior to plotting\nplotData = mergedData.groupby(['model', 'target']).agg('max').reset_index()\nplotData = plotData.sort_values(by=['model', 'target'], ascending=[True, False])\nplotData = plotData.round(4)\n\n#slice on single (model, target) pair to demonstrate model vs. observation\ntimeSeriesData = mergedData[(mergedData['model']=='Ensemble') & (mergedData['target']=='4wk')][['target_week_end_date', 'point', 'cumulative']]\ntimeSeriesData.set_index('target_week_end_date', inplace=True)\n\n#plotting options below\n#plot = sns.lineplot(data=timeSeriesData, legend=True)\n#plot.set(xlim=('2020-06-06', '2020-10-17'))\n#plt.title('Ensemble - 4 Weeks Out')\n#plt.xticks(rotation=45, horizontalalignment='right')\n#plt.savefig('cumulativevEnd_date.pdf', dpi=300)\n#plot = sns.lineplot(x='target', y='MAE', data=plotData, hue='model', legend=False)\n#plt.savefig('MAEvTarget3.pdf', dpi=300)\nplot = sns.FacetGrid(plotData, col='target', hue='model')\nplot.map(sns.scatterplot, 'modelObsCount', \"hitMean\")\nplot.add_legend()\nplot.savefig('hitMeanvObsCount3.pdf', dpi=300)\n#plot = sns.FacetGrid(plotData, col='model', col_wrap=6, hue='model')\n#plot.map(sns.lineplot, 'target', \"MAE\")\n#plot.add_legend()\n#plot.savefig('MAEvTarget2.pdf', dpi=300)\n\nprint('Final Model Count:', len(pd.unique(mergedData['model'])))\n\n\nmergedData.to_csv(r'CDC_Data.csv')\n\n\n "
]
| [
[
"pandas.to_datetime",
"numpy.array",
"sklearn.metrics.mean_squared_error",
"pandas.merge",
"pandas.DataFrame",
"sklearn.metrics.mean_absolute_error",
"sklearn.preprocessing.MinMaxScaler",
"numpy.sqrt",
"pandas.unique",
"pandas.read_csv"
]
]
|
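The calcHit logic in the script above (a prediction "hits" when the observed cumulative count falls inside the model's 95% interval, and iRange is the interval width) can be sanity-checked on toy rows. The numbers below are invented for illustration, and the helper restates the original function in a compact form.

    import pandas as pd

    toy = pd.DataFrame({
        "point": [210000, 195000],
        "lower_bound": [200000, 190000],
        "upper_bound": [220000, 194000],
        "cumulative": [215000, 198000],
    })

    def calc_hit(row):
        hit = int(row.lower_bound <= row.cumulative <= row.upper_bound)
        return hit, row.upper_bound - row.lower_bound

    toy[["hit", "iRange"]] = toy.apply(lambda r: pd.Series(calc_hit(r)), axis=1)
    print(toy[["hit", "iRange"]])  # first row hits (hit=1), second misses (hit=0)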
amazon-research/cmaxent | [
"a18998b5b02a7d1ef96fceadbea6a9c1aae8cae1"
]
| [
"experiments/ace_bounds_experiment.py"
]
| [
"# ./experiments/ace_bounds_experiment.py\n\"\"\" Experiments testing our derived ACE bounds.\n\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved. \nSPDX-License-Identifier: Apache-2.0\n\"\"\"\nimport numpy as np\nfrom tqdm import trange\n\nfrom experiments.synthetic_data_generation import *\n\nfrom src.cmaxent.maxent import *\nfrom src.utils.plot_utils import *\nfrom src.utils.utils import *\n\n\ndef ace_bound_experiment():\n \"\"\" Function to perform the ACE bounds.\n \"\"\"\n ### Interventional distribution experiments:\n lower = []\n upper = []\n ace_one_lower = []\n ace_one_upper = []\n \n true = []\n maxent = []\n maxent_unknown_px = []\n \n ace_true_effect_one = []\n ace_maxent_effect_one = []\n ace_maxent_unknown_px_effect_one = []\n\n for i in trange(10):\n data = scm_polynomial_thresholding_second(seed=i, p_not_including=0.2)\n\n cause_idx = 3 # Cause we want to take the causal effect from.\n\n # Bounds:\n bounds_tmp = interventional_distribution_bounds(data, \n cause_idx=cause_idx, \n intervention_value=0)\n ace_bounds_one_tmp = ace_distribution_bounds(data=data, \n cause_idx=cause_idx, \n intervened_value=1,\n intervention_value=[1,0])\n\n # True interventional distributions and ACE:\n true_tmp = interventional_distribution(data, cause_idx=cause_idx, intervention_value=0)\n true_ace_effect_one = ace(data=data, \n cause_idx=cause_idx, \n intervened_value=1, \n intervention_values=[1,0])\n\n # Joint MAXENT, to infer the distribution of the causes\n joint = JointMAXENT(x=data[\"x\"],\n moment_dict=data[\"moment_constraints_joint\"],\n seed=i)\n joint.fit()\n\n # Conditional MAXENT with learned distribution over the causes\n conditional_unknown_px = ConditionalMAXENT(x = data[\"y\"] + data[\"x\"],\n moment_dict=data[\"moment_constraints_conditional\"],\n probability_x=joint.compute_exponential_family(joint.multipliers),\n seed=i)\n\n conditional_unknown_px.fit()\n maxent_probability_unkown_px = conditional_unknown_px.compute_exponential_family(conditional_unknown_px.multipliers)\n\n # Compute the interventional distribution, and ace for the maxent estimation\n maxent_probability_unkown_px = {\n \"px\":data[\"px\"],\n \"joint_synthetic\":maxent_probability_unkown_px\n }\n maxent_unknown_px_tmp = interventional_distribution(maxent_probability_unkown_px, cause_idx=cause_idx, intervention_value=0)\n maxent_unkown_px_ace_effect_one = ace(data=maxent_probability_unkown_px, \n cause_idx=cause_idx, \n intervened_value=1, \n intervention_values=[1,0])\n\n # Conditional MAXENT with given distribution over the causes\n conditional = ConditionalMAXENT(x = data[\"y\"] + data[\"x\"],\n moment_dict=data[\"moment_constraints_conditional\"],\n probability_x=data[\"px\"],\n seed=i)\n\n conditional.fit()\n maxent_probability = conditional.compute_exponential_family(conditional.multipliers)\n\n # Compute the interventional distribution, and ace for the maxent estimation\n maxent_dictionary = {\n \"px\":data[\"px\"],\n \"joint_synthetic\":maxent_probability\n }\n maxent_tmp = interventional_distribution(maxent_dictionary, cause_idx=cause_idx, intervention_value=0)\n maxent_ace_effect_one = ace(data=maxent_dictionary, \n cause_idx=cause_idx, \n intervened_value=1, \n intervention_values=[1,0])\n\n # Interventional and ACE bounds\n lower.append(np.array(bounds_tmp)[0,0]) # lower bound [0], for y=0\n upper.append(np.array(bounds_tmp)[1,0]) # upper bound [1], for y=0\n ace_one_lower.append(ace_bounds_one_tmp[0])\n ace_one_upper.append(ace_bounds_one_tmp[1])\n \n # Interventional 
distributions and ACE:\n true.append(true_tmp[0]) # true interventional distribution for y=0\n ace_true_effect_one.append(true_ace_effect_one)\n \n maxent.append(maxent_tmp[0]) # maxent interventional distribution for y=0\n ace_maxent_effect_one.append(maxent_ace_effect_one)\n\n maxent_unknown_px.append(maxent_unknown_px_tmp[0]) # maxent interventional distribution for y=0\n ace_maxent_unknown_px_effect_one.append(maxent_unkown_px_ace_effect_one)\n\n ace_plotter(np.clip(ace_one_lower, a_min=-1., a_max=1.), \n np.clip(ace_one_upper, a_min=-1., a_max=1.),\n ace_true_effect_one, \n ace_maxent_effect_one,\n ace_maxent_unknown_px_effect_one,\n {\"cause_idx\": cause_idx},\n \"./experiments/results/ace_polynomial.png\")\n\n\nif __name__ == \"__main__\":\n ace_bound_experiment()"
]
| [
[
"numpy.array",
"numpy.clip"
]
]
|
15thai/Gibb_ringing | [
"0e019d0da60d6da7c933a85d2407b655ada206bc"
]
| [
"unring_parallel.py"
]
| [
"import time\nimport numpy as np\nimport multiprocessing\nimport ctypes\nfrom contextlib import closing\nfrom unring import unring_2d\n\ndef unring_wrapper(vol):\n inp = np.frombuffer(shared_input)\n sh_input= inp.reshape(arr_shape)\n\n out = np.frombuffer(shared_output)\n sh_out= out.reshape(arr_shape)\n\n for k in range(arr_shape[2]): \n slice_data = sh_input[:,:,k,vol] \n result_slice = unring_2d(slice_data, nsh,minW,maxW)\n sh_out[:,:,k,vol]=result_slice.real \n\ndef init(shared_input_,shared_output_,arr_shape_,params_):\n #initialization of the global shared arrays\n global shared_input, shared_output,arr_shape,nsh,minW,maxW\n shared_input = shared_input_\n shared_output = shared_output_ \n arr_shape=arr_shape_ \n nsh=params_[0]\n minW=params_[1]\n maxW=params_[2]\n\n\ndef unring_parallel(arr, nsh=25, minW=1, maxW=5, out_dtype=None,num_threads=None):\n r\"\"\"Gibbs ringing correction for 4D DWI datasets.\n\n Parameters\n ----------\n arr : 4D array\n Array of data to be corrected. The dimensions are (X, Y, Z, N), where N\n are the diffusion gradient directions.\n nsh : int, optional\n Number of shifted images on one side. Default: 25. The total number of\n shifted images will be 2*nsh+1\n minW : int, optional\n Minimum neighborhood distance. Default:1\n maxW : int, optional\n Maximum neighborhood distance. Default:5\n out_dtype : str or dtype, optional\n The dtype for the output array. Default: output has the same dtype as\n the input.\n num_threads : int, optional\n The number of threads that the algorithm can create. Default: Use all cores.\n\n Returns\n -------\n corrected_arr : 4D array\n This is the corrected array of the same size as that of the input data,\n clipped to non-negative values\n\n References\n ----------\n .. [Kellner2015] Kellner E., Bibek D., Valerij K. G., Reisert M.(2015)\n Gibbs-ringing artifact removal based on local subvoxel-shifts.\n Magnetic resonance in Medicine 76(5), p1574-1581.\n https://doi.org/10.1002/mrm.26054\n \"\"\"\n start_time = time.time()\n\n # We perform the computations in float64. However we output \n # with the original data_type\n if out_dtype is None:\n out_dtype = arr.dtype\n\n if not arr.ndim == 4:\n print('Converting input array from 3D to 4D...')\n arr=arr.reshape([arr.shape[0],arr.shape[1],arr.shape[2],1])\n\n if num_threads is not None:\n threads_to_use = num_threads\n else:\n threads_to_use = multiprocessing.cpu_count()\n\n # Creating input and output shared arrays for multi-process processing\n #input array\n mp_arr= multiprocessing.RawArray(ctypes.c_double,arr.shape[0]*arr.shape[1]*arr.shape[2]*arr.shape[3])\n shared_arr = np.frombuffer(mp_arr)\n shared_input= shared_arr.reshape(arr.shape)\n shared_input[:] =arr[:]\n #output array\n mp_arr2= multiprocessing.RawArray(ctypes.c_double,arr.shape[0]*arr.shape[1]*arr.shape[2]*arr.shape[3])\n shared_arr2 = np.frombuffer(mp_arr2)\n shared_output= shared_arr2.reshape(arr.shape)\n #parameters\n params=[nsh,minW,maxW]\n\n #multi-processing\n with closing(multiprocessing.Pool(threads_to_use,initializer=init, initargs=(shared_arr,shared_arr2,arr.shape,params))) as p:\n p.map_async(unring_wrapper, [vol for vol in range(0, arr.shape[3])])\n p.join() \n \n\n print(\"Gibbs ringing correction took --- %s seconds ---\" % (time.time() - start_time))\n\n return shared_output.astype(out_dtype)\n\n"
]
| [
[
"numpy.frombuffer"
]
]
|
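A minimal usage sketch for unring_parallel, assuming the file above is importable as a module named unring_parallel and that a 4D DWI array is already in memory (random data stands in here). The keyword values simply restate the docstring defaults.

    import numpy as np
    from unring_parallel import unring_parallel

    if __name__ == "__main__":  # guard needed because a multiprocessing pool is spawned
        dwi = np.random.rand(64, 64, 30, 12)  # placeholder for a real (X, Y, Z, N) volume
        corrected = unring_parallel(dwi, nsh=25, minW=1, maxW=5, num_threads=4)
        print(corrected.shape)  # (64, 64, 30, 12), same dtype as the input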
mjziebarth/gmt-python | [
"0005152780528c7248369fb1446a9670383f2b19"
]
| [
"gmt/helpers/tempfile.py"
]
| [
"\"\"\"\nUtilities for dealing with temporary file management.\n\"\"\"\nimport os\nfrom tempfile import NamedTemporaryFile\n\nimport numpy as np\n\n\ndef unique_name():\n \"\"\"\n Generate a unique name with the prefix 'gmt-python-'.\n\n Useful for generating unique names for figures (otherwise GMT will plot\n everything on the same figure instead of creating a new one).\n\n Returns\n -------\n name : str\n A unique name generated by ``tempfile.NamedTemporaryFile``\n\n \"\"\"\n # Use the tempfile module to generate a unique name.\n with NamedTemporaryFile(prefix=\"gmt-python-\") as tmpfile:\n return os.path.split(tmpfile.name)[-1]\n\n\nclass GMTTempFile:\n \"\"\"\n Context manager for creating closed temporary files.\n\n This class does not return a file-like object. So, you can't do\n ``for line in GMTTempFile()``, for example, or pass it to things that\n need file objects.\n\n Parameters\n ----------\n prefix : str\n The temporary file name begins with the prefix.\n suffix : str\n The temporary file name ends with the suffix.\n\n Examples\n --------\n >>> import numpy as np\n >>> with GMTTempFile() as tmpfile:\n ... # write data to temporary file\n ... x = y = z = np.arange(0, 3, 1)\n ... np.savetxt(tmpfile.name, (x, y, z), fmt='%.1f')\n ... lines = tmpfile.read()\n ... print(lines)\n ... nx, ny, nz = tmpfile.loadtxt(unpack=True, dtype=float)\n ... print(nx, ny, nz)\n 0.0 1.0 2.0\n 0.0 1.0 2.0\n 0.0 1.0 2.0\n <BLANKLINE>\n [0. 0. 0.] [1. 1. 1.] [2. 2. 2.]\n \"\"\"\n\n def __init__(self, prefix=\"gmt-python-\", suffix=\".txt\"):\n args = dict(prefix=prefix, suffix=suffix, delete=False)\n with NamedTemporaryFile(**args) as tmpfile:\n self.name = tmpfile.name\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n if os.path.exists(self.name):\n os.remove(self.name)\n\n def read(self, keep_tabs=False):\n \"\"\"\n Read the entire contents of the file as a Unicode string.\n\n Parameters\n ----------\n keep_tabs : bool\n If False, replace the tabs that GMT uses with spaces.\n\n Returns\n -------\n content : str\n Content of the temporary file as a Unicode string.\n \"\"\"\n with open(self.name) as tmpfile:\n content = tmpfile.read()\n if not keep_tabs:\n content = content.replace(\"\\t\", \" \")\n return content\n\n def loadtxt(self, **kwargs):\n \"\"\"\n Load data from the temporary file using numpy.loadtxt.\n\n Parameters\n ----------\n kwargs : dict\n Any keyword arguments that can be passed to numpy.loadtxt.\n\n Returns\n -------\n ndarray\n Data read from the text file.\n\n \"\"\"\n return np.loadtxt(self.name, **kwargs)\n"
]
| [
[
"numpy.loadtxt"
]
]
|
Peterror/CPS-2018 | [
"e63c17032e9af0a0cd2e0c30a9b31a8bc3018888"
]
| [
"Transformata/FFT.py"
]
| [
"import numpy as np\n\n\nclass FFT(object):\n def __init__(self, sampling_frequency, samples_power_of_two):\n \"\"\"\n samples - 2^samples_power_of_two\n sampling_frequency must be equal to the number of samples\n \"\"\"\n try:\n self._number_of_samples = 1 << samples_power_of_two\n except ValueError:\n raise ValueError(\"samples_power_of_two must be an int greater than 0\")\n if self._number_of_samples != int(sampling_frequency):\n raise ValueError(\"sampling_frequency must be equal to the number of samples\")\n self._samples_power_of_two = samples_power_of_two\n self._sampling_frequency = int(sampling_frequency)\n self._frequency_bin_resolution = self._sampling_frequency / self._number_of_samples\n self.frequency_bin_x_axis = [self._frequency_bin_resolution * n\n for n in range(int(self._sampling_frequency/2))] # Nyquist limit\n self._W_lookup_table = self._generate_W_lookup_table()\n self._stage0_pair_array = self._generate_pair_array()\n\n def generate_frequency_bins(self, wave):\n values = []\n for i in range(0, self._number_of_samples, 2): # values init (stage 0)\n index1 = self._stage0_pair_array[i]\n index2 = self._stage0_pair_array[i+1]\n values += [wave[index1] + wave[index2]]\n values += [wave[index1] - wave[index2]]\n\n for stage in range(1, self._samples_power_of_two-1, 1):\n temp_values = []\n for row in range(0, self._number_of_samples >> (stage+1), 1):\n stage_row_offset = row * (2 << stage)\n for i in range(1 << stage):\n a = values[stage_row_offset + i]\n xa = values[stage_row_offset + (1 << stage) + i]\n temp_values += [a + self._get_W(i, stage) * xa]\n for i in range(1 << stage):\n a = values[stage_row_offset + i]\n xa = values[stage_row_offset + (1 << stage) + i]\n temp_values += [a - self._get_W(i, stage) * xa]\n values = temp_values\n\n # we need only half of the results (Nyquist law)\n stage = self._samples_power_of_two-1 # last stage\n temp_values = []\n row = 0\n stage_row_offset = row * (2 << stage)\n for i in range(1 << stage):\n a = values[stage_row_offset + i]\n xa = values[stage_row_offset + (1 << stage) + i]\n temp_values += [a + self._get_W(i, stage) * xa]\n values = temp_values\n\n #values = np.abs(values)\n return values\n\n def _get_W(self, power, stage):\n return self._W_lookup_table[stage][power]\n\n def _generate_W_lookup_table(self): # generowanie tej listy da się mega przyspieszyć + zmniejszyć wymagane zasoby\n W_array = [\n [\n self._calculate_W(power, index)\n for power in range(index >> 1)\n ] for index in [2 ** x for x in range(1, 10 + 1, 1)]\n ] # [[W], [W, ...], ...]\n return W_array\n\n def _calculate_W(self, power, index):\n e_powers = 2 * np.pi * power / index\n return np.cos(e_powers) - np.complex(0, 1) * np.sin(e_powers)\n\n def _generate_pair_array(self):\n pair_array = []\n for i in range(self._number_of_samples):\n pair_array += [[self._reverse_int(i)]]\n return pair_array # [[index1, index2], ...]\n\n def _reverse_int(self, num):\n result = 0\n for i in range(self._samples_power_of_two):\n result = (result << 1) + (num & 1)\n num >>= 1\n return result\n"
]
| [
[
"numpy.complex",
"numpy.sin",
"numpy.cos"
]
]
|
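The lookup table built by _generate_W_lookup_table stores the usual FFT twiddle factors W_N^k = exp(-2*pi*i*k/N), written out as cos - i*sin in _calculate_W. A quick check against numpy's complex exponential (note that np.complex, used in the class, is just an alias of the builtin complex and was removed in NumPy 1.24, so complex(0, 1) is the portable spelling):

    import numpy as np

    k, N = 3, 16
    w_manual = np.cos(2 * np.pi * k / N) - complex(0, 1) * np.sin(2 * np.pi * k / N)
    w_exp = np.exp(-2j * np.pi * k / N)
    print(np.isclose(w_manual, w_exp))  # True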
guy-amir/swt | [
"9f7f1de45318d2cfb1903f777a7fbb965cd845ac"
]
| [
"src/utils.py"
]
| [
"\"\"\"\nsome utiliy functions for data processing and visualization.\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import ConnectionPatch\nimport numpy as np\nimport torch\n\n# class name for CIFAR-10 dataset\ncifar10_class_name = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', \n 'frog', 'horse', 'ship', 'truck']\n\ndef show_data(dataset, name):\n \"\"\"\n show some image from the dataset.\n args:\n dataset: dataset to show\n name: name of the dataset\n \"\"\"\n if name == 'mnist':\n num_test = len(dataset)\n num_shown = 100\n cols = 10\n rows = int(num_shown/cols)\n indices = np.random.choice(list(range(num_test)), num_test)\n plt.figure()\n for i in range(num_shown):\n plt.subplot(rows, cols, i+1)\n plt.imshow(dataset[indices[i]][0].squeeze().numpy())\n plt.axis('off')\n plt.title(str(dataset[indices[i]][1].data.item()))\n plt.gcf().tight_layout()\n plt.show()\n else:\n raise NotImplementedError\n return\n\ndef get_sample(dataset, sample_num, name):\n # random seed\n #np.random.seed(2019)\n \"\"\"\n get a batch of random images from the dataset\n args:\n dataset: Pytorch dataset object to use\n sample_num: number of samples to draw\n name: name of the dataset\n return:\n selected sample tensor\n \"\"\"\n # get random indices\n indices = np.random.choice(list(range(len(dataset))), sample_num)\n if name in ['mnist', 'cifar10']:\n # for MNIST and CIFAR-10 dataset\n sample = [dataset[indices[i]][0].unsqueeze(0) for i in range(len(indices))]\n # concatenate the samples as one tensor\n sample = torch.cat(sample, dim = 0)\n else:\n raise ValueError \n return sample\n\ndef revert_preprocessing(data_tensor, name):\n \"\"\"\n unnormalize the data tensor by multiplying the standard deviation and adding the mean.\n args:\n data_tensor: input data tensor\n name: name of the dataset\n return:\n data_tensor: unnormalized data tensor\n \"\"\"\n if name == 'mnist':\n data_tensor = data_tensor*0.3081 + 0.1307\n elif name == 'cifar10':\n data_tensor[:,0,:,:] = data_tensor[:,0,:,:]*0.2023 + 0.4914 \n data_tensor[:,1,:,:] = data_tensor[:,1,:,:]*0.1994 + 0.4822 \n data_tensor[:,2,:,:] = data_tensor[:,2,:,:]*0.2010 + 0.4465 \n else:\n raise NotImplementedError\n return data_tensor\n\ndef normalize(gradient, name):\n \"\"\"\n normalize the gradient to a 0 to 1 range for display\n args:\n gradient: input gradent tensor\n name: name of the dataset\n return:\n gradient: normalized gradient tensor\n \"\"\"\n if name == 'mnist':\n pass\n elif name == 'cifar10':\n # take the maximum gradient from the 3 channels\n gradient = (gradient.max(dim=1)[0]).unsqueeze(dim=1)\n # get the maximum gradient\n max_gradient = torch.max(gradient.view(len(gradient), -1), dim=1)[0]\n max_gradient = max_gradient.view(len(gradient), 1, 1, 1)\n min_gradient = torch.min(gradient.view(len(gradient), -1), dim=1)[0]\n min_gradient = min_gradient.view(len(gradient), 1, 1, 1) \n # do normalization\n gradient = (gradient - min_gradient)/(max_gradient - min_gradient) \n return gradient\n\ndef trace(record):\n \"\"\"\n get the the path that is very likely to be visited by the input images. 
For each splitting node along the\n path the probability of arriving at it is also computed.\n args:\n record: record of the routing probabilities of the splitting nodes\n return:\n path: the very likely computational path\n \"\"\"\n path = []\n # probability of arriving at the root node is just 1\n prob = 1\n # the starting index \n node_idx = 1\n while node_idx < len(record):\n path.append((node_idx, prob))\n # find the children node with larger visiting probability\n if record[node_idx] >= 0.5:\n prob *= record[node_idx]\n # go to left sub-tree\n node_idx = node_idx*2\n else:\n prob *= 1 - record[node_idx]\n # go to right sub-tree\n node_idx = node_idx*2 + 1 \n return path\n\ndef get_paths(dataset, model, tree_idx, name):\n \"\"\"\n compute the computational paths for the input tensors\n args:\n dataset: Pytorch dataset object\n model: pre-trained deep neural decision forest for visualizing\n tree_idx: which tree to use if there are multiple trees in the forest. \n name: name of the dataset\n return:\n sample: randomly drawn sample\n paths: computational paths for the samples\n class_pred: model predictions for the samples\n \"\"\"\n sample_num = 5\n # get some random input images\n sample = get_sample(dataset, sample_num, name) \n # forward pass to get the routing probability\n pred, cache, _ = model(sample.cuda(), save_flag = True)\n class_pred = pred.max(dim=1)[1]\n # for now use the first tree by cache[0]\n # please refer to ndf.py if you are interested in how the forward pass is implemented\n decision = cache[0]['decision'].data.cpu().numpy()\n paths = []\n # trace the computational path for every input image\n for sample_idx in range(len(decision)):\n paths.append(trace(decision[sample_idx, :]))\n return sample, paths, class_pred\n\ndef get_node_saliency_map(dataset, model, tree_idx, node_idx, name):\n \"\"\"\n get decision saliency maps for one specific splitting node\n args:\n dataset: Pytorch dataset object\n model: pre-trained neural decision forest to visualize\n tree_idx: index of the tree\n node_idx: index of the splitting node\n name: name of the dataset\n return:\n gradient: computed decision saliency maps\n \"\"\"\n # pick some samples from the dataset\n sample_num = 5\n sample = get_sample(dataset, sample_num, name)\n # For now only GPU code is supported\n sample = sample.cuda()\n # enable the gradient computation (the input tensor will requires gradient computation in the backward computational graph) \n sample.requires_grad = True\n # get the feature vectors for the drawn samples\n feats = model.feature_layer(sample)\n # using_idx gives the indices of the neurons in the last FC layer that are used to compute routing probabilities \n using_idx = model.forest.trees[tree_idx].using_idx[node_idx + 1]\n# for sample_idx in range(len(feats)):\n# feats[sample_idx, using_idx].backward(retain_graph=True)\n # equivalent to the above commented one\n feats[:, using_idx].sum(dim = 0).backward()\n # get the gradient data\n gradient = sample.grad.data\n # get the magnitude\n gradient = torch.abs(gradient)\n # normalize the gradient for visualizing\n gradient = normalize(gradient, name)\n # plot the input data and their corresponding decison saliency maps\n plt.figure()\n # unnormalize the images for display\n sample = revert_preprocessing(sample, name)\n # plot for every input image\n for sample_idx in range(sample_num):\n plt.subplot(2, sample_num, sample_idx + 1)\n sample_to_show = sample[sample_idx].squeeze().data.cpu().numpy()\n if name == 'cifar10':\n # re-order the channels\n 
sample_to_show = sample_to_show.transpose((1,2,0))\n plt.imshow(sample_to_show)\n elif name == 'mnist':\n plt.imshow(sample_to_show, cmap='gray')\n else:\n raise NotImplementedError\n plt.subplot(2, sample_num, sample_idx + 1 + sample_num)\n plt.imshow(gradient[sample_idx].squeeze().cpu().numpy())\n plt.axis('off')\n plt.show()\n return gradient\n\ndef get_map(model, sample, node_idx, tree_idx, name):\n\t\"\"\"\n\thelper function for computing the saliency map for a specified sample and splitting node\n\targs:\n\t model: pre-trained neural decison forest to visualize\n\t sample: input image tensors\n\t node_idx: index of the splitting node\n\t tree_idx: index of the decison tree\n\t name:name of the dataset\n\treturn:\n\t saliency_map: computed decision saliency map\n\t\"\"\"\n\t# move to GPU\n\tsample = sample.unsqueeze(dim=0).cuda()\n\t# enable gradient computation for the input tensor\n\tsample.requires_grad = True\n\t# get feature vectors of the input samples\n\tfeat = model.feature_layer(sample)\n\t# using_idx gives the indices of the neurons in the last FC layer that are used to compute routing probabilities \n\tusing_idx = model.forest.trees[tree_idx].using_idx[node_idx]\n\t# compute gradient by a backward pass\n\tfeat[:, using_idx].backward()\n\t# get the gradient data\n\tgradient = sample.grad.data\n\t# normalize the gradient\n\tgradient = normalize(torch.abs(gradient), name)\n\tsaliency_map = gradient.squeeze().cpu().numpy()\n\treturn saliency_map\n\ndef get_path_saliency(samples, paths, class_pred, model, tree_idx, name, orientation = 'horizontal'):\n \"\"\" \n show the saliency maps for the input samples with their pre-computed computational paths \n args:\n samples: input image tensor\n paths: pre-computed computational paths for the inputs\n class_pred: model predictons for the inputs\n model: pre-trained neural decison forest\n tree_idx: index of the decision tree\n name: name of the dataset\n orientation: layout of the figure\n \"\"\"\n #plt.ioff()\n # plotting parameters\n plt.figure(figsize=(20,5))\n plt.rcParams.update({'font.size': 12})\n # number of input samples\n num_samples = len(samples)\n # length of the computational path\n path_length = len(paths[0])\n # iterate for every input sample\n for sample_idx in range(num_samples):\n sample = samples[sample_idx]\n # plot the sample\n plt.subplot(num_samples, path_length + 1, sample_idx*(path_length + 1) + 1)\n # unnormalize the input\n sample_to_plot = revert_preprocessing(sample.unsqueeze(dim=0), name)\n if name == 'mnist':\n plt.imshow(sample_to_plot.squeeze().cpu().numpy(), cmap='gray')\n pred_class_name = str(int(class_pred[sample_idx]))\n else:\n plt.imshow(sample_to_plot.squeeze().cpu().numpy().transpose((1,2,0))) \n pred_class_name = cifar10_class_name[int(class_pred[sample_idx])]\n plt.axis('off') \n plt.title('Pred:{:s}'.format(pred_class_name))\n # computational path for this sample\n path = paths[sample_idx]\n for node_idx in range(path_length):\n # compute and plot decison saliency map for each splitting node along the path\n node = path[node_idx][0]\n # probability of arriving at this node\n prob = path[node_idx][1] \n # compute the saliency map\n saliency_map = get_map(model, sample, node, tree_idx, name)\n if orientation == 'horizontal':\n sub_plot_idx = sample_idx*(path_length + 1) + node_idx + 2\n plt.subplot(num_samples, path_length + 1, sub_plot_idx)\n elif orientation == 'vertical':\n raise NotImplementedError \n else:\n raise NotImplementedError\n plt.imshow(saliency_map)\n plt.title('(N{:d}, 
P{:.2f})'.format(node, prob))\n plt.axis('off')\n # draw some arrows \n for arrow_idx in range(num_samples*(path_length + 1) - 1):\n if (arrow_idx+1) % (path_length+1) == 0 and arrow_idx != 0:\n continue\n ax1 = plt.subplot(num_samples, path_length + 1, arrow_idx + 1)\n ax2 = plt.subplot(num_samples, path_length + 1, arrow_idx + 2)\n arrow = ConnectionPatch(xyA=[1.1,0.5], xyB=[-0.1, 0.5], coordsA='axes fraction', coordsB='axes fraction',\n axesA=ax1, axesB=ax2, arrowstyle=\"fancy\")\n ax1.add_artist(arrow)\n left = 0 # the left side of the subplots of the figure\n right = 1 # the right side of the subplots of the figure\n bottom = 0.01 # the bottom of the subplots of the figure\n top = 0.95 # the top of the subplots of the figure\n wspace = 0.0 # the amount of width reserved for space between subplots,\n # expressed as a fraction of the average axis width\n hspace = 0.4 # the amount of height reserved for space between subplots,\n # expressed as a fraction of the average axis height \n plt.subplots_adjust(left, bottom, right, top, wspace, hspace)\n plt.show()\n # save figure if you need\n #plt.savefig('saved_fig.png',dpi=1200)\n return\n"
]
| [
[
"matplotlib.pyplot.rcParams.update",
"torch.cat",
"matplotlib.pyplot.figure",
"torch.abs",
"matplotlib.patches.ConnectionPatch",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot"
]
]
|
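The trace helper above is pure Python and can be exercised with a made-up routing record: node i's children are 2*i (left) and 2*i + 1 (right), index 0 is unused, and each stored value is the probability of taking the left branch. The import assumes the module above is on the path as utils.

    from utils import trace  # src/utils.py shown above

    record = [0.0, 0.9, 0.2, 0.0, 0.0, 0.6, 0.0]  # hypothetical routing probabilities
    print(trace(record))  # [(1, 1), (2, 0.9), (5, ~0.72)]: left at node 1, right at node 2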
vinsis/attention-seeking-in-pytorch | [
"fe9cab2cd9def3efb1837d70cd0179b0fb04b2c0"
]
| [
"code/content_based_concat_attention.py"
]
| [
"import torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nimport torch.nn.functional as F\n\nfrom loader import loader, sequence_length\n\n# sequence_length is equal to 10\nencoder_input_size = 32\nencoder_hidden_size = 32\n\ndecoder_input_size = 32\ndecoder_output_size = 32*2\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\ncriterion = nn.CrossEntropyLoss()\n\nembedding = nn.Embedding(num_embeddings=sequence_length, embedding_dim=encoder_input_size)\nencoder = nn.LSTM(encoder_input_size, encoder_hidden_size, bidirectional=True, batch_first=True)\n\ndecoder =nn.LSTM(decoder_input_size, decoder_output_size, batch_first=True)\ndecoder_output_to_sequence_length = nn.Linear(decoder_output_size, sequence_length)\n\n# Based on technique mentioned in right column of page 3 of http://aclweb.org/anthology/D15-1166\n# We only use encoder_h and replace encoder_c with weighted sum of encoder_outputs\ncontent_based_attention_concat = nn.Linear(encoder_hidden_size*4, encoder_hidden_size*2)\nreference_vector = torch.Tensor(1,1,encoder_hidden_size*2).uniform_().requires_grad_(True)\n\ntrainable_parameters = [{'params': net.parameters()} for net in [embedding, encoder, decoder, decoder_output_to_sequence_length, content_based_attention_concat]]\ntrainable_parameters += [{'params': reference_vector}]\n\noptimizer = Adam(trainable_parameters, lr=0.001)\n\nembedding.to(device)\nencoder.to(device)\ndecoder.to(device)\ndecoder_output_to_sequence_length.to(device)\ncontent_based_attention_concat.to(device)\n\ndecoder_input = torch.zeros(1, 1, decoder_input_size)\n\ndef train():\n correct, total = 0, 0\n for index, random_sequence in enumerate(loader):\n random_sequence = random_sequence.to(device)\n correct_sequence = torch.sort(random_sequence)[1]\n correct_sequence = correct_sequence.long().to(device).squeeze(0)\n\n random_sequence_embedding = embedding(random_sequence)\n encoder_outputs, (encoder_h, encoder_c) = encoder(random_sequence_embedding)\n\n decoder_outputs = []\n decoder_input_h = encoder_h.view(1,1,-1)\n for time in range(sequence_length):\n # attention starts here\n decoder_input_h_repeated = decoder_input_h.repeat(1,sequence_length,1)\n concatenated_tensor = torch.cat([decoder_input_h_repeated, encoder_outputs], dim=2)\n transformed_concatenated_tensor = content_based_attention_concat(concatenated_tensor)\n similarity_with_reference_vector = torch.bmm(reference_vector, transformed_concatenated_tensor.transpose(1,2))\n encoder_output_weights = F.softmax(similarity_with_reference_vector, dim=2)\n weighted_sum_of_encoder_outputs = torch.bmm(encoder_output_weights, encoder_outputs)\n # attention ends here\n decoder_output_at_time_t, (decoder_h, decoder_c) = decoder(decoder_input, (decoder_input_h, weighted_sum_of_encoder_outputs))\n decoder_outputs.append(decoder_output_at_time_t)\n decoder_input_h = decoder_h\n\n decoder_outputs = torch.cat(decoder_outputs, 1)\n softmax_input = decoder_output_to_sequence_length(decoder_outputs).squeeze(0)\n\n loss = criterion(softmax_input, correct_sequence)\n\n # calculating accuracy\n accurate = (softmax_input.max(1)[1] == correct_sequence).sum()\n correct += accurate\n total += sequence_length\n if index%100 == 0:\n print('Loss at iteration {}: {:.8f}'.format(index, loss.item()))\n print('Accuracy in last 100 iterations: {}/{}'.format(correct, total))\n correct, total = 0, 0\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # break\n\ndef test():\n with torch.no_grad():\n for index, random_sequence in enumerate(loader):\n 
random_sequence = random_sequence.to(device)\n correct_sequence = torch.sort(random_sequence)[1]\n correct_sequence = correct_sequence.long().to(device).squeeze(0)\n\n random_sequence_embedding = embedding(random_sequence)\n encoder_outputs, (encoder_h, encoder_c) = encoder(random_sequence_embedding)\n\n decoder_outputs = []\n decoder_input_h = encoder_h.view(1,1,-1)\n attentions = []\n for time in range(sequence_length):\n # attention starts here\n decoder_input_h_repeated = decoder_input_h.repeat(1,sequence_length,1)\n concatenated_tensor = torch.cat([decoder_input_h_repeated, encoder_outputs], dim=2)\n transformed_concatenated_tensor = content_based_attention_concat(concatenated_tensor)\n similarity_with_reference_vector = torch.bmm(reference_vector, transformed_concatenated_tensor.transpose(1,2))\n encoder_output_weights = F.softmax(similarity_with_reference_vector, dim=2)\n weighted_sum_of_encoder_outputs = torch.bmm(encoder_output_weights, encoder_outputs)\n attentions.append(encoder_output_weights)\n # attention ends here\n decoder_output_at_time_t, (decoder_h, decoder_c) = decoder(decoder_input, (decoder_input_h, weighted_sum_of_encoder_outputs))\n decoder_outputs.append(decoder_output_at_time_t)\n decoder_input_h = decoder_h\n\n decoder_outputs = torch.cat(decoder_outputs, 1)\n softmax_input = decoder_output_to_sequence_length(decoder_outputs).squeeze(0)\n\n loss = criterion(softmax_input, correct_sequence)\n accurate = (softmax_input.max(1)[1] == correct_sequence).sum()\n return random_sequence, correct_sequence, softmax_input, accurate, attentions\n\nif __name__ == '__main__':\n train()\n"
]
| [
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.CrossEntropyLoss",
"torch.optim.Adam",
"torch.no_grad",
"torch.bmm",
"torch.sort",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"torch.Tensor",
"torch.nn.Embedding"
]
]
|
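The attention step repeated inside train() and test() above can be isolated into a few lines. The sizes match the constants at the top of the script, but the tensors here are random placeholders rather than learned parameters.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    hidden, seq_len = 32, 10                               # encoder_hidden_size, sequence_length
    encoder_outputs = torch.randn(1, seq_len, 2 * hidden)  # bidirectional encoder outputs
    decoder_h = torch.randn(1, 1, 2 * hidden)              # current decoder hidden state
    attn = nn.Linear(4 * hidden, 2 * hidden)               # plays the role of content_based_attention_concat
    reference = torch.randn(1, 1, 2 * hidden)              # plays the role of reference_vector

    concat = torch.cat([decoder_h.repeat(1, seq_len, 1), encoder_outputs], dim=2)
    scores = torch.bmm(reference, attn(concat).transpose(1, 2))  # (1, 1, seq_len)
    weights = F.softmax(scores, dim=2)
    context = torch.bmm(weights, encoder_outputs)                # weighted sum, (1, 1, 2*hidden)
    print(weights.shape, context.shape)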
tillbiskup/cwepr | [
"fb5df019238b63f36c7cfdf2d88264e5d18078be"
]
| [
"cwepr/plotting.py"
]
| [
"\"\"\"\nPlotting: Graphical representations of data extracted from datasets.\n\nGraphical representations of cw-EPR data are an indispensable aspect of data\nanalysis. To facilitate this, a series of different plotters are available.\n\nPlotting relies on `matplotlib <https://matplotlib.org/>`_, and mainly its\nobject-oriented interface should be used for the actual plotting.\n\nGenerally, two types of plotters can be distinguished:\n\n* Plotters for handling single datasets\n\n Shall be derived from :class:`aspecd.plotting.SinglePlotter`.\n\n* Plotters for handling multiple datasets\n\n Shall be derived from :class:`aspecd.plotting.MultiPlotter`.\n\nIn the first case, the plot is usually handled using the :meth:`plot` method\nof the respective :obj:`cwepr.dataset.Dataset` object. Additionally,\nthose plotters always only operate on the data of a single dataset, and the\nplot can easily be attached as a representation to the respective dataset.\nPlotters handling single datasets should always inherit from the\n:class:`aspecd.plotting.SinglePlotter` class.\n\nIn the second case, the plot is handled using the :meth:`plot` method of the\n:obj:`aspecd.plotting.Plotter` object, and the datasets are stored as a list\nwithin the plotter. As these plots span several datasets, there is no easy\nconnection between a single dataset and such a plot in sense of\nrepresentations stored in datasets. Plotters handling multiple datasets should\nalways inherit from the :class:`aspecd.plotting.MultiPlotter` class.\n\nIn a certain sense, there is a third type of plotters:\n\n* Plotters consisting of more than one axes\n\n Shall be derived from :class:`aspecd.plotting.CompositePlotter`.\n\nHowever, practically mostly these composite plotters will behave like\nplotters handling either single or multiple datasets. Generally,\nthese composite plotters will use other types of plotters to perform the\nactual plot tasks. This modular approach allows for great flexibility.\n\n\nA note on array dimensions and axes\n===================================\n\nSomething often quite confusing is the apparent inconsistency between the\norder of array dimensions and the order of axes. While we are used to assign\naxes in the order *x*, *y*, *z*, and assuming *x* to be horizontal,\n*y* vertical (and *z* sticking out of the paper plane), arrays are usually\nindexed row-first, column-second. That means, however, that if you simply\nplot a 2D array in axes, your *first* dimension is along the *y* axis,\nthe *second* dimension along the *x* axis.\n\nTherefore, as the axes of your datasets will always correspond to the array\ndimensions of your data, in case of 2D plots you will need to *either* use\nthe information contained in the second axis object for your *x* axis label,\nand the information from the first axis object for your *y* axis label,\n*or* to transpose the data array.\n\nAnother aspect to have in mind is the position of the origin. Usually,\nin a Cartesian coordinate system, convention is to have the origin (0,\n0) in the *lower left* of the axes (for the positive quadrant). However,\nfor images, convention is to have the corresponding (0, 0) pixel located in\nthe *upper left* edge of your image. 
Therefore, those plotting methods\ndealing with images will usually *revert* the direction of your *y* axis.\nMost probably, eventually you will have to check with real data and ensure\nthe plotters to plot data and axes in a consistent fashion.\n\n\nTypes of concrete plotters\n==========================\n\nThe cwepr package comes with a series of concrete plotters included ready\nto be used, thanks to inheriting from the underlying ASpecD framework. As\nstated above, plotters can generally be divided into two types: plotters\noperating on single datasets and plotters combining the data of multiple\ndatasets into a single figure.\n\nAdditionally, plotters can be categorised with regard to creating figures\nconsisting of a single or multiple axes. The latter are plotters inheriting\nfrom the :class:`aspecd.plotting.CompositePlotter` class. The latter can be\nthought of as templates for the other plotters to operate on, *i.e.* they\nprovide the axes for other plotters to display their results.\n\n\nConcrete plotters for single datasets\n-------------------------------------\n\n* :class:`cwepr.plotting.SinglePlotter1D`\n\n Basic line plots for single datasets, allowing to plot a series of\n line-type plots, including (semi)log plots\n\n* :class:`cwepr.plotting.SinglePlotter2D`\n\n Basic 2D plots for single datasets, allowing to plot a series of 2D plots,\n including contour plots and image-type display\n\n* :class:`aspecd.plotting.SinglePlotter2DStacked`\n\n Stacked plots of 2D data, converting a 2D display into a series of 1D line\n plots stacked on top of each other.\n\n* :class:`cwepr.plotting.SingleCompositePlotter`\n\n Composite plotter for single datasets, allowing to plot different views of\n one and the same datasets by using existing plotters for single datasets.\n\n* :class:`cwepr.plotting.GoniometerSweepPlotter`\n\n Composite plotter for single datasets representing goniometer sweeps,\n *i.e.* angular-dependent cw-EPR measurements.\n\n\nConcrete plotters for multiple datasets\n---------------------------------------\n\n* :class:`cwepr.plotting.MultiPlotter1D`\n\n Basic line plots for multiple datasets, allowing to plot a series of\n line-type plots, including (semi)log plots\n\n* :class:`cwepr.plotting.MultiPlotter1DStacked`\n\n Stacked line plots for multiple datasets, allowing to plot a series of\n line-type plots, including (semi)log plots\n\n* :class:`cwepr.plotting.PowerSweepAnalysisPlotter`\n\n Line plot for multiple datasets particularly for power sweep analysis\n (power saturation analysis) with a second *x* axis on top showing the\n microwave power.\n\n\nA note for developers\n=====================\n\nAs each kind of spectroscopy comes with own needs for extensions, there is a\nclass :class:`PlotterExtensions` that can be used as a mixin class for other\nplotters to provide additional functionality for all plotters.\n\nMake sure when implementing functionality here that it really works with all\ntypes of plotters, *i.e.* both SinglePlotters and MultiPlotters. 
This is\nparticularly relevant if you need to get information from dataset(s),\nas a SinglePlotter will have an attribute ``dataset``, while a MultiPlotter\nwill have an attribute ``datasets``.\n\n\nModule documentation\n====================\n\n\"\"\"\nimport copy\n\nimport numpy as np\n\nimport aspecd.plotting\nimport aspecd.processing\n\nfrom cwepr import utils\n\n\nclass GoniometerSweepPlotter(aspecd.plotting.SingleCompositePlotter):\n \"\"\"Overview of the results of a goniometer sweep.\n\n A goniometer sweep, *i.e.* a series of cw-EPR spectra as a function of\n the angle of the sample with respect to the external magnetic field,\n is usually performed over at least 180°, regardless of the step size.\n The reason is simply that the spectra for 0° and 180° should be\n identical due to the underlying physics of magnetic resonance.\n\n The plotter will create three subpanels:\n\n * A 2D plot (scaled image plot) as a general overview.\n\n * A 1D multiplot comparing the signals for 0° and 180° to check for\n consistency during the measurement.\n\n * A stacked plot showing all angular positions, providing an alternative\n view of the angular-dependent signal changes compared to the 2D plot.\n\n\n Examples\n --------\n For convenience, a series of examples in recipe style (for details of\n the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below\n for how to make use of this class.\n\n To get an overview of your goniometer sweep, just invoke the plotter with\n default values:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: GoniometerSweepPlotter\n properties:\n filename: output.pdf\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.description = 'Plot for one goniometric dataset in different ' \\\n 'representations.'\n self.grid_dimensions = [2, 2]\n self.subplot_locations = [[0, 0, 1, 1], [1, 0, 1, 1], [0, 1, 2, 1]]\n self.plotter = [aspecd.plotting.SinglePlotter2D(),\n aspecd.plotting.MultiPlotter1D(),\n aspecd.plotting.SinglePlotter2DStacked()]\n self.axes_positions = [[0, 0.15, 1, 1], [0, 0, 1, 1],\n [0.25, 0, 0.9, 1.07]]\n self.zero_deg_slice = None\n self.hundredeighty_deg_slice = None\n self.parameters['show_zero_lines'] = False\n self.__kind__ = 'singleplot'\n self._exclude_from_to_dict.extend(['dataset', 'zero_deg_slice',\n 'hundredeighty_deg_slice'])\n\n def _create_plot(self):\n self._configure_traces_plotter()\n self._configure_contour_plotter()\n self._extract_traces()\n self._configure_comparison_plotter()\n super()._create_plot()\n\n def _configure_contour_plotter(self):\n upper_contour = self.plotter[0]\n upper_contour.type = 'contourf'\n upper_contour.parameters['show_contour_lines'] = True\n upper_contour.properties.from_dict({\n 'axes': {\n 'yticks': [0, 30, 60, 90, 120, 150, 180]\n }\n })\n self.plotter[0] = upper_contour\n\n def _extract_traces(self):\n slicing = aspecd.processing.SliceExtraction()\n slicing.parameters['axis'] = axis_no = 1\n zero_value = self._get_angle_closest_to_value(axis_no, 0)\n hundredeighty_value = self._get_angle_closest_to_value(axis_no, 180)\n slicing.parameters['unit'] = 'axis'\n slicing.parameters['position'] = zero_value\n self.zero_deg_slice = copy.deepcopy(self.dataset)\n self.zero_deg_slice.process(slicing)\n self.zero_deg_slice.label = f'{zero_value:.1f}°'\n slicing.parameters['position'] = hundredeighty_value\n self.hundredeighty_deg_slice = copy.deepcopy(self.dataset)\n self.hundredeighty_deg_slice.process(slicing)\n self.hundredeighty_deg_slice.label = f'{hundredeighty_value:.1f}°'\n\n def 
_get_angle_closest_to_value(self, axis_no=0, value=None):\n axis = self.dataset.data.axes[axis_no].values\n return axis[min(range(len(axis)), key=lambda i: abs(axis[i] - value))]\n\n def _configure_comparison_plotter(self):\n comparison_plotter = self.plotter[1]\n comparison_plotter.datasets = [self.zero_deg_slice,\n self.hundredeighty_deg_slice]\n comparison_plotter.properties.from_dict({\n 'drawings': [\n {'color': 'tab:blue'},\n {'color': 'tab:red'}\n ],\n 'axes': {\n 'yticks': [],\n 'ylabel': r'$EPR\\ intensity$'\n }\n })\n comparison_plotter.parameters['show_legend'] = True\n self.plotter[1] = comparison_plotter\n\n def _configure_traces_plotter(self):\n self.plotter[2].parameters['yticklabelformat'] = '%.1f'\n self.plotter[2].parameters['ytickcount'] = 19\n\n\nclass PowerSweepAnalysisPlotter(aspecd.plotting.MultiPlotter1D):\n r\"\"\"\n Plot results of a power saturation analysis with second axis for mw power.\n\n To determine the microwave power level not saturating the cw-EPR signal,\n usually a \"power sweep\" (power saturation study) is carried out with\n systematically varying the incident microwave power. The signal\n amplitude of the resulting data is plotted vs. the square root of the\n microwave power, resulting in a power saturation curve. As long as the\n signal is not saturated, the graph shows a linear relationship.\n\n As the class inherites from :class:`aspecd.plotting.MultiPlotter1D`\n see there for additional details of the parameters that can be set.\n\n Attributes\n ----------\n parameters : :class:`dict`\n All parameters necessary for the plot, implicit and explicit\n\n Most parameters are documented in the base class. Here, only the\n additional parameters or parameters with specific settings are\n documented.\n\n mw-axis : class:`bool`\n Whether to show an additional microwave axis in units of power.\n\n The main *x* axis gives the square root of the microwave power,\n but as the microwave power needs to be set in power units\n (typically mW), it is convenient to have this available as well.\n\n Default: True\n\n tight_layout: :class:`bool`\n Whether to adjust the plot to fit into the figure area\n\n For details see :meth:`matplotlib.figure.Figure.tight_layout`.\n\n Default: True\n\n\n Examples\n --------\n The class basically works like a usual MultiPlotter1D. A full power\n saturation analysis may look like this:\n\n .. code-block:: yaml\n\n datasets:\n - PowerSweep\n tasks:\n - kind: singleanalysis\n type: AmplitudeVsPower\n apply_to:\n - PowerSweep\n result: power_sweep_analysis\n - kind: singleanalysis\n type: PolynomialFitOnData\n properties:\n parameters:\n order: 1\n points: 5\n return_type: dataset\n apply_to:\n - power_sweep_analysis\n result: fit\n - kind: multiplot\n type: PowerSweepAnalysisPlotter\n properties:\n properties:\n drawings:\n - marker: '*'\n - color: red\n grid:\n show: true\n axis: both\n axes:\n ylabel: '$EPR\\\\ amplitude$'\n filename: powersweepanalysis.pdf\n apply_to:\n - power_sweep_analysis\n - fit\n\n\n This would result in a power saturation curve (EPR signal amplitude as a\n function of the square root of the microwave power, the latter usually\n in mW), and a linear fit covering in this case the first five data points.\n\n .. 
versionadded:: 0.2\n\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.parameters['mw-axis'] = True\n self.parameters['tight_layout'] = True\n\n def _create_plot(self):\n super()._create_plot()\n if self.parameters['mw-axis']:\n self._set_lower_xlim()\n self._create_power_axis()\n\n def _set_lower_xlim(self):\n xlim = list(self.axes.get_xlim())\n if xlim[0] < 0:\n xlim[0] = 0\n self.axes.set_xlim(xlim)\n\n def _create_power_axis(self):\n \"\"\"\n Add a mw power axis as second axis opposite the sqrt(mw power) axis.\n\n Note that :func:`numpy.sqrt` returns NaN for negative values.\n Therefore, the lower axis limit is set to be >= 0 in this plot.\n \"\"\"\n def forward(values):\n return np.power(values, 2)\n\n def backward(values):\n return np.sqrt(values)\n\n power_axis = self.ax.secondary_xaxis('top',\n functions=(backward, forward))\n power_axis.set_xlabel('$mw\\\\ power$')\n power_axis.tick_params(labelrotation=90)\n\n\nclass PlotterExtensions:\n \"\"\"Extensions for plots of cw-EPR data.\n\n This class is meant as a mixin class for plotters of the cwepr package\n and provides functionality specific for cw-EPR-spectroscopic data.\n\n Hence it can only be used as mixin in addition to a plotter class.\n\n Attributes\n ----------\n parameters : :class:`dict`\n All parameters necessary for the plot, implicit and explicit\n\n The following keys exist, in addition to those defined by the actual\n plotter:\n\n g-axis: :class:`bool`\n Whether to show an additional *g* axis opposite of the magnetic\n field axis\n\n This assumes the magnetic field axis to be the *x* axis and the\n magnetic field values to be in millitesla (mT), as it calls\n :func:`cwepr.utils.convert_mT2g`.\n\n\n .. versionadded:: 0.2\n\n \"\"\"\n\n def __init__(self):\n self.parameters['g-axis'] = False\n\n def _create_g_axis(self, mw_freq=None):\n \"\"\"\n Add a *g* axis as second axis opposite the magnetic field axis.\n\n Currently, this function assumes the magnetic field axis to be the\n *x* axis. Additionally, the magnetic field values are assumed to be\n in millitesla (mT), and the microwave frequency to be in gigahertz (\n GHz).\n\n Parameters\n ----------\n mw_freq : :class:`float`\n microwave frequency (**in GHz**) used to convert from mT to g\n\n \"\"\"\n def forward(values):\n return utils.convert_mT2g(values, mw_freq=mw_freq)\n\n def backward(values):\n return utils.convert_g2mT(values, mw_freq=mw_freq)\n\n gaxis = self.ax.secondary_xaxis('top', functions=(backward, forward))\n gaxis.set_xlabel(r'$g\\ value$')\n\n\nclass SinglePlotter1D(aspecd.plotting.SinglePlotter1D, PlotterExtensions):\n \"\"\"1D plots of single datasets.\n\n Convenience class taking care of 1D plots of single datasets.\n\n As the class is fully inherited from ASpecD for simple usage, see the\n ASpecD documentation of the :class:`aspecd.plotting.SinglePlotter1D`\n class for details.\n\n Furthermore, the class inhertis all functionality from\n :class:`PlotterExtensions`. See there for additional details.\n\n\n Examples\n --------\n For convenience, a series of examples in recipe style (for details of\n the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below\n for how to make use of this class. Of course, all parameters settable\n for the superclasses can be set as well. The examples focus each on a\n single aspect.\n\n In the simplest case, just invoke the plotter with default values:\n\n .. 
code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter1D\n properties:\n filename: output.pdf\n\n\n In case you would like to have a *g* axis plotted as a second *x* axis on\n top:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter1D\n properties:\n parameters:\n g-axis: true\n filename: output.pdf\n\n \"\"\"\n\n def _create_plot(self):\n super()._create_plot()\n if self.parameters['g-axis'] and self.dataset.data.axes[0].unit == 'mT':\n self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value)\n\n\nclass SinglePlotter2D(aspecd.plotting.SinglePlotter2D, PlotterExtensions):\n \"\"\"2D plots of single datasets.\n\n Convenience class taking care of 2D plots of single datasets.\n\n As the class is fully inherited from ASpecD for simple usage, see the\n ASpecD documentation of the :class:`aspecd.plotting.SinglePlotter2D`\n class for details.\n\n Furthermore, the class inhertis all functionality from\n :class:`PlotterExtensions`. See there for additional details.\n\n\n Examples\n --------\n For convenience, a series of examples in recipe style (for details of\n the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below\n for how to make use of this class. Of course, all parameters settable\n for the superclasses can be set as well. The examples focus each on a\n single aspect.\n\n In the simplest case, just invoke the plotter with default values:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2D\n properties:\n filename: output.pdf\n\n To change the axes (flip *x* and *y* axis):\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2D\n properties:\n filename: output.pdf\n parameters:\n switch_axes: True\n\n To use another type (here: contour):\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2D\n properties:\n filename: output.pdf\n type: contour\n\n To set the number of levels of a contour plot to 10:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2D\n properties:\n filename: output.pdf\n type: contour\n parameters:\n levels: 10\n\n To change the colormap (cmap) used:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2D\n properties:\n filename: output.pdf\n properties:\n drawing:\n cmap: RdGy\n\n Make sure to check the documentation of the ASpecD\n :mod:`aspecd.plotting` module for further parameters that can be set.\n\n In case you would like to have a *g* axis plotted as a second *x* axis on\n top:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2D\n properties:\n parameters:\n g-axis: true\n filename: output.pdf\n\n \"\"\"\n\n def _create_plot(self):\n super()._create_plot()\n if self.parameters['g-axis'] and self.dataset.data.axes[0].unit == 'mT':\n self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value)\n\n\nclass SinglePlotter2DStacked(aspecd.plotting.SinglePlotter2DStacked,\n PlotterExtensions):\n \"\"\"Stacked plots of 2D data.\n\n A stackplot creates a series of lines stacked on top of each other from\n a 2D dataset.\n\n As the class is fully inherited from ASpecD for simple usage, see the\n ASpecD documentation of the :class:`aspecd.plotting.SinglePlotter2DStacked`\n class for details.\n\n Furthermore, the class inhertis all functionality from\n :class:`PlotterExtensions`. 
See there for additional details.\n\n\n Examples\n --------\n For convenience, a series of examples in recipe style (for details of\n the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below\n for how to make use of this class. Of course, all parameters settable\n for the superclasses can be set as well. The examples focus each on a\n single aspect.\n\n In the simplest case, just invoke the plotter with default values:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2DStacked\n properties:\n filename: output.pdf\n\n If you need to more precisely control the formatting of the y tick\n labels, particularly the number of decimals shown, you can set the\n formatting accordingly:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2DStacked\n properties:\n filename: output.pdf\n parameters:\n yticklabelformat: '%.2f'\n\n In this particular case, the y tick labels will appear with only two\n decimals. Note that currently, the \"old style\" formatting specifications\n are used due to their widespread use in other programming languages and\n hence the familiarity of many users with this particular notation.\n\n Sometimes you want to have horizontal \"zero lines\" appear for each\n individual trace of the stacked plot. This can be achieved explicitly\n setting the \"show_zero_lines\" parameter to \"True\" that is set to \"False\"\n by default:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2DStacked\n properties:\n filename: output.pdf\n parameters:\n show_zero_lines: True\n\n In case you would like to have a *g* axis plotted as a second *x* axis on\n top:\n\n .. code-block:: yaml\n\n - kind: singleplot\n type: SinglePlotter2DStacked\n properties:\n parameters:\n g-axis: true\n filename: output.pdf\n\n \"\"\"\n\n def _create_plot(self):\n super()._create_plot()\n if self.parameters['g-axis'] and self.dataset.data.axes[0].unit == 'mT':\n self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value)\n\n\nclass MultiPlotter1D(aspecd.plotting.MultiPlotter1D, PlotterExtensions):\n \"\"\"1D plots of multiple datasets.\n\n Convenience class taking care of 1D plots of multiple datasets.\n\n As the class is fully inherited from ASpecD for simple usage, see the\n ASpecD documentation of the :class:`aspecd.plotting.MultiPlotter1D`\n class for details.\n\n Furthermore, the class inhertis all functionality from\n :class:`PlotterExtensions`. See there for additional details.\n\n\n Examples\n --------\n For convenience, a series of examples in recipe style (for details of\n the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below\n for how to make use of this class. Of course, all parameters settable\n for the superclasses can be set as well. The examples focus each on a\n single aspect.\n\n In the simplest case, just invoke the plotter with default values:\n\n .. code-block:: yaml\n\n - kind: multiplot\n type: MultiPlotter1D\n properties:\n filename: output.pdf\n\n To change the settings of each individual line (here the colour and label),\n supposing you have three lines, you need to specify the properties in a\n list for each of the drawings:\n\n .. code-block:: yaml\n\n - kind: multiplot\n type: MultiPlotter1D\n properties:\n filename: output.pdf\n properties:\n drawings:\n - color: '#FF0000'\n label: foo\n - color: '#00FF00'\n label: bar\n - color: '#0000FF'\n label: foobar\n\n .. 
important::\n If you set colours using the hexadecimal RGB triple prefixed by\n ``#``, you need to explicitly tell YAML that these are strings,\n surrounding the values by quotation marks.\n\n In case you would like to have a *g* axis plotted as a second *x* axis on\n top:\n\n .. code-block:: yaml\n\n - kind: multiplot\n type: MultiPlotter1D\n properties:\n parameters:\n g-axis: true\n filename: output.pdf\n\n \"\"\"\n\n def _create_plot(self):\n super()._create_plot()\n if self.parameters['g-axis'] \\\n and self.datasets[0].data.axes[0].unit == 'mT':\n self._create_g_axis(\n self.datasets[0].metadata.bridge.mw_frequency.value)\n\n\nclass MultiPlotter1DStacked(aspecd.plotting.MultiPlotter1DStacked,\n PlotterExtensions):\n \"\"\"Stacked 1D plots of multiple datasets.\n\n Convenience class taking care of 1D plots of multiple datasets.\n\n As the class is fully inherited from ASpecD for simple usage, see the\n ASpecD documentation of the :class:`aspecd.plotting.MultiPlotter1DStacked`\n class for details.\n\n Furthermore, the class inhertis all functionality from\n :class:`PlotterExtensions`. See there for additional details.\n\n Examples\n --------\n For convenience, a series of examples in recipe style (for details of\n the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below\n for how to make use of this class. Of course, all parameters settable\n for the superclasses can be set as well. The examples focus each on a\n single aspect.\n\n In the simplest case, just invoke the plotter with default values:\n\n .. code-block:: yaml\n\n - kind: multiplot\n type: MultiPlotter1DStacked\n properties:\n filename: output.pdf\n\n To change the settings of each individual line (here the colour and label),\n supposing you have three lines, you need to specify the properties in a\n list for each of the drawings:\n\n .. code-block:: yaml\n\n - kind: multiplot\n type: MultiPlotter1DStacked\n properties:\n filename: output.pdf\n properties:\n drawings:\n - color: '#FF0000'\n label: foo\n - color: '#00FF00'\n label: bar\n - color: '#0000FF'\n label: foobar\n\n .. important::\n If you set colours using the hexadecimal RGB triple prefixed by\n ``#``, you need to explicitly tell YAML that these are strings,\n surrounding the values by quotation marks.\n\n Sometimes you want to have horizontal \"zero lines\" appear for each\n individual trace of the stacked plot. This can be achieved explicitly\n setting the \"show_zero_lines\" parameter to \"True\" that is set to \"False\"\n by default:\n\n .. code-block:: yaml\n\n - kind: multiplot\n type: MultiPlotter1DStacked\n properties:\n filename: output.pdf\n parameters:\n show_zero_lines: True\n\n In case you would like to have a *g* axis plotted as a second *x* axis on\n top:\n\n .. code-block:: yaml\n\n - kind: multiplot\n type: MultiPlotter1DStacked\n properties:\n parameters:\n g-axis: true\n filename: output.pdf\n\n \"\"\"\n\n def _create_plot(self):\n super()._create_plot()\n if self.parameters['g-axis'] \\\n and self.datasets[0].data.axes[0].unit == 'mT':\n self._create_g_axis(\n self.datasets[0].metadata.bridge.mw_frequency.value)\n"
]
| [
[
"numpy.power",
"numpy.sqrt"
]
]
|
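The apis column of this row (numpy.power, numpy.sqrt) corresponds to the transform pair the code field uses to attach a secondary mw-power axis via matplotlib's Axes.secondary_xaxis. The following is a minimal, self-contained sketch of that pattern and is not part of the dataset row itself; the variable names (sqrt_power, signal) and all values are made-up placeholders.

import numpy as np
import matplotlib.pyplot as plt

# Placeholder data: the x axis carries sqrt(mw power) values, y a made-up signal.
sqrt_power = np.linspace(0.0, 10.0, 50)
signal = np.exp(-0.1 * sqrt_power)

fig, ax = plt.subplots()
ax.plot(sqrt_power, signal)
ax.set_xlabel(r'$\sqrt{mw\ power}$')

# Secondary axis on top showing mw power: matplotlib expects
# functions=(forward, inverse), mapping parent-axis values to
# secondary-axis values, so squaring is the forward transform here.
power_axis = ax.secondary_xaxis(
    'top',
    functions=(lambda values: np.power(values, 2),
               lambda values: np.sqrt(values)))
power_axis.set_xlabel(r'$mw\ power$')

plt.show()

As the docstring in the row's code notes, numpy.sqrt returns NaN for negative values, which is why the embedded plotter clamps the lower x limit to zero before adding the secondary axis.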