Columns: repo_name (string, lengths 6-130), hexsha (list), file_path (list), code (list), apis (list), possible_versions (list)
valerioda/pygama
[ "a3923df02a864747b0da1904e1bfc9f0ae8839ee" ]
[ "pygama/io/digitizers.py" ]
[ "import sys\nimport array\nimport itertools\nimport numpy as np\nimport pandas as pd\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nfrom pprint import pprint\n\nfrom .io_base import DataTaker\nfrom .waveform import Waveform\n\n\"\"\"\nFIXME:\nthese variables should be set by config if digitizer:\nself.[window, win_type, n_samp, n_blsamp]\n\nTODO:\nRemove windowing feature completely, it's unnecessary with lh5 var-length arrs\n\"\"\"\n\nclass ORCAStruck3302(DataTaker):\n \"\"\" \n decode ORCA Struck 3302 digitizer data\n \"\"\"\n def __init__(self, *args, **kwargs):\n\n self.decoder_name = 'ORSIS3302DecoderForEnergy'\n self.class_name = 'ORSIS3302Model'\n\n # store an entry for every event\n self.decoded_values = {\n \"packet_id\": [],\n \"ievt\": [],\n \"energy\": [],\n \"energy_first\": [],\n \"timestamp\": [],\n \"channel\": [],\n \"ts_lo\": [],\n \"ts_hi\": [],\n \"waveform\": [],\n # \"energy_wf\": []\n }\n super().__init__(*args, **kwargs) # also initializes the garbage df\n\n self.event_header_length = 1\n self.sample_period = 10 # ns\n self.h5_format = \"table\"\n self.n_blsamp = 2000\n self.ievt = 0\n self.ievt_gbg = 0\n self.pytables_col_limit = 3000\n self.df_metadata = None # hack, this probably isn't right\n \n\n def decode_event(self, event_data_bytes, packet_id, header_dict, verbose=False):\n \"\"\"\n see README for the 32-bit data word diagram\n \"\"\"\n # parse the raw event data into numpy arrays of 16 and 32 bit ints\n evt_data_32 = np.fromstring(event_data_bytes, dtype=np.uint32)\n evt_data_16 = np.fromstring(event_data_bytes, dtype=np.uint16)\n\n # start reading the binary, baby\n n_lost_msb = (evt_data_32[0] >> 25) & 0x7F\n n_lost_lsb = (evt_data_32[0] >> 2) & 0x7F\n n_lost_records = (n_lost_msb << 7) + n_lost_lsb\n crate = (evt_data_32[0] >> 21) & 0xF\n card = (evt_data_32[0] >> 16) & 0x1F\n channel = (evt_data_32[0] >> 8) & 0xFF\n buffer_wrap = evt_data_32[0] & 0x1\n crate_card_chan = (crate << 9) + (card << 4) + channel\n wf_length_32 = evt_data_32[1]\n ene_wf_length = evt_data_32[2]\n evt_header_id = evt_data_32[3] & 0xFF\n timestamp = evt_data_32[4] + ((evt_data_32[3] >> 16) & 0xFFFF)\n last_word = evt_data_32[-1]\n\n # get the footer\n energy = evt_data_32[-4]\n energy_first = evt_data_32[-3]\n extra_flags = evt_data_32[-2]\n\n # compute expected and actual array dimensions\n wf_length16 = 2 * wf_length_32\n orca_helper_length16 = 2\n sis_header_length16 = 12 if buffer_wrap else 8\n header_length16 = orca_helper_length16 + sis_header_length16\n ene_wf_length16 = 2 * ene_wf_length\n footer_length16 = 8\n expected_wf_length = len(evt_data_16) - orca_helper_length16 - sis_header_length16 - \\\n footer_length16 - ene_wf_length16\n\n # error check: waveform size must match expectations\n if wf_length16 != expected_wf_length or last_word != 0xdeadbeef:\n print(len(evt_data_16), orca_helper_length16, sis_header_length16,\n footer_length16)\n print(\"ERROR: Waveform size %d doesn't match expected size %d.\" %\n (wf_length16, expected_wf_length))\n print(\" The Last Word (should be 0xdeadbeef):\",\n hex(last_word))\n exit()\n\n # indexes of stuff (all referring to the 16 bit array)\n i_wf_start = header_length16\n i_wf_stop = i_wf_start + wf_length16\n i_ene_start = i_wf_stop + 1\n i_ene_stop = i_ene_start + ene_wf_length16\n if buffer_wrap:\n # start somewhere in the middle of the record\n i_start_1 = evt_data_32[6] + header_length16 + 1\n i_stop_1 = i_wf_stop # end of the wf record\n i_start_2 = i_wf_start # beginning of the wf record\n i_stop_2 = 
i_start_1\n\n # handle the waveform(s)\n energy_wf = np.zeros(ene_wf_length16) # not used rn\n if wf_length_32 > 0:\n if not buffer_wrap:\n wf_data = evt_data_16[i_wf_start:i_wf_stop]\n else:\n wf_data1 = evt_data_16[i_start_1:i_stop_1]\n wf_data2 = evt_data_16[i_start_2:i_stop_2]\n wf_data = np.concatenate([wf_data1, wf_data2])\n\n if len(wf_data) != expected_wf_length:\n print(\"ERROR: event %d, we expected %d WF samples and only got %d\" %\n (ievt, expected_wf_length, len(wf_data)))\n exit()\n\n # final raw wf array\n waveform = wf_data\n\n # # if the wf is too big for pytables, we can window it\n # if self.window:\n # wf = Waveform(wf_data, self.sample_period, self.decoder_name)\n # win_wf, win_ts = wf.window_waveform(self.win_type,\n # self.n_samp,\n # self.n_blsamp,\n # test=False)\n # ts_lo, ts_hi = win_ts[0], win_ts[-1]\n # \n # waveform = win_wf # modify final wf array\n # \n # if wf.is_garbage:\n # ievt = self.ievt_gbg\n # self.ievt_gbg += 1\n # self.format_data(locals(), wf.is_garbage)\n # return\n\n if len(waveform) > self.pytables_col_limit and self.h5_format == \"table\":\n print(\"WARNING: too many columns for tables output,\\n\",\n \" reverting to saving as fixed hdf5 ...\")\n self.h5_format = \"fixed\"\n\n # set the event number (searchable HDF5 column)\n ievt = self.ievt\n self.ievt += 1\n\n # send any variable with a name in \"decoded_values\" to the pandas output\n self.format_data(locals())\n\n \nclass LLAMAStruck3316(DataTaker):\n \"\"\" \n decode Struck 3316 digitizer data\n \n TODO:\n handle per-channel data (gain, ...)\n most metadata of Struck header (energy, ...)\n \"\"\"\n def __init__(self, metadata=None, *args, **kwargs):\n self.decoder_name = 'SIS3316Decoder'\n self.class_name = 'SIS3316'\n\n # store an entry for every event\n self.decoded_values = {\n \"packet_id\": [],\n \"ievt\": [],\n \"energy_first\": [],\n \"energy\": [],\n \"timestamp\": [],\n \"peakhigh_index\": [],\n \"peakhigh_value\": [],\n \"information\": [],\n \"accumulator1\": [],\n \"accumulator2\": [],\n \"accumulator3\": [],\n \"accumulator4\": [],\n \"accumulator5\": [],\n \"accumulator6\": [],\n \"accumulator7\": [],\n \"accumulator8\": [],\n \"mawMax\": [],\n \"maw_before\": [],\n \"maw_after\": [],\n \"fadcID\": [],\n \"channel\": [],\n \"waveform\": [],\n }\n\n self.config_names = [] #TODO at some point we want the metainfo here\n self.file_config = {}\n self.lh5_spec = {}\n self.file_config = self.readMetadata(metadata)\n print(\"We have {} adcs and {} samples per WF.\".format(self.file_config[\"nadcs\"],self.file_config[\"nsamples\"]))\n\n super().__init__(*args, **kwargs) # also initializes the garbage df (whatever that means...)\n\n # self.event_header_length = 1 #?\n self.sample_period = 0 # ns, I will set this later, according to header info\n self.gain = 0 \n self.h5_format = \"table\"\t#was table\n #self.n_blsamp = 2000\n self.ievt = 0 #event number\n self.ievt_gbg = 0 #garbage event number\n self.window = False\n self.df_metadata = metadata #seems that was passed to superclass before, try now like this\n self.pytables_col_limit = 3000\n\n def readMetadata(self, meta):\n nsamples = -1\n totChan = 0\n configs = {}\n adcOff = {}\n for fadc in meta:\n adcOff[fadc] = {}\n for channel in meta[fadc]:\n if nsamples == -1:\n # FIXME everything is fixed to 1st existing channel.\n nsamples = meta[fadc][channel][\"SampleLength\"]\n configs[\"14BitFlag\"] = meta[fadc][channel][\"14BitFlag\"]\n #configs[\"ADCOffset\"] = meta[fadc][channel][\"ADCOffset\"]\n configs[\"FormatBits\"] = 
meta[fadc][channel][\"FormatBits\"]\n configs[\"Gain\"] = meta[fadc][channel][\"Gain\"]\n configs[\"SampleFreq\"] = meta[fadc][channel][\"SampleFreq\"]\n configs[\"SampleOffset\"] = meta[fadc][channel][\"SampleOffset\"]\n adcOff[fadc][channel] = meta[fadc][channel][\"ADCOffset\"]\n elif nsamples != meta[fadc][channel][\"SampleLength\"]:\n print(\"samples not uniform!!!\")\n totChan += 1\n configs[\"nadcs\"] = totChan\n configs[\"nsamples\"] = nsamples\n return configs\n \n def initialize(self, sample_period, gain):\n \"\"\"\n sets certain global values from a run, like:\n sample_period: time difference btw 2 samples in ns\n gain: multiply the integer sample value with the gain to get the voltage in V\n Method has to be called before the actual decoding work starts !\n \"\"\"\n self.sample_period = sample_period\n self.gain = gain\n \n \n def decode_event(self, event_data_bytes, packet_id, header_dict, fadcIndex, \n channelIndex, verbose=False):\n \"\"\"\n see the llamaDAQ documentation for data word diagrams\n \"\"\"\n \n if self.sample_period == 0:\n print(\"ERROR: Sample period not set; use initialize() before using decode_event() on SIS3316Decoder\")\n raise Exception (\"Sample period not set\")\n \n # parse the raw event data into numpy arrays of 16 and 32 bit ints\n evt_data_32 = np.fromstring(event_data_bytes, dtype=np.uint32)\n evt_data_16 = np.fromstring(event_data_bytes, dtype=np.uint16)\n \n # e sti gran binaries non ce li metti\n timestamp = ((evt_data_32[0] & 0xffff0000) << 16) + evt_data_32[1]\n format_bits = (evt_data_32[0]) & 0x0000000f\n offset = 2\n if format_bits & 0x1:\n peakhigh_value = evt_data_16[4]\n peakhigh_index = evt_data_16[5]\n information = (evt_data_32[offset+1] >> 24) & 0xff\n accumulator1 = evt_data_32[offset+2]\n accumulator2 = evt_data_32[offset+3]\n accumulator3 = evt_data_32[offset+4]\n accumulator4 = evt_data_32[offset+5]\n accumulator5 = evt_data_32[offset+6]\n accumulator6 = evt_data_32[offset+7]\n offset += 7\n else:\n peakhigh_value = 0\n peakhigh_index = 0 \n information = 0\n accumulator1 = accumulator2 = accumulator3 = accumulator4 = accumulator5 = accumulator6 = 0\n pass\n if format_bits & 0x2:\n accumulator7 = evt_data_32[offset+0]\n accumulator8 = evt_data_32[offset+1]\n offset += 2\n else:\n accumulator7 = accumulator8 = 0\n pass\n if format_bits & 0x4:\n mawMax = evt_data_32[offset+0]\n maw_before = evt_data_32[offset+1]\n maw_after = evt_data_32[offset+2]\n offset += 3\n else:\n mawMax = maw_before = maw_after = 0\n pass\n if format_bits & 0x8:\n energy_first = evt_data_32[offset+0]\n energy = evt_data_32[offset+1]\n offset += 2\n else:\n energy_first = energy = 0\n pass\n wf_length_32 = (evt_data_32[offset+0]) & 0x03ffffff\n offset += 1 #now the offset points to the wf data\n fadcID = fadcIndex\n channel = channelIndex\n \n \n # compute expected and actual array dimensions\n wf_length16 = 2 * wf_length_32\n header_length16 = offset * 2\n expected_wf_length = len(evt_data_16) - header_length16\n\n # error check: waveform size must match expectations\n if wf_length16 != expected_wf_length:\n print(len(evt_data_16), header_length16)\n print(\"ERROR: Waveform size %d doesn't match expected size %d.\" %\n (wf_length16, expected_wf_length))\n exit()\n\n # indexes of stuff (all referring to the 16 bit array)\n i_wf_start = header_length16\n i_wf_stop = i_wf_start + wf_length16\n\n # handle the waveform(s)\n if wf_length_32 > 0:\n wf_data = evt_data_16[i_wf_start:i_wf_stop]\n\n if len(wf_data) != expected_wf_length:\n print(\"ERROR: event %d, we 
expected %d WF samples and only got %d\" %\n (ievt, expected_wf_length, len(wf_data)))\n exit()\n\n # final raw wf array\n waveform = wf_data\n\n # if the wf is too big for pytables, we can window it\n if self.window:\n wf = Waveform(wf_data, self.sample_period, self.decoder_name)\n win_wf, win_ts = wf.window_waveform(self.win_type,\n self.n_samp,\n self.n_blsamp,\n test=False)\n # ts_lo, ts_hi = win_ts[0], win_ts[-1] # FIXME: what does this mean?\n\n waveform = win_wf # modify final wf array\n\n if wf.is_garbage:\n ievt = self.ievt_gbg\n self.ievt_gbg += 1\n self.format_data(locals(), wf.is_garbage)\n return\n\n if len(waveform) > self.pytables_col_limit and self.h5_format == \"table\":\n print(\"WARNING: too many columns for tables output,\\n\",\n \" reverting to saving as fixed hdf5 ...\")\n self.h5_format = \"fixed\"\n\n # set the event number (searchable HDF5 column)\n ievt = self.ievt\n self.ievt += 1\n\n # send any variable with a name in \"decoded_values\" to the pandas output\n self.format_data(locals())\n\n\nclass CAENDT57XX(DataTaker):\n \"\"\"\n decode CAENDT5725 or CAENDT5730 digitizer data.\n \n Setting the model_name will set the appropriate sample_rate\n Use the input_config function to set certain variables by passing\n a dictionary, this will most importantly assemble the file header used\n by CAEN CoMPASS to label output files.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.id = None\n self.model_name = \"DT5725\" # hack -- can't set the model name in the init\n self.decoder_name = \"caen\"\n self.file_header = None\n self.adc_bitcount = 14\n self.sample_rates = {\"DT5725\": 250e6, \"DT5730\": 500e6}\n self.sample_rate = None\n if self.model_name in self.sample_rates.keys():\n self.sample_rate = self.sample_rates[self.model_name]\n else:\n raise TypeError(\"Unidentified digitizer type: \"+str(model_name))\n self.v_range = 2.0\n\n self.e_cal = None\n self.e_type = None\n self.int_window = None\n self.parameters = [\"TIMETAG\", \"ENERGY\", \"E_SHORT\", \"FLAGS\"]\n\n self.decoded_values = {\n \"board\": None,\n \"channel\": None,\n \"timestamp\": None,\n \"energy\": None,\n \"energy_short\": None,\n \"flags\": None,\n \"num_samples\": None,\n \"waveform\": []\n }\n super().__init__(*args, **kwargs)\n\n\n def input_config(self, config):\n self.id = config[\"id\"]\n self.v_range = config[\"v_range\"]\n self.e_cal = config[\"e_cal\"]\n self.e_type = config[\"e_type\"]\n self.int_window = config[\"int_window\"]\n self.file_header = \"CH_\"+str(config[\"channel\"])+\"@\"+self.model_name+\"_\"+str(config[\"id\"])+\"_Data_\"\n\n\n def get_event_size(self, t0_file):\n with open(t0_file, \"rb\") as file:\n if self.e_type == \"uncalibrated\":\n first_event = file.read(24)\n [num_samples] = np.frombuffer(first_event[20:24], dtype=np.uint16)\n return 24 + 2*num_samples\n elif self.e_type == \"calibrated\":\n first_event = file.read(30)\n [num_samples] = np.frombuffer(first_event[26:30], dtype=np.uint32)\n return 30 + 2 * num_samples # number of bytes / 2\n else:\n raise TypeError(\"Invalid e_type! 
Valid e_type's: uncalibrated, calibrated\")\n\n\n def get_event(self, event_data_bytes):\n self.decoded_values[\"board\"] = np.frombuffer(event_data_bytes[0:2], dtype=np.uint16)[0]\n self.decoded_values[\"channel\"] = np.frombuffer(event_data_bytes[2:4], dtype=np.uint16)[0]\n self.decoded_values[\"timestamp\"] = np.frombuffer(event_data_bytes[4:12], dtype=np.uint64)[0]\n if self.e_type == \"uncalibrated\":\n self.decoded_values[\"energy\"] = np.frombuffer(event_data_bytes[12:14], dtype=np.uint16)[0]\n self.decoded_values[\"energy_short\"] = np.frombuffer(event_data_bytes[14:16], dtype=np.uint16)[0]\n self.decoded_values[\"flags\"] = np.frombuffer(event_data_bytes[16:20], np.uint32)[0]\n self.decoded_values[\"num_samples\"] = np.frombuffer(event_data_bytes[20:24], dtype=np.uint32)[0]\n self.decoded_values[\"waveform\"] = np.frombuffer(event_data_bytes[24:], dtype=np.uint16)\n elif self.e_type == \"calibrated\":\n self.decoded_values[\"energy\"] = np.frombuffer(event_data_bytes[12:20], dtype=np.float64)[0]\n self.decoded_values[\"energy_short\"] = np.frombuffer(event_data_bytes[20:22], dtype=np.uint16)[0]\n self.decoded_values[\"flags\"] = np.frombuffer(event_data_bytes[22:26], np.uint32)[0]\n self.decoded_values[\"num_samples\"] = np.frombuffer(event_data_bytes[26:30], dtype=np.uint32)[0]\n self.decoded_values[\"waveform\"] = np.frombuffer(event_data_bytes[30:], dtype=np.uint16)\n else:\n raise TypeError(\"Invalid e_type! Valid e_type's: uncalibrated, calibrated\")\n return self.assemble_data_row()\n\n\n def assemble_data_row(self):\n timestamp = self.decoded_values[\"timestamp\"]\n energy = self.decoded_values[\"energy\"]\n energy_short = self.decoded_values[\"energy_short\"]\n flags = self.decoded_values[\"flags\"]\n waveform = self.decoded_values[\"waveform\"]\n return [timestamp, energy, energy_short, flags], waveform\n\n\n def create_dataframe(self, array):\n waveform_labels = [str(item) for item in list(range(self.decoded_values[\"num_samples\"]-1))]\n column_labels = self.parameters + waveform_labels\n dataframe = pd.DataFrame(data=array, columns=column_labels, dtype=float)\n return dataframe\n\n\nclass ORCAGretina4M(DataTaker):\n \"\"\" \n decode Majorana Gretina4M digitizer data\n \n NOTE: Tom Caldwell made some nice new summary slides on a 2019 LEGEND call\n https://indico.legend-exp.org/event/117/contributions/683/attachments/467/717/mjd_data_format.pdf\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.decoder_name = 'ORGretina4MWaveformDecoder'\n self.class_name = 'ORGretina4MModel'\n self.decoded_values = {\n \"packet_id\": [],\n \"ievt\": [],\n \"energy\": [],\n \"timestamp\": [],\n \"channel\": [],\n \"board_id\": [],\n \"waveform\": [],\n }\n super().__init__(*args, **kwargs)\n self.chan_list = None\n self.is_multisampled = True\n self.event_header_length = 18\n self.sample_period = 10 # ns\n self.gretina_event_no = 0\n self.window = False\n self.n_blsamp = 500\n self.ievt = 0\n \n self.df_metadata = None # hack, this probably isn't right\n self.active_channels = self.find_active_channels()\n \n\n def crate_card_chan(self, crate, card, channel):\n return (crate << 9) + (card << 4) + (channel)\n\n\n def find_active_channels(self):\n \"\"\" \n Only do this for multi-detector data \n \"\"\"\n active_channels = []\n if self.df_metadata is None:\n return active_channels\n\n for index, row in self.df_metadata.iterrows():\n crate, card = index\n for chan, chan_en in enumerate(row.Enabled):\n if chan_en:\n active_channels.append(\n self.crate_card_chan(crate, card, chan))\n\n 
return active_channels\n\n\n def decode_event(self, event_data_bytes, packet_id, header_dict):\n \"\"\" \n Parse the header for an individual event \n \"\"\"\n self.gretina_event_no += 1\n event_data = np.fromstring(event_data_bytes, dtype=np.uint16)\n card = event_data[1] & 0x1F\n crate = (event_data[1] >> 5) & 0xF\n channel = event_data[4] & 0xf\n board_id = (event_data[4] & 0xFFF0) >> 4\n timestamp = event_data[6] + (event_data[7] << 16) + (event_data[8] << 32)\n energy = event_data[9] + ((event_data[10] & 0x7FFF) << 16)\n wf_data = event_data[self.event_header_length:]\n\n ccc = self.crate_card_chan(crate, card, channel)\n if ccc not in self.active_channels:\n # should store this in a garbage data frame\n return\n\n # if the wf is too big for pytables, we can window it\n if self.window:\n wf = Waveform(wf_data, self.sample_period, self.decoder_name)\n waveform = wf.window_waveform(self.win_type,\n self.n_samp,\n self.n_blsamp,\n test=False)\n if wf.is_garbage:\n ievt = self.ievt_gbg\n self.ievt_gbg += 1\n self.garbage_count += 1\n\n if len(wf_data) > 2500 and self.h5_format == \"table\":\n print(\"WARNING: too many columns for tables output,\",\n \" reverting to saving as fixed hdf5 ...\")\n self.h5_format = \"fixed\"\n\n waveform = wf_data.astype(\"int16\")\n\n # set the event number (searchable HDF5 column)\n ievt = self.ievt\n self.ievt += 1\n\n # send any variable with a name in \"decoded_values\" to the pandas output\n self.format_data(locals())\n\n\nclass SIS3316ORCADecoder(DataTaker):\n \"\"\" \n handle ORCA Struck 3316 digitizer \n \n TODO: \n handle per-channel data (gain, ...)\n most metadata of Struck header (energy, ...)\n \"\"\"\n def __init__(self, *args, **kwargs):\n \n self.decoder_name = 'ORSIS3316WaveformDecoder'\n self.class_name = 'ORSIS3316Model'\n\n # store an entry for every event\n self.decoded_values = {\n \"packet_id\": [],\n \"ievt\": [],\n \"energy_first\": [],\n \"energy\": [],\n \"timestamp\": [],\n \"channel\": [],\n \"waveform\": [],\n }\n super().__init__(*args, **kwargs) # also initializes the garbage df (whatever that means...)\n\n # self.event_header_length = 1 #?\n self.sample_period = 10 # ns, I will set this later, according to header info\n self.gain = 0 \n self.h5_format = \"table\"\n self.ievt = 0 #event number\n self.ievt_gbg = 0 #garbage event number\n self.window = False\n \n \n def decode_event(self, event_data_bytes, packet_id, header_dict, \n verbose=False):\n\n # parse the raw event data into numpy arrays of 16 and 32 bit ints\n evt_data_32 = np.fromstring(event_data_bytes, dtype=np.uint32)\n evt_data_16 = np.fromstring(event_data_bytes, dtype=np.uint16)\n\n #TODO Figure out the header, particularly card/crate/channel/timestamp\n n_lost_msb = 0\n n_lost_lsb = 0\n n_lost_records = 0\n crate = evt_data_32[3]\n card = evt_data_32[4]\n channel = evt_data_32[4]\n buffer_wrap = 0\n crate_card_chan = crate + card + channel\n wf_length_32 = 0\n ene_wf_length = evt_data_32[4]\n evt_header_id = 0\n timestamp = 0\n\n # compute expected and actual array dimensions\n wf_length16 = 1024\n orca_helper_length16 = 52\n header_length16 = orca_helper_length16\n ene_wf_length16 = 2 * ene_wf_length\n footer_length16 = 0\n\n expected_wf_length = (len(evt_data_16) - header_length16 - ene_wf_length16)/2\n\n if wf_length16 != expected_wf_length:\n print(\"ERROR: Waveform size %d doesn't match expected size %d.\" %\n (wf_length16, expected_wf_length))\n #exit()\n\n # indexes of stuff (all referring to the 16 bit array)\n i_wf_start = header_length16\n i_wf_stop = 
i_wf_start + wf_length16\n i_ene_start = i_wf_stop + 1\n i_ene_stop = i_ene_start + ene_wf_length16\n\n\n # handle the waveform(s)\n if wf_length16 > 0:\n wf_data = evt_data_16[i_wf_start:i_wf_stop]\n\n\n #TODO check if number of events matches expected\n #if len(wf_data) != expected_wf_length:\n # print(\"ERROR: We expected %d WF samples and only got %d\" %\n # (expected_wf_length, len(wf_data)))\n # exit()\n\n # final raw wf array\n waveform = wf_data\n\n # set the event number (searchable HDF5 column)\n ievt = self.ievt\n self.ievt += 1\n\n # send any variable with a name in \"decoded_values\" to the pandas output\n self.format_data(locals())\n\n\nclass FlashCam(DataTaker):\n \"\"\" \n decode FlashCam digitizer data.\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"\n \"\"\"\n self.decoder_name = \"FlashCam\"\n \n # these are read for every event (decode_event)\n self.decoded_values = {\n \"ievt\": [], # index of event\n \"timestamp\": [], # time since beginning of file\n \"channel\": [], # right now, index of the trigger (trace)\n \"baseline\" : [], # averages prebaseline0 and prebaseline1\n \"wf_max\": [], # ultra-simple np.max energy estimation\n \"wf_std\": [], # ultra-simple np.std noise estimation\n \"waveform\": [] # digitizer data\n }\n \n # these are read for every file (get_file_config)\n self.config_names = [\n \"nsamples\", # samples per channel\n \"nadcs\", # number of adc channels\n \"ntriggers\", # number of triggertraces\n \"telid\", # id of telescope\n \"adcbits\", # bit range of the adc channels\n \"sumlength\", # length of the fpga integrator\n \"blprecision\", # precision of the fpga baseline\n \"mastercards\", # number of attached mastercards\n \"triggercards\", # number of attached triggercards\n \"adccards\", # number of attached fadccards\n \"gps\", # gps mode (0: not used, 1: external pps and 10MHz)\n ]\n \n # put add'l info useful for LH5 specification\n # default structure is array<1>{real}, default unit is None.\n # here we only specify columns if they are non-default.\n self.lh5_spec = {\n \"timestamp\":{\"units\":\"sec\"},\n \"baseline\":{\"units\":\"adc\"},\n \"wf_max\":{\"units\":\"adc\"},\n \"wf_std\":{\"units\":\"adc\"},\n }\n \n super().__init__(*args, **kwargs)\n \n \n def get_file_config(self, fcio):\n \"\"\"\n access FCIOConfig members once when each file is opened\n \"\"\"\n self.file_config = {c:getattr(fcio, c) for c in self.config_names}\n\n \n def decode_event(self, fcio, packet_id, verbose=False):\n \"\"\"\n access FCIOEvent members for each event in the raw file\n \"\"\"\n ievt = fcio.eventnumber # the eventnumber since the beginning of the file\n timestamp = fcio.eventtime # the time since the beginning of the file in seconds\n traces = fcio.traces # the full traces for the event: (nadcs, nsamples)\n baselines = fcio.baselines # the fpga baseline values for each channel in LSB\n # baselines = fcio.average_prebaselines # equivalent?\n \n # these are empty in my test file\n integrals = fcio.integrals # the fpga integrator values for each channel in LSB\n triggertraces = fcio.triggertraces # the triggersum traces: (ntriggers, nsamples)\n \n # all channels are read out simultaneously for each event\n for iwf in range(self.file_config[\"nadcs\"]):\n channel = iwf\n waveform = traces[iwf]\n baseline = baselines[iwf]\n wf_max = np.amax(waveform)\n wf_std = np.std(waveform)\n self.total_count += 1\n \n # i don't know what indicates a garbage event yet\n # if wf.is_garbage:\n # self.garbage_count += 1\n # self.format_data(locals(), 
wf.is_garbage)\n # return\n \n # send any variable with a name in \"decoded_values\" to the output\n self.format_data(locals()) \n\n\n" ]
[ [ "numpy.amax", "pandas.DataFrame", "numpy.concatenate", "numpy.frombuffer", "numpy.std", "numpy.fromstring", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tripods-xai/isit-2022
[ "024a0ccb59f7d4b2c9e88ef96d4a9c57712d6dfd" ]
[ "turbo-codes/archive/src/channels.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras import layers\nimport tensorflow.keras.backend as K\n\nfrom .utils import assert_binary_array\n\n\"\"\"TF Channels\"\"\"\nclass TFChannel(layers.Layer):\n \n def call(self, input_signal):\n NotImplemented\n\nclass TFAWGN(TFChannel):\n def __init__(self, sigma, **kwargs):\n super().__init__(**kwargs)\n self.sigma = sigma\n \n def call(self, input_signal):\n return input_signal + tf.random.normal(tf.shape(input_signal), stddev=self.sigma)\n\n\n\"\"\"Np Channels\"\"\"\nclass Channel(object):\n\n def __init__(self):\n pass\n\n def corrupt(self, input_signal):\n NotImplemented\n\nclass AWGN(Channel):\n\n def __init__(self, sigma, rng):\n super().__init__()\n self.sigma = sigma\n self.rng = rng\n \n def corrupt(self, input_signal):\n \n assert_binary_array(input_signal)\n data_shape = input_signal.shape # input_signal has to be a numpy array.\n\n noise = self.sigma * self.rng.standard_normal(data_shape) # Define noise\n corrupted_signal = 2.0*input_signal-1.0 + noise\n\n return corrupted_signal" ]
[ [ "tensorflow.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
burnpiro/wod-bike-temporal-network
[ "f84195cc2b76b4775ad6be505631a414e1627914" ]
[ "processing_helper.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom datetime import datetime\n\nCOLUMNS = [\n 'uid',\n 'rental_place',\n 'return_place'\n]\n\nnodes_to_remove = [\n '.GOTOWE DO REZERWACJI',\n 'Poza stacją',\n '.RELOKACYJNA',\n '.RELOKACYJNA A1-4',\n '# Rowery skradzione Wrocław 2014',\n '#Rowery zapasowe Warszawa'\n]\n\n\ndef trim_and_remove_slash(s):\n return s.strip().replace('/', '-').replace('\"', '').replace(',', ' -')\n\n\ndef extract_data(train_file_path, columns=COLUMNS):\n # Read csv file and return\n all_data = pd.read_csv(train_file_path, usecols=columns)\n for place in nodes_to_remove:\n all_data = all_data[all_data['rental_place'] != place]\n all_data = all_data[all_data['return_place'] != place]\n all_data = all_data[all_data['return_place'] != all_data['rental_place']]\n all_data['rental_place'] = all_data['rental_place'].apply(trim_and_remove_slash)\n all_data['return_place'] = all_data['return_place'].apply(trim_and_remove_slash)\n stations = all_data['rental_place'].unique()\n all_data = all_data.dropna()\n\n return all_data, stations\n\n#\n# data = extract_data('data/historia_przejazdow_2019-03.csv')\n#\n# print(data.head())\n# print()\n# print(len(data[data['return_place'] == 1][data['rental_place'] == 0]))\n# print()\n# grouped_data = data.groupby(['rental_place', 'return_place']).size().reset_index()\n# grouped_data = grouped_data.rename(columns={\n# 'rental_place': 'Source',\n# 'return_place': 'Target',\n# 0: 'Weight'\n# })\n# grouped_data['Type'] = 'Directed'\n# print(grouped_data)\n# grouped_data.to_csv('out.csv', index=False)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
palmhjell/ninetysix
[ "86fcbf9740ac84e93067a9e3a4eca92aec057ff5" ]
[ "tests/test_plate_inputs.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\n\nimport ninetysix as ns\n\nimport pytest\n\n\n# General set up\ndef test_value_name():\n df = pd.DataFrame({\n 'well': ['A1'],\n 'test': [1],\n })\n\n value_name = ns.Plate(data=df, value_name='test').value_name\n assert value_name == 'test'\n\ndef test_nonstring_value_name():\n df = pd.DataFrame({\n 'well': ['A1'],\n 'test': [1],\n })\n\n with pytest.raises(ValueError):\n ns.Plate(data=df, value_name=4)\n\ndef test_auto_padding():\n # Auto-detect (lack of) padding\n df = pd.DataFrame({\n 'well': ['A1'],\n 'test': [1],\n })\n\n padded = ns.Plate(data=df, value_name='test').zero_padding\n\n assert not padded\n\n # Auto-detect padding\n df = pd.DataFrame({\n 'well': ['A01'],\n 'test': [1],\n })\n\n padded = ns.Plate(data=df, value_name='test').zero_padding\n\n assert padded\n\ndef test_explicit_padding():\n # Update to padded\n df = pd.DataFrame({\n 'well': ['A1'],\n 'test': [1],\n })\n\n output_df = ns.Plate(data=df, value_name='test', zero_padding=True).df\n\n desired_df = pd.DataFrame({\n 'well': ['A01'],\n 'row': ['A'],\n 'column': [1],\n 'test': [1],\n })\n\n assert output_df.equals(desired_df)\n\n # Update to un-padded\n df = pd.DataFrame({\n 'well': ['A01'],\n 'test': [1],\n })\n\n output_df = ns.Plate(data=df, value_name='test', zero_padding=False).df\n\n desired_df = pd.DataFrame({\n 'well': ['A1'],\n 'row': ['A'],\n 'column': [1],\n 'test': [1],\n })\n\n assert output_df.equals(desired_df)\n\n\n# Well-value pair inputs\ndef test_tuple_of_tuples():\n wells = ('A1', 'A2')\n values = (1, 2)\n data = zip(wells, values)\n\n assert ns.Plate(data=data)._passed\n\ndef test_nonstring_well():\n wells = ['A1', 2]\n values = [1, 2]\n data = zip(wells, values)\n\n with pytest.raises(TypeError):\n ns.Plate(data=data)\n\ndef test_tuple_of_tuples_with_name():\n wells = ('A1', 'A2')\n values = (1, 2)\n\n assert ns.Plate(data=zip(wells, values), value_name='test')._passed\n\n output_df = ns.Plate(data=zip(wells, values), value_name='test').df\n desired_df = pd.DataFrame({\n 'well': ['A1', 'A2'],\n 'row': ['A', 'A'],\n 'column': [1, 2],\n 'test': [1, 2],\n })\n\n assert output_df.equals(desired_df)\n\n\n# DataFrame/dict/path inputs\ndef test_simple_df():\n df = pd.DataFrame({\n 'well': ['A1'],\n 'value': [1],\n })\n\n assert ns.Plate(data=df)._passed\n assert ns.Plate(data=df).value_name == 'value'\n\ndef test_simple_dict():\n data = {\n 'well': ['A1'],\n 'value': [1],\n }\n\n assert ns.Plate(data=data)._passed\n assert ns.Plate(data=data).value_name == 'value'\n\ndef test_df_no_well():\n df = pd.DataFrame({\n 'while': ['A1'],\n 'value': [1],\n })\n\n with pytest.raises(ValueError):\n ns.Plate(data=df)\n\ndef test_df_too_many_well():\n df = pd.DataFrame({\n 'well': ['A1'],\n 'Well': ['A2'],\n 'value': [1],\n })\n\n with pytest.raises(ValueError):\n ns.Plate(data=df)\n\ndef test_df_nonstring_well():\n df = pd.DataFrame({\n 'well': ['A1', 2],\n 'value': [1, 2],\n })\n\n with pytest.raises(TypeError):\n ns.Plate(data=df)\n\ndef test_df_with_value_name():\n df = pd.DataFrame({\n 'well': ['A1'],\n 'RT': [0.4],\n 'area': [1],\n })\n\n assert ns.Plate(data=df, value_name='area')._passed\n\ndef test_df_move_value():\n input_df = pd.DataFrame({\n 'well': ['A1'],\n 'area': [1],\n 'RT': [0.4],\n })\n\n desired_df = pd.DataFrame({\n 'well': ['A1'],\n 'row': ['A'],\n 'column': [1],\n 'RT': [0.4],\n 'area': [1],\n })\n\n output_df = ns.Plate(data=input_df, value_name='area').df\n\n assert output_df.equals(desired_df)\n\ndef test_read_csv():\n\n df = 
pd.DataFrame({\n 'well': ['A'+str(col) for col in range(1, 13)],\n 'test': [1]*12,\n })\n\n temp = ns.Plate(data=df, value_name='test').to_csv('temp.csv')\n\n plate = ns.Plate('temp.csv')\n\n os.remove('temp.csv')\n\ndef test_read_excel():\n\n df = pd.DataFrame({\n 'well': ['A'+str(col) for col in range(1, 13)],\n 'test': [1]*12,\n })\n\n ns.Plate(data=df, value_name='test').to_excel('temp.xlsx')\n\n plate = ns.Plate('temp.xlsx')\n\n os.remove('temp.xlsx')\n\ndef test_contains_row_col():\n \n df = pd.DataFrame({\n 'well': ['A1'],\n 'row': ['A'],\n 'column': [1],\n 'value': [1],\n })\n\n desired_df = pd.DataFrame({\n 'well': ['A1'],\n 'row': ['A'],\n 'column': [1],\n 'value': [1],\n })\n\n output_df = ns.Plate(data=df).df\n\n assert output_df.equals(desired_df)\n\n\ndef test_moves_row_col():\n\n df = pd.DataFrame({\n 'well': ['A1'],\n 'value': [1],\n 'row': ['A'],\n 'column': [1],\n })\n\n desired_df = pd.DataFrame({\n 'well': ['A1'],\n 'row': ['A'],\n 'column': [1],\n 'value': [1],\n })\n\n output_df = ns.Plate(data=df, value_name='value').df\n\n assert output_df.equals(desired_df)\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
venthur/immoscrapy
[ "b472ff5bdaec86118cbc552d6417f3756e0ff7ac" ]
[ "immoscrapy/cli.py" ]
[ "import argparse\nimport logging\n\nimport pandas\n\nfrom immoscrapy.immoscrapy import query\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(name)s %(message)s',\n)\n\n\ndef main(args=None):\n \"\"\"Main entry point of the CLI.\n\n This method parses the CLI arguments and executes the respective commands.\n\n Parameters\n ----------\n args : list[str], optional\n optional parameters, used for testing\n\n \"\"\"\n args = parse_args(args)\n args.func(args)\n\n\ndef parse_args(args=None):\n \"\"\"Parse command line arguments.\n\n Parametes\n ---------\n args : list[str]\n optional parameters, used for testing\n\n Returns\n -------\n argparse.Namespace\n\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Query Immobilienscout24 offers\")\n commands = parser.add_subparsers(dest='command')\n commands.required = True\n\n # shared options for all parsers\n region_parser = argparse.ArgumentParser(add_help=False)\n region_parser.add_argument('-c', '--country', required=True)\n region_parser.add_argument('-r', '--region')\n region_parser.add_argument('-z', '--city')\n\n # shared query options\n query_parser = argparse.ArgumentParser(add_help=False)\n region_parser.add_argument(\n '-p', '--price',\n help=\"price range: min-max, -max or min-\",\n )\n region_parser.add_argument(\n '-o', '--numberofrooms',\n help=\"number of rooms: min-max, -max or min-\",\n )\n region_parser.add_argument(\n '-s', '--livingspace',\n help=\"living space: min-max, -max or min-\",\n )\n region_parser.add_argument(\n '-y', '--constructionyear',\n help=\"construction year: min-max, -max or min-\",\n )\n\n house_buy_parser = commands.add_parser(\n 'rent-apartment',\n description='Query for apartments for rent',\n parents=[region_parser, query_parser],\n )\n house_buy_parser.set_defaults(func=rent_apartment)\n\n house_buy_parser = commands.add_parser(\n 'buy-apartment',\n description='Query for apartments for sale',\n parents=[region_parser, query_parser],\n )\n house_buy_parser.set_defaults(func=buy_apartment)\n\n house_buy_parser = commands.add_parser(\n 'rent-house',\n description='Query for houses for rent',\n parents=[region_parser, query_parser],\n )\n house_buy_parser.set_defaults(func=rent_house)\n\n house_buy_parser = commands.add_parser(\n 'buy-house',\n description='Query for houses for sale',\n parents=[region_parser, query_parser],\n )\n house_buy_parser.set_defaults(func=buy_house)\n\n return parser.parse_args(args)\n\n\ndef rent_apartment(args):\n \"\"\"Query for apartments for rent and pretty print the results.\n\n Parameters\n ----------\n args : argparse.Namespace\n\n \"\"\"\n results = query(\n args.country, args.region, args.city,\n 'APARTMENT_RENT',\n price=args.price,\n livingspace=args.livingspace,\n numberofrooms=args.numberofrooms,\n constructionyear=args.constructionyear,\n )\n pretty_print(results)\n\n\ndef buy_apartment(args):\n \"\"\"Query for apartments to buy and pretty print the results.\n\n Parameters\n ----------\n args : argparse.Namespace\n\n \"\"\"\n results = query(\n args.country, args.region, args.city,\n 'APARTMENT_BUY',\n price=args.price,\n livingspace=args.livingspace,\n numberofrooms=args.numberofrooms,\n constructionyear=args.constructionyear,\n )\n pretty_print(results)\n\n\ndef rent_house(args):\n \"\"\"Query for houses for rent and pretty print the results.\n\n Parameters\n ----------\n args : argparse.Namespace\n\n \"\"\"\n results = query(\n args.country, args.region, args.city,\n 
'HOUSE_RENT',\n price=args.price,\n livingspace=args.livingspace,\n numberofrooms=args.numberofrooms,\n constructionyear=args.constructionyear,\n )\n pretty_print(results)\n\n\ndef buy_house(args):\n \"\"\"Query for houses to buy and pretty print the results.\n\n Parameters\n ----------\n args : argparse.Namespace\n\n \"\"\"\n results = query(\n args.country, args.region, args.city,\n 'HOUSE_BUY',\n price=args.price,\n livingspace=args.livingspace,\n numberofrooms=args.numberofrooms,\n constructionyear=args.constructionyear,\n )\n pretty_print(results)\n\n\ndef pretty_print(results):\n \"\"\"Pretty print the results.\n\n Parameters\n ----------\n results : list\n\n \"\"\"\n if not results:\n print('No results.')\n return\n for result in results:\n result.creation = result.creation.date()\n\n df = pandas.DataFrame(results)\n df.drop('id', axis=1, inplace=True)\n df.drop('city', axis=1, inplace=True)\n df.drop('postcode', axis=1, inplace=True)\n df.drop('private_offer', axis=1, inplace=True)\n df.drop('address', axis=1, inplace=True)\n df.drop('floor_plan', axis=1, inplace=True)\n df.drop('currency', axis=1, inplace=True)\n\n df.rename(columns={\n 'guest_toilet': 'g toilet',\n 'number_of_rooms': 'rooms',\n 'living_space': 'space',\n }, inplace=True)\n print(df)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
munkm/yt
[ "9c92deaa53459762cb35025bdc8b9048a9faac31", "9c92deaa53459762cb35025bdc8b9048a9faac31", "9c92deaa53459762cb35025bdc8b9048a9faac31", "9c92deaa53459762cb35025bdc8b9048a9faac31" ]
[ "yt/frontends/enzo/simulation_handling.py", "yt/data_objects/derived_quantities.py", "yt/frontends/tipsy/data_structures.py", "yt/visualization/fits_image.py" ]
[ "import glob\nimport os\n\nimport numpy as np\nfrom unyt import dimensions, unyt_array\nfrom unyt.unit_registry import UnitRegistry\n\nfrom yt.data_objects.time_series import DatasetSeries, SimulationTimeSeries\nfrom yt.funcs import only_on_root\nfrom yt.loaders import load\nfrom yt.utilities.cosmology import Cosmology\nfrom yt.utilities.exceptions import (\n InvalidSimulationTimeSeries,\n MissingParameter,\n NoStoppingCondition,\n YTUnidentifiedDataType,\n)\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects\n\n\nclass EnzoSimulation(SimulationTimeSeries):\n r\"\"\"\n Initialize an Enzo Simulation object.\n\n Upon creation, the parameter file is parsed and the time and redshift\n are calculated and stored in all_outputs. A time units dictionary is\n instantiated to allow for time outputs to be requested with physical\n time units. The get_time_series can be used to generate a\n DatasetSeries object.\n\n parameter_filename : str\n The simulation parameter file.\n find_outputs : bool\n If True, subdirectories within the GlobalDir directory are\n searched one by one for datasets. Time and redshift\n information are gathered by temporarily instantiating each\n dataset. This can be used when simulation data was created\n in a non-standard way, making it difficult to guess the\n corresponding time and redshift information.\n Default: False.\n\n Examples\n --------\n >>> import yt\n >>> es = yt.load_simulation(\"enzo_tiny_cosmology/32Mpc_32.enzo\", \"Enzo\")\n >>> es.get_time_series()\n >>> for ds in es:\n ... print(ds.current_time)\n\n \"\"\"\n\n def __init__(self, parameter_filename, find_outputs=False):\n self.simulation_type = \"grid\"\n self.key_parameters = [\"stop_cycle\"]\n SimulationTimeSeries.__init__(\n self, parameter_filename, find_outputs=find_outputs\n )\n\n def _set_units(self):\n self.unit_registry = UnitRegistry()\n self.unit_registry.add(\"code_time\", 1.0, dimensions.time)\n self.unit_registry.add(\"code_length\", 1.0, dimensions.length)\n if self.cosmological_simulation:\n # Instantiate EnzoCosmology object for units and time conversions.\n self.cosmology = EnzoCosmology(\n self.parameters[\"CosmologyHubbleConstantNow\"],\n self.parameters[\"CosmologyOmegaMatterNow\"],\n self.parameters[\"CosmologyOmegaLambdaNow\"],\n self.parameters.get(\"CosmologyOmegaRadiationNow\", 0.0),\n 0.0,\n self.parameters[\"CosmologyInitialRedshift\"],\n unit_registry=self.unit_registry,\n )\n\n self.time_unit = self.cosmology.time_unit.in_units(\"s\")\n if \"h\" in self.unit_registry:\n self.unit_registry.modify(\"h\", self.hubble_constant)\n else:\n self.unit_registry.add(\n \"h\", self.hubble_constant, dimensions.dimensionless\n )\n # Comoving lengths\n for my_unit in [\"m\", \"pc\", \"AU\"]:\n new_unit = f\"{my_unit}cm\"\n # technically not true, but should be ok\n self.unit_registry.add(\n new_unit,\n self.unit_registry.lut[my_unit][0],\n dimensions.length,\n \"\\\\rm{%s}/(1+z)\" % my_unit,\n prefixable=True,\n )\n self.length_unit = self.quan(\n self.box_size, \"Mpccm / h\", registry=self.unit_registry\n )\n else:\n self.time_unit = self.quan(self.parameters[\"TimeUnits\"], \"s\")\n self.length_unit = self.quan(self.parameters[\"LengthUnits\"], \"cm\")\n self.box_size = self.length_unit\n self.domain_left_edge = self.domain_left_edge * self.length_unit\n self.domain_right_edge = self.domain_right_edge * self.length_unit\n self.unit_registry.modify(\"code_time\", self.time_unit)\n 
self.unit_registry.modify(\"code_length\", self.length_unit)\n self.unit_registry.add(\n \"unitary\", float(self.box_size.in_base()), self.length_unit.units.dimensions\n )\n\n def get_time_series(\n self,\n time_data=True,\n redshift_data=True,\n initial_time=None,\n final_time=None,\n initial_redshift=None,\n final_redshift=None,\n initial_cycle=None,\n final_cycle=None,\n times=None,\n redshifts=None,\n tolerance=None,\n parallel=True,\n setup_function=None,\n ):\n\n \"\"\"\n Instantiate a DatasetSeries object for a set of outputs.\n\n If no additional keywords given, a DatasetSeries object will be\n created with all potential datasets created by the simulation.\n\n Outputs can be gather by specifying a time or redshift range\n (or combination of time and redshift), with a specific list of\n times or redshifts, a range of cycle numbers (for cycle based\n output), or by simply searching all subdirectories within the\n simulation directory.\n\n time_data : bool\n Whether or not to include time outputs when gathering\n datasets for time series.\n Default: True.\n redshift_data : bool\n Whether or not to include redshift outputs when gathering\n datasets for time series.\n Default: True.\n initial_time : tuple of type (float, str)\n The earliest time for outputs to be included. This should be\n given as the value and the string representation of the units.\n For example, (5.0, \"Gyr\"). If None, the initial time of the\n simulation is used. This can be used in combination with\n either final_time or final_redshift.\n Default: None.\n final_time : tuple of type (float, str)\n The latest time for outputs to be included. This should be\n given as the value and the string representation of the units.\n For example, (13.7, \"Gyr\"). If None, the final time of the\n simulation is used. This can be used in combination with either\n initial_time or initial_redshift.\n Default: None.\n times : tuple of type (float array, str)\n A list of times for which outputs will be found and the units\n of those values. For example, ([0, 1, 2, 3], \"s\").\n Default: None.\n initial_redshift : float\n The earliest redshift for outputs to be included. If None,\n the initial redshift of the simulation is used. This can be\n used in combination with either final_time or\n final_redshift.\n Default: None.\n final_redshift : float\n The latest redshift for outputs to be included. If None,\n the final redshift of the simulation is used. This can be\n used in combination with either initial_time or\n initial_redshift.\n Default: None.\n redshifts : array_like\n A list of redshifts for which outputs will be found.\n Default: None.\n initial_cycle : float\n The earliest cycle for outputs to be included. If None,\n the initial cycle of the simulation is used. This can\n only be used with final_cycle.\n Default: None.\n final_cycle : float\n The latest cycle for outputs to be included. If None,\n the final cycle of the simulation is used. This can\n only be used in combination with initial_cycle.\n Default: None.\n tolerance : float\n Used in combination with \"times\" or \"redshifts\" keywords,\n this is the tolerance within which outputs are accepted\n given the requested times or redshifts. If None, the\n nearest output is always taken.\n Default: None.\n parallel : bool/int\n If True, the generated DatasetSeries will divide the work\n such that a single processor works on each dataset. 
If an\n integer is supplied, the work will be divided into that\n number of jobs.\n Default: True.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n\n Examples\n --------\n\n >>> import yt\n >>> es = yt.load_simulation(\"enzo_tiny_cosmology/32Mpc_32.enzo\", \"Enzo\")\n >>> es.get_time_series(initial_redshift=10, final_time=(13.7, \"Gyr\"),\n redshift_data=False)\n >>> for ds in es:\n ... print(ds.current_time)\n >>> es.get_time_series(redshifts=[3, 2, 1, 0])\n >>> for ds in es:\n ... print(ds.current_time)\n\n \"\"\"\n\n if (\n initial_redshift is not None or final_redshift is not None\n ) and not self.cosmological_simulation:\n raise InvalidSimulationTimeSeries(\n \"An initial or final redshift has been given for a \"\n + \"noncosmological simulation.\"\n )\n\n if time_data and redshift_data:\n my_all_outputs = self.all_outputs\n elif time_data:\n my_all_outputs = self.all_time_outputs\n elif redshift_data:\n my_all_outputs = self.all_redshift_outputs\n else:\n raise InvalidSimulationTimeSeries(\n \"Both time_data and redshift_data are False.\"\n )\n\n if not my_all_outputs:\n DatasetSeries.__init__(self, outputs=[], parallel=parallel)\n mylog.info(\"0 outputs loaded into time series.\")\n return\n\n # Apply selection criteria to the set.\n if times is not None:\n my_outputs = self._get_outputs_by_key(\n \"time\", times, tolerance=tolerance, outputs=my_all_outputs\n )\n\n elif redshifts is not None:\n my_outputs = self._get_outputs_by_key(\n \"redshift\", redshifts, tolerance=tolerance, outputs=my_all_outputs\n )\n\n elif initial_cycle is not None or final_cycle is not None:\n if initial_cycle is None:\n initial_cycle = 0\n else:\n initial_cycle = max(initial_cycle, 0)\n if final_cycle is None:\n final_cycle = self.parameters[\"StopCycle\"]\n else:\n final_cycle = min(final_cycle, self.parameters[\"StopCycle\"])\n\n my_outputs = my_all_outputs[\n int(\n np.ceil(float(initial_cycle) / self.parameters[\"CycleSkipDataDump\"])\n ) : (final_cycle / self.parameters[\"CycleSkipDataDump\"])\n + 1\n ]\n\n else:\n if initial_time is not None:\n if isinstance(initial_time, float):\n my_initial_time = self.quan(initial_time, \"code_time\")\n elif isinstance(initial_time, tuple) and len(initial_time) == 2:\n my_initial_time = self.quan(*initial_time)\n elif not isinstance(initial_time, unyt_array):\n raise RuntimeError(\n \"Error: initial_time must be given as a float or \"\n + \"tuple of (value, units).\"\n )\n elif initial_redshift is not None:\n my_initial_time = self.cosmology.t_from_z(initial_redshift)\n else:\n my_initial_time = self.initial_time\n\n if final_time is not None:\n if isinstance(final_time, float):\n my_final_time = self.quan(final_time, \"code_time\")\n elif isinstance(final_time, tuple) and len(final_time) == 2:\n my_final_time = self.quan(*final_time)\n elif not isinstance(final_time, unyt_array):\n raise RuntimeError(\n \"Error: final_time must be given as a float or \"\n + \"tuple of (value, units).\"\n )\n elif final_redshift is not None:\n my_final_time = self.cosmology.t_from_z(final_redshift)\n else:\n my_final_time = self.final_time\n\n my_initial_time.convert_to_units(\"s\")\n my_final_time.convert_to_units(\"s\")\n my_times = np.array([a[\"time\"] for a in my_all_outputs])\n my_indices = np.digitize([my_initial_time, my_final_time], my_times)\n if my_initial_time == my_times[my_indices[0] - 1]:\n my_indices[0] -= 1\n my_outputs = my_all_outputs[my_indices[0] : my_indices[1]]\n\n init_outputs = []\n for output in 
my_outputs:\n if os.path.exists(output[\"filename\"]):\n init_outputs.append(output[\"filename\"])\n\n DatasetSeries.__init__(\n self, outputs=init_outputs, parallel=parallel, setup_function=setup_function\n )\n mylog.info(\"%d outputs loaded into time series.\", len(init_outputs))\n\n def _parse_parameter_file(self):\n \"\"\"\n Parses the parameter file and establishes the various\n dictionaries.\n \"\"\"\n\n self.conversion_factors = {}\n redshift_outputs = []\n\n # Let's read the file\n lines = open(self.parameter_filename).readlines()\n for line in (l.strip() for l in lines):\n if \"#\" in line:\n line = line[0 : line.find(\"#\")]\n if \"//\" in line:\n line = line[0 : line.find(\"//\")]\n if len(line) < 2:\n continue\n param, vals = (i.strip() for i in line.split(\"=\", 1))\n # First we try to decipher what type of value it is.\n vals = vals.split()\n # Special case approaching.\n if \"(do\" in vals:\n vals = vals[:1]\n if len(vals) == 0:\n pcast = str # Assume NULL output\n else:\n v = vals[0]\n # Figure out if it's castable to floating point:\n try:\n float(v)\n except ValueError:\n pcast = str\n else:\n if any(\".\" in v or \"e\" in v for v in vals):\n pcast = float\n elif v == \"inf\":\n pcast = str\n else:\n pcast = int\n # Now we figure out what to do with it.\n if param.endswith(\"Units\") and not param.startswith(\"Temperature\"):\n dataType = param[:-5]\n # This one better be a float.\n self.conversion_factors[dataType] = float(vals[0])\n if param.startswith(\"CosmologyOutputRedshift[\"):\n index = param[param.find(\"[\") + 1 : param.find(\"]\")]\n redshift_outputs.append(\n {\"index\": int(index), \"redshift\": float(vals[0])}\n )\n elif len(vals) == 0:\n vals = \"\"\n elif len(vals) == 1:\n vals = pcast(vals[0])\n else:\n vals = np.array([pcast(i) for i in vals if i != \"-99999\"])\n self.parameters[param] = vals\n self.refine_by = self.parameters[\"RefineBy\"]\n self.dimensionality = self.parameters[\"TopGridRank\"]\n if self.dimensionality > 1:\n self.domain_dimensions = self.parameters[\"TopGridDimensions\"]\n if len(self.domain_dimensions) < 3:\n tmp = self.domain_dimensions.tolist()\n tmp.append(1)\n self.domain_dimensions = np.array(tmp)\n self.domain_left_edge = np.array(\n self.parameters[\"DomainLeftEdge\"], \"float64\"\n ).copy()\n self.domain_right_edge = np.array(\n self.parameters[\"DomainRightEdge\"], \"float64\"\n ).copy()\n else:\n self.domain_left_edge = np.array(\n self.parameters[\"DomainLeftEdge\"], \"float64\"\n )\n self.domain_right_edge = np.array(\n self.parameters[\"DomainRightEdge\"], \"float64\"\n )\n self.domain_dimensions = np.array(\n [self.parameters[\"TopGridDimensions\"], 1, 1]\n )\n\n if self.parameters[\"ComovingCoordinates\"]:\n cosmo_attr = {\n \"box_size\": \"CosmologyComovingBoxSize\",\n \"omega_lambda\": \"CosmologyOmegaLambdaNow\",\n \"omega_matter\": \"CosmologyOmegaMatterNow\",\n \"omega_radiation\": \"CosmologyOmegaRadiationNow\",\n \"hubble_constant\": \"CosmologyHubbleConstantNow\",\n \"initial_redshift\": \"CosmologyInitialRedshift\",\n \"final_redshift\": \"CosmologyFinalRedshift\",\n }\n self.cosmological_simulation = 1\n for a, v in cosmo_attr.items():\n if v not in self.parameters:\n raise MissingParameter(self.parameter_filename, v)\n setattr(self, a, self.parameters[v])\n else:\n self.cosmological_simulation = 0\n self.omega_lambda = self.omega_matter = self.hubble_constant = 0.0\n\n # make list of redshift outputs\n self.all_redshift_outputs = []\n if not self.cosmological_simulation:\n return\n for output in 
redshift_outputs:\n output[\"filename\"] = os.path.join(\n self.parameters[\"GlobalDir\"],\n \"%s%04d\" % (self.parameters[\"RedshiftDumpDir\"], output[\"index\"]),\n \"%s%04d\" % (self.parameters[\"RedshiftDumpName\"], output[\"index\"]),\n )\n del output[\"index\"]\n self.all_redshift_outputs = redshift_outputs\n\n def _calculate_time_outputs(self):\n \"\"\"\n Calculate time outputs and their redshifts if cosmological.\n \"\"\"\n\n self.all_time_outputs = []\n if (\n self.final_time is None\n or \"dtDataDump\" not in self.parameters\n or self.parameters[\"dtDataDump\"] <= 0.0\n ):\n return []\n\n index = 0\n current_time = self.initial_time.copy()\n dt_datadump = self.quan(self.parameters[\"dtDataDump\"], \"code_time\")\n while current_time <= self.final_time + dt_datadump:\n filename = os.path.join(\n self.parameters[\"GlobalDir\"],\n \"%s%04d\" % (self.parameters[\"DataDumpDir\"], index),\n \"%s%04d\" % (self.parameters[\"DataDumpName\"], index),\n )\n\n output = {\"index\": index, \"filename\": filename, \"time\": current_time.copy()}\n output[\"time\"] = min(output[\"time\"], self.final_time)\n if self.cosmological_simulation:\n output[\"redshift\"] = self.cosmology.z_from_t(current_time)\n\n self.all_time_outputs.append(output)\n if np.abs(self.final_time - current_time) / self.final_time < 1e-4:\n break\n current_time += dt_datadump\n index += 1\n\n def _calculate_cycle_outputs(self):\n \"\"\"\n Calculate cycle outputs.\n \"\"\"\n\n mylog.warning(\"Calculating cycle outputs. Dataset times will be unavailable.\")\n\n if (\n self.stop_cycle is None\n or \"CycleSkipDataDump\" not in self.parameters\n or self.parameters[\"CycleSkipDataDump\"] <= 0.0\n ):\n return []\n\n self.all_time_outputs = []\n index = 0\n for cycle in range(\n 0, self.stop_cycle + 1, self.parameters[\"CycleSkipDataDump\"]\n ):\n filename = os.path.join(\n self.parameters[\"GlobalDir\"],\n \"%s%04d\" % (self.parameters[\"DataDumpDir\"], index),\n \"%s%04d\" % (self.parameters[\"DataDumpName\"], index),\n )\n\n output = {\"index\": index, \"filename\": filename, \"cycle\": cycle}\n self.all_time_outputs.append(output)\n index += 1\n\n def _get_all_outputs(self, find_outputs=False):\n \"\"\"\n Get all potential datasets and combine into a time-sorted list.\n \"\"\"\n\n # Create the set of outputs from which further selection will be done.\n if find_outputs:\n self._find_outputs()\n\n elif (\n self.parameters[\"dtDataDump\"] > 0\n and self.parameters[\"CycleSkipDataDump\"] > 0\n ):\n mylog.info(\n \"Simulation %s has both dtDataDump and CycleSkipDataDump set.\",\n self.parameter_filename,\n )\n mylog.info(\n \" Unable to calculate datasets. 
\"\n \"Attempting to search in the current directory\"\n )\n self._find_outputs()\n\n else:\n # Get all time or cycle outputs.\n if self.parameters[\"CycleSkipDataDump\"] > 0:\n self._calculate_cycle_outputs()\n else:\n self._calculate_time_outputs()\n\n # Calculate times for redshift outputs.\n if self.cosmological_simulation:\n for output in self.all_redshift_outputs:\n output[\"time\"] = self.cosmology.t_from_z(output[\"redshift\"])\n self.all_redshift_outputs.sort(key=lambda obj: obj[\"time\"])\n\n self.all_outputs = self.all_time_outputs + self.all_redshift_outputs\n if self.parameters[\"CycleSkipDataDump\"] <= 0:\n self.all_outputs.sort(key=lambda obj: obj[\"time\"].to_ndarray())\n\n def _calculate_simulation_bounds(self):\n \"\"\"\n Figure out the starting and stopping time and redshift for the simulation.\n \"\"\"\n\n if \"StopCycle\" in self.parameters:\n self.stop_cycle = self.parameters[\"StopCycle\"]\n\n # Convert initial/final redshifts to times.\n if self.cosmological_simulation:\n self.initial_time = self.cosmology.t_from_z(self.initial_redshift)\n self.initial_time.units.registry = self.unit_registry\n self.final_time = self.cosmology.t_from_z(self.final_redshift)\n self.final_time.units.registry = self.unit_registry\n\n # If not a cosmology simulation, figure out the stopping criteria.\n else:\n if \"InitialTime\" in self.parameters:\n self.initial_time = self.quan(\n self.parameters[\"InitialTime\"], \"code_time\"\n )\n else:\n self.initial_time = self.quan(0.0, \"code_time\")\n\n if \"StopTime\" in self.parameters:\n self.final_time = self.quan(self.parameters[\"StopTime\"], \"code_time\")\n else:\n self.final_time = None\n if not (\"StopTime\" in self.parameters or \"StopCycle\" in self.parameters):\n raise NoStoppingCondition(self.parameter_filename)\n if self.final_time is None:\n mylog.warning(\n \"Simulation %s has no stop time set, stopping condition \"\n \"will be based only on cycles.\",\n self.parameter_filename,\n )\n\n def _set_parameter_defaults(self):\n \"\"\"\n Set some default parameters to avoid problems\n if they are not in the parameter file.\n \"\"\"\n\n self.parameters[\"GlobalDir\"] = self.directory\n self.parameters[\"DataDumpName\"] = \"data\"\n self.parameters[\"DataDumpDir\"] = \"DD\"\n self.parameters[\"RedshiftDumpName\"] = \"RedshiftOutput\"\n self.parameters[\"RedshiftDumpDir\"] = \"RD\"\n self.parameters[\"ComovingCoordinates\"] = 0\n self.parameters[\"TopGridRank\"] = 3\n self.parameters[\"DomainLeftEdge\"] = np.zeros(self.parameters[\"TopGridRank\"])\n self.parameters[\"DomainRightEdge\"] = np.ones(self.parameters[\"TopGridRank\"])\n self.parameters[\"RefineBy\"] = 2 # technically not the enzo default\n self.parameters[\"StopCycle\"] = 100000\n self.parameters[\"dtDataDump\"] = 0.0\n self.parameters[\"CycleSkipDataDump\"] = 0.0\n self.parameters[\"LengthUnits\"] = 1.0\n self.parameters[\"TimeUnits\"] = 1.0\n self.parameters[\"CosmologyOmegaRadiationNow\"] = 0.0\n\n def _find_outputs(self):\n \"\"\"\n Search for directories matching the data dump keywords.\n If found, get dataset times py opening the ds.\n \"\"\"\n\n # look for time outputs.\n potential_time_outputs = glob.glob(\n os.path.join(\n self.parameters[\"GlobalDir\"], f\"{self.parameters['DataDumpDir']}*\"\n )\n )\n self.all_time_outputs = self._check_for_outputs(potential_time_outputs)\n self.all_time_outputs.sort(key=lambda obj: obj[\"time\"])\n\n # look for redshift outputs.\n potential_redshift_outputs = glob.glob(\n os.path.join(\n self.parameters[\"GlobalDir\"], 
f\"{self.parameters['RedshiftDumpDir']}*\"\n )\n )\n self.all_redshift_outputs = self._check_for_outputs(potential_redshift_outputs)\n self.all_redshift_outputs.sort(key=lambda obj: obj[\"time\"])\n\n self.all_outputs = self.all_time_outputs + self.all_redshift_outputs\n self.all_outputs.sort(key=lambda obj: obj[\"time\"])\n only_on_root(mylog.info, \"Located %d total outputs.\", len(self.all_outputs))\n\n # manually set final time and redshift with last output\n if self.all_outputs:\n self.final_time = self.all_outputs[-1][\"time\"]\n if self.cosmological_simulation:\n self.final_redshift = self.all_outputs[-1][\"redshift\"]\n\n def _check_for_outputs(self, potential_outputs):\n \"\"\"\n Check a list of files to see if they are valid datasets.\n \"\"\"\n\n only_on_root(\n mylog.info, \"Checking %d potential outputs.\", len(potential_outputs)\n )\n\n my_outputs = {}\n llevel = mylog.level\n # suppress logging as we load every dataset, unless set to debug\n if llevel > 10 and llevel < 40:\n mylog.setLevel(40)\n for my_storage, output in parallel_objects(\n potential_outputs, storage=my_outputs\n ):\n if self.parameters[\"DataDumpDir\"] in output:\n dir_key = self.parameters[\"DataDumpDir\"]\n output_key = self.parameters[\"DataDumpName\"]\n else:\n dir_key = self.parameters[\"RedshiftDumpDir\"]\n output_key = self.parameters[\"RedshiftDumpName\"]\n index = output[output.find(dir_key) + len(dir_key) :]\n filename = os.path.join(\n self.parameters[\"GlobalDir\"],\n f\"{dir_key}{index}\",\n f\"{output_key}{index}\",\n )\n try:\n ds = load(filename)\n except (FileNotFoundError, YTUnidentifiedDataType):\n mylog.error(\"Failed to load %s\", filename)\n continue\n my_storage.result = {\n \"filename\": filename,\n \"time\": ds.current_time.in_units(\"s\"),\n }\n if ds.cosmological_simulation:\n my_storage.result[\"redshift\"] = ds.current_redshift\n mylog.setLevel(llevel)\n my_outputs = [\n my_output for my_output in my_outputs.values() if my_output is not None\n ]\n\n return my_outputs\n\n def _write_cosmology_outputs(self, filename, outputs, start_index, decimals=3):\n \"\"\"\n Write cosmology output parameters for a cosmology splice.\n \"\"\"\n\n mylog.info(\"Writing redshift output list to %s.\", filename)\n f = open(filename, \"w\")\n for q, output in enumerate(outputs):\n f.write(\n (f\"CosmologyOutputRedshift[%d] = %.{decimals}f\\n\")\n % ((q + start_index), output[\"redshift\"])\n )\n f.close()\n\n\nclass EnzoCosmology(Cosmology):\n def __init__(\n self,\n hubble_constant,\n omega_matter,\n omega_lambda,\n omega_radiation,\n omega_curvature,\n initial_redshift,\n unit_registry=None,\n ):\n Cosmology.__init__(\n self,\n hubble_constant=hubble_constant,\n omega_matter=omega_matter,\n omega_lambda=omega_lambda,\n omega_radiation=omega_radiation,\n omega_curvature=omega_curvature,\n unit_registry=unit_registry,\n )\n self.initial_redshift = initial_redshift\n self.initial_time = self.t_from_z(self.initial_redshift)\n # time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),\n # rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)\n self.time_unit = (\n (\n 1.5\n * self.omega_matter\n * self.hubble_constant ** 2\n * (1 + self.initial_redshift) ** 3\n )\n ** -0.5\n ).in_units(\"s\")\n self.time_unit.units.registry = self.unit_registry\n", "import numpy as np\n\nfrom yt._maintenance.deprecation import issue_deprecation_warning\nfrom yt.funcs import camelcase_to_underscore, iter_fields\nfrom yt.units.yt_array import array_like_field\nfrom yt.utilities.exceptions import YTParticleTypeNotFound\nfrom 
yt.utilities.object_registries import derived_quantity_registry\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import (\n ParallelAnalysisInterface,\n parallel_objects,\n)\nfrom yt.utilities.physical_constants import gravitational_constant_cgs\nfrom yt.utilities.physical_ratios import HUGE\n\n\ndef get_position_fields(field, data):\n axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]]\n field = data._determine_fields(field)[0]\n finfo = data.ds.field_info[field]\n if finfo.sampling_type == \"particle\":\n if finfo.alias_field:\n ftype = finfo.alias_name[0]\n else:\n ftype = finfo.name[0]\n position_fields = [(ftype, f\"particle_position_{d}\") for d in axis_names]\n else:\n position_fields = [(\"index\", ax_name) for ax_name in axis_names]\n\n return position_fields\n\n\nclass DerivedQuantity(ParallelAnalysisInterface):\n num_vals = -1\n\n def __init__(self, data_source):\n self.data_source = data_source\n\n def __init_subclass__(cls, *args, **kwargs):\n super().__init_subclass__(*args, **kwargs)\n if cls.__name__ != \"DerivedQuantity\":\n derived_quantity_registry[cls.__name__] = cls\n\n def count_values(self, *args, **kwargs):\n return\n\n def __call__(self, *args, **kwargs):\n \"\"\"Calculate results for the derived quantity\"\"\"\n # create the index if it doesn't exist yet\n self.data_source.ds.index\n self.count_values(*args, **kwargs)\n chunks = self.data_source.chunks(\n [], chunking_style=self.data_source._derived_quantity_chunking\n )\n storage = {}\n for sto, ds in parallel_objects(chunks, -1, storage=storage):\n sto.result = self.process_chunk(ds, *args, **kwargs)\n # Now storage will have everything, and will be done via pickling, so\n # the units will be preserved. (Credit to Nathan for this\n # idea/implementation.)\n values = [[] for i in range(self.num_vals)]\n for key in sorted(storage):\n for i in range(self.num_vals):\n values[i].append(storage[key][i])\n # These will be YTArrays\n values = [self.data_source.ds.arr(values[i]) for i in range(self.num_vals)]\n values = self.reduce_intermediate(values)\n return values\n\n def process_chunk(self, data, *args, **kwargs):\n raise NotImplementedError\n\n def reduce_intermediate(self, values):\n raise NotImplementedError\n\n\nclass DerivedQuantityCollection:\n def __new__(cls, data_source, *args, **kwargs):\n inst = object.__new__(cls)\n inst.data_source = data_source\n for f in inst.keys():\n setattr(inst, camelcase_to_underscore(f), inst[f])\n return inst\n\n def __getitem__(self, key):\n dq = derived_quantity_registry[key]\n # Instantiate here, so we can pass it the data object\n # Note that this means we instantiate every time we run help, etc\n # I have made my peace with this.\n return dq(self.data_source)\n\n def keys(self):\n return derived_quantity_registry.keys()\n\n\nclass WeightedAverageQuantity(DerivedQuantity):\n r\"\"\"\n Calculates the weight average of a field or fields.\n\n Returns a YTQuantity for each field requested; if one,\n it returns a single YTQuantity, if many, it returns a list of YTQuantities\n in order of the listed fields.\n\n Where f is the field and w is the weight, the weighted average is\n Sum_i(f_i \\* w_i) / Sum_i(w_i).\n\n Parameters\n ----------\n\n fields : string / tuple, or list of strings / tuples\n The field or fields of which the average value is to be calculated.\n weight : string or tuple\n The weight field.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> 
print(ad.quantities.weighted_average_quantity([(\"gas\", \"density\"),\n ... (\"gas\", \"temperature\")],\n ... (\"gas\", \"mass\")))\n\n \"\"\"\n\n def count_values(self, fields, weight):\n # This is a list now\n self.num_vals = len(fields) + 1\n\n def __call__(self, fields, weight):\n fields = list(iter_fields(fields))\n rv = super().__call__(fields, weight)\n if len(rv) == 1:\n rv = rv[0]\n return rv\n\n def process_chunk(self, data, fields, weight):\n vals = [(data[field] * data[weight]).sum(dtype=np.float64) for field in fields]\n wv = data[weight].sum(dtype=np.float64)\n return vals + [wv]\n\n def reduce_intermediate(self, values):\n w = values.pop(-1).sum(dtype=np.float64)\n return [v.sum(dtype=np.float64) / w for v in values]\n\n\nclass TotalQuantity(DerivedQuantity):\n r\"\"\"\n Calculates the sum of the field or fields.\n\n Parameters\n ----------\n fields\n The field or list of fields to be summed.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.total_quantity([(\"gas\", \"mass\")]))\n\n \"\"\"\n\n def count_values(self, fields):\n # This is a list now\n self.num_vals = len(fields)\n\n def __call__(self, fields):\n fields = list(iter_fields(fields))\n rv = super().__call__(fields)\n if len(rv) == 1:\n rv = rv[0]\n return rv\n\n def process_chunk(self, data, fields):\n vals = [data[field].sum(dtype=np.float64) for field in fields]\n return vals\n\n def reduce_intermediate(self, values):\n return [v.sum(dtype=np.float64) for v in values]\n\n\nclass TotalMass(TotalQuantity):\n r\"\"\"\n Calculates the total mass of the object. Returns a YTArray where the\n first element is total gas mass and the second element is total particle\n mass.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.total_mass())\n\n \"\"\"\n\n def __call__(self):\n self.data_source.ds.index\n fi = self.data_source.ds.field_info\n if (\"gas\", \"mass\") in fi:\n gas = super().__call__([(\"gas\", \"mass\")])\n else:\n gas = self.data_source.ds.quan(0.0, \"g\")\n if (\"nbody\", \"particle_mass\") in fi:\n part = super().__call__([(\"nbody\", \"particle_mass\")])\n else:\n part = self.data_source.ds.quan(0.0, \"g\")\n return self.data_source.ds.arr([gas, part])\n\n\nclass CenterOfMass(DerivedQuantity):\n r\"\"\"\n Calculates the center of mass, using gas and/or particles.\n\n The center of mass is the mass-weighted mean position.\n\n Parameters\n ----------\n use_gas : bool\n Flag to include gas in the calculation. Gas is ignored if not\n present.\n Default: True\n use_particles : bool\n Flag to include particles in the calculation. Particles are ignored\n if not present.\n Default: False\n particle_type: string\n Flag to specify the field type of the particles to use. 
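# The chunk/reduce pattern above computes Sum_i(f_i * w_i) / Sum_i(w_i) from
# per-chunk partial sums. A plain-NumPy sketch of the same reduction, with
# made-up arrays standing in for the data chunks:
import numpy as np

chunks = [
    (np.array([1.0, 2.0, 3.0]), np.array([1.0, 1.0, 2.0])),  # (field, weight)
    (np.array([4.0, 5.0]),      np.array([0.5, 0.5])),
]

# per-chunk partials, as in process_chunk: [Sum(f*w), Sum(w)]
partials = [((f * w).sum(dtype=np.float64), w.sum(dtype=np.float64))
            for f, w in chunks]

# combination, as in reduce_intermediate
fw_tot = sum(p[0] for p in partials)
w_tot = sum(p[1] for p in partials)
weighted_mean = fw_tot / w_tot

f_all = np.concatenate([f for f, _ in chunks])
w_all = np.concatenate([w for _, w in chunks])
assert np.isclose(weighted_mean, np.average(f_all, weights=w_all))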
Useful for\n particle-based codes where you don't want to use all of the particles\n in your calculation.\n Default: 'all'\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.center_of_mass())\n\n \"\"\"\n\n def count_values(self, use_gas=True, use_particles=False, particle_type=\"nbody\"):\n finfo = self.data_source.ds.field_info\n includes_gas = (\"gas\", \"mass\") in finfo\n includes_particles = (particle_type, \"particle_mass\") in finfo\n\n self.use_gas = use_gas & includes_gas\n self.use_particles = use_particles & includes_particles\n\n self.num_vals = 0\n if self.use_gas:\n self.num_vals += 4\n if self.use_particles:\n self.num_vals += 4\n\n def process_chunk(\n self, data, use_gas=True, use_particles=False, particle_type=\"nbody\"\n ):\n vals = []\n if self.use_gas:\n vals += [\n (data[\"gas\", ax] * data[\"gas\", \"mass\"]).sum(dtype=np.float64)\n for ax in \"xyz\"\n ]\n vals.append(data[\"gas\", \"mass\"].sum(dtype=np.float64))\n if self.use_particles:\n vals += [\n (\n data[particle_type, f\"particle_position_{ax}\"]\n * data[particle_type, \"particle_mass\"]\n ).sum(dtype=np.float64)\n for ax in \"xyz\"\n ]\n vals.append(data[particle_type, \"particle_mass\"].sum(dtype=np.float64))\n return vals\n\n def reduce_intermediate(self, values):\n if len(values) not in (4, 8):\n raise RuntimeError\n x = values.pop(0).sum(dtype=np.float64)\n y = values.pop(0).sum(dtype=np.float64)\n z = values.pop(0).sum(dtype=np.float64)\n w = values.pop(0).sum(dtype=np.float64)\n if len(values) > 0:\n # Note that this could be shorter if we pre-initialized our x,y,z,w\n # values as YTQuantity objects.\n x += values.pop(0).sum(dtype=np.float64)\n y += values.pop(0).sum(dtype=np.float64)\n z += values.pop(0).sum(dtype=np.float64)\n w += values.pop(0).sum(dtype=np.float64)\n return self.data_source.ds.arr([v / w for v in [x, y, z]])\n\n\nclass BulkVelocity(DerivedQuantity):\n r\"\"\"\n Calculates the bulk velocity, using gas and/or particles.\n\n The bulk velocity is the mass-weighted mean velocity.\n\n Parameters\n ----------\n use_gas : bool\n Flag to include gas in the calculation. Gas is ignored if not\n present.\n Default: True\n use_particles : bool\n Flag to include particles in the calculation. Particles are ignored\n if not present.\n Default: True\n particle_type: string\n Flag to specify the field type of the particles to use. 
Useful for\n particle-based codes where you don't want to use all of the particles\n in your calculation.\n Default: 'all'\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.bulk_velocity())\n\n \"\"\"\n\n def count_values(self, use_gas=True, use_particles=False, particle_type=\"nbody\"):\n if use_particles and particle_type not in self.data_source.ds.particle_types:\n raise YTParticleTypeNotFound(particle_type, self.data_source.ds)\n # This is a list now\n self.num_vals = 0\n if use_gas:\n self.num_vals += 4\n if use_particles and \"nbody\" in self.data_source.ds.particle_types:\n self.num_vals += 4\n\n def process_chunk(\n self, data, use_gas=True, use_particles=False, particle_type=\"nbody\"\n ):\n vals = []\n if use_gas:\n vals += [\n (data[\"gas\", f\"velocity_{ax}\"] * data[\"gas\", \"mass\"]).sum(\n dtype=np.float64\n )\n for ax in \"xyz\"\n ]\n vals.append(data[\"gas\", \"mass\"].sum(dtype=np.float64))\n if use_particles and \"nbody\" in data.ds.particle_types:\n vals += [\n (\n data[particle_type, f\"particle_velocity_{ax}\"]\n * data[particle_type, \"particle_mass\"]\n ).sum(dtype=np.float64)\n for ax in \"xyz\"\n ]\n vals.append(data[particle_type, \"particle_mass\"].sum(dtype=np.float64))\n return vals\n\n def reduce_intermediate(self, values):\n if len(values) not in (4, 8):\n raise RuntimeError\n x = values.pop(0).sum(dtype=np.float64)\n y = values.pop(0).sum(dtype=np.float64)\n z = values.pop(0).sum(dtype=np.float64)\n w = values.pop(0).sum(dtype=np.float64)\n if len(values) > 0:\n # Note that this could be shorter if we pre-initialized our x,y,z,w\n # values as YTQuantity objects.\n x += values.pop(0).sum(dtype=np.float64)\n y += values.pop(0).sum(dtype=np.float64)\n z += values.pop(0).sum(dtype=np.float64)\n w += values.pop(0).sum(dtype=np.float64)\n return self.data_source.ds.arr([v / w for v in [x, y, z]])\n\n\nclass WeightedStandardDeviation(DerivedQuantity):\n r\"\"\"\n Calculates the weighted standard deviation and weighted mean for a field\n or list of fields. Returns a YTArray for each field requested; if one,\n it returns a single YTArray, if many, it returns a list of YTArrays\n in order of the listed fields. The first element of each YTArray is\n the weighted standard deviation, and the second element is the weighted mean.\n\n Where f is the field, w is the weight, and <f_w> is the weighted mean,\n the weighted standard deviation is\n sqrt( Sum_i( (f_i - <f_w>)^2 \\* w_i ) / Sum_i(w_i) ).\n\n Parameters\n ----------\n\n fields : string / tuple, or list of strings / tuples\n The field or fields of which the average value is to be calculated.\n weight : string or tuple\n The weight field.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.weighted_standard_deviation([(\"gas\", \"density\"),\n ... (\"gas\", \"temperature\")],\n ... 
(\"gas\", \"mass\")))\n\n \"\"\"\n\n def count_values(self, fields, weight):\n # This is a list now\n self.num_vals = 2 * len(fields) + 1\n\n def __call__(self, fields, weight):\n fields = list(iter_fields(fields))\n units = [self.data_source.ds._get_field_info(field).units for field in fields]\n rv = super().__call__(fields, weight)\n rv = [self.data_source.ds.arr(v, u) for v, u in zip(rv, units)]\n if len(rv) == 1:\n rv = rv[0]\n return rv\n\n def process_chunk(self, data, fields, weight):\n my_weight = data[weight].d.sum(dtype=np.float64)\n if my_weight == 0:\n return [0.0 for field in fields] + [0.0 for field in fields] + [0.0]\n my_means = [\n (data[field].d * data[weight].d).sum(dtype=np.float64) / my_weight\n for field in fields\n ]\n my_var2s = [\n (data[weight].d * (data[field].d - my_mean) ** 2).sum(dtype=np.float64)\n / my_weight\n for field, my_mean in zip(fields, my_means)\n ]\n return my_means + my_var2s + [my_weight]\n\n def reduce_intermediate(self, values):\n my_weight = values.pop(-1)\n all_weight = my_weight.sum(dtype=np.float64)\n rvals = []\n for i in range(int(len(values) / 2)):\n my_mean = values[i]\n my_var2 = values[i + int(len(values) / 2)]\n all_mean = (my_weight * my_mean).sum(dtype=np.float64) / all_weight\n ret = [\n (\n np.sqrt(\n (my_weight * (my_var2 + (my_mean - all_mean) ** 2)).sum(\n dtype=np.float64\n )\n / all_weight\n )\n ),\n all_mean,\n ]\n rvals.append(np.array(ret))\n return rvals\n\n\nclass WeightedVariance(WeightedStandardDeviation):\n def __call__(self, fields, weight):\n issue_deprecation_warning(\n \"'weighted_variance' incorrectly returns the \"\n \"standard deviation and has been deprecated. \"\n \"Use 'weighted_standard_deviation' instead.\",\n since=\"4.0.0\",\n removal=\"4.1.0\",\n )\n return super().__call__(fields, weight)\n\n\nclass AngularMomentumVector(DerivedQuantity):\n r\"\"\"\n Calculates the angular momentum vector, using gas (grid-based) and/or particles.\n\n The angular momentum vector is the mass-weighted mean specific angular momentum.\n Returns a YTArray of the vector.\n\n Parameters\n ----------\n use_gas : bool\n Flag to include grid-based gas in the calculation. Gas is ignored if not\n present.\n Default: True\n use_particles : bool\n Flag to include particles in the calculation. Particles are ignored\n if not present.\n Default: True\n particle_type: string\n Flag to specify the field type of the particles to use. 
Useful for\n particle-based codes where you don't want to use all of the particles\n in your calculation.\n Default: 'all'\n\n Examples\n --------\n\n # Find angular momentum vector of galaxy in grid-based isolated galaxy dataset\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.angular_momentum_vector())\n\n # Find angular momentum vector of gas disk in particle-based dataset\n >>> ds = load(\"FIRE_M12i_ref11/snapshot_600.hdf5\")\n >>> _, c = ds.find_max(('gas', 'density'))\n >>> sp = ds.sphere(c, (10, 'kpc'))\n >>> search_args = dict(use_gas=False, use_particles=True, particle_type='PartType0')\n >>> print(sp.quantities.angular_momentum_vector(**search_args))\n\n \"\"\"\n\n def count_values(self, use_gas=True, use_particles=True, particle_type=\"all\"):\n if use_particles and particle_type not in self.data_source.ds.particle_types:\n raise YTParticleTypeNotFound(particle_type, self.data_source.ds)\n num_vals = 0\n # create the index if it doesn't exist yet\n self.data_source.ds.index\n self.particle_type = particle_type\n self.use_gas = use_gas & ((\"gas\", \"mass\") in self.data_source.ds.field_info)\n self.use_particles = use_particles & (\n (self.particle_type, \"particle_mass\") in self.data_source.ds.field_info\n )\n if self.use_gas:\n num_vals += 4\n if self.use_particles:\n num_vals += 4\n self.num_vals = num_vals\n\n def process_chunk(\n self, data, use_gas=True, use_particles=False, particle_type=\"all\"\n ):\n rvals = []\n if self.use_gas:\n rvals.extend(\n [\n (\n data[\"gas\", f\"specific_angular_momentum_{axis}\"]\n * data[\"gas\", \"mass\"]\n ).sum(dtype=np.float64)\n for axis in \"xyz\"\n ]\n )\n rvals.append(data[\"gas\", \"mass\"].sum(dtype=np.float64))\n if self.use_particles:\n rvals.extend(\n [\n (\n data[\n self.particle_type,\n f\"particle_specific_angular_momentum_{axis}\",\n ]\n * data[self.particle_type, \"particle_mass\"]\n ).sum(dtype=np.float64)\n for axis in \"xyz\"\n ]\n )\n rvals.append(\n data[self.particle_type, \"particle_mass\"].sum(dtype=np.float64)\n )\n return rvals\n\n def reduce_intermediate(self, values):\n jx = values.pop(0).sum(dtype=np.float64)\n jy = values.pop(0).sum(dtype=np.float64)\n jz = values.pop(0).sum(dtype=np.float64)\n m = values.pop(0).sum(dtype=np.float64)\n if values:\n jx += values.pop(0).sum(dtype=np.float64)\n jy += values.pop(0).sum(dtype=np.float64)\n jz += values.pop(0).sum(dtype=np.float64)\n m += values.pop(0).sum(dtype=np.float64)\n return self.data_source.ds.arr([jx / m, jy / m, jz / m])\n\n\nclass Extrema(DerivedQuantity):\n r\"\"\"\n Calculates the min and max value of a field or list of fields.\n Returns a YTArray for each field requested. If one, a single YTArray\n is returned, if many, a list of YTArrays in order of field list is\n returned. The first element of each YTArray is the minimum of the\n field and the second is the maximum of the field.\n\n Parameters\n ----------\n fields\n The field or list of fields over which the extrema are to be\n calculated.\n non_zero : bool\n If True, only positive values are considered in the calculation.\n Default: False\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.extrema([(\"gas\", \"density\"),\n ... 
(\"gas\", \"temperature\")]))\n\n \"\"\"\n\n def count_values(self, fields, non_zero):\n self.num_vals = len(fields) * 2\n\n def __call__(self, fields, non_zero=False):\n fields = list(iter_fields(fields))\n rv = super().__call__(fields, non_zero)\n if len(rv) == 1:\n rv = rv[0]\n return rv\n\n def process_chunk(self, data, fields, non_zero):\n vals = []\n for field in fields:\n field = data._determine_fields(field)[0]\n fd = data[field]\n if non_zero:\n fd = fd[fd > 0.0]\n if fd.size > 0:\n vals += [fd.min(), fd.max()]\n else:\n vals += [\n array_like_field(data, HUGE, field),\n array_like_field(data, -HUGE, field),\n ]\n return vals\n\n def reduce_intermediate(self, values):\n # The values get turned into arrays here.\n return [\n self.data_source.ds.arr([mis.min(), mas.max()])\n for mis, mas in zip(values[::2], values[1::2])\n ]\n\n\nclass SampleAtMaxFieldValues(DerivedQuantity):\n _sign = -1\n r\"\"\"\n Calculates the maximum value and returns whichever fields are asked to be\n sampled.\n\n Parameters\n ----------\n field : tuple or string\n The field over which the extrema are to be calculated.\n sample_fields : list of fields\n The fields to sample and return at the minimum value.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.sample_at_max_field_values((\"gas\", \"density\"),\n ... [(\"gas\", \"temperature\"), (\"gas\", \"velocity_magnitude\")]))\n\n \"\"\"\n\n def count_values(self, field, sample_fields):\n # field itself, then index, then the number of sample fields\n self.num_vals = 1 + len(sample_fields)\n\n def __call__(self, field, sample_fields):\n rv = super().__call__(field, sample_fields)\n if len(rv) == 1:\n rv = rv[0]\n return rv\n\n def process_chunk(self, data, field, sample_fields):\n field = data._determine_fields(field)[0]\n ma = array_like_field(data, self._sign * HUGE, field)\n vals = [array_like_field(data, -1, sf) for sf in sample_fields]\n maxi = -1\n if data[field].size > 0:\n maxi = self._func(data[field])\n ma = data[field][maxi]\n vals = [data[sf][maxi] for sf in sample_fields]\n return (ma,) + tuple(vals)\n\n def reduce_intermediate(self, values):\n i = self._func(values[0]) # ma is values[0]\n return [val[i] for val in values]\n\n def _func(self, arr):\n return np.argmax(arr)\n\n\nclass MaxLocation(SampleAtMaxFieldValues):\n r\"\"\"\n Calculates the maximum value plus the x, y, and z position of the maximum.\n\n Parameters\n ----------\n\n field : tuple or string\n The field over which the extrema are to be calculated.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.max_location((\"gas\", \"density\")))\n\n \"\"\"\n\n def __call__(self, field):\n # Make sure we have an index\n self.data_source.index\n sample_fields = get_position_fields(field, self.data_source)\n rv = super().__call__(field, sample_fields)\n if len(rv) == 1:\n rv = rv[0]\n return rv\n\n\nclass SampleAtMinFieldValues(SampleAtMaxFieldValues):\n _sign = 1\n r\"\"\"\n Calculates the minimum value and returns whichever fields are asked to be\n sampled.\n\n Parameters\n ----------\n field : tuple or string\n The field over which the extrema are to be calculated.\n sample_fields : list of fields\n The fields to sample and return at the minimum value.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.sample_at_min_field_values((\"gas\", 
\"density\"),\n ... [(\"gas\", \"temperature\"), (\"gas\", \"velocity_magnitude\")]))\n\n \"\"\"\n\n def _func(self, arr):\n return np.argmin(arr)\n\n\nclass MinLocation(SampleAtMinFieldValues):\n r\"\"\"\n Calculates the minimum value plus the x, y, and z position of the minimum.\n\n Parameters\n ----------\n\n field : tuple or string\n The field over which the extrema are to be calculated.\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.min_location((\"gas\", \"density\")))\n\n \"\"\"\n\n def __call__(self, field):\n # Make sure we have an index\n self.data_source.index\n sample_fields = get_position_fields(field, self.data_source)\n rv = super().__call__(field, sample_fields)\n if len(rv) == 1:\n rv = rv[0]\n return rv\n\n\nclass SpinParameter(DerivedQuantity):\n r\"\"\"\n Calculates the dimensionless spin parameter.\n\n Given by Equation 3 of Peebles (1971, A&A, 11, 377), the spin parameter\n is defined as\n\n .. math::\n\n \\lambda = (L * |E|^(1/2)) / (G * M^5/2),\n\n where L is the total angular momentum, E is the total energy (kinetic and\n potential), G is the gravitational constant, and M is the total mass.\n\n Parameters\n ----------\n use_gas : bool\n Flag to include gas in the calculation. Gas is ignored if not\n present.\n Default: True\n use_particles : bool\n Flag to include particles in the calculation. Particles are ignored\n if not present.\n Default: True\n particle_type : str\n Particle type to be used for Center of mass calculation when use_particle\n = True.\n Default: all\n\n Examples\n --------\n\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> ad = ds.all_data()\n >>> print(ad.quantities.spin_parameter())\n\n \"\"\"\n\n def count_values(self, **kwargs):\n self.num_vals = 3\n\n def process_chunk(\n self, data, use_gas=True, use_particles=True, particle_type=\"nbody\"\n ):\n if use_particles and particle_type not in self.data_source.ds.particle_types:\n raise YTParticleTypeNotFound(particle_type, self.data_source.ds)\n use_gas &= (\"gas\", \"mass\") in self.data_source.ds.field_info\n use_particles &= (\n particle_type,\n \"particle_mass\",\n ) in self.data_source.ds.field_info\n e = data.ds.quan(0.0, \"erg\")\n j = data.ds.quan(0.0, \"g*cm**2/s\")\n m = data.ds.quan(0.0, \"g\")\n if use_gas:\n e += (data[\"gas\", \"kinetic_energy_density\"] * data[\"gas\", \"volume\"]).sum(\n dtype=np.float64\n )\n j += data[\"gas\", \"angular_momentum_magnitude\"].sum(dtype=np.float64)\n m += data[\"gas\", \"mass\"].sum(dtype=np.float64)\n if use_particles:\n e += (\n data[particle_type, \"particle_velocity_magnitude\"] ** 2\n * data[particle_type, \"particle_mass\"]\n ).sum(dtype=np.float64)\n j += data[particle_type, \"particle_angular_momentum_magnitude\"].sum(\n dtype=np.float64\n )\n m += data[particle_type, \"particle_mass\"].sum(dtype=np.float64)\n return (e, j, m)\n\n def reduce_intermediate(self, values):\n e = values.pop(0).sum(dtype=np.float64)\n j = values.pop(0).sum(dtype=np.float64)\n m = values.pop(0).sum(dtype=np.float64)\n return j * np.sqrt(np.abs(e)) / m ** 2.5 / gravitational_constant_cgs\n", "import glob\nimport os\nimport struct\n\nimport numpy as np\n\nfrom yt.data_objects.static_output import ParticleFile\nfrom yt.frontends.sph.data_structures import SPHDataset, SPHParticleIndex\nfrom yt.utilities.cosmology import Cosmology\nfrom yt.utilities.physical_constants import G\nfrom yt.utilities.physical_ratios import cm_per_kpc\n\nfrom .fields import 
TipsyFieldInfo\n\n\nclass TipsyFile(ParticleFile):\n def __init__(self, ds, io, filename, file_id, range=None):\n super().__init__(ds, io, filename, file_id, range)\n if not hasattr(io, \"_field_list\"):\n io._create_dtypes(self)\n # Check automatically what the domain size is\n io._update_domain(self)\n self._calculate_offsets(io._field_list)\n\n def _calculate_offsets(self, field_list, pcounts=None):\n self.field_offsets = self.io._calculate_particle_offsets(self, None)\n\n\nclass TipsyDataset(SPHDataset):\n _index_class = SPHParticleIndex\n _file_class = TipsyFile\n _field_info_class = TipsyFieldInfo\n _particle_mass_name = \"Mass\"\n _particle_coordinates_name = \"Coordinates\"\n _sph_ptypes = (\"Gas\",)\n _header_spec = (\n (\"time\", \"d\"),\n (\"nbodies\", \"i\"),\n (\"ndim\", \"i\"),\n (\"nsph\", \"i\"),\n (\"ndark\", \"i\"),\n (\"nstar\", \"i\"),\n (\"dummy\", \"i\"),\n )\n\n def __init__(\n self,\n filename,\n dataset_type=\"tipsy\",\n field_dtypes=None,\n unit_base=None,\n parameter_file=None,\n cosmology_parameters=None,\n index_order=None,\n index_filename=None,\n kdtree_filename=None,\n kernel_name=None,\n bounding_box=None,\n units_override=None,\n unit_system=\"cgs\",\n ):\n # Because Tipsy outputs don't have a fixed domain boundary, one can\n # specify a bounding box which effectively gives a domain_left_edge\n # and domain_right_edge\n self.bounding_box = bounding_box\n self.filter_bbox = bounding_box is not None\n if field_dtypes is None:\n field_dtypes = {}\n success, self.endian = self._validate_header(filename)\n if not success:\n print(\"SOMETHING HAS GONE WRONG. NBODIES != SUM PARTICLES.\")\n print(\n \"%s != (sum == %s + %s + %s)\"\n % (\n self.parameters[\"nbodies\"],\n self.parameters[\"nsph\"],\n self.parameters[\"ndark\"],\n self.parameters[\"nstar\"],\n )\n )\n print(\"Often this can be fixed by changing the 'endian' parameter.\")\n print(\"This defaults to '>' but may in fact be '<'.\")\n raise RuntimeError\n self.storage_filename = None\n\n # My understanding is that dtypes are set on a field by field basis,\n # not on a (particle type, field) basis\n self._field_dtypes = field_dtypes\n\n self._unit_base = unit_base or {}\n\n self._cosmology_parameters = cosmology_parameters\n if parameter_file is not None:\n parameter_file = os.path.abspath(parameter_file)\n self._param_file = parameter_file\n filename = os.path.abspath(filename)\n if units_override is not None:\n raise RuntimeError(\n \"units_override is not supported for TipsyDataset. 
\"\n + \"Use unit_base instead.\"\n )\n super().__init__(\n filename,\n dataset_type=dataset_type,\n unit_system=unit_system,\n index_order=index_order,\n index_filename=index_filename,\n kdtree_filename=kdtree_filename,\n kernel_name=kernel_name,\n )\n\n def __str__(self):\n return os.path.basename(self.parameter_filename)\n\n def _parse_parameter_file(self):\n\n # Parsing the header of the tipsy file, from this we obtain\n # the snapshot time and particle counts.\n\n f = open(self.parameter_filename, \"rb\")\n hh = self.endian + \"\".join([\"%s\" % (b) for a, b in self._header_spec])\n hvals = {\n a: c\n for (a, b), c in zip(\n self._header_spec, struct.unpack(hh, f.read(struct.calcsize(hh)))\n )\n }\n self.parameters.update(hvals)\n self._header_offset = f.tell()\n\n # These are always true, for now.\n self.dimensionality = 3\n self.refine_by = 2\n self.parameters[\"HydroMethod\"] = \"sph\"\n\n # Read in parameter file, if available.\n if self._param_file is None:\n pfn = glob.glob(os.path.join(self.directory, \"*.param\"))\n assert len(pfn) < 2, \"More than one param file is in the data directory\"\n if pfn == []:\n pfn = None\n else:\n pfn = pfn[0]\n else:\n pfn = self._param_file\n\n if pfn is not None:\n for line in (l.strip() for l in open(pfn)):\n # skip comment lines and blank lines\n l = line.strip()\n if l.startswith(\"#\") or l == \"\":\n continue\n # parse parameters according to tipsy parameter type\n param, val = (i.strip() for i in line.split(\"=\", 1))\n val = val.split(\"#\")[0]\n if param.startswith(\"n\") or param.startswith(\"i\"):\n val = int(val)\n elif param.startswith(\"d\"):\n val = float(val)\n elif param.startswith(\"b\"):\n val = bool(float(val))\n self.parameters[param] = val\n\n self.current_time = hvals[\"time\"]\n self.domain_dimensions = np.ones(3, \"int32\")\n periodic = self.parameters.get(\"bPeriodic\", True)\n period = self.parameters.get(\"dPeriod\", None)\n self._periodicity = (periodic, periodic, periodic)\n self.cosmological_simulation = float(\n self.parameters.get(\"bComove\", self._cosmology_parameters is not None)\n )\n if self.cosmological_simulation and period is None:\n period = 1.0\n if self.bounding_box is None:\n if periodic and period is not None:\n # If we are periodic, that sets our domain width to\n # either 1 or dPeriod.\n self.domain_left_edge = np.zeros(3, \"float64\") - 0.5 * period\n self.domain_right_edge = np.zeros(3, \"float64\") + 0.5 * period\n else:\n self.domain_left_edge = None\n self.domain_right_edge = None\n else:\n # This ensures that we know a bounding box has been applied\n self._domain_override = True\n bbox = np.array(self.bounding_box, dtype=\"float64\")\n if bbox.shape == (2, 3):\n bbox = bbox.transpose()\n self.domain_left_edge = bbox[:, 0]\n self.domain_right_edge = bbox[:, 1]\n\n # If the cosmology parameters dictionary got set when data is\n # loaded, we can assume it's a cosmological data set\n if self.cosmological_simulation == 1.0:\n cosm = self._cosmology_parameters or {}\n # In comoving simulations, time stores the scale factor a\n self.scale_factor = hvals[\"time\"]\n dcosm = dict(\n current_redshift=(1.0 / self.scale_factor) - 1.0,\n omega_lambda=self.parameters.get(\n \"dLambda\", cosm.get(\"omega_lambda\", 0.0)\n ),\n omega_matter=self.parameters.get(\n \"dOmega0\", cosm.get(\"omega_matter\", 0.0)\n ),\n hubble_constant=self.parameters.get(\n \"dHubble0\", cosm.get(\"hubble_constant\", 1.0)\n ),\n )\n for param in dcosm.keys():\n pval = dcosm[param]\n setattr(self, param, pval)\n else:\n kpc_unit = 
self.parameters.get(\"dKpcUnit\", 1.0)\n self._unit_base[\"cm\"] = 1.0 / (kpc_unit * cm_per_kpc)\n\n self.filename_template = self.parameter_filename\n self.file_count = 1\n\n f.close()\n\n def _set_derived_attrs(self):\n if self.bounding_box is None and (\n self.domain_left_edge is None or self.domain_right_edge is None\n ):\n self.domain_left_edge = np.array([np.nan, np.nan, np.nan])\n self.domain_right_edge = np.array([np.nan, np.nan, np.nan])\n self.index\n super()._set_derived_attrs()\n\n def _set_code_unit_attributes(self):\n # First try to set units based on parameter file\n if self.cosmological_simulation:\n mu = self.parameters.get(\"dMsolUnit\", 1.0)\n self.mass_unit = self.quan(mu, \"Msun\")\n lu = self.parameters.get(\"dKpcUnit\", 1000.0)\n # In cosmological runs, lengths are stored as length*scale_factor\n self.length_unit = self.quan(lu, \"kpc\") * self.scale_factor\n density_unit = self.mass_unit / (self.length_unit / self.scale_factor) ** 3\n if \"dHubble0\" in self.parameters:\n # Gasoline's internal hubble constant, dHubble0, is stored in\n # units of proper code time\n self.hubble_constant *= np.sqrt(G * density_unit)\n # Finally, we scale the hubble constant by 100 km/s/Mpc\n self.hubble_constant /= self.quan(100, \"km/s/Mpc\")\n # If we leave it as a YTQuantity, the cosmology object\n # used below will add units back on.\n self.hubble_constant = self.hubble_constant.to_value(\"\")\n else:\n mu = self.parameters.get(\"dMsolUnit\", 1.0)\n self.mass_unit = self.quan(mu, \"Msun\")\n lu = self.parameters.get(\"dKpcUnit\", 1.0)\n self.length_unit = self.quan(lu, \"kpc\")\n\n # If unit base is defined by the user, override all relevant units\n if self._unit_base is not None:\n for my_unit in [\"length\", \"mass\", \"time\"]:\n if my_unit in self._unit_base:\n my_val = self._unit_base[my_unit]\n my_val = (\n self.quan(*my_val)\n if isinstance(my_val, tuple)\n else self.quan(my_val)\n )\n setattr(self, f\"{my_unit}_unit\", my_val)\n\n # Finally, set the dependent units\n if self.cosmological_simulation:\n cosmo = Cosmology(\n hubble_constant=self.hubble_constant,\n omega_matter=self.omega_matter,\n omega_lambda=self.omega_lambda,\n )\n self.current_time = cosmo.lookback_time(self.current_redshift, 1e6)\n # mass units are rho_crit(z=0) * domain volume\n mu = (\n cosmo.critical_density(0.0)\n * (1 + self.current_redshift) ** 3\n * self.length_unit ** 3\n )\n self.mass_unit = self.quan(mu.in_units(\"Msun\"), \"Msun\")\n density_unit = self.mass_unit / (self.length_unit / self.scale_factor) ** 3\n # need to do this again because we've modified the hubble constant\n self.unit_registry.modify(\"h\", self.hubble_constant)\n else:\n density_unit = self.mass_unit / self.length_unit ** 3\n\n if not hasattr(self, \"time_unit\"):\n self.time_unit = 1.0 / np.sqrt(density_unit * G)\n\n @staticmethod\n def _validate_header(filename):\n \"\"\"\n This method automatically detects whether the tipsy file is big/little endian\n and is not corrupt/invalid. 
It returns a tuple of (Valid, endianswap) where\n Valid is a boolean that is true if the file is a tipsy file, and endianswap is\n the endianness character '>' or '<'.\n \"\"\"\n try:\n f = open(filename, \"rb\")\n except Exception:\n return False, 1\n try:\n f.seek(0, os.SEEK_END)\n fs = f.tell()\n f.seek(0, os.SEEK_SET)\n # Read in the header\n t, n, ndim, ng, nd, ns = struct.unpack(\"<diiiii\", f.read(28))\n except (OSError, struct.error):\n return False, 1\n endianswap = \"<\"\n # Check Endianness\n if ndim < 1 or ndim > 3:\n endianswap = \">\"\n f.seek(0)\n t, n, ndim, ng, nd, ns = struct.unpack(\">diiiii\", f.read(28))\n # File is borked if this is true. The header is 28 bytes, and may\n # Be followed by a 4 byte pad. Next comes gas particles, which use\n # 48 bytes, followed by 36 bytes per dark matter particle, and 44 bytes\n # per star particle. If positions are stored as doubles, each of these\n # sizes is increased by 12 bytes.\n if (\n fs != 28 + 48 * ng + 36 * nd + 44 * ns\n and fs != 28 + 60 * ng + 48 * nd + 56 * ns\n and fs != 32 + 48 * ng + 36 * nd + 44 * ns\n and fs != 32 + 60 * ng + 48 * nd + 56 * ns\n ):\n f.close()\n return False, 0\n f.close()\n return True, endianswap\n\n @classmethod\n def _is_valid(cls, filename, *args, **kwargs):\n return TipsyDataset._validate_header(filename)[0]\n", "import re\nimport sys\nfrom numbers import Number as numeric_type\n\nimport numpy as np\nfrom more_itertools import first, mark_ends\n\nfrom yt.data_objects.construction_data_containers import YTCoveringGrid\nfrom yt.data_objects.image_array import ImageArray\nfrom yt.fields.derived_field import DerivedField\nfrom yt.funcs import fix_axis, is_sequence, iter_fields, mylog\nfrom yt.units import dimensions\nfrom yt.units.unit_object import Unit\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom yt.utilities.on_demand_imports import _astropy\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only\nfrom yt.visualization.fixed_resolution import FixedResolutionBuffer\nfrom yt.visualization.volume_rendering.off_axis_projection import off_axis_projection\n\n\nclass UnitfulHDU:\n def __init__(self, hdu):\n self.hdu = hdu\n self.header = self.hdu.header\n self.name = self.header[\"BTYPE\"]\n self.units = self.header[\"BUNIT\"]\n self.shape = self.hdu.shape\n\n @property\n def data(self):\n return YTArray(self.hdu.data, self.units)\n\n def __repr__(self):\n im_shape = \" x \".join([str(s) for s in self.shape])\n return f\"FITSImage: {self.name} ({im_shape}, {self.units})\"\n\n\nclass FITSImageData:\n def __init__(\n self,\n data,\n fields=None,\n length_unit=None,\n width=None,\n img_ctr=None,\n wcs=None,\n current_time=None,\n time_unit=None,\n mass_unit=None,\n velocity_unit=None,\n magnetic_unit=None,\n ds=None,\n unit_header=None,\n **kwargs,\n ):\n r\"\"\"Initialize a FITSImageData object.\n\n FITSImageData contains a collection of FITS ImageHDU instances and\n WCS information, along with units for each of the images. FITSImageData\n instances can be constructed from ImageArrays, NumPy arrays, dicts\n of such arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter\n two are the most powerful because WCS information can be constructed\n automatically from their coordinates.\n\n Parameters\n ----------\n data : FixedResolutionBuffer or a YTCoveringGrid. 
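# _validate_header above accepts exactly four file sizes: a 28-byte header
# (optionally followed by a 4-byte pad) plus 48/36/44 bytes per gas/dark/star
# particle, or 60/48/56 bytes each when positions are stored as doubles.
# A small helper (hypothetical, for illustration only) that enumerates those
# sizes for given particle counts:
def expected_tipsy_sizes(ng, nd, ns):
    sizes = []
    for pad in (0, 4):                                   # optional header pad
        for gas, dark, star in ((48, 36, 44), (60, 48, 56)):
            sizes.append(28 + pad + gas * ng + dark * nd + star * ns)
    return sizes

print(expected_tipsy_sizes(ng=1000, nd=2000, ns=500))
# a file whose size matches none of these values is rejected as not-tipsy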
Or, an\n ImageArray, an numpy.ndarray, or dict of such arrays\n The data to be made into a FITS image or images.\n fields : single string or list of strings, optional\n The field names for the data. If *fields* is none and *data* has\n keys, it will use these for the fields. If *data* is just a\n single array one field name must be specified.\n length_unit : string\n The units of the WCS coordinates and the length unit of the file.\n Defaults to the length unit of the dataset, if there is one, or\n \"cm\" if there is not.\n width : float or YTQuantity\n The width of the image. Either a single value or iterable of values.\n If a float, assumed to be in *units*. Only used if this information\n is not already provided by *data*.\n img_ctr : array_like or YTArray\n The center coordinates of the image. If a list or NumPy array,\n it is assumed to be in *units*. Only used if this information\n is not already provided by *data*.\n wcs : `~astropy.wcs.WCS` instance, optional\n Supply an AstroPy WCS instance. Will override automatic WCS\n creation from FixedResolutionBuffers and YTCoveringGrids.\n current_time : float, tuple, or YTQuantity, optional\n The current time of the image(s). If not specified, one will\n be set from the dataset if there is one. If a float, it will\n be assumed to be in *time_unit* units.\n time_unit : string\n The default time units of the file. Defaults to \"s\".\n mass_unit : string\n The default time units of the file. Defaults to \"g\".\n velocity_unit : string\n The default velocity units of the file. Defaults to \"cm/s\".\n magnetic_unit : string\n The default magnetic units of the file. Defaults to \"gauss\".\n ds : `~yt.static_output.Dataset` instance, optional\n The dataset associated with the image(s), typically used\n to transfer metadata to the header(s). Does not need to be\n specified if *data* has a dataset as an attribute.\n\n Examples\n --------\n\n >>> # This example uses a FRB.\n >>> ds = load(\"sloshing_nomag2_hdf5_plt_cnt_0150\")\n >>> prj = ds.proj(2, \"kT\", weight_field=(\"gas\", \"density\"))\n >>> frb = prj.to_frb((0.5, \"Mpc\"), 800)\n >>> # This example just uses the FRB and puts the coords in kpc.\n >>> f_kpc = FITSImageData(frb, fields=\"kT\", length_unit=\"kpc\",\n ... time_unit=(1.0, \"Gyr\"))\n >>> # This example specifies a specific WCS.\n >>> from astropy.wcs import WCS\n >>> w = WCS(naxis=self.dimensionality)\n >>> w.wcs.crval = [30., 45.] # RA, Dec in degrees\n >>> w.wcs.cunit = [\"deg\"]*2\n >>> nx, ny = 800, 800\n >>> w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]\n >>> w.wcs.ctype = [\"RA---TAN\",\"DEC--TAN\"]\n >>> scale = 1./3600. 
# One arcsec per pixel\n >>> w.wcs.cdelt = [-scale, scale]\n >>> f_deg = FITSImageData(frb, fields=\"kT\", wcs=w)\n >>> f_deg.writeto(\"temp.fits\")\n \"\"\"\n\n if fields is not None:\n fields = list(iter_fields(fields))\n\n if ds is None:\n ds = getattr(data, \"ds\", None)\n\n self.fields = []\n self.field_units = {}\n\n if unit_header is None:\n self._set_units(\n ds, [length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit]\n )\n else:\n self._set_units_from_header(unit_header)\n\n wcs_unit = str(self.length_unit.units)\n\n self._fix_current_time(ds, current_time)\n\n if width is None:\n width = 1.0\n if isinstance(width, tuple):\n if ds is None:\n width = YTQuantity(width[0], width[1])\n else:\n width = ds.quan(width[0], width[1])\n if img_ctr is None:\n img_ctr = np.zeros(3)\n\n exclude_fields = [\n \"x\",\n \"y\",\n \"z\",\n \"px\",\n \"py\",\n \"pz\",\n \"pdx\",\n \"pdy\",\n \"pdz\",\n \"weight_field\",\n ]\n\n if isinstance(data, _astropy.pyfits.PrimaryHDU):\n data = _astropy.pyfits.HDUList([data])\n\n if isinstance(data, _astropy.pyfits.HDUList):\n self.hdulist = data\n for hdu in data:\n self.fields.append(hdu.header[\"btype\"])\n self.field_units[hdu.header[\"btype\"]] = hdu.header[\"bunit\"]\n\n self.shape = self.hdulist[0].shape\n self.dimensionality = len(self.shape)\n wcs_names = [key for key in self.hdulist[0].header if \"WCSNAME\" in key]\n for name in wcs_names:\n if name == \"WCSNAME\":\n key = \" \"\n else:\n key = name[-1]\n w = _astropy.pywcs.WCS(\n header=self.hdulist[0].header, key=key, naxis=self.dimensionality\n )\n setattr(self, \"wcs\" + key.strip().lower(), w)\n\n return\n\n self.hdulist = _astropy.pyfits.HDUList()\n\n if hasattr(data, \"keys\"):\n img_data = data\n if fields is None:\n fields = list(img_data.keys())\n elif isinstance(data, np.ndarray):\n if fields is None:\n mylog.warning(\n \"No field name given for this array. Calling it 'image_data'.\"\n )\n fn = \"image_data\"\n fields = [fn]\n else:\n fn = fields[0]\n img_data = {fn: data}\n\n for fd in fields:\n if isinstance(fd, tuple):\n self.fields.append(fd[1])\n elif isinstance(fd, DerivedField):\n self.fields.append(fd.name[1])\n else:\n self.fields.append(fd)\n\n # Sanity checking names\n s = set()\n duplicates = {f for f in self.fields if f in s or s.add(f)}\n if len(duplicates) > 0:\n for i, fd in enumerate(self.fields):\n if fd in duplicates:\n if isinstance(fields[i], tuple):\n ftype, fname = fields[i]\n elif isinstance(fields[i], DerivedField):\n ftype, fname = fields[i].name\n else:\n raise RuntimeError(\n f\"Cannot distinguish between fields with same name {fd}!\"\n )\n self.fields[i] = f\"{ftype}_{fname}\"\n\n for is_first, _is_last, (i, (name, field)) in mark_ends(\n enumerate(zip(self.fields, fields))\n ):\n if name not in exclude_fields:\n this_img = img_data[field]\n if hasattr(img_data[field], \"units\"):\n if this_img.units.is_code_unit:\n mylog.warning(\n \"Cannot generate an image with code \"\n \"units. 
Converting to units in CGS.\"\n )\n funits = this_img.units.get_base_equivalent(\"cgs\")\n else:\n funits = this_img.units\n self.field_units[name] = str(funits)\n else:\n self.field_units[name] = \"dimensionless\"\n mylog.info(\"Making a FITS image of field %s\", name)\n if isinstance(this_img, ImageArray):\n if i == 0:\n self.shape = this_img.shape[::-1]\n this_img = np.asarray(this_img)\n else:\n if i == 0:\n self.shape = this_img.shape\n this_img = np.asarray(this_img.T)\n if is_first:\n hdu = _astropy.pyfits.PrimaryHDU(this_img)\n else:\n hdu = _astropy.pyfits.ImageHDU(this_img)\n hdu.name = name\n hdu.header[\"btype\"] = name\n hdu.header[\"bunit\"] = re.sub(\"()\", \"\", self.field_units[name])\n for unit in (\"length\", \"time\", \"mass\", \"velocity\", \"magnetic\"):\n if unit == \"magnetic\":\n short_unit = \"bf\"\n else:\n short_unit = unit[0]\n key = f\"{short_unit}unit\"\n value = getattr(self, f\"{unit}_unit\")\n if value is not None:\n hdu.header[key] = float(value.value)\n hdu.header.comments[key] = f\"[{value.units}]\"\n hdu.header[\"time\"] = float(self.current_time.value)\n if hasattr(self, \"current_redshift\"):\n hdu.header[\"HUBBLE\"] = self.hubble_constant\n hdu.header[\"REDSHIFT\"] = self.current_redshift\n self.hdulist.append(hdu)\n\n self.dimensionality = len(self.shape)\n\n if wcs is None:\n w = _astropy.pywcs.WCS(\n header=self.hdulist[0].header, naxis=self.dimensionality\n )\n # FRBs and covering grids are special cases where\n # we have coordinate information, so we take advantage\n # of this and construct the WCS object\n if isinstance(img_data, FixedResolutionBuffer):\n dx = (img_data.bounds[1] - img_data.bounds[0]).to_value(wcs_unit)\n dy = (img_data.bounds[3] - img_data.bounds[2]).to_value(wcs_unit)\n dx /= self.shape[0]\n dy /= self.shape[1]\n xctr = 0.5 * (img_data.bounds[1] + img_data.bounds[0]).to_value(\n wcs_unit\n )\n yctr = 0.5 * (img_data.bounds[3] + img_data.bounds[2]).to_value(\n wcs_unit\n )\n center = [xctr, yctr]\n cdelt = [dx, dy]\n elif isinstance(img_data, YTCoveringGrid):\n cdelt = img_data.dds.to_value(wcs_unit)\n center = 0.5 * (img_data.left_edge + img_data.right_edge).to_value(\n wcs_unit\n )\n else:\n # If img_data is just an array we use the width and img_ctr\n # parameters to determine the cell widths\n if not is_sequence(width):\n width = [width] * self.dimensionality\n if isinstance(width[0], YTQuantity):\n cdelt = [\n wh.to_value(wcs_unit) / n for wh, n in zip(width, self.shape)\n ]\n else:\n cdelt = [float(wh) / n for wh, n in zip(width, self.shape)]\n center = img_ctr[: self.dimensionality]\n w.wcs.crpix = 0.5 * (np.array(self.shape) + 1)\n w.wcs.crval = center\n w.wcs.cdelt = cdelt\n w.wcs.ctype = [\"linear\"] * self.dimensionality\n w.wcs.cunit = [wcs_unit] * self.dimensionality\n self.set_wcs(w)\n else:\n self.set_wcs(wcs)\n\n def _fix_current_time(self, ds, current_time):\n if ds is None:\n registry = None\n else:\n registry = ds.unit_registry\n tunit = Unit(self.time_unit, registry=registry)\n if current_time is None:\n if ds is not None:\n current_time = ds.current_time\n else:\n self.current_time = YTQuantity(0.0, \"s\")\n return\n elif isinstance(current_time, numeric_type):\n current_time = YTQuantity(current_time, tunit)\n elif isinstance(current_time, tuple):\n current_time = YTQuantity(current_time[0], current_time[1])\n self.current_time = current_time.to(tunit)\n\n def _set_units(self, ds, base_units):\n if ds is not None:\n if getattr(ds, \"cosmological_simulation\", False):\n self.hubble_constant = 
ds.hubble_constant\n self.current_redshift = ds.current_redshift\n attrs = (\n \"length_unit\",\n \"mass_unit\",\n \"time_unit\",\n \"velocity_unit\",\n \"magnetic_unit\",\n )\n cgs_units = (\"cm\", \"g\", \"s\", \"cm/s\", \"gauss\")\n for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):\n if unit is None:\n if ds is not None:\n u = getattr(ds, attr, None)\n elif attr == \"velocity_unit\":\n u = self.length_unit / self.time_unit\n elif attr == \"magnetic_unit\":\n u = np.sqrt(\n 4.0\n * np.pi\n * self.mass_unit\n / (self.time_unit ** 2 * self.length_unit)\n )\n else:\n u = cgs_unit\n else:\n u = unit\n\n if isinstance(u, str):\n uq = YTQuantity(1.0, u)\n elif isinstance(u, numeric_type):\n uq = YTQuantity(u, cgs_unit)\n elif isinstance(u, YTQuantity):\n uq = u.copy()\n elif isinstance(u, tuple):\n uq = YTQuantity(u[0], u[1])\n else:\n uq = None\n\n if uq is not None and hasattr(self, \"hubble_constant\"):\n # Don't store cosmology units\n atoms = {str(a) for a in uq.units.expr.atoms()}\n if str(uq.units).startswith(\"cm\") or \"h\" in atoms or \"a\" in atoms:\n uq.convert_to_cgs()\n\n if uq is not None and uq.units.is_code_unit:\n mylog.warning(\n \"Cannot use code units of '%s' \"\n \"when creating a FITSImageData instance! \"\n \"Converting to a cgs equivalent.\",\n uq.units,\n )\n uq.convert_to_cgs()\n\n if attr == \"length_unit\" and uq.value != 1.0:\n mylog.warning(\"Converting length units from %s to %s.\", uq, uq.units)\n uq = YTQuantity(1.0, uq.units)\n\n setattr(self, attr, uq)\n\n def _set_units_from_header(self, header):\n if \"hubble\" in header:\n self.hubble_constant = header[\"HUBBLE\"]\n self.current_redshift = header[\"REDSHIFT\"]\n for unit in [\"length\", \"time\", \"mass\", \"velocity\", \"magnetic\"]:\n if unit == \"magnetic\":\n key = \"BFUNIT\"\n else:\n key = unit[0].upper() + \"UNIT\"\n if key not in header:\n continue\n u = header.comments[key].strip(\"[]\")\n uq = YTQuantity(header[key], u)\n setattr(self, unit + \"_unit\", uq)\n\n def set_wcs(self, wcs, wcsname=None, suffix=None):\n \"\"\"\n Set the WCS coordinate information for all images\n with a WCS object *wcs*.\n \"\"\"\n if wcsname is None:\n wcs.wcs.name = \"yt\"\n else:\n wcs.wcs.name = wcsname\n h = wcs.to_header()\n if suffix is None:\n self.wcs = wcs\n else:\n setattr(self, \"wcs\" + suffix, wcs)\n for img in self.hdulist:\n for k, v in h.items():\n kk = k\n if suffix is not None:\n kk += suffix\n img.header[kk] = v\n\n def change_image_name(self, old_name, new_name):\n \"\"\"\n Change the name of a FITS image.\n\n Parameters\n ----------\n old_name : string\n The old name of the image.\n new_name : string\n The new name of the image.\n \"\"\"\n idx = self.fields.index(old_name)\n self.hdulist[idx].name = new_name\n self.hdulist[idx].header[\"BTYPE\"] = new_name\n self.field_units[new_name] = self.field_units.pop(old_name)\n self.fields[idx] = new_name\n\n def convolve(self, field, kernel, **kwargs):\n \"\"\"\n Convolve an image with a kernel, either a simple\n Gaussian kernel or one provided by AstroPy. Currently,\n this only works for 2D images.\n\n All keyword arguments are passed to\n :meth:`~astropy.convolution.convolve`.\n\n Parameters\n ----------\n field : string\n The name of the field to convolve.\n kernel : float, YTQuantity, (value, unit) tuple, or AstroPy Kernel object\n The kernel to convolve the image with. If this is an AstroPy Kernel\n object, the image will be convolved with it. 
Otherwise, it is\n assumed that the kernel is a Gaussian and that this value is\n the standard deviation. If a float, it is assumed that the units\n are pixels, but a (value, unit) tuple or YTQuantity can be supplied\n to specify the standard deviation in physical units.\n\n Examples\n --------\n >>> fid = FITSSlice(ds, \"z\", (\"gas\", \"density\"))\n >>> fid.convolve(\"density\", (3.0, \"kpc\"))\n \"\"\"\n if self.dimensionality == 3:\n raise RuntimeError(\"Convolution currently only works for 2D FITSImageData!\")\n conv = _astropy.conv\n if field not in self.keys():\n raise KeyError(f\"{field} not an image!\")\n idx = self.fields.index(field)\n if not isinstance(kernel, conv.Kernel):\n if not isinstance(kernel, numeric_type):\n unit = str(self.wcs.wcs.cunit[0])\n pix_scale = YTQuantity(self.wcs.wcs.cdelt[0], unit)\n if isinstance(kernel, tuple):\n stddev = YTQuantity(kernel[0], kernel[1]).to(unit)\n else:\n stddev = kernel.to(unit)\n kernel = stddev / pix_scale\n kernel = conv.Gaussian2DKernel(x_stddev=kernel)\n self.hdulist[idx].data = conv.convolve(self.hdulist[idx].data, kernel, **kwargs)\n\n def update_header(self, field, key, value):\n \"\"\"\n Update the FITS header for *field* with a\n *key*, *value* pair. If *field* == \"all\", all\n headers will be updated.\n \"\"\"\n if field == \"all\":\n for img in self.hdulist:\n img.header[key] = value\n else:\n if field not in self.keys():\n raise KeyError(f\"{field} not an image!\")\n idx = self.fields.index(field)\n self.hdulist[idx].header[key] = value\n\n def update_all_headers(self, key, value):\n mylog.warning(\n \"update_all_headers is deprecated. \"\n \"Use update_header('all', key, value) instead.\"\n )\n self.update_header(\"all\", key, value)\n\n def keys(self):\n return self.fields\n\n def has_key(self, key):\n return key in self.fields\n\n def values(self):\n return [self[k] for k in self.fields]\n\n def items(self):\n return [(k, self[k]) for k in self.fields]\n\n def __getitem__(self, item):\n return UnitfulHDU(self.hdulist[item])\n\n def __repr__(self):\n return str([self[k] for k in self.keys()])\n\n def info(self, output=None):\n \"\"\"\n Summarize the info of the HDUs in this `FITSImageData`\n instance.\n\n Note that this function prints its results to the console---it\n does not return a value.\n\n Parameters\n ----------\n output : file, boolean, optional\n A file-like object to write the output to. If `False`, does not\n output to a file and instead returns a list of tuples representing\n the FITSImageData info. Writes to ``sys.stdout`` by default.\n \"\"\"\n hinfo = self.hdulist.info(output=False)\n num_cols = len(hinfo[0])\n if output is None:\n output = sys.stdout\n if num_cols == 8:\n header = \"No. Name Ver Type Cards Dimensions Format Units\" # NOQA E501\n format = \"{:3d} {:10} {:3} {:11} {:5d} {} {} {}\"\n else:\n header = (\n \"No. 
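# convolve() above turns a smoothing scale given in physical units into a
# pixel standard deviation by dividing by the WCS pixel scale (cdelt), and
# only then builds the Gaussian kernel. The arithmetic, with made-up numbers:
pix_scale_kpc = 0.625       # |cdelt[0]| of the image, in kpc per pixel
stddev_kpc = 3.0            # requested smoothing, e.g. kernel=(3.0, "kpc")
stddev_pix = stddev_kpc / pix_scale_kpc
print(f"Gaussian stddev = {stddev_pix:.2f} pixels")   # 4.80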
Name Type Cards Dimensions Format Units\"\n )\n format = \"{:3d} {:10} {:11} {:5d} {} {} {}\"\n if self.hdulist._file is None:\n name = \"(No file associated with this FITSImageData)\"\n else:\n name = self.hdulist._file.name\n results = [f\"Filename: {name}\", header]\n for line in hinfo:\n units = self.field_units[self.hdulist[line[0]].header[\"btype\"]]\n summary = tuple(list(line[:-1]) + [units])\n if output:\n results.append(format.format(*summary))\n else:\n results.append(summary)\n\n if output:\n output.write(\"\\n\".join(results))\n output.write(\"\\n\")\n output.flush()\n else:\n return results[2:]\n\n @parallel_root_only\n def writeto(self, fileobj, fields=None, overwrite=False, **kwargs):\n r\"\"\"\n Write all of the fields or a subset of them to a FITS file.\n\n Parameters\n ----------\n fileobj : string\n The name of the file to write to.\n fields : list of strings, optional\n The fields to write to the file. If not specified\n all of the fields in the buffer will be written.\n clobber : overwrite, optional\n Whether or not to overwrite a previously existing file.\n Default: False\n **kwargs\n Additional keyword arguments are passed to\n :meth:`~astropy.io.fits.HDUList.writeto`.\n \"\"\"\n if fields is None:\n hdus = self.hdulist\n else:\n hdus = _astropy.pyfits.HDUList()\n for field in fields:\n hdus.append(self.hdulist[field])\n hdus.writeto(fileobj, overwrite=overwrite, **kwargs)\n\n def to_glue(self, label=\"yt\", data_collection=None):\n \"\"\"\n Takes the data in the FITSImageData instance and exports it to\n Glue (http://glueviz.org) for interactive analysis. Optionally\n add a *label*. If you are already within the Glue environment, you\n can pass a *data_collection* object, otherwise Glue will be started.\n \"\"\"\n from glue.core import Data, DataCollection\n from glue.core.coordinates import coordinates_from_header\n\n try:\n from glue.app.qt.application import GlueApplication\n except ImportError:\n from glue.qt.glue_application import GlueApplication\n\n image = Data(label=label)\n image.coords = coordinates_from_header(self.wcs.to_header())\n for k in self.fields:\n image.add_component(self[k].data, k)\n if data_collection is None:\n dc = DataCollection([image])\n app = GlueApplication(dc)\n app.start()\n else:\n data_collection.append(image)\n\n def to_aplpy(self, **kwargs):\n \"\"\"\n Use APLpy (http://aplpy.github.io) for plotting. Returns an\n `aplpy.FITSFigure` instance. All keyword arguments are passed\n to the `aplpy.FITSFigure` constructor.\n \"\"\"\n import aplpy\n\n return aplpy.FITSFigure(self.hdulist, **kwargs)\n\n def get_data(self, field):\n \"\"\"\n Return the data array of the image corresponding to *field*\n with units attached. 
Deprecated.\n \"\"\"\n return self[field].data\n\n def set_unit(self, field, units):\n \"\"\"\n Set the units of *field* to *units*.\n \"\"\"\n if field not in self.keys():\n raise KeyError(f\"{field} not an image!\")\n idx = self.fields.index(field)\n new_data = YTArray(self.hdulist[idx].data, self.field_units[field]).to(units)\n self.hdulist[idx].data = new_data.v\n self.hdulist[idx].header[\"bunit\"] = units\n self.field_units[field] = units\n\n def pop(self, key):\n \"\"\"\n Remove a field with name *key*\n and return it as a new FITSImageData\n instance.\n \"\"\"\n if key not in self.keys():\n raise KeyError(f\"{key} not an image!\")\n idx = self.fields.index(key)\n im = self.hdulist.pop(idx)\n self.field_units.pop(key)\n self.fields.remove(key)\n f = _astropy.pyfits.PrimaryHDU(im.data, header=im.header)\n return FITSImageData(f, current_time=f.header[\"TIME\"], unit_header=f.header)\n\n def close(self):\n self.hdulist.close()\n\n @classmethod\n def from_file(cls, filename):\n \"\"\"\n Generate a FITSImageData instance from one previously written to\n disk.\n\n Parameters\n ----------\n filename : string\n The name of the file to open.\n \"\"\"\n f = _astropy.pyfits.open(filename, lazy_load_hdus=False)\n return cls(f, current_time=f[0].header[\"TIME\"], unit_header=f[0].header)\n\n @classmethod\n def from_images(cls, image_list):\n \"\"\"\n Generate a new FITSImageData instance from a list of FITSImageData\n instances.\n\n Parameters\n ----------\n image_list : list of FITSImageData instances\n The images to be combined.\n \"\"\"\n image_list = image_list if isinstance(image_list, list) else [image_list]\n first_image = first(image_list)\n\n w = first_image.wcs\n img_shape = first_image.shape\n data = []\n for is_first, _is_last, fid in mark_ends(image_list):\n assert_same_wcs(w, fid.wcs)\n if img_shape != fid.shape:\n raise RuntimeError(\"Images do not have the same shape!\")\n for hdu in fid.hdulist:\n if is_first:\n data.append(_astropy.pyfits.PrimaryHDU(hdu.data, header=hdu.header))\n else:\n data.append(_astropy.pyfits.ImageHDU(hdu.data, header=hdu.header))\n data = _astropy.pyfits.HDUList(data)\n return cls(\n data,\n current_time=first_image.current_time,\n unit_header=first_image[0].header,\n )\n\n def create_sky_wcs(\n self,\n sky_center,\n sky_scale,\n ctype=None,\n crota=None,\n cd=None,\n pc=None,\n wcsname=\"celestial\",\n replace_old_wcs=True,\n ):\n \"\"\"\n Takes a Cartesian WCS and converts it to one in a\n sky-based coordinate system.\n\n Parameters\n ----------\n sky_center : iterable of floats\n Reference coordinates of the WCS in degrees.\n sky_scale : tuple or YTQuantity\n Conversion between an angle unit and a length unit,\n e.g. (3.0, \"arcsec/kpc\")\n ctype : list of strings, optional\n The type of the coordinate system to create. Default:\n A \"tangential\" projection.\n crota : 2-element ndarray, optional\n Rotation angles between cartesian coordinates and\n the celestial coordinates.\n cd : 2x2-element ndarray, optional\n Dimensioned coordinate transformation matrix.\n pc : 2x2-element ndarray, optional\n Coordinate transformation matrix.\n wcsname : string, optional\n The name of the WCS to be stored in the FITS header.\n replace_old_wcs : boolean, optional\n Whether or not to overwrite the default WCS of the\n FITSImageData instance. If false, a second WCS will\n be added to the header. 
Default: True.\n \"\"\"\n if ctype is None:\n ctype = [\"RA---TAN\", \"DEC--TAN\"]\n old_wcs = self.wcs\n naxis = old_wcs.naxis\n crval = [sky_center[0], sky_center[1]]\n if isinstance(sky_scale, YTQuantity):\n scaleq = sky_scale\n else:\n scaleq = YTQuantity(sky_scale[0], sky_scale[1])\n if scaleq.units.dimensions != dimensions.angle / dimensions.length:\n raise RuntimeError(\n f\"sky_scale {sky_scale} not in correct dimensions of angle/length!\"\n )\n deltas = old_wcs.wcs.cdelt\n units = [str(unit) for unit in old_wcs.wcs.cunit]\n new_dx = (YTQuantity(-deltas[0], units[0]) * scaleq).in_units(\"deg\")\n new_dy = (YTQuantity(deltas[1], units[1]) * scaleq).in_units(\"deg\")\n new_wcs = _astropy.pywcs.WCS(naxis=naxis)\n cdelt = [new_dx.v, new_dy.v]\n cunit = [\"deg\"] * 2\n if naxis == 3:\n crval.append(old_wcs.wcs.crval[2])\n cdelt.append(old_wcs.wcs.cdelt[2])\n ctype.append(old_wcs.wcs.ctype[2])\n cunit.append(old_wcs.wcs.cunit[2])\n new_wcs.wcs.crpix = old_wcs.wcs.crpix\n new_wcs.wcs.cdelt = cdelt\n new_wcs.wcs.crval = crval\n new_wcs.wcs.cunit = cunit\n new_wcs.wcs.ctype = ctype\n if crota is not None:\n new_wcs.wcs.crota = crota\n if cd is not None:\n new_wcs.wcs.cd = cd\n if pc is not None:\n new_wcs.wcs.cd = pc\n if replace_old_wcs:\n self.set_wcs(new_wcs, wcsname=wcsname)\n else:\n self.set_wcs(new_wcs, wcsname=wcsname, suffix=\"a\")\n\n\nclass FITSImageBuffer(FITSImageData):\n pass\n\n\ndef sanitize_fits_unit(unit):\n if unit == \"Mpc\":\n mylog.info(\"Changing FITS file length unit to kpc.\")\n unit = \"kpc\"\n elif unit == \"au\":\n unit = \"AU\"\n return unit\n\n\naxis_wcs = [[1, 2], [0, 2], [0, 1]]\n\n\ndef construct_image(ds, axis, data_source, center, image_res, width, length_unit):\n if width is None:\n width = ds.domain_width[axis_wcs[axis]]\n unit = ds.get_smallest_appropriate_unit(width[0])\n mylog.info(\n \"Making an image of the entire domain, \"\n \"so setting the center to the domain center.\"\n )\n else:\n width = ds.coordinates.sanitize_width(axis, width, None)\n unit = str(width[0].units)\n if is_sequence(image_res):\n nx, ny = image_res\n else:\n nx, ny = image_res, image_res\n dx = width[0] / nx\n dy = width[1] / ny\n crpix = [0.5 * (nx + 1), 0.5 * (ny + 1)]\n if unit == \"unitary\":\n unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())\n elif unit == \"code_length\":\n unit = ds.get_smallest_appropriate_unit(ds.quan(1.0, \"code_length\"))\n unit = sanitize_fits_unit(unit)\n if length_unit is None:\n length_unit = unit\n if any(char.isdigit() for char in length_unit) and \"*\" in length_unit:\n length_unit = length_unit.split(\"*\")[-1]\n cunit = [length_unit] * 2\n ctype = [\"LINEAR\"] * 2\n cdelt = [dx.in_units(length_unit), dy.in_units(length_unit)]\n if is_sequence(axis):\n crval = center.in_units(length_unit)\n else:\n crval = [center[idx].in_units(length_unit) for idx in axis_wcs[axis]]\n if hasattr(data_source, \"to_frb\"):\n if is_sequence(axis):\n frb = data_source.to_frb(width[0], (nx, ny), height=width[1])\n else:\n frb = data_source.to_frb(width[0], (nx, ny), center=center, height=width[1])\n else:\n frb = None\n w = _astropy.pywcs.WCS(naxis=2)\n w.wcs.crpix = crpix\n w.wcs.cdelt = cdelt\n w.wcs.crval = crval\n w.wcs.cunit = cunit\n w.wcs.ctype = ctype\n return w, frb, length_unit\n\n\ndef assert_same_wcs(wcs1, wcs2):\n from numpy.testing import assert_allclose\n\n assert wcs1.naxis == wcs2.naxis\n for i in range(wcs1.naxis):\n assert wcs1.wcs.cunit[i] == wcs2.wcs.cunit[i]\n assert wcs1.wcs.ctype[i] == wcs2.wcs.ctype[i]\n 
assert_allclose(wcs1.wcs.crpix, wcs2.wcs.crpix)\n assert_allclose(wcs1.wcs.cdelt, wcs2.wcs.cdelt)\n assert_allclose(wcs1.wcs.crval, wcs2.wcs.crval)\n crota1 = getattr(wcs1.wcs, \"crota\", None)\n crota2 = getattr(wcs2.wcs, \"crota\", None)\n if crota1 is None or crota2 is None:\n assert crota1 == crota2\n else:\n assert_allclose(wcs1.wcs.crota, wcs2.wcs.crota)\n cd1 = getattr(wcs1.wcs, \"cd\", None)\n cd2 = getattr(wcs2.wcs, \"cd\", None)\n if cd1 is None or cd2 is None:\n assert cd1 == cd2\n else:\n assert_allclose(wcs1.wcs.cd, wcs2.wcs.cd)\n pc1 = getattr(wcs1.wcs, \"pc\", None)\n pc2 = getattr(wcs2.wcs, \"pc\", None)\n if pc1 is None or pc2 is None:\n assert pc1 == pc2\n else:\n assert_allclose(wcs1.wcs.pc, wcs2.wcs.pc)\n\n\nclass FITSSlice(FITSImageData):\n r\"\"\"\n Generate a FITSImageData of an on-axis slice.\n\n Parameters\n ----------\n ds : :class:`~yt.data_objects.static_output.Dataset`\n The dataset object.\n axis : character or integer\n The axis of the slice. One of \"x\",\"y\",\"z\", or 0,1,2.\n fields : string or list of strings\n The fields to slice\n image_res : an int or 2-tuple of ints\n Specify the resolution of the resulting image. A single value will be\n used for both axes, whereas a tuple of values will be used for the\n individual axes. Default: 512\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set\n to 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\")\n or (\"max\",\"dark_matter_density\"). Units can be specified by passing in\n *center* as a tuple containing a coordinate and string unit name or by\n passing in a YTArray. If a list or unitless array is supplied, code\n units are assumed.\n width : tuple or a float.\n Width can have four different formats to support variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') specifies a width that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a\n width that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) specifies a width that has an\n x width of 0.2 and a y width of 0.3 in code units.\n length_unit : string, optional\n the length units that the coordinates are written in. 
The default\n is to use the default length unit of the dataset.\n \"\"\"\n\n def __init__(\n self,\n ds,\n axis,\n fields,\n image_res=512,\n center=\"c\",\n width=None,\n length_unit=None,\n **kwargs,\n ):\n fields = list(iter_fields(fields))\n axis = fix_axis(axis, ds)\n center, dcenter = ds.coordinates.sanitize_center(center, axis)\n slc = ds.slice(axis, center[axis], **kwargs)\n w, frb, lunit = construct_image(\n ds, axis, slc, dcenter, image_res, width, length_unit\n )\n super().__init__(frb, fields=fields, length_unit=lunit, wcs=w)\n\n\nclass FITSProjection(FITSImageData):\n r\"\"\"\n Generate a FITSImageData of an on-axis projection.\n\n Parameters\n ----------\n ds : :class:`~yt.data_objects.static_output.Dataset`\n The dataset object.\n axis : character or integer\n The axis along which to project. One of \"x\",\"y\",\"z\", or 0,1,2.\n fields : string or list of strings\n The fields to project\n image_res : an int or 2-tuple of ints\n Specify the resolution of the resulting image. A single value will be\n used for both axes, whereas a tuple of values will be used for the\n individual axes. Default: 512\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set\n to 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\")\n or (\"max\",\"dark_matter_density\"). Units can be specified by passing in\n *center* as a tuple containing a coordinate and string unit name or by\n passing in a YTArray. If a list or unitless array is supplied, code\n units are assumed.\n width : tuple or a float.\n Width can have four different formats to support variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') specifies a width that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a\n width that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) specifies a width that has an\n x width of 0.2 and a y width of 0.3 in code units.\n weight_field : string\n The field used to weight the projection.\n length_unit : string, optional\n the length units that the coordinates are written in. 
The default\n is to use the default length unit of the dataset.\n \"\"\"\n\n def __init__(\n self,\n ds,\n axis,\n fields,\n image_res=512,\n center=\"c\",\n width=None,\n weight_field=None,\n length_unit=None,\n **kwargs,\n ):\n fields = list(iter_fields(fields))\n axis = fix_axis(axis, ds)\n center, dcenter = ds.coordinates.sanitize_center(center, axis)\n prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs)\n w, frb, lunit = construct_image(\n ds, axis, prj, dcenter, image_res, width, length_unit\n )\n super().__init__(frb, fields=fields, length_unit=lunit, wcs=w)\n\n\nclass FITSOffAxisSlice(FITSImageData):\n r\"\"\"\n Generate a FITSImageData of an off-axis slice.\n\n Parameters\n ----------\n ds : :class:`~yt.data_objects.static_output.Dataset`\n The dataset object.\n normal : a sequence of floats\n The vector normal to the projection plane.\n fields : string or list of strings\n The fields to slice\n image_res : an int or 2-tuple of ints\n Specify the resolution of the resulting image. A single value will be\n used for both axes, whereas a tuple of values will be used for the\n individual axes. Default: 512\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set\n to 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\")\n or (\"max\",\"dark_matter_density\"). Units can be specified by passing in\n *center* as a tuple containing a coordinate and string unit name or by\n passing in a YTArray. If a list or unitless array is supplied, code\n units are assumed.\n width : tuple or a float.\n Width can have four different formats to support variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') specifies a width that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a\n width that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) specifies a width that has an\n x width of 0.2 and a y width of 0.3 in code units.\n north_vector : a sequence of floats\n A vector defining the 'up' direction in the plot. This\n option sets the orientation of the slicing plane. If not\n set, an arbitrary grid-aligned north-vector is chosen.\n length_unit : string, optional\n the length units that the coordinates are written in. 
The default\n is to use the default length unit of the dataset.\n \"\"\"\n\n def __init__(\n self,\n ds,\n normal,\n fields,\n image_res=512,\n center=\"c\",\n width=None,\n north_vector=None,\n length_unit=None,\n ):\n fields = list(iter_fields(fields))\n center, dcenter = ds.coordinates.sanitize_center(center, 4)\n cut = ds.cutting(normal, center, north_vector=north_vector)\n center = ds.arr([0.0] * 2, \"code_length\")\n w, frb, lunit = construct_image(\n ds, normal, cut, center, image_res, width, length_unit\n )\n super().__init__(frb, fields=fields, length_unit=lunit, wcs=w)\n\n\nclass FITSOffAxisProjection(FITSImageData):\n r\"\"\"\n Generate a FITSImageData of an off-axis projection.\n\n Parameters\n ----------\n ds : :class:`~yt.data_objects.static_output.Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n normal : a sequence of floats\n The vector normal to the projection plane.\n fields : string, list of strings\n The name of the field(s) to be plotted.\n image_res : an int or 2-tuple of ints\n Specify the resolution of the resulting image. A single value will be\n used for both axes, whereas a tuple of values will be used for the\n individual axes. Default: 512\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set\n to 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\")\n or (\"max\",\"dark_matter_density\"). Units can be specified by passing in\n *center* as a tuple containing a coordinate and string unit name or by\n passing in a YTArray. If a list or unitless array is supplied, code\n units are assumed.\n width : tuple or a float.\n Width can have four different formats to support variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') specifies a width that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a\n width that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) specifies a width that has an\n x width of 0.2 and a y width of 0.3 in code units.\n depth : A tuple or a float\n A tuple containing the depth to project through and the string\n key of the unit: (width, 'unit'). If set to a float, code units\n are assumed\n weight_field : string\n The name of the weighting field. Set to None for no weight.\n north_vector : a sequence of floats\n A vector defining the 'up' direction in the plot. This\n option sets the orientation of the slicing plane. If not\n set, an arbitrary grid-aligned north-vector is chosen.\n method : string\n The method of projection. 
Valid methods are:\n\n \"integrate\" with no weight_field specified : integrate the requested\n field along the line of sight.\n\n \"integrate\" with a weight_field specified : weight the requested\n field by the weighting field and integrate along the line of sight.\n\n \"sum\" : This method is the same as integrate, except that it does not\n multiply by a path length when performing the integration, and is\n just a straight summation of the field along the given axis. WARNING:\n This should only be used for uniform resolution grid datasets, as other\n datasets may result in unphysical images.\n data_source : yt.data_objects.data_containers.YTSelectionContainer, optional\n If specified, this will be the data source used for selecting regions\n to project.\n length_unit : string, optional\n the length units that the coordinates are written in. The default\n is to use the default length unit of the dataset.\n \"\"\"\n\n def __init__(\n self,\n ds,\n normal,\n fields,\n center=\"c\",\n width=(1.0, \"unitary\"),\n weight_field=None,\n image_res=512,\n data_source=None,\n north_vector=None,\n depth=(1.0, \"unitary\"),\n method=\"integrate\",\n length_unit=None,\n ):\n fields = list(iter_fields(fields))\n center, dcenter = ds.coordinates.sanitize_center(center, 4)\n buf = {}\n width = ds.coordinates.sanitize_width(normal, width, depth)\n wd = tuple(el.in_units(\"code_length\").v for el in width)\n if not is_sequence(image_res):\n image_res = (image_res, image_res)\n res = (image_res[0], image_res[1])\n if data_source is None:\n source = ds\n else:\n source = data_source\n for field in fields:\n buf[field] = off_axis_projection(\n source,\n center,\n normal,\n wd,\n res,\n field,\n north_vector=north_vector,\n method=method,\n weight=weight_field,\n ).swapaxes(0, 1)\n center = ds.arr([0.0] * 2, \"code_length\")\n w, not_an_frb, lunit = construct_image(\n ds, normal, buf, center, image_res, width, length_unit\n )\n super().__init__(buf, fields=fields, wcs=w, length_unit=lunit, ds=ds)\n" ]
[ [ "numpy.abs", "numpy.ones", "numpy.digitize", "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.argmax", "numpy.argmin", "numpy.abs" ], [ "numpy.array", "numpy.zeros", "numpy.sqrt", "numpy.ones" ], [ "numpy.sqrt", "numpy.asarray", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
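The yt FITSImageData code in the row above wraps slices and projections of a dataset into astropy FITS HDU lists. As a minimal, hedged usage sketch — assuming the module imports as yt.visualization.fits_image, and with a placeholder dataset name, field, and width chosen purely for illustration — the intended workflow is roughly:

# Hypothetical usage of the FITSSlice / convolve / writeto API from the code above.
# Dataset name, field, resolution and width are illustrative assumptions.
import yt
from yt.visualization.fits_image import FITSSlice

ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # placeholder dataset
fid = FITSSlice(ds, "z", ("gas", "density"), image_res=512, width=(500.0, "kpc"))
fid.convolve("density", (3.0, "kpc"))           # Gaussian smoothing, stddev in physical units
fid.update_header("all", "OBSERVER", "sketch")  # write a key into every HDU header
fid.writeto("sloshing_density_slice.fits", overwrite=True)

The convolve call mirrors the example embedded in the docstring of the source; everything else here is a sketch, not the library's documented recipe.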
veras222/cs231n-assignment
[ "c94bf669011b1dc3f237b867a90b071ac16f8ce5" ]
[ "assignments/assignment1/metrics.py" ]
[ "import numpy as np\ndef binary_classification_metrics(prediction, ground_truth):\n '''\n Computes metrics for binary classification\n\n Arguments:\n prediction, np array of bool (num_samples) - model predictions\n ground_truth, np array of bool (num_samples) - true labels\n\n Returns:\n precision, recall, f1, accuracy - classification metrics\n '''\n tp = np.count_nonzero(np.logical_and(prediction,ground_truth))\n fn = np.count_nonzero(np.logical_and(np.logical_not(prediction),ground_truth))\n fp = np.count_nonzero(np.logical_and(prediction,np.logical_not(ground_truth)))\n tn = np.count_nonzero(np.logical_and(np.logical_not(prediction), np.logical_not(ground_truth)))\n print(\"prediction\" )\n print(prediction)\n print(\"ground_truth\" )\n print(ground_truth)\n print(\"TP: %d\" % tp)\n print(\"FN: %d\" % fn)\n print(\"FP: %d\" % fp)\n print(\"TN: %d\" % tn)\n\n precision = tp/(tp+fp)\n recall = tp/(tp+fn)\n accuracy = (tp + tn)/(tp+tn+fp+fn)\n f1 = 2*precision*recall/(precision+recall)\n \n return precision, recall, f1, accuracy\n\n\ndef multiclass_accuracy(prediction, ground_truth):\n '''\n Computes metrics for multiclass classification\n Arguments:\n prediction, np array of int (num_samples) - model predictions\n ground_truth, np array of int (num_samples) - true labels\n Returns:\n accuracy - ratio of accurate predictions to total samples\n '''\n return np.mean(prediction == ground_truth)\n" ]
[ [ "numpy.logical_not", "numpy.logical_and", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
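A small, self-contained numeric check of the binary_classification_metrics logic from the metrics.py row above; the toy prediction/ground-truth arrays and the hand-computed values in the comments are my own illustration, not part of the source.

# Standalone check of the confusion-matrix arithmetic used in metrics.py above.
import numpy as np

prediction   = np.array([True, True, False, False, True])
ground_truth = np.array([True, False, False, True, True])

tp = np.count_nonzero(np.logical_and(prediction, ground_truth))                               # 2
fp = np.count_nonzero(np.logical_and(prediction, np.logical_not(ground_truth)))               # 1
fn = np.count_nonzero(np.logical_and(np.logical_not(prediction), ground_truth))               # 1
tn = np.count_nonzero(np.logical_and(np.logical_not(prediction), np.logical_not(ground_truth)))  # 1

precision = tp / (tp + fp)                           # 2/3
recall    = tp / (tp + fn)                           # 2/3
accuracy  = (tp + tn) / (tp + tn + fp + fn)          # 3/5
f1 = 2 * precision * recall / (precision + recall)   # 2/3

Note that, as written in the source, precision and recall divide by (tp + fp) and (tp + fn) without guarding against zero denominators, so degenerate inputs would raise a ZeroDivisionError.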
weiqiao/trajopt
[ "11f75be747a02de3b45baf3e9c8db6aa7ba789f2" ]
[ "trajopt/algos/mppi.py" ]
[ "\"\"\"\nThis implements a shooting trajectory optimization algorithm.\nThe closest known algorithm is perhaps MPPI and hence we stick to that terminology.\nUses a filtered action sequence to generate smooth motions.\n\"\"\"\n\nimport numpy as np\nfrom trajopt.algos.trajopt_base import Trajectory\nfrom trajopt.utils import gather_paths_parallel\n\nclass MPPI(Trajectory):\n def __init__(self, env, H, paths_per_cpu,\n num_cpu=1,\n kappa=1.0,\n gamma=1.0,\n mean=None,\n filter_coefs=None,\n default_act='repeat',\n warmstart=True,\n seed=123,\n ):\n self.env, self.seed = env, seed\n self.n, self.m = env.observation_dim, env.action_dim\n self.H, self.paths_per_cpu, self.num_cpu = H, paths_per_cpu, num_cpu\n self.warmstart = warmstart\n\n self.mean, self.filter_coefs, self.kappa, self.gamma = mean, filter_coefs, kappa, gamma\n if mean is None:\n self.mean = np.zeros(self.m)\n if filter_coefs is None:\n self.filter_coefs = [np.ones(self.m), 1.0, 0.0, 0.0]\n self.default_act = default_act\n\n self.sol_state = []\n self.sol_act = []\n self.sol_reward = []\n self.sol_obs = []\n\n self.env.reset()\n self.env.set_seed(seed)\n self.env.reset(seed=seed)\n self.sol_state.append(self.env.get_env_state().copy())\n self.sol_obs.append(self.env.get_obs())\n self.act_sequence = np.ones((self.H, self.m)) * self.mean\n self.init_act_sequence = self.act_sequence.copy()\n\n def update(self, paths):\n num_traj = len(paths)\n act = np.array([paths[i][\"actions\"] for i in range(num_traj)])\n R = self.score_trajectory(paths)\n S = np.exp(self.kappa*(R-np.max(R)))\n\n # blend the action sequence\n weighted_seq = S*act.T\n act_sequence = np.sum(weighted_seq.T, axis=0)/(np.sum(S) + 1e-6)\n self.act_sequence = act_sequence\n # idx = np.argmax(R)\n # self.act_sequence = act[idx]\n\n def advance_time(self, act_sequence=None):\n act_sequence = self.act_sequence if act_sequence is None else act_sequence\n # accept first action and step\n action = act_sequence[0].copy()\n self.env.real_env_step(True)\n _, r, _, _ = self.env.step(action)\n self.sol_act.append(action)\n self.sol_state.append(self.env.get_env_state().copy())\n self.sol_obs.append(self.env.get_obs())\n self.sol_reward.append(r)\n\n # get updated action sequence\n if self.warmstart:\n self.act_sequence[:-1] = act_sequence[1:]\n if self.default_act == 'repeat':\n self.act_sequence[-1] = self.act_sequence[-2]\n else:\n self.act_sequence[-1] = self.mean.copy()\n else:\n self.act_sequence = self.init_act_sequence.copy()\n\n def score_trajectory(self, paths):\n scores = np.zeros(len(paths))\n for i in range(len(paths)):\n scores[i] = 0.0\n for t in range(paths[i][\"rewards\"].shape[0]):\n scores[i] += (self.gamma**t)*paths[i][\"rewards\"][t]\n return scores\n\n def do_rollouts(self, seed):\n paths = gather_paths_parallel(self.env.env_id,\n self.sol_state[-1],\n self.act_sequence,\n self.filter_coefs,\n seed,\n self.paths_per_cpu,\n self.num_cpu,\n )\n return paths\n\n def train_step(self, niter=1):\n t = len(self.sol_state) - 1\n for _ in range(niter):\n paths = self.do_rollouts(self.seed+t)\n self.update(paths)\n self.advance_time()\n" ]
[ [ "numpy.max", "numpy.zeros", "numpy.sum", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
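The MPPI trainer row above blends the sampled action sequences with exponentiated-return weights (its update method). Below is a standalone numpy sketch of just that update step; the array shapes, the random returns, and the kappa value are made-up assumptions for illustration.

# Sketch of the MPPI exponentiated-reward blending step used in mppi.py above.
import numpy as np

rng = np.random.default_rng(0)
num_traj, H, m = 32, 16, 2                   # rollouts, horizon, action dimension
act = rng.normal(size=(num_traj, H, m))      # sampled action sequences
R = rng.normal(size=num_traj)                # total (discounted) return per rollout
kappa = 5.0                                  # illustrative temperature

S = np.exp(kappa * (R - np.max(R)))          # softmax-style weights, max-shifted for stability
act_sequence = np.einsum('i,ihm->hm', S, act) / (np.sum(S) + 1e-6)
print(act_sequence.shape)                    # (H, m): the blended plan

Shifting the returns by max(R) before exponentiating is the usual numerical-stability trick; the 1e-6 guard in the denominator matches the one used in the source.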
Alex-W-16/ElecSus-AW
[ "dba476fe45df57fb838b773aaf5bab191d40af16" ]
[ "elecsus/libs/solve_dielectric_NEW.py" ]
[ "# Copyright 2017 J. Keaveney\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#\t http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSolve the dielectric tensor for the roots of the complex refractive index by setting determinant to 0\n\nUse analytic solutions for the 'easy' geometries - Faraday (B-field aligned with wavevector) and Voigt (B-field orthogonal to k-vector)\nUse sympy to calculate solutions for all other non-trivial geometries.\nSince the solutions for the non-trivial geometries depend on the susceptibility, array operations don't completely work, so it's *much* slower to calculate\n\nLast updated 2018-02-19 JK\n\"\"\"\n# py 2.7 compatibility\nfrom __future__ import (division, print_function, absolute_import)\n\n\nfrom sympy import Symbol, cos, sin, pi, simplify, eye, powsimp, powdenest, lambdify, solve, solveset\n#from sympy.solvers.solveset import linsolve as solve\nfrom sympy.matrices import det, Matrix\nfrom sympy import Array\n\nimport numpy as np\n\nimport scipy.linalg as la\nfrom scipy.linalg import qr\nimport scipy\n\nimport time\n\nfrom .FundamentalConstants import e0\n\ndef square_root(alpha, z):\n\t\"Square root with different branch cut defined by alpha parameter.\"\n\t##FROM https://flothesof.github.io/branch-cuts-with-square-roots.html\n\targument = np.angle(z) # between -pi and +pi\n\tmodulus = np.abs(z)\n\targument = np.mod(argument + alpha, 2 * np.pi) - alpha\n\treturn np.sqrt(modulus) * np.exp(1j * argument / 2)\n\ndef non_standard_n1_n2_RotMat(chiL, chiR, chiZ, THETA):\n\t'''\n\tWave equation solution method for an arbitrary, non-standard magnetic field angle THETA.\n\tThis function is called by the solve_diel function if the system is not in the Voigt or Faraday geometry.\n\n\tReturns the rotation matrix to transform the coordinate system into the normal mode basis,\n\tand returns the two refractive index arrays.\n\n\tNote: This function is not valid for the systems in the Voigt or Faraday\n\tgeometries due to the cotan(THETA) and cosec(THETA) terms.\n\n\tInputs:\n\n\t\tchiL, chiR, chiZ\t:\t1D numpy arrays, of length N, that are the frequency-dependent electric susceptibilities\n\t\tTHETA\t\t\t\t:\tFloat, Magnetic field angle in radians\n\n\tOutputs:\n\t\tRotMat\t:\tRotation matrix to transform coordinate system, dimensions (3, 3, N)\n\t\tn1\t\t:\tFirst solution for refractive index, dimensions (N)\n\t\tn2\t\t:\tSecond solution for refractive index, dimensions (N)\n\t'''\n\n\n\t# Storing values of trigonometric functions for future use\n\tC = np.cos(THETA)\t\t#cos\n\tS = np.sin(THETA)\t\t#sin\n\tcT = 1/np.tan(THETA) \t#cotan\n\tcS = 1/np.sin(THETA)\t#cosec\n\n\t# Relating the elements of the dielectric tensor to the electric susceptibility\n\te_x = 0.5*(2.+chiL+chiR)\n\te_xy = 0.5j*(chiR-chiL)\n\te_z = 1.0+chiZ\n\n\n\t##METHOD USING DEFAULT NUMPY BRANCH CUT\n\n\t# Calculating the elements of the complex refractive index\n\t# (0j in elemB to ensure numpy succesfully computes the square root for otherwise non-complex values)\n\telemA = 2*e_x*e_z+(e_x**2-e_x*e_z+e_xy**2)*S**2\n\telemB = 
np.sqrt((e_x**2-e_x*e_z+e_xy**2)**2*S**4-4*e_xy**2*e_z**2*C**2 +0j)\n\telemC = 2*(e_x*S**2+e_z*C**2)\n\n\t# Using the elements to calculate the solutions for the refractive index and their squares\n\tn1_sq = (elemA - elemB)/elemC\n\tn2_sq = (elemA + elemB)/elemC\n\tn1 = np.sqrt(n1_sq)\n\tn2 = np.sqrt(n2_sq)\n\n\n\t##METHOD USING A CUSTOMISED BRANCH CUT USING square_root FUNCTION (Commented out by default)\n\n\t# alpha defines the branch cut location and is arbitrary\n\n\t'''alpha = 3*np.pi/2\n\n\telemA = 2*e_x*e_z+(e_x**2-e_x*e_z+e_xy**2)*S**2\n\telemB = square_root(alpha, (e_x**2-e_x*e_z+e_xy**2)**2*S**4-4*e_xy**2*e_z**2*C**2 +0j)\n\telemC = 2*(e_x*S**2+e_z*C**2)\n\n\tn1_sq = (elemA - elemB) / elemC\n\tn2_sq = (elemA + elemB) / elemC\n\tn1 = square_root(alpha, n1_sq)\n\tn2 = square_root(alpha, n2_sq)'''\n\n\t## Calculating the elements of the eigenvectors for n1 and n2 according to the analytical solution\n\n\t# n1\n\tn1e1 = (2*e_z*cT*(e_x+e_z*cT**2))/(-e_x**2-e_xy**2+3*e_x*e_z+\n\t\t\t2*e_z**2*cT**2 + (elemB - 2*e_x*e_z)*cS**2)\n\n\tn1e2 = (2*e_xy*(e_x + e_z*cT**2)*(e_x**2+e_xy**2-3*e_x*e_z - 2*e_z*(e_x + e_z)*cT**2 -\n\t\t\t2*e_z**2*cT**4 - (elemB -2*e_x*e_z)*cS**2)*S)/((e_x**2 + e_xy**2 - 3*e_x*e_z -\n\t\t\t2*e_z**2*cT**2 - (elemB - 2*e_x*e_z)*cS**2) * (e_x**2 - e_xy**2 +\n\t\t\te_x*e_z + 2*e_x*e_z*cT**2 + (elemB - 2*e_x*e_z)*cS**2))\n\n\t# n2\n\tn2e1 = -((2*e_z*cT*(e_x+e_z*cT**2))/(e_x**2 + e_xy**2 - 3*e_x*e_z -\n\t\t\t\t2*e_z**2*cT**2 + (elemB + 2*e_x*e_z)*cS**2))\n\n\tn2e2 = (2*e_xy*(e_x+e_z*cT**2)*S**3*(elemB+2*e_x*e_z +\n\t\t\te_z*(-e_x - 2*e_z + e_x*np.cos(2*THETA))*cT**2 + (e_x**2 + e_xy**2 -\n\t\t\t3*e_x*e_z)*S**2))/((elemB+2*e_x*e_z -\n\t\t\t2*e_z**2*C**2 + (e_x**2 + e_xy**2 -\n\t\t\t3*e_x*e_z)*S**2)*(-elemB - 2*e_x*e_z +\n\t\t\t2*e_x*e_z*C**2 + (e_x**2 - e_xy**2 + e_x*e_z)*S**2))\n\n\t# Constructing the Rotation Matrix\n\tones = np.ones(len(e_x))\n\tzeros = np.zeros(len(e_x))\n\n\t# Populate the rotation matrix\n\n\tRotMat = np.array([[n1e1,n2e1,zeros],\n\t\t\t\t\t [n1e2,n2e2,zeros],\n\t\t\t\t\t [ones,ones,ones]]).T\n\n\treturn n1, n2, RotMat\n\n\n\n\n\n\n\ndef solve_diel(chiL, chiR, chiZ, THETA, Bfield, verbose=False,force_numeric=False, use_old_method=False):\n\t'''\n\tSolves the wave equation to find the two propagating normal modes of the system,\n\tfor a given magnetic field angle THETA. 
For the general case, use symbolic python to\n\tsolve for the roots of n-squared.\n\t(Escapes this slow approach for the two analytic cases for the Voigt and Faraday geometries)\n\n\tReturns the rotation matrix to transform the coordinate system into the normal mode basis,\n\tand returns the two refractive index arrays.\n\n\tInputs:\n\n\t\tchiL, chiR, chiZ\t:\t1D lists or numpy arrays, of length N, that are the frequency-dependent electric susceptibilities\n\t\tTHETA\t\t\t\t:\tFloat, Magnetic field angle in radians\n\t\tBfield\t\t\t\t:\tFloat, Magnitude of applied magnetic field (skips slow approach if magnetic field is very close to zero)\n\n\tOptions:\n\n\t\tverbose\t\t\t:\tBoolean to output more print statements (timing reports mostly)\n\t\tforce_numeric\t:\tIf True, forces all angles to go through the numeric approach, rather than escaping for the analytic cases (THETA=0, THETA=pi/2...)\n\t\tuse_old_method\t:\tIf True, forces use of numerical method rather than analytical method.\n\n\n\tOutputs:\n\t\tRotMat\t:\tRotation matrix to transform coordinate system, dimensions (3, 3, N)\n\t\tn1\t\t:\tFirst solution for refractive index, dimensions (N)\n\t\tn2\t\t:\tSecond solution for refractive index, dimensions (N)\n\n\t'''\n\n\tapprox_threshold = 1e-4 \t##Value used do determine if THETA is close enough\n\t\t\t\t\t\t\t\t##to Voigt or Faraday geometries to use an approximation\n\n\n\n\tif verbose:\n\t\tprint(('B-field angle (rad, pi rad): ',THETA, THETA/np.pi))\n\n\tstt = time.clock()\n\n\t# make chiL,R,Z arrays if not already\n\tchiL = np.array(chiL)\n\tchiR = np.array(chiR)\n\tchiZ = np.array(chiZ)\n\n\t#verbose=True\n\n\t#### Escape the slow loop for analytic (Faraday and Voigt) cases\n\t## For these analytic cases we can use array operations and it is therefore\n\t## much faster to compute\n\tif (abs(THETA%(2*np.pi) - np.pi/2) < approx_threshold) or (abs(THETA%(2*np.pi) - 3*np.pi/2) < approx_threshold) and (not force_numeric):\n\t\t# ANALYTIC SOLNS FOR VOIGT\n\t\tif verbose: print('Voigt - analytic')\n\n\t\t# solutions for elements of the dielectric tensor:\n\t\tex = 0.5 * (2. + chiL + chiR)\n\t\texy = 0.5j * (chiR - chiL)\n\t\tez = 1.0 + chiZ\n\n\t\t# refractive indices to propagate\n\t\tn1 = np.sqrt(ex + exy**2/ex)\n\t\tn2 = np.sqrt(ez)\n\n\t\t#ev1 = [np.zeros(len(ex)),ex/exy,np.ones(len(ex))]\n\t\t#ev2 = [np.ones(len(ex)),np.zeros(len(ex)),np.zeros(len(ex))]\n\t\t#ev3 = [np.zeros(len(ex)),np.zeros(len(ex)),np.ones(len(ex))]\n\n\t\t#RotMat = np.array([ev1,ev2,ev3])\n\n\t\tones = np.ones(len(ex))\n\t\tzeros = np.zeros(len(ex))\n\n\n\t\t# *Changed output to new array format\n\n\t\tRotMat = np.array([[zeros, ones, zeros],\n\t\t\t\t\t\t [ex/exy, zeros, zeros],\n\t\t\t\t\t\t [ones, zeros, ones]]).T\n\n\t\tif verbose:\n\t\t\tprint('Shortcut:')\n\t\t\tprint((RotMat.shape))\n\t\t\tprint((n1.shape))\n\t\t\tprint((n2.shape))\n\n\telif ((abs(THETA) < approx_threshold) or ((abs(THETA - np.pi)) < approx_threshold) or abs(Bfield)<1e-2) and (not force_numeric): ## Use Faraday geometry if Bfield is very close to zero\n\t\t# ANALYTIC SOLNS FOR FARADAY\n\t\t#if verbose:\n\t\tif verbose: print('Faraday - analytic TT')\n\n\t\tex = 0.5 * (2. 
+ chiL + chiR)\n\t\texy = 0.5j * (chiR - chiL)\n\t\te_z = 1.0 + chiZ\n\n\t\tn1 = np.sqrt(ex + 1.j*exy)\n\t\tn2 = np.sqrt(ex - 1.j*exy)\n\n\t\t#ev1 = np.array([-1.j*np.ones(len(ex)),np.ones(len(ex)),np.zeros(len(ex))])\n\t\t#ev2 = np.array([1.j*np.ones(len(ex)),np.ones(len(ex)),np.zeros(len(ex))])\n\t\t#ev3 = [np.zeros(len(ex)),np.zeros(len(ex)),np.ones(len(ex))]\n\n\t\tones = np.ones(len(ex))\n\t\tzeros = np.zeros(len(ex))\n\n\t\t# *Changed output to new array format\n\n\t\tif (abs(THETA) < approx_threshold):\n\t\t\t#RotMat = np.array([ev1,ev2,ev3])\n\n\t\t\tRotMat = np.array([[-1.j*ones, 1.j*ones, zeros],\n\t\t\t\t\t\t\t [ones, ones, zeros],\n\t\t\t\t\t\t\t [zeros, zeros, ones]]).T\n\n\t\telse:\n\t\t\t#if anti-aligned, swap the two eigenvectors\n\t\t\t#RotMat = np.array([ev2,ev1,ev3])\n\n\t\t\tRotMat = np.array([[1.j*ones, -1.j*ones, zeros],\n\t\t\t\t\t\t\t [ones, ones, zeros],\n\t\t\t\t\t\t\t [zeros, zeros, ones]]).T\n\n\t\tif verbose:\n\t\t\tprint('Shortcut:')\n\t\t\tprint((RotMat.shape))\n\t\t\tprint((n1.shape))\n\t\t\tprint((n2.shape))\n\n\n\n\n\n\telif use_old_method:\n\t\tprint(\"using old method\")\n\t\tif verbose: print('Non-analytic angle.. This will take a while...')\t##### THIS IS THE ONE THAT's WRONG....\n\t\t# set up sympy symbols\n\t\ttheta = Symbol('theta',real=True)\n\t\tn_sq = Symbol('n_sq')\n\t\te_x = Symbol('e_x')\n\t\te_xy = Symbol('e_xy')\n\t\te_z = Symbol('e_z')\n\n\t\t# General form of the dielectric tensor\n\t\tDielMat = Matrix (( \t[(e_x - n_sq)*cos(theta), e_xy, e_x*sin(theta)],\n\t\t\t\t\t\t\t\t\t[-e_xy * cos(theta), e_x - n_sq, -e_xy*sin(theta)],\n\t\t\t\t\t\t\t\t\t[(n_sq - e_z)*sin(theta), 0, e_z*cos(theta)] \t\t\t))\n\n\t\tet1 = time.clock() - stt\n\n\t\t# Substitute in angle\n\t\tDielMat_sub = DielMat.subs(theta, pi*THETA/np.pi)\n\n\t\tet2 = time.clock() - stt\n\n\t\t# Find solutions for complex indices for a given angle\n\t\tsolns = solve(det(DielMat_sub), n_sq)\n\n\t\tet3a = time.clock() - stt\n\t\t#print et3a\n\n\t\t# Find first refractive index\n\t\tDielMat_sub1 = DielMat_sub.subs(n_sq, solns[0])\n\t\tn1 = np.zeros(len(chiL),dtype='complex')\n\t\tn1old = np.zeros(len(chiL),dtype='complex')\n\t\t# Find second refractive index\n\t\tDielMat_sub2 = DielMat_sub.subs(n_sq, solns[1])\n\t\tn2 = np.zeros(len(chiL),dtype='complex')\n\t\tn2old = np.zeros(len(chiL),dtype='complex')\n\n\t\tet3b = time.clock() - stt\n\n\t\tDsub1 = lambdify((e_x,e_xy,e_z), DielMat_sub1, 'numpy')\n\t\tDsub2 = lambdify((e_x,e_xy,e_z), DielMat_sub2, 'numpy')\n\n\t\tnsub1 = lambdify((e_x,e_xy,e_z), solns[0], 'numpy')\n\t\tnsub2 = lambdify((e_x,e_xy,e_z), solns[1], 'numpy')\n\n\t\t# Initialise rotation matrix\n\t\tRotMat = np.zeros((3,3,len(chiL)),dtype='complex')\n\n\t\tet3c = time.clock() - stt\n\n\t\t# populate refractive index arrays\n\t\tn1 = np.sqrt(nsub1(0.5*(2.+chiL+chiR), 0.5j*(chiR-chiL), (1.0+chiZ)))\n\t\tn2 = np.sqrt(nsub2(0.5*(2.+chiL+chiR), 0.5j*(chiR-chiL), (1.0+chiZ)))\n\n\t\tet3 = time.clock() - stt\n\n\t\tif verbose:\n\t\t\tprint(('setup time:', et1, et1))\n\t\t\tprint(('solve nsq: (total/solve/sub in) ', et3a, et3a-et2, et2-et1))\n\t\t\tprint(('get nsq arrays (tot time / populate ref. index / gen. 
lambdify / sub in): ', et3, et3-et3c, et3c-et3b, et3b-et3a))\n\n\t\t# loop over all elements of chiL,R,Z to populate eigenvectors\n\t\t# time-limiting step for arrays of length >~ 5000\n\t\tfor i, (cL, cR, cZ) in enumerate(zip(chiL,chiR,chiZ)):\n\t\t\t#if verbose: print 'Detuning point i: ',i\n\n\t\t\t#time diagnostics\n\t\t\tst = time.clock()\n\n\n\t\t\t'''\t\n\t\t## OLD and slow method::\n\t\t\t# Sub in values of susceptibility\n\t\t\tDielMat_sub1a = DielMat_sub1.subs(e_x, 0.5*(2.+cL+cR))\n\t\t\tDielMat_sub1a = DielMat_sub1a.subs(e_xy, 0.5j*(cR-cL))\n\t\t\tDielMat_sub1a = DielMat_sub1a.subs(e_z, (1.0+cZ))\n\t\t\t\n\t\t\tet1 = time.clock() - st\n\t\t\t\n\t\t\t# Evaluate and convert to numpy array\n\t\t\tDM = np.array(DielMat_sub1a.evalf())\n\t\t\tDMa = np.zeros((3,3),dtype='complex')\n\t\t\tfor ii in range(3):\n\t\t\t\tfor jj in range(3):\n\t\t\t\t\tDMa[ii,jj] = np.complex128(DM[ii,jj])\n\t\t\t\n\t\t\tet2 = time.clock() - st\n\t\t\n\t\t\t# use scipy to find eigenvector\n\t\t\t#ev1 = Matrix(DMa).nullspace()\n\t\t\t#print 'Sympy: ', ev1\n\t\t\t\n\t\t\tev1old = nullOld(DMa).T[0]\n\t\t\t#ev1 = null(DMaNP).T\n\t\t\t\n\t\t\t# sub in for ref. index\n\t\t\tn1soln = solns[0].subs(e_x, 0.5*(2.+cL+cR))\n\t\t\tn1soln = n1soln.subs(e_xy, 0.5j*(cR-cL))\n\t\t\tn1soln = n1soln.subs(e_z, (1.0+cZ))\n\t\t\t\n\t\t\t# Populate the refractive index array\n\t\t\tn1old[i] = np.sqrt(np.complex128(n1soln.evalf()))\n\t\t## /OLD method\n\t\t\t'''\n\n\t\t\t# NEW method\n\n\t\t\t# Sub in values of susceptibility\n\t\t\tDMaNP = Dsub1(0.5*(2.+cL+cR), 0.5j*(cR-cL), (1.0+cZ))\n\t\t\t#print DMa\n\t\t\tev1 = null(DMaNP).T\n\t\t\t# Populate the refractive index array\n\t\t\t#n1[i] = np.sqrt(nsub1(0.5*(2.+cL+cR), 0.5j*(cR-cL), (1.0+cZ)))\n\n\n\t\t\t'''\n\t\t\t## METHOD COMPARISON\n\t\t\t\n\t\t\tprint 'SymPy:'\n\t\t\tprint DMa\n\t\t\tprint DMa.shape, type(DMa)\n\t\t\tprint 'Numpy'\n\t\t\tprint DMaNP\n\t\t\tprint DMaNP.shape, type(DMaNP)\n\t\t\t\n\t\t\tprint 'Eigenvectors ...'\n\t\t\tprint 'Old: ', ev1old\t\t\t\n\t\t\tprint 'New: ',ev1\n\t\t\t'''\n\n\t\t\t#print '\\n\\n\\n'\n\n\t\t\t#print 'scipy: ', ev1\n\n\t\t\tet3 = time.clock() - st\n\n\t\t\tet4 = time.clock() - st\n\n\t\t\t#\n\t\t\t## Now repeat the above for second eigenvector\n\t\t\t#\n\n\t\t## NEW\n\t\t\t# Sub in values of susceptibility\n\t\t\tDMaNP = Dsub2(0.5*(2.+cL+cR), 0.5j*(cR-cL), (1.0+cZ))\n\t\t\t# Find null eigenvector\n\t\t\tev2 = null(DMaNP).T\n\t\t\t# Populate the refractive index array\n\t\t\t#n2[i] = np.sqrt(nsub2(0.5*(2.+cL+cR), 0.5j*(cR-cL), (1.0+cZ)))\n\n\t\t\tet5 = time.clock() - st\n\n\t\t\t'''\n\t\t## OLD\n\t\t\t# Evaluate and convert to numpy array\n\t\t\tDielMat_sub2a = DielMat_sub2.subs(e_x, 0.5*(2.+cL+cR))\n\t\t\tDielMat_sub2a = DielMat_sub2a.subs(e_xy, 0.5j*(cR-cL))\n\t\t\tDielMat_sub2a = DielMat_sub2a.subs(e_z, (1.0+cZ))\n\t\t\t\n\t\t\tDM = np.array(DielMat_sub2a.evalf())\n\t\t\tDMa = np.zeros((3,3),dtype='complex')\n\t\t\tfor ii in range(3):\n\t\t\t\tfor jj in range(3):\n\t\t\t\t\tDMa[ii,jj] = np.complex128(DM[ii,jj])\n\t\t\t\t\t\n\t\t\tet6 = time.clock() - st\n\t\t\t\n\t\t\t# use scipy to find eigenvector\n\t\t\tev2old = nullOld(DMa).T[0]\n\t\t\t\n\t\t\tet7 = time.clock() - st\n\t\t\t\n\t\t\t# sub in for ref. 
index\n\t\t\tn2soln = solns[1].subs(e_x, 0.5*(2.+cL+cR))\n\t\t\tn2soln = n2soln.subs(e_xy, 0.5j*(cR-cL))\n\t\t\tn2soln = n2soln.subs(e_z, (1.0+cZ))\n\t\t\t\n\t\t\t# Populate the refractive index array\n\t\t\tn2old[i] = np.sqrt(np.complex128(n2soln.evalf()))\n\t\t\t'''\n\n\n\t\t\t# Populate the rotation matrix\n\t\t\tRotMat[:,:,i] = [ev1, ev2, [0,0,1]]\n\n\n\n\t\tet_tot = time.clock() - stt\n\t\tif verbose:\n\t\t\tprint(('Time elapsed (non-analytic angle):', et_tot))\n\n\n\n\telse:\n\t\tif verbose: print(\"Using analytical method\")\n\n\t\t#Uses analytical method\n\t\tn1, n2, RotMat = non_standard_n1_n2_RotMat(chiL, chiR, chiZ, THETA)\n\n\n\t#print(\"RotMat =\", RotMat)\n\t#print(\"n1 = \", n1)\n\t#print(\"n2 = \", n2, '\\n')\n\n\n\n\tif verbose: print('SD done')\n\treturn RotMat, n1, n2\n\n\ndef null(A,tol=1e-6):\n\tee, ev = la.eig(A)\n\n\t#for E,V in zip(ee,ev.T):\n\t#\tprint 'Eigs:',abs(E), '\\t', E#, '\\t', V\n\t#print '\\n'\n\n\tz = list(zip(ee,ev.T))\n\tzs = sorted(z, key=lambda f: abs(f[0])) # sort by absolute value of eigenvectors\n\tees, evs = list(zip(*zs))\n\n\t#for E,V in zip(ee,ev):\n\t#\tprint abs(E), '\\t', E, '::', V\n\n\tif abs(ees[0]<tol):\n\t\treturn evs[0].T\n\telse:\n\t\tprint('No null eigenvector found! List of eigenvalules:')\n\t\tfor E,V in zip(ee,ev.T):\n\t\t\tprint(('Eigs:',abs(E), '\\t', E, '\\n\\t', V))\n\t\tprint('\\n')\n\t\treturn 0\n\ndef test_null():\n\tA = np.matrix([[2,3,5],[-4,2,3],[0,0,0]])\n\tSymA = Matrix(A)\n\n\tnv = null(A)\n\tnvold = nullOld(A)\n\n\tprint((nv.T))\n\tprint((nvold.T[0]))\n\tprint((SymA.nullspace()[0].evalf()))\n\n\tprint((A * nv))\n\ndef test_solveset():\n\tx = Symbol('x')\n\tA = Matrix([[x,2,x*x],[4,5,x],[x,8,9]])\n\n\tsolns = solve(det(A), x)\n\tsolns_set = list(solveset(det(A), x))\n\n\tprint(solns)\n\tprint('\\n')\n\tprint(solns_set)\n\n\tprint('\\n\\n\\n')\n\tprint((solns[0]))\n\tprint('\\n')\n\tprint((solns_set[0]))\n\n\tsoln_sub = solns[0].subs(x, 1)\n\tsolnset_sub = solns_set[0].subs(x, 1)\n\n\ts1 = soln_sub.evalf()\n\ts1set = solnset_sub.evalf()\n\n\ts2set = solns_set[1].subs(x, 1).evalf()\n\n\tprint(s1)\n\tprint(s1set)\n\tprint(s2set)\n\ndef nullOld(A, eps=1e-14):\n\t\"\"\" Find the null eigenvector x of matrix A, such that Ax=0\"\"\"\n\t# Taken with gratitude from http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix\n\tu, s, vh = la.svd(A)\n\tnull_mask = (s <= eps)\n\tnull_space = scipy.compress(null_mask, vh, axis=0)\n\treturn scipy.transpose(null_space)\n\n\n'''\ndef null(A, atol=1e-15, rtol=0):\n\t\"\"\"Compute an approximate basis for the nullspace of A.\n\n\tThe algorithm used by this function is based on the singular value\n\tdecomposition of `A`.\n\n\tParameters\n\t----------\n\tA : ndarray\n\t\tA should be at most 2-D. A 1-D array with length k will be treated\n\t\tas a 2-D with shape (1, k)\n\tatol : float\n\t\tThe absolute tolerance for a zero singular value. Singular values\n\t\tsmaller than `atol` are considered to be zero.\n\trtol : float\n\t\tThe relative tolerance. 
Singular values less than rtol*smax are\n\t\tconsidered to be zero, where smax is the largest singular value.\n\n\tIf both `atol` and `rtol` are positive, the combined tolerance is the\n\tmaximum of the two; that is::\n\t\ttol = max(atol, rtol * smax)\n\tSingular values smaller than `tol` are considered to be zero.\n\n\tReturn value\n\t------------\n\tns : ndarray\n\t\tIf `A` is an array with shape (m, k), then `ns` will be an array\n\t\twith shape (k, n), where n is the estimated dimension of the\n\t\tnullspace of `A`. The columns of `ns` are a basis for the\n\t\tnullspace; each element in numpy.dot(A, ns) will be approximately\n\t\tzero.\n\t\"\"\"\n\n\tA = np.atleast_2d(A)\n\tu, s, vh = la.svd(A)\n\ttol = max(atol, rtol * s[0])\n\tnnz = (s >= tol).sum()\n\tprint nnz\n\tns = vh[nnz:].conj().T\n\treturn ns\n'''\n\ndef main():\n\t\"\"\" General test method \"\"\"\n\tfrom . import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi(np.linspace(-3500,3500,10),p_dict)\n\n\t#print 'ez: ',chiZ + 1 # ez / e0\n\t#print 'ex: ',0.5*(2+chiL+chiR) # ex / e0\n\t#print 'exy: ',0.5j*(chiR-chiL) # exy / e0\n\n\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,88*np.pi/180)\n\tprint((RotMat.shape))\n\ndef calculation_time_analysis():\n\t\"\"\" Test method for looking at timing performance \"\"\"\n\tfrom . import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi([-3500],p_dict)\n\n\tfor angle in [0, np.pi/32, np.pi/16, np.pi/8, np.pi/4, np.pi/2]:\n\t\tprint(('Angle (degrees): ',angle*180/np.pi))\n\t\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,angle)\n\ndef test_equivalence():\n\t\"\"\" Test numeric vs analytic solutions \"\"\"\n\n\tfrom . import spectra as sp\n\n\t#analytic\n\tp_dict = {'Bfield':15000,'rb85frac':1,'Btheta':0*np.pi/180,'Bphi':0*np.pi/180,'lcell':1e-3,'T':84,'Dline':'D2','Elem':'Rb'}\n\tchiL1,chiR1,chiZ1 = sp.calc_chi([-18400],p_dict)\n\tRotMat1, n11, n21 = solve_diel(chiL1,chiR1,chiZ1,0,150,force_numeric=False)\n\n\t#numeric\n\tchiL2, chiR2, chiZ2 = chiL1, chiR1, chiZ1\n\t#chiL2,chiR2,chiZ2 = sp.calc_chi([-18400],p_dict)\n\tRotMat2, n12, n22 = solve_diel(chiL2,chiR2,chiZ2,0,150,force_numeric=True)\n\n\tprint('RM 1')\n\tprint(RotMat1)\n\n\tprint('RM 2')\n\tprint(RotMat2)\n\n\tprint('n1_1 (analytic)')\n\tprint(n11)\n\tprint('n1_2')\n\tprint(n12)\n\tprint('n2_1 (analytic)')\n\tprint(n21)\n\tprint('n2_2')\n\tprint(n22)\n\n\tprint('chi1')\n\tprint((chiL1, chiR1, chiZ1))\n\n\tprint('chi2')\n\tprint((chiL2, chiR2, chiZ2))\n\nif __name__ == '__main__':\n\ttest_equivalence()" ]
[ [ "numpy.matrix", "scipy.linalg.svd", "numpy.abs", "numpy.sqrt", "scipy.transpose", "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.exp", "scipy.compress", "numpy.tan", "numpy.mod", "numpy.angle", "numpy.array", "scipy.linalg.eig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] } ]
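The solve_dielectric_NEW.py row above escapes to closed-form refractive indices when the geometry is Voigt (B-field perpendicular to the wavevector). A minimal numpy sketch of that analytic branch, using made-up complex susceptibilities purely for illustration:

# Sketch of the analytic Voigt-geometry branch of solve_diel from the code above.
# The chiL/chiR/chiZ values are invented; only the formulas come from the source.
import numpy as np

chiL = np.array([1.0e-3 + 5e-4j, 2.0e-3 + 1.0e-3j])
chiR = np.array([1.2e-3 + 6e-4j, 1.8e-3 + 0.9e-3j])
chiZ = np.array([1.1e-3 + 5e-4j, 1.9e-3 + 0.9e-3j])

ex  = 0.5 * (2.0 + chiL + chiR)   # dielectric tensor elements
exy = 0.5j * (chiR - chiL)
ez  = 1.0 + chiZ

n1 = np.sqrt(ex + exy**2 / ex)    # first propagating normal mode
n2 = np.sqrt(ez)                  # second propagating normal mode
print(n1, n2)

The expressions for ex, exy, ez and the two mode indices are copied from the Voigt branch of solve_diel; the corresponding rotation matrix and the general-angle solution are handled elsewhere in that module.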
qiaozhijian/FCGF
[ "6c3f0dbaa918e6622f637a26ffa7b9912726a7a8" ]
[ "lib/trainer.py" ]
[ "# -*- coding: future_fstrings -*-\n#\n# Written by Chris Choy <[email protected]>\n# Distributed under MIT License\nimport gc\nimport json\nimport logging\nimport os\nimport os.path as osp\n\nimport MinkowskiEngine as ME\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nimport util.transform_estimation as te\nfrom lib.eval import find_nn_gpu\nfrom lib.metrics import pdist, corr_dist\nfrom lib.timer import Timer, AverageMeter\nfrom model import load_model\nfrom util.file import ensure_dir\nfrom util.misc import _hash\n\n\nclass AlignmentTrainer:\n\n def __init__(\n self,\n config,\n data_loader,\n val_data_loader=None,\n ):\n num_feats = 1 # occupancy only for 3D Match dataset. For ScanNet, use RGB 3 channels.\n\n # Model initialization\n Model = load_model(config.model)\n model = Model(\n num_feats,\n config.model_n_out,\n bn_momentum=config.bn_momentum,\n normalize_feature=config.normalize_feature,\n conv1_kernel_size=config.conv1_kernel_size,\n D=3)\n\n # 是否加载预训练模型\n if config.weights:\n checkpoint = torch.load(config.weights)\n model.load_state_dict(checkpoint['state_dict'])\n\n # logging.info(model)\n\n self.config = config\n self.model = model\n self.max_epoch = config.max_epoch\n self.save_freq = config.save_freq_epoch\n self.val_max_iter = config.val_max_iter\n self.val_epoch_freq = config.val_epoch_freq\n\n self.best_val_metric = config.best_val_metric\n self.best_val_epoch = -np.inf\n self.best_val = -np.inf\n\n if config.use_gpu and not torch.cuda.is_available():\n logging.warning('Warning: There\\'s no CUDA support on this machine, '\n 'training is performed on CPU.')\n raise ValueError('GPU not available, but cuda flag set')\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n self.optimizer = getattr(optim, config.optimizer)(\n model.parameters(),\n lr=config.lr,\n momentum=config.momentum,\n weight_decay=config.weight_decay)\n\n self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, config.exp_gamma)\n\n self.start_epoch = 1\n self.checkpoint_dir = config.out_dir\n\n ensure_dir(self.checkpoint_dir)\n json.dump(\n config,\n open(os.path.join(self.checkpoint_dir, 'config.json'), 'w'),\n indent=4,\n sort_keys=False)\n\n self.iter_size = config.iter_size\n self.batch_size = data_loader.batch_size\n self.data_loader = data_loader\n self.val_data_loader = val_data_loader\n\n self.test_valid = True if self.val_data_loader is not None else False\n self.log_step = int(np.sqrt(self.config.batch_size))\n self.model = self.model.to(self.device)\n self.writer = SummaryWriter(logdir=config.out_dir)\n\n if config.resume is not None:\n if osp.isfile(config.resume):\n logging.info(\"=> loading checkpoint '{}'\".format(config.resume))\n state = torch.load(config.resume)\n self.start_epoch = state['epoch']\n model.load_state_dict(state['state_dict'])\n self.scheduler.load_state_dict(state['scheduler'])\n self.optimizer.load_state_dict(state['optimizer'])\n\n if 'best_val' in state.keys():\n self.best_val = state['best_val']\n self.best_val_epoch = state['best_val_epoch']\n self.best_val_metric = state['best_val_metric']\n else:\n raise ValueError(f\"=> no checkpoint found at '{config.resume}'\")\n\n def train(self):\n \"\"\"\n Full training logic\n \"\"\"\n # Baseline random feature performance\n if self.test_valid:\n with torch.no_grad():\n val_dict = self._valid_epoch()\n\n for k, v in val_dict.items():\n self.writer.add_scalar(f'val/{k}', v, 0)\n\n for epoch in 
range(self.start_epoch, self.max_epoch + 1):\n lr = self.scheduler.get_lr()\n logging.info(f\" Epoch: {epoch}, LR: {lr}\")\n self._train_epoch(epoch)\n self._save_checkpoint(epoch)\n self.scheduler.step()\n\n if self.test_valid and epoch % self.val_epoch_freq == 0:\n with torch.no_grad():\n val_dict = self._valid_epoch()\n\n for k, v in val_dict.items():\n self.writer.add_scalar(f'val/{k}', v, epoch)\n if self.best_val < val_dict[self.best_val_metric]:\n logging.info(\n f'Saving the best val model with {self.best_val_metric}: {val_dict[self.best_val_metric]}'\n )\n self.best_val = val_dict[self.best_val_metric]\n self.best_val_epoch = epoch\n self._save_checkpoint(epoch, 'best_val_checkpoint')\n else:\n logging.info(\n f'Current best val model with {self.best_val_metric}: {self.best_val} at epoch {self.best_val_epoch}'\n )\n\n def _save_checkpoint(self, epoch, filename='checkpoint'):\n state = {\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'scheduler': self.scheduler.state_dict(),\n 'config': self.config,\n 'best_val': self.best_val,\n 'best_val_epoch': self.best_val_epoch,\n 'best_val_metric': self.best_val_metric\n }\n filename = os.path.join(self.checkpoint_dir, f'{filename}.pth')\n logging.info(\"Saving checkpoint: {} ...\".format(filename))\n torch.save(state, filename)\n\n\nclass ContrastiveLossTrainer(AlignmentTrainer):\n\n def __init__(\n self,\n config,\n data_loader,\n val_data_loader=None,\n ):\n if val_data_loader is not None:\n assert val_data_loader.batch_size == 1, \"Val set batch size must be 1 for now.\"\n AlignmentTrainer.__init__(self, config, data_loader, val_data_loader)\n self.neg_thresh = config.neg_thresh #1.4\n self.pos_thresh = config.pos_thresh #0.1\n self.neg_weight = config.neg_weight #1\n\n def apply_transform(self, pts, trans):\n R = trans[:3, :3]\n T = trans[:3, 3]\n return pts @ R.t() + T\n\n def generate_rand_negative_pairs(self, positive_pairs, hash_seed, N0, N1, N_neg=0):\n \"\"\"\n Generate random negative pairs\n \"\"\"\n if not isinstance(positive_pairs, np.ndarray):\n positive_pairs = np.array(positive_pairs, dtype=np.int64)\n if N_neg < 1:\n N_neg = positive_pairs.shape[0] * 2\n pos_keys = _hash(positive_pairs, hash_seed)\n\n neg_pairs = np.floor(np.random.rand(int(N_neg), 2) * np.array([[N0, N1]])).astype(\n np.int64)\n neg_keys = _hash(neg_pairs, hash_seed)\n mask = np.isin(neg_keys, pos_keys, assume_unique=False)\n return neg_pairs[np.logical_not(mask)]\n\n def _train_epoch(self, epoch):\n gc.collect()\n self.model.train()\n # Epoch starts from 1\n total_loss = 0\n total_num = 0.0\n\n data_loader = self.data_loader\n data_loader_iter = self.data_loader.__iter__()\n\n iter_size = self.iter_size\n start_iter = (epoch - 1) * (len(data_loader) // iter_size)\n\n data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()\n\n # Main training\n for curr_iter in range(len(data_loader) // iter_size):\n self.optimizer.zero_grad()\n batch_pos_loss, batch_neg_loss, batch_loss = 0, 0, 0\n\n data_time = 0\n total_timer.tic()\n for iter_idx in range(iter_size):\n # Caffe iter size\n data_timer.tic()\n input_dict = data_loader_iter.next()\n data_time += data_timer.toc(average=False)\n\n # pairs consist of (xyz1 index, xyz0 index)\n sinput0 = ME.SparseTensor(\n input_dict['sinput0_F'].to(self.device),\n coordinates=input_dict['sinput0_C'].to(self.device))\n F0 = self.model(sinput0).F\n\n sinput1 = ME.SparseTensor(\n input_dict['sinput1_F'].to(self.device),\n 
coordinates=input_dict['sinput1_C'].to(self.device))\n F1 = self.model(sinput1).F\n\n N0, N1 = len(sinput0), len(sinput1)\n\n pos_pairs = input_dict['correspondences']\n neg_pairs = self.generate_rand_negative_pairs(pos_pairs, max(N0, N1), N0, N1)\n pos_pairs = pos_pairs.long().to(self.device)\n neg_pairs = torch.from_numpy(neg_pairs).long().to(self.device)\n\n neg0 = F0.index_select(0, neg_pairs[:, 0])\n neg1 = F1.index_select(0, neg_pairs[:, 1])\n pos0 = F0.index_select(0, pos_pairs[:, 0])\n pos1 = F1.index_select(0, pos_pairs[:, 1])\n\n # Positive loss\n pos_loss = (pos0 - pos1).pow(2).sum(1)\n\n # Negative loss\n neg_loss = F.relu(self.neg_thresh -\n ((neg0 - neg1).pow(2).sum(1) + 1e-4).sqrt()).pow(2)\n\n pos_loss_mean = pos_loss.mean() / iter_size\n neg_loss_mean = neg_loss.mean() / iter_size\n\n # Weighted loss\n loss = pos_loss_mean + self.neg_weight * neg_loss_mean\n loss.backward(\n ) # To accumulate gradient, zero gradients only at the begining of iter_size\n batch_loss += loss.item()\n batch_pos_loss += pos_loss_mean.item()\n batch_neg_loss += neg_loss_mean.item()\n\n self.optimizer.step()\n\n torch.cuda.empty_cache()\n\n total_loss += batch_loss\n total_num += 1.0\n total_timer.toc()\n data_meter.update(data_time)\n\n # Print logs\n if curr_iter % self.config.stat_freq == 0:\n self.writer.add_scalar('train/loss', batch_loss, start_iter + curr_iter)\n self.writer.add_scalar('train/pos_loss', batch_pos_loss, start_iter + curr_iter)\n self.writer.add_scalar('train/neg_loss', batch_neg_loss, start_iter + curr_iter)\n logging.info(\n \"Train Epoch: {} [{}/{}], Current Loss: {:.3e} Pos: {:.3f} Neg: {:.3f}\"\n .format(epoch, curr_iter,\n len(self.data_loader) //\n iter_size, batch_loss, batch_pos_loss, batch_neg_loss) +\n \"\\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}\".format(\n data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg))\n data_meter.reset()\n total_timer.reset()\n\n def _valid_epoch(self):\n # Change the network to evaluation mode\n self.model.eval()\n self.val_data_loader.dataset.reset_seed(0)\n num_data = 0\n hit_ratio_meter, feat_match_ratio, loss_meter, rte_meter, rre_meter = AverageMeter(\n ), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()\n data_timer, feat_timer, matching_timer = Timer(), Timer(), Timer()\n tot_num_data = len(self.val_data_loader.dataset)\n if self.val_max_iter > 0:\n tot_num_data = min(self.val_max_iter, tot_num_data)\n data_loader_iter = self.val_data_loader.__iter__()\n\n for batch_idx in range(tot_num_data):\n data_timer.tic()\n input_dict = data_loader_iter.next()\n data_timer.toc()\n\n # pairs consist of (xyz1 index, xyz0 index)\n feat_timer.tic()\n sinput0 = ME.SparseTensor(\n input_dict['sinput0_F'].to(self.device),\n coordinates=input_dict['sinput0_C'].to(self.device))\n F0 = self.model(sinput0).F\n\n sinput1 = ME.SparseTensor(\n input_dict['sinput1_F'].to(self.device),\n coordinates=input_dict['sinput1_C'].to(self.device))\n F1 = self.model(sinput1).F\n feat_timer.toc()\n\n matching_timer.tic()\n xyz0, xyz1, T_gt = input_dict['pcd0'], input_dict['pcd1'], input_dict['T_gt']\n xyz0_corr, xyz1_corr = self.find_corr(xyz0, xyz1, F0, F1, subsample_size=5000)\n T_est = te.est_quad_linear_robust(xyz0_corr, xyz1_corr)\n\n loss = corr_dist(T_est, T_gt, xyz0, xyz1, weight=None)\n loss_meter.update(loss)\n\n rte = np.linalg.norm(T_est[:3, 3] - T_gt[:3, 3])\n rte_meter.update(rte)\n rre = np.arccos((np.trace(T_est[:3, :3].t() @ T_gt[:3, :3]) - 1) / 2)\n if not np.isnan(rre):\n rre_meter.update(rre)\n\n 
hit_ratio = self.evaluate_hit_ratio(\n xyz0_corr, xyz1_corr, T_gt, thresh=self.config.hit_ratio_thresh)\n hit_ratio_meter.update(hit_ratio)\n feat_match_ratio.update(hit_ratio > 0.05)\n matching_timer.toc()\n\n num_data += 1\n torch.cuda.empty_cache()\n\n if batch_idx % 100 == 0 and batch_idx > 0:\n logging.info(' '.join([\n f\"Validation iter {num_data} / {tot_num_data} : Data Loading Time: {data_timer.avg:.3f},\",\n f\"Feature Extraction Time: {feat_timer.avg:.3f}, Matching Time: {matching_timer.avg:.3f},\",\n f\"Loss: {loss_meter.avg:.3f}, RTE: {rte_meter.avg:.3f}, RRE: {rre_meter.avg:.3f},\",\n f\"Hit Ratio: {hit_ratio_meter.avg:.3f}, Feat Match Ratio: {feat_match_ratio.avg:.3f}\"\n ]))\n data_timer.reset()\n\n logging.info(' '.join([\n f\"Final Loss: {loss_meter.avg:.3f}, RTE: {rte_meter.avg:.3f}, RRE: {rre_meter.avg:.3f},\",\n f\"Hit Ratio: {hit_ratio_meter.avg:.3f}, Feat Match Ratio: {feat_match_ratio.avg:.3f}\"\n ]))\n return {\n \"loss\": loss_meter.avg,\n \"rre\": rre_meter.avg,\n \"rte\": rte_meter.avg,\n 'feat_match_ratio': feat_match_ratio.avg,\n 'hit_ratio': hit_ratio_meter.avg\n }\n\n def find_corr(self, xyz0, xyz1, F0, F1, subsample_size=-1):\n subsample = len(F0) > subsample_size\n if subsample_size > 0 and subsample:\n N0 = min(len(F0), subsample_size)\n N1 = min(len(F1), subsample_size)\n inds0 = np.random.choice(len(F0), N0, replace=False)\n inds1 = np.random.choice(len(F1), N1, replace=False)\n F0, F1 = F0[inds0], F1[inds1]\n\n # Compute the nn\n nn_inds = find_nn_gpu(F0, F1, nn_max_n=self.config.nn_max_n)\n if subsample_size > 0 and subsample:\n return xyz0[inds0], xyz1[inds1[nn_inds]]\n else:\n return xyz0, xyz1[nn_inds]\n\n def evaluate_hit_ratio(self, xyz0, xyz1, T_gth, thresh=0.1):\n xyz0 = self.apply_transform(xyz0, T_gth)\n dist = np.sqrt(((xyz0 - xyz1) ** 2).sum(1) + 1e-6)\n return (dist < thresh).float().mean().item()\n\n\nclass HardestContrastiveLossTrainer(ContrastiveLossTrainer):\n\n def contrastive_hardest_negative_loss(self,\n F0,\n F1,\n positive_pairs,\n num_pos=5192,\n num_hn_samples=2048,\n thresh=None):\n \"\"\"\n Generate negative pairs\n \"\"\"\n N0, N1 = len(F0), len(F1)\n N_pos_pairs = len(positive_pairs)\n hash_seed = max(N0, N1)\n sel0 = np.random.choice(N0, min(N0, num_hn_samples), replace=False) #replace False 不可以选相同数字\n sel1 = np.random.choice(N1, min(N1, num_hn_samples), replace=False)\n # 随机选正样本对\n if N_pos_pairs > num_pos:\n pos_sel = np.random.choice(N_pos_pairs, num_pos, replace=False)\n sample_pos_pairs = positive_pairs[pos_sel]\n else:\n sample_pos_pairs = positive_pairs\n\n # Find negatives for all F1[positive_pairs[:, 1]]\n # 随机抽取的一些特征,准备当做负样本对\n subF0, subF1 = F0[sel0], F1[sel1]\n\n pos_ind0 = sample_pos_pairs[:, 0].long()\n pos_ind1 = sample_pos_pairs[:, 1].long()\n posF0, posF1 = F0[pos_ind0], F1[pos_ind1]\n\n D01 = pdist(posF0, subF1, dist_type='L2')\n D10 = pdist(posF1, subF0, dist_type='L2')\n # fixme\n D01min, D01ind = D01.min(1)\n D10min, D10ind = D10.min(1)\n\n if not isinstance(positive_pairs, np.ndarray):\n positive_pairs = np.array(positive_pairs, dtype=np.int64)\n\n pos_keys = _hash(positive_pairs, hash_seed)\n # 提取负样本序号\n D01ind = sel1[D01ind.cpu().numpy()]\n D10ind = sel0[D10ind.cpu().numpy()]\n neg_keys0 = _hash([pos_ind0.numpy(), D01ind], hash_seed)\n neg_keys1 = _hash([D10ind, pos_ind1.numpy()], hash_seed)\n # 获得负样本中本属于正样本的部分(因为刚刚负样本是随机抽取的,所以可能抽到正样本)\n mask0 = torch.from_numpy(\n np.logical_not(np.isin(neg_keys0, pos_keys, assume_unique=False)))\n mask1 = torch.from_numpy(\n np.logical_not(np.isin(neg_keys1, 
pos_keys, assume_unique=False)))\n pos_loss = F.relu((posF0 - posF1).pow(2).sum(1) - self.pos_thresh)\n neg_loss0 = F.relu(self.neg_thresh - D01min[mask0]).pow(2)\n neg_loss1 = F.relu(self.neg_thresh - D10min[mask1]).pow(2)\n return pos_loss.mean(), (neg_loss0.mean() + neg_loss1.mean()) / 2\n\n def _train_epoch(self, epoch):\n gc.collect()\n self.model.train()\n # Epoch starts from 1\n total_loss = 0\n total_num = 0.0\n data_loader = self.data_loader\n data_loader_iter = self.data_loader.__iter__()\n iter_size = self.iter_size #1\n # data_timer数据加载的时间\n data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()\n start_iter = (epoch - 1) * (len(data_loader) // iter_size)\n for curr_iter in range(len(data_loader) // iter_size):\n self.optimizer.zero_grad()\n batch_pos_loss, batch_neg_loss, batch_loss = 0, 0, 0\n\n data_time = 0\n total_timer.tic()\n for iter_idx in range(iter_size):\n data_timer.tic()\n input_dict = data_loader_iter.next()\n data_time += data_timer.toc(average=False)\n\n sinput0 = ME.SparseTensor(\n input_dict['sinput0_F'].to(self.device),\n coordinates=input_dict['sinput0_C'].to(self.device))\n F0 = self.model(sinput0).F\n\n sinput1 = ME.SparseTensor(\n input_dict['sinput1_F'].to(self.device),\n coordinates=input_dict['sinput1_C'].to(self.device))\n\n F1 = self.model(sinput1).F\n\n pos_pairs = input_dict['correspondences']\n pos_loss, neg_loss = self.contrastive_hardest_negative_loss(\n F0,\n F1,\n pos_pairs,\n # num_pos_per_batch 1024\n num_pos=self.config.num_pos_per_batch * self.config.batch_size,\n # num_hn_samples_per_batch 256\n num_hn_samples=self.config.num_hn_samples_per_batch *\n self.config.batch_size)\n\n pos_loss /= iter_size\n neg_loss /= iter_size\n loss = pos_loss + self.neg_weight * neg_loss\n loss.backward()\n\n batch_loss += loss.item()\n batch_pos_loss += pos_loss.item()\n batch_neg_loss += neg_loss.item()\n\n self.optimizer.step()\n gc.collect()\n\n torch.cuda.empty_cache()\n\n total_loss += batch_loss\n total_num += 1.0\n total_timer.toc()\n data_meter.update(data_time)\n\n if curr_iter % self.config.stat_freq == 0:\n self.writer.add_scalar('train/loss', batch_loss, start_iter + curr_iter)\n self.writer.add_scalar('train/pos_loss', batch_pos_loss, start_iter + curr_iter)\n self.writer.add_scalar('train/neg_loss', batch_neg_loss, start_iter + curr_iter)\n logging.info(\n \"Train Epoch: {} [{}/{}], Current Loss: {:.3e} Pos: {:.3f} Neg: {:.3f}\"\n .format(epoch, curr_iter,\n len(self.data_loader) //\n iter_size, batch_loss, batch_pos_loss, batch_neg_loss) +\n \"\\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}\".format(\n data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg))\n data_meter.reset()\n total_timer.reset()\n\n\nclass TripletLossTrainer(ContrastiveLossTrainer):\n\n def triplet_loss(self,\n F0,\n F1,\n positive_pairs,\n num_pos=1024,\n num_hn_samples=None,\n num_rand_triplet=1024):\n \"\"\"\n Generate negative pairs\n \"\"\"\n N0, N1 = len(F0), len(F1)\n num_pos_pairs = len(positive_pairs)\n hash_seed = max(N0, N1)\n\n if num_pos_pairs > num_pos:\n pos_sel = np.random.choice(num_pos_pairs, num_pos, replace=False)\n sample_pos_pairs = positive_pairs[pos_sel]\n else:\n sample_pos_pairs = positive_pairs\n\n pos_ind0 = sample_pos_pairs[:, 0].long()\n pos_ind1 = sample_pos_pairs[:, 1].long()\n posF0, posF1 = F0[pos_ind0], F1[pos_ind1]\n\n if not isinstance(positive_pairs, np.ndarray):\n positive_pairs = np.array(positive_pairs, dtype=np.int64)\n\n pos_keys = _hash(positive_pairs, hash_seed)\n pos_dist = 
torch.sqrt((posF0 - posF1).pow(2).sum(1) + 1e-7)\n\n # Random triplets\n rand_inds = np.random.choice(\n num_pos_pairs, min(num_pos_pairs, num_rand_triplet), replace=False)\n rand_pairs = positive_pairs[rand_inds]\n negatives = np.random.choice(N1, min(N1, num_rand_triplet), replace=False)\n\n # Remove positives from negatives\n rand_neg_keys = _hash([rand_pairs[:, 0], negatives], hash_seed)\n rand_mask = np.logical_not(np.isin(rand_neg_keys, pos_keys, assume_unique=False))\n anchors, positives = rand_pairs[torch.from_numpy(rand_mask)].T\n negatives = negatives[rand_mask]\n\n rand_pos_dist = torch.sqrt((F0[anchors] - F1[positives]).pow(2).sum(1) + 1e-7)\n rand_neg_dist = torch.sqrt((F0[anchors] - F1[negatives]).pow(2).sum(1) + 1e-7)\n\n loss = F.relu(rand_pos_dist + self.neg_thresh - rand_neg_dist).mean()\n\n return loss, pos_dist.mean(), rand_neg_dist.mean()\n\n def _train_epoch(self, epoch):\n config = self.config\n\n gc.collect()\n self.model.train()\n\n # Epoch starts from 1\n total_loss = 0\n total_num = 0.0\n data_loader = self.data_loader\n data_loader_iter = self.data_loader.__iter__()\n iter_size = self.iter_size\n data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()\n pos_dist_meter, neg_dist_meter = AverageMeter(), AverageMeter()\n start_iter = (epoch - 1) * (len(data_loader) // iter_size)\n for curr_iter in range(len(data_loader) // iter_size):\n self.optimizer.zero_grad()\n batch_loss = 0\n data_time = 0\n total_timer.tic()\n for iter_idx in range(iter_size):\n data_timer.tic()\n input_dict = data_loader_iter.next()\n data_time += data_timer.toc(average=False)\n\n # pairs consist of (xyz1 index, xyz0 index)\n sinput0 = ME.SparseTensor(\n input_dict['sinput0_F'].to(self.device),\n coordinates=input_dict['sinput0_C'].to(self.device))\n F0 = self.model(sinput0).F\n\n sinput1 = ME.SparseTensor(\n input_dict['sinput1_F'].to(self.device),\n coordinates=input_dict['sinput1_C'].to(self.device))\n F1 = self.model(sinput1).F\n\n pos_pairs = input_dict['correspondences']\n loss, pos_dist, neg_dist = self.triplet_loss(\n F0,\n F1,\n pos_pairs,\n num_pos=config.triplet_num_pos * config.batch_size,\n num_hn_samples=config.triplet_num_hn * config.batch_size,\n num_rand_triplet=config.triplet_num_rand * config.batch_size)\n loss /= iter_size\n loss.backward()\n batch_loss += loss.item()\n pos_dist_meter.update(pos_dist)\n neg_dist_meter.update(neg_dist)\n\n self.optimizer.step()\n gc.collect()\n\n torch.cuda.empty_cache()\n\n total_loss += batch_loss\n total_num += 1.0\n total_timer.toc()\n data_meter.update(data_time)\n\n if curr_iter % self.config.stat_freq == 0:\n self.writer.add_scalar('train/loss', batch_loss, start_iter + curr_iter)\n logging.info(\n \"Train Epoch: {} [{}/{}], Current Loss: {:.3e}, Pos dist: {:.3e}, Neg dist: {:.3e}\"\n .format(epoch, curr_iter,\n len(self.data_loader) //\n iter_size, batch_loss, pos_dist_meter.avg, neg_dist_meter.avg) +\n \"\\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}\".format(\n data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg))\n pos_dist_meter.reset()\n neg_dist_meter.reset()\n data_meter.reset()\n total_timer.reset()\n\n\nclass HardestTripletLossTrainer(TripletLossTrainer):\n\n def triplet_loss(self,\n F0,\n F1,\n positive_pairs,\n num_pos=1024,\n num_hn_samples=512,\n num_rand_triplet=1024):\n \"\"\"\n Generate negative pairs\n \"\"\"\n N0, N1 = len(F0), len(F1)\n num_pos_pairs = len(positive_pairs)\n hash_seed = max(N0, N1)\n sel0 = np.random.choice(N0, min(N0, num_hn_samples), replace=False)\n 
sel1 = np.random.choice(N1, min(N1, num_hn_samples), replace=False)\n\n if num_pos_pairs > num_pos:\n pos_sel = np.random.choice(num_pos_pairs, num_pos, replace=False)\n sample_pos_pairs = positive_pairs[pos_sel]\n else:\n sample_pos_pairs = positive_pairs\n\n # Find negatives for all F1[positive_pairs[:, 1]]\n subF0, subF1 = F0[sel0], F1[sel1]\n\n pos_ind0 = sample_pos_pairs[:, 0].long()\n pos_ind1 = sample_pos_pairs[:, 1].long()\n posF0, posF1 = F0[pos_ind0], F1[pos_ind1]\n\n D01 = pdist(posF0, subF1, dist_type='L2')\n D10 = pdist(posF1, subF0, dist_type='L2')\n\n D01min, D01ind = D01.min(1)\n D10min, D10ind = D10.min(1)\n\n if not isinstance(positive_pairs, np.ndarray):\n positive_pairs = np.array(positive_pairs, dtype=np.int64)\n\n pos_keys = _hash(positive_pairs, hash_seed)\n\n D01ind = sel1[D01ind.cpu().numpy()]\n D10ind = sel0[D10ind.cpu().numpy()]\n neg_keys0 = _hash([pos_ind0.numpy(), D01ind], hash_seed)\n neg_keys1 = _hash([D10ind, pos_ind1.numpy()], hash_seed)\n\n mask0 = torch.from_numpy(\n np.logical_not(np.isin(neg_keys0, pos_keys, assume_unique=False)))\n mask1 = torch.from_numpy(\n np.logical_not(np.isin(neg_keys1, pos_keys, assume_unique=False)))\n pos_dist = torch.sqrt((posF0 - posF1).pow(2).sum(1) + 1e-7)\n\n # Random triplets\n rand_inds = np.random.choice(\n num_pos_pairs, min(num_pos_pairs, num_rand_triplet), replace=False)\n rand_pairs = positive_pairs[rand_inds]\n negatives = np.random.choice(N1, min(N1, num_rand_triplet), replace=False)\n\n # Remove positives from negatives\n rand_neg_keys = _hash([rand_pairs[:, 0], negatives], hash_seed)\n rand_mask = np.logical_not(np.isin(rand_neg_keys, pos_keys, assume_unique=False))\n anchors, positives = rand_pairs[torch.from_numpy(rand_mask)].T\n negatives = negatives[rand_mask]\n\n rand_pos_dist = torch.sqrt((F0[anchors] - F1[positives]).pow(2).sum(1) + 1e-7)\n rand_neg_dist = torch.sqrt((F0[anchors] - F1[negatives]).pow(2).sum(1) + 1e-7)\n\n loss = F.relu(\n torch.cat([\n rand_pos_dist + self.neg_thresh - rand_neg_dist,\n pos_dist[mask0] + self.neg_thresh - D01min[mask0],\n pos_dist[mask1] + self.neg_thresh - D10min[mask1]\n ])).mean()\n\n return loss, pos_dist.mean(), (D01min.mean() + D10min.mean()).item() / 2\n" ]
[ [ "numpy.logical_not", "numpy.sqrt", "torch.load", "numpy.random.choice", "numpy.isnan", "torch.cat", "torch.cuda.empty_cache", "torch.optim.lr_scheduler.ExponentialLR", "numpy.linalg.norm", "torch.from_numpy", "torch.nn.functional.relu", "torch.no_grad", "torch.cuda.is_available", "numpy.array", "numpy.isin", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fullbat/scilpy
[ "8f5b95a0b298ac95268c94d04a162b14fe2773ad", "8f5b95a0b298ac95268c94d04a162b14fe2773ad", "8f5b95a0b298ac95268c94d04a162b14fe2773ad", "8f5b95a0b298ac95268c94d04a162b14fe2773ad", "8f5b95a0b298ac95268c94d04a162b14fe2773ad", "8f5b95a0b298ac95268c94d04a162b14fe2773ad" ]
[ "scripts/scil_outlier_rejection.py", "scripts/scil_compute_ssst_frf.py", "scripts/scil_compute_fodf_metrics.py", "scilpy/tractanalysis/todi_util.py", "scilpy/tracking/tools.py", "scripts/scil_apply_warp_to_tractogram.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\n\nimport nibabel as nib\nfrom nibabel.streamlines.tractogram import Tractogram\nimport numpy as np\n\nfrom scilpy.io.utils import (add_overwrite_arg,\n assert_inputs_exist,\n assert_outputs_exist,\n check_tracts_same_format)\nfrom scilpy.tractanalysis.features import (\n outliers_removal_using_hierarchical_quickbundles,\n prune)\n\nDESCRIPTION = \"\"\"\nClean a bundle (inliers/outliers) using hiearchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\"\"\"\n\n\ndef _build_arg_parser():\n parser = argparse.ArgumentParser(\n description=DESCRIPTION,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('in_bundle',\n help='Fiber bundle file to remove outliers from.')\n parser.add_argument('out_bundle',\n help='Fiber bundle without outliers.')\n parser.add_argument('--remaining_bundle',\n help='Removed outliers.')\n parser.add_argument('--alpha', type=float, default=0.6,\n help='Percent of the length of the tree that clusters '\n 'of individual streamlines will be pruned.')\n add_overwrite_arg(parser)\n return parser\n\n\ndef main():\n parser = _build_arg_parser()\n args = parser.parse_args()\n\n assert_inputs_exist(parser, args.in_bundle)\n assert_outputs_exist(parser, args, args.out_bundle, args.remaining_bundle)\n if args.alpha <= 0 or args.alpha > 1:\n parser.error('--alpha should be ]0, 1]')\n\n tractogram = nib.streamlines.load(args.in_bundle)\n\n if int(tractogram.header['nb_streamlines']) == 0:\n logging.warning(\"Bundle file contains no streamline\")\n return\n\n check_tracts_same_format(parser, [args.in_bundle, args.out_bundle,\n args.remaining_bundle])\n\n streamlines = tractogram.streamlines\n\n summary = outliers_removal_using_hierarchical_quickbundles(streamlines)\n outliers, inliers = prune(streamlines, args.alpha, summary)\n\n inliers_streamlines = tractogram.streamlines[inliers]\n inliers_data_per_streamline = tractogram.tractogram.data_per_streamline[inliers]\n inliers_data_per_point = tractogram.tractogram.data_per_point[inliers]\n\n outliers_streamlines = tractogram.streamlines[outliers]\n outliers_data_per_streamline = tractogram.tractogram.data_per_streamline[outliers]\n outliers_data_per_point = tractogram.tractogram.data_per_point[outliers]\n\n if len(inliers_streamlines) == 0:\n logging.warning(\"All streamlines are considered outliers.\"\n \"Please lower the --alpha parameter\")\n else:\n inliers_tractogram = Tractogram(\n inliers_streamlines,\n affine_to_rasmm=np.eye(4),\n data_per_streamline=inliers_data_per_streamline,\n data_per_point=inliers_data_per_point)\n nib.streamlines.save(inliers_tractogram, args.out_bundle,\n header=tractogram.header)\n\n if len(outliers_streamlines) == 0:\n logging.warning(\"No outlier found. 
Please raise the --alpha parameter\")\n elif args.remaining_bundle:\n outlier_tractogram = Tractogram(\n outliers_streamlines,\n affine_to_rasmm=np.eye(4),\n data_per_streamline=outliers_data_per_streamline,\n data_per_point=outliers_data_per_point)\n nib.streamlines.save(outlier_tractogram, args.remaining_bundle,\n header=tractogram.header)\n\n\nif __name__ == '__main__':\n main()\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCompute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\"\"\"\n\nfrom __future__ import division\n\nfrom builtins import str\nimport argparse\nimport logging\n\nfrom dipy.core.gradients import gradient_table\nfrom dipy.io.gradients import read_bvals_bvecs\nfrom dipy.reconst.csdeconv import auto_response\nfrom dipy.segment.mask import applymask\nimport nibabel as nib\nimport numpy as np\n\nfrom scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,\n assert_outputs_exist, add_force_b0_arg)\nfrom scilpy.utils.bvec_bval_tools import (\n check_b0_threshold, normalize_bvecs, is_normalized_bvecs)\n\n\ndef _build_arg_parser():\n p = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=\"References: [1] Tournier et al. NeuroImage 2007\")\n\n p.add_argument('input',\n help='Path of the input diffusion volume.')\n p.add_argument('bvals',\n help='Path of the bvals file, in FSL format.')\n p.add_argument('bvecs',\n help='Path of the bvecs file, in FSL format.')\n p.add_argument('frf_file',\n help='Path to the output FRF file, in .txt format, '\n 'saved by Numpy.')\n\n add_force_b0_arg(p)\n\n p.add_argument(\n '--mask',\n help='Path to a binary mask. Only the data inside the mask will be '\n 'used for computations and reconstruction. Useful if no white '\n 'matter mask is available.')\n p.add_argument(\n '--mask_wm', metavar='',\n help='Path to a binary white matter mask. Only the data inside this '\n 'mask and above the threshold defined by --fa will be used to '\n 'estimate the fiber response function.')\n p.add_argument(\n '--fa', dest='fa_thresh', default=0.7, type=float,\n help='If supplied, use this threshold as the initial threshold '\n 'to select single fiber voxels. [%(default)s]')\n p.add_argument(\n '--min_fa', dest='min_fa_thresh', default=0.5, type=float,\n help='If supplied, this is the minimal value that will be tried '\n 'when looking for single fiber voxels. [%(default)s]')\n p.add_argument(\n '--min_nvox', default=300, type=int,\n help='Minimal number of voxels needing to be identified as single '\n 'fiber voxels in the automatic estimation. [%(default)s]')\n\n p.add_argument(\n '--roi_radius', default=10, type=int,\n help='If supplied, use this radius to select single fibers from the '\n 'tensor to estimate the FRF. The roi will be a cube spanning '\n 'from the middle of the volume in each direction. [%(default)s]')\n p.add_argument(\n '--roi_center', metavar='tuple(3)',\n help='If supplied, use this center to span the roi of size '\n 'roi_radius. 
[center of the 3D volume]')\n\n add_overwrite_arg(p)\n\n p.add_argument('--verbose', '-v', action='store_true',\n help='Produce verbose output.')\n\n return p\n\n\ndef main():\n parser = _build_arg_parser()\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])\n assert_outputs_exist(parser, args, args.frf_file)\n\n vol = nib.load(args.input)\n data = vol.get_data()\n\n bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)\n\n if not is_normalized_bvecs(bvecs):\n logging.warning('Your b-vectors do not seem normalized...')\n bvecs = normalize_bvecs(bvecs)\n\n check_b0_threshold(args, bvals.min())\n gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())\n\n if args.min_fa_thresh < 0.4:\n logging.warn(\n 'Minimal FA threshold ({}) seems really small. Make sure it '\n 'makes sense for this dataset.'.format(args.min_fa_thresh))\n\n if args.mask:\n mask = nib.load(args.mask).get_data().astype(np.bool)\n data = applymask(data, mask)\n\n if args.mask_wm:\n wm_mask = nib.load(args.mask_wm).get_data().astype('bool')\n else:\n wm_mask = np.ones_like(data[..., 0], dtype=np.bool)\n logging.warn(\n 'No white matter mask specified! mask_data will be used instead, '\n 'if it has been supplied. \\nBe *VERY* careful about the '\n 'estimation of the fiber response function to ensure no invalid '\n 'voxel was used.')\n\n data_in_wm = applymask(data, wm_mask)\n\n fa_thresh = args.fa_thresh\n # Iteratively trying to fit at least 300 voxels. Lower the FA threshold\n # when it doesn't work. Fail if the fa threshold is smaller than\n # the min_threshold.\n # We use an epsilon since the -= 0.05 might incurs numerical imprecision.\n nvox = 0\n while nvox < args.min_nvox and fa_thresh >= args.min_fa_thresh - 0.00001:\n response, ratio, nvox = auto_response(gtab, data_in_wm,\n roi_center=args.roi_center,\n roi_radius=args.roi_radius,\n fa_thr=fa_thresh,\n return_number_of_voxels=True)\n\n logging.debug(\n 'Number of indices is %s with threshold of %s', nvox, fa_thresh)\n fa_thresh -= 0.05\n\n if nvox < args.min_nvox:\n raise ValueError(\n \"Could not find at least {} voxels with sufficient FA \"\n \"to estimate the FRF!\".format(args.min_nvox))\n\n logging.debug(\"Found %i voxels with FA threshold %f for FRF estimation\",\n nvox, fa_thresh + 0.05)\n logging.debug(\"FRF eigenvalues: %s\", str(response[0]))\n logging.debug(\"Ratio for smallest to largest eigen value is %f\", ratio)\n logging.debug(\"Mean of the b=0 signal for voxels used for FRF: %f\",\n response[1])\n\n full_response = np.array([response[0][0], response[0][1],\n response[0][2], response[1]])\n\n np.savetxt(args.frf_file, full_response)\n\n\nif __name__ == \"__main__\":\n main()\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nScript to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs\norientations (peaks) and the Number of Fiber Orientations (NuFO) maps from\nfiber ODFs.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricules. This can be obtained with the\ncompute_fodf_max_in_ventricules.py script.\n\nBy default, will output all possible files, using default names. 
Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport argparse\nimport os\nimport numpy as np\nimport nibabel as nib\n\nfrom dipy.core.ndindex import ndindex\nfrom dipy.data import get_sphere\nfrom dipy.direction.peaks import reshape_peaks_for_visualization\n\nfrom scilpy.io.utils import (add_overwrite_arg, add_sh_basis_args,\n assert_inputs_exist, assert_outputs_exist)\nfrom scilpy.reconst.utils import (\n find_order_from_nb_coeff, get_b_matrix, get_maximas)\n\n\ndef _build_arg_parser():\n p = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n p.add_argument(\n 'input', metavar='fODFs',\n help='Path of the fODF volume in spherical harmonics (SH).')\n p.add_argument(\n 'at', metavar='a_threshold', type=float,\n help='WARNING!!! EXTREMELY IMPORTANT PARAMETER, VARIABLE '\n 'ACROSS DATASETS!!!\\nAbsolute threshold on fODF amplitude.\\nThis '\n 'value should set to approximately 1.5 to 2 times the maximum\\n'\n 'fODF amplitude in isotropic voxels (ex. ventricles).\\n'\n 'compute_fodf_max_in_ventricles.py can be used to find the '\n 'maximal value.\\nSee [Dell\\'Acqua et al HBM 2013].')\n\n p.add_argument(\n '--sphere', metavar='string', default='repulsion724',\n choices=['repulsion100', 'repulsion724'],\n help='Discrete sphere to use in the processing. [%(default)s].')\n p.add_argument(\n '--mask', metavar='',\n help='Path to a binary mask. Only the data inside the mask will be '\n 'used for computations and reconstruction [%(default)s].')\n p.add_argument(\n '--rt', dest='r_threshold', type=float, default='0.1',\n help='Relative threshold on fODF amplitude in percentage '\n '[%(default)s].')\n add_sh_basis_args(p)\n add_overwrite_arg(p)\n p.add_argument(\n '--vis', dest='visu', action='store_true',\n help='Export map for better visualization in FiberNavigator.\\n'\n '!WARNING! 
these maps should not be used to compute statistics '\n '[%(default)s].')\n p.add_argument(\n '--not_all', action='store_true',\n help='If set, only saves the files specified using the file flags '\n '[%(default)s].')\n\n g = p.add_argument_group(title='File flags')\n g.add_argument(\n '--afd', metavar='file', default='',\n help='Output filename for the AFD_max map.')\n g.add_argument(\n '--afd_total', metavar='file', default='',\n help='Output filename for the AFD_total map (SH coeff = 0).')\n g.add_argument(\n '--afd_sum', metavar='file', default='',\n help='Output filename for the sum of all peak contributions (sum of '\n 'fODF lobes on the sphere).')\n g.add_argument('--nufo', metavar='file', default='',\n help='Output filename for the NuFO map.')\n g.add_argument('--peaks', metavar='file', default='',\n help='Output filename for the extracted peaks.')\n return p\n\n\ndef load(path):\n img = nib.load(path)\n return img.get_data(), img.affine\n\n\ndef save(data, affine, output, visu=False):\n if visu:\n img = nib.Nifti1Image(np.array(data, 'uint8'), affine)\n filename, extension1 = os.path.splitext(output)\n filename, extension2 = os.path.splitext(filename)\n nib.save(img, filename+'_fibernav' + extension2 + extension1)\n else:\n img = nib.Nifti1Image(np.array(data, 'float32'), affine)\n nib.save(img, output)\n\n\ndef get_maps(data, mask, args, npeaks=5):\n nufo_map = np.zeros(data.shape[0:3])\n afd_map = np.zeros(data.shape[0:3])\n afd_sum = np.zeros(data.shape[0:3])\n\n peaks_dirs = np.zeros(list(data.shape[0:3]) + [npeaks, 3])\n order = find_order_from_nb_coeff(data)\n sphere = get_sphere(args.sphere)\n b_matrix = get_b_matrix(order, sphere, args.sh_basis)\n\n for index in ndindex(data.shape[:-1]):\n if mask[index]:\n if np.isnan(data[index]).any():\n nufo_map[index] = 0\n afd_map[index] = 0\n else:\n maximas, afd, _ = get_maximas(\n data[index], sphere, b_matrix, args.r_threshold, args.at)\n # sf = np.dot(data[index], B.T)\n\n n = min(npeaks, maximas.shape[0])\n nufo_map[index] = maximas.shape[0]\n if n == 0:\n afd_map[index] = 0.0\n nufo_map[index] = 0.0\n else:\n afd_map[index] = afd.max()\n peaks_dirs[index][:n] = maximas[:n]\n\n # sum of all coefficients, sqrt(power spectrum)\n # sum C^2 = sum fODF^2\n afd_sum[index] = np.sqrt(np.dot(data[index], data[index]))\n\n # sum of all peaks contributions to the afd\n # integral of all the lobes. 
Numerical sum.\n # With an infinite number of SH, this should == to afd_sum\n # sf[np.nonzero(sf < args.at)] = 0.\n # afd_sum[index] = sf.sum()/n*4*np.pi/B.shape[0]x\n\n return nufo_map, afd_map, afd_sum, peaks_dirs\n\n\ndef main():\n parser = _build_arg_parser()\n args = parser.parse_args()\n\n if not args.not_all:\n args.afd = args.afd or 'afd_max.nii.gz'\n args.afd_total = args.afd_total or 'afd_total_sh0.nii.gz'\n args.afd_sum = args.afd_sum or 'afd_sum.nii.gz'\n args.nufo = args.nufo or 'nufo.nii.gz'\n args.peaks = args.peaks or 'peaks.nii.gz'\n\n arglist = [args.afd, args.afd_total, args.afd_sum, args.nufo, args.peaks]\n if args.not_all and not any(arglist):\n parser.error('When using --not_all, you need to specify at least '\n 'one file to output.')\n\n assert_inputs_exist(parser, [])\n assert_outputs_exist(parser, args, arglist)\n\n data, affine = load(args.input)\n if args.mask is None:\n mask = np.ones(data.shape[:-1])\n else:\n mask, affine2 = load(args.mask)\n\n nufo_map, afd_map, afd_sum, peaks_dirs = get_maps(data, mask, args)\n\n # Save result\n if args.nufo:\n save(nufo_map, affine, args.nufo)\n\n if args.afd:\n save(afd_map, affine, args.afd)\n\n if args.afd_total:\n # this is the analytical afd total\n afd_tot = data[:, :, :, 0]\n save(afd_tot, affine, args.afd_total)\n\n if args.afd_sum:\n save(afd_sum, affine, args.afd_sum)\n\n if args.peaks:\n nib.save(nib.Nifti1Image(reshape_peaks_for_visualization(peaks_dirs),\n affine), args.peaks)\n if args.visu:\n if nufo_map.max() > nufo_map.min():\n nufo_map = (255 * (nufo_map - nufo_map.min()) / (nufo_map.max() -\n nufo_map.min()))\n\n if afd_map.max() > afd_map.min():\n afd_map = (255 * (afd_map - afd_map.min()) / (afd_map.max() -\n afd_map.min()))\n\n save(nufo_map, affine, args.nufo, True)\n save(afd_map, affine, args.afd, True)\n\n\nif __name__ == \"__main__\":\n main()\n", "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom numpy.linalg import norm\nfrom scipy.spatial.ckdtree import cKDTree\n\n\ndef streamlines_to_segments(streamlines):\n \"\"\"Split streamlines into its segments.\n\n Parameters\n ----------\n streamlines : list of numpy.ndarray\n List of streamlines.\n\n Returns\n -------\n segments : numpy.ndarray (2D)\n Segments array representation with the first and last points.\n \"\"\"\n vts_0_list = []\n vts_1_list = []\n for streamline in streamlines:\n vts_0_list.append(streamline[:-1])\n vts_1_list.append(streamline[1:])\n\n segments = np.stack((np.vstack(vts_0_list), np.vstack(vts_1_list)), axis=0)\n return segments\n\n\ndef streamlines_to_endpoints(streamlines):\n \"\"\"Equivalent to streamlines resampling to 2 points (first and last).\n\n Parameters\n ----------\n streamlines : list of numpy.ndarray\n List of streamlines.\n\n Returns\n -------\n endpoints : numpy.ndarray (2D)\n Endpoint array representation with the first and last points.\n \"\"\"\n endpoints = np.zeros((2, len(streamlines), 3))\n for i, streamline in enumerate(streamlines):\n endpoints[0, i] = streamline[0]\n endpoints[1, i] = streamline[-1]\n\n return endpoints\n\n\ndef streamlines_to_pts_dir_norm(streamlines):\n \"\"\"Evaluate each segment: mid position, direction, length.\n\n Parameters\n ----------\n streamlines : list of numpy.ndarray\n List of streamlines.\n\n Returns\n -------\n seg_mid : numpy.ndarray (2D)\n Mid position (x,y,z) of all streamlines' segments.\n seg_dir : numpy.ndarray (2D)\n Direction (x,y,z) of all streamlines' segments.\n seg_norm : numpy.ndarray (2D)\n Length of all streamlines' segments.\n \"\"\"\n segments = 
streamlines_to_segments(streamlines)\n seg_mid = get_segments_mid_pts_positions(segments)\n seg_dir, seg_norm = get_segments_dir_and_norm(segments)\n return seg_mid, seg_dir, seg_norm\n\n\ndef get_segments_mid_pts_positions(segments):\n return 0.5 * (segments[0] + segments[1])\n\n\ndef get_segments_vectors(segments):\n return segments[1] - segments[0]\n\n\ndef get_segments_dir_and_norm(segments):\n return get_vectors_dir_and_norm(get_segments_vectors(segments))\n\n\ndef get_vectors_dir_and_norm(vectors):\n vectors_norm = compute_vectors_norm(vectors)\n vectors_dir = vectors / vectors_norm.reshape((-1, 1))\n return vectors_dir, vectors_norm\n\n\ndef psf_from_sphere(sphere_vertices):\n return np.abs(np.dot(sphere_vertices, sphere_vertices.T))\n\n\n# Mask functions\ndef generate_mask_indices_1d(nb_voxel, indices_1d):\n mask_1d = np.zeros(nb_voxel, dtype=np.bool)\n mask_1d[indices_1d] = True\n return mask_1d\n\n\ndef get_indices_1d(volume_shape, pts):\n return np.ravel_multi_index(pts.T.astype(np.int), volume_shape)\n\n\ndef get_dir_to_sphere_id(vectors, sphere_vertices):\n \"\"\"Find the closest vector on the sphere vertices using a cKDT tree\n sphere_vertices must be normed (or all with equal norm).\n\n Parameters\n ----------\n vectors : numpy.ndarray (2D)\n Vectors representing the direction (x,y,z) of segments.\n sphere_vertices : numpy.ndarray (2D)\n Vertices of a Dipy sphere object.\n\n Returns\n -------\n dir_sphere_id : numpy.ndarray (1D)\n Sphere indices of the closest sphere direction for each vector\n \"\"\"\n sphere_kdtree = cKDTree(sphere_vertices)\n _, dir_sphere_id = sphere_kdtree.query(vectors, k=1, n_jobs=-1)\n return dir_sphere_id\n\n\n# Generic Functions (vector norm)\ndef compute_vectors_norm(vectors):\n return norm(vectors, ord=2, axis=-1)\n\n\ndef normalize_vectors(vectors):\n return p_normalize_vectors(vectors, 2)\n\n\ndef p_normalize_vectors(vectors, p):\n return vectors / norm(vectors, ord=p, axis=-1, keepdims=True)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport numpy as np\n\nfrom dipy.tracking.metrics import length, downsample\nfrom dipy.tracking.streamline import set_number_of_points\n\n\ndef filter_streamlines_by_length(streamlines,\n data_per_point,\n data_per_streamline,\n min_length=0., max_length=np.inf):\n \"\"\"\n Filter streamlines using minimum and max length\n\n Parameters\n ----------\n streamlines: list\n List of list of 3D points.\n\n data_per_point: dict\n dict of data with one value per point per streamline\n data_per_streamline: dict\n dict of data with one value per streamline\n\n min_length: float\n Minimum length of streamlines.\n max_length: float\n Maximum length of streamlines.\n\n Return\n ------\n filtered_streamlines: list\n List of filtered streamlines by length.\n\n filtered_data_per_point: dict\n dict of data per point for filtered streamlines\n filtered_data_per_streamline: dict\n dict of data per streamline for filtered streamlines\n \"\"\"\n\n lengths = []\n for streamline in streamlines:\n lengths.append(length(streamline))\n\n lengths = np.asarray(lengths)\n\n filter_stream = np.logical_and(lengths >= min_length,\n lengths <= max_length)\n\n filtered_streamlines = list(np.asarray(streamlines)[filter_stream])\n filtered_data_per_point = data_per_point[filter_stream]\n filtered_data_per_streamline = data_per_streamline[filter_stream]\n\n return filtered_streamlines, filtered_data_per_point, filtered_data_per_streamline\n\n\ndef get_subset_streamlines(streamlines,\n data_per_point,\n 
data_per_streamline,\n max_streamlines, rng_seed=None):\n \"\"\"\n Extract a specific number of streamlines\n\n Parameters\n ----------\n streamlines: list\n List of list of 3D points.\n\n data_per_point: dict\n dict of data with one value per point per streamline\n data_per_streamline: dict\n dict of data with one value per streamline\n\n max_streamlines: int\n Maximum number of streamlines to output.\n rng_seed: int\n Random number to use for shuffling the data.\n\n Return\n ------\n subset_streamlines: list\n List of a subset streamline.\n\n subset_data_per_point: dict\n dict of data per point for subset of streamlines\n subset_data_per_streamline: dict\n dict of data per streamline for subset of streamlines\n \"\"\"\n\n rng = np.random.RandomState(rng_seed)\n ind = np.arange(len(streamlines))\n rng.shuffle(ind)\n\n subset_streamlines = list(np.asarray(streamlines)[ind[:max_streamlines]])\n subset_data_per_point = data_per_point[ind[:max_streamlines]]\n subset_data_per_streamline = data_per_streamline[ind[:max_streamlines]]\n\n return subset_streamlines, subset_data_per_point, subset_data_per_streamline\n\n\ndef resample_streamlines(streamlines, num_points=0, arc_length=False):\n \"\"\"\n Resample streamlines using number of points per streamline\n\n Parameters\n ----------\n streamlines: list\n List of list of 3D points.\n num_points: int\n Number of points per streamline in the output.\n arc_length: bool\n Whether to downsample using arc length parametrization.\n\n Return\n ------\n resampled_streamlines: list\n List of resampled streamlines.\n \"\"\"\n resampled_streamlines = []\n for streamline in streamlines:\n if arc_length:\n line = set_number_of_points(streamline, num_points)\n else:\n line = downsample(streamline, num_points)\n resampled_streamlines.append(line)\n\n return resampled_streamlines\n\n\ndef get_theta(requested_theta, tracking_type):\n if requested_theta is not None:\n theta = requested_theta\n elif tracking_type == 'prob':\n theta = 20\n elif tracking_type == 'eudx':\n theta = 60\n else:\n theta = 45\n return theta\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Warp *.trk using a non linear deformation.\n Can be used with Ants or Dipy deformation map.\n\n For more information on how to use the various registration scripts\n see the doc/tractogram_registration.md readme file\n\"\"\"\n\nimport argparse\n\nimport nibabel as nib\nimport numpy as np\n\nfrom scilpy.io.utils import (add_overwrite_arg, create_header_from_anat,\n assert_inputs_exist, assert_outputs_exist)\nfrom scilpy.utils.filenames import split_name_with_nii\nfrom scilpy.utils.streamlines import warp_tractogram\n\n\ndef transform_tractogram(in_filename, ref_filename, def_filename,\n filename_to_save, field_source):\n in_tractogram = nib.streamlines.load(in_filename)\n\n _, out_extension = split_name_with_nii(filename_to_save)\n if out_extension == '.trk':\n # Only TRK/NII can be a reference, because they have an affine\n _, ref_extension = split_name_with_nii(ref_filename)\n if ref_extension == '.trk':\n ref_tractogram = nib.streamlines.load(ref_filename, lazy_load=True)\n ref_header = ref_tractogram.header\n else:\n ref_img = nib.load(ref_filename)\n ref_header = create_header_from_anat(ref_img)\n elif out_extension == '.tck':\n ref_header = nib.streamlines.TckFile.create_empty_header()\n\n deformation = nib.load(def_filename)\n deformation_data = np.squeeze(deformation.get_data())\n\n if not np.allclose(deformation.affine,\n in_tractogram.header[\"voxel_to_rasmm\"]):\n raise 
ValueError('Both affines are not equal')\n\n if not np.array_equal(deformation_data.shape[0:3],\n in_tractogram.header[\"dimensions\"]):\n raise ValueError('Both dimensions are not equal')\n\n transfo = in_tractogram.header[\"voxel_to_rasmm\"]\n # Warning: Apply warp in-place\n warp_tractogram(in_tractogram.streamlines, transfo, deformation_data,\n field_source)\n\n new_tractogram = nib.streamlines.Tractogram(in_tractogram.streamlines,\n affine_to_rasmm=np.eye(4))\n nib.streamlines.save(new_tractogram, filename_to_save, header=ref_header)\n\n\ndef _build_args_parser():\n p = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n description=__doc__)\n\n p.add_argument('in_file',\n help='Path of the tractogram to be warped (trk).')\n\n p.add_argument('ref_file',\n help='Path of the reference file (trk, nii or nii.gz')\n\n p.add_argument('deformation',\n help='Path of the file containing deformation field.')\n\n p.add_argument('out_name',\n help='Output filename of the transformed tractogram.')\n\n p.add_argument('--field_source', default='ants', choices=['ants', 'dipy'],\n help='Source of the deformation field: [%(choices)s] \\n'\n 'be cautious, the default is [%(default)s].')\n\n add_overwrite_arg(p)\n\n return p\n\n\ndef main():\n parser = _build_args_parser()\n args = parser.parse_args()\n\n assert_inputs_exist(parser, [args.in_file, args.ref_file,\n args.deformation])\n assert_outputs_exist(parser, args, args.out_name)\n\n if not nib.streamlines.TrkFile.is_correct_format(args.in_file):\n parser.error('The input file needs to be a TRK file')\n\n _, ref_extension = split_name_with_nii(args.ref_file)\n if ref_extension == '.trk':\n if not nib.streamlines.TrkFile.is_correct_format(args.ref_file):\n parser.error('{} is not a valid TRK file.'.format(args.ref_file))\n elif ref_extension not in ['.nii', '.nii.gz']:\n parser.error('{} is an unsupported format.'.format(args.ref_file))\n\n transform_tractogram(args.in_file, args.ref_file, args.deformation,\n args.out_name, args.field_source)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.eye" ], [ "numpy.savetxt", "numpy.array", "numpy.ones_like" ], [ "numpy.dot", "numpy.isnan", "numpy.ones", "numpy.array", "numpy.zeros" ], [ "numpy.dot", "numpy.linalg.norm", "scipy.spatial.ckdtree.cKDTree", "numpy.zeros", "numpy.vstack" ], [ "numpy.asarray", "numpy.logical_and", "numpy.random.RandomState" ], [ "numpy.eye", "numpy.allclose", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rfdj/eynollah
[ "efc146feb8dd6715c1f85f4fff5f333ea26548e4" ]
[ "qurator/eynollah/eynollah.py" ]
[ "# pylint: disable=no-member,invalid-name,line-too-long,missing-function-docstring,missing-class-docstring,too-many-branches\n# pylint: disable=too-many-locals,wrong-import-position,too-many-lines,too-many-statements,chained-comparison,fixme,broad-except,c-extension-no-member\n# pylint: disable=too-many-public-methods,too-many-arguments,too-many-instance-attributes,too-many-public-methods,\n# pylint: disable=consider-using-enumerate\n\"\"\"\ntool to extract table form data from alto xml data\n\"\"\"\n\nimport math\nimport os\nimport sys\nimport time\nimport warnings\nfrom pathlib import Path\nfrom multiprocessing import Process, Queue, cpu_count\nimport gc\nfrom ocrd_utils import getLogger\nimport cv2\nimport numpy as np\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nstderr = sys.stderr\nsys.stderr = open(os.devnull, \"w\")\nfrom keras import backend as K\nfrom keras.models import load_model\nsys.stderr = stderr\nimport tensorflow as tf\ntf.get_logger().setLevel(\"ERROR\")\nwarnings.filterwarnings(\"ignore\")\n\n\nfrom .utils.contour import (\n filter_contours_area_of_image,\n find_contours_mean_y_diff,\n find_new_features_of_contours,\n get_text_region_boxes_by_given_contours,\n get_textregion_contours_in_org_image,\n return_contours_of_image,\n return_contours_of_interested_region,\n return_contours_of_interested_region_by_min_size,\n return_contours_of_interested_textline,\n return_parent_contours,\n)\nfrom .utils.rotate import (\n rotate_image,\n rotation_not_90_func,\n rotation_not_90_func_full_layout)\nfrom .utils.separate_lines import (\n textline_contours_postprocessing,\n separate_lines_new2,\n return_deskew_slop)\nfrom .utils.drop_capitals import (\n adhere_drop_capital_region_into_corresponding_textline,\n filter_small_drop_capitals_from_no_patch_layout)\nfrom .utils.marginals import get_marginals\nfrom .utils.resize import resize_image\nfrom .utils import (\n boosting_headers_by_longshot_region_segmentation,\n crop_image_inside_box,\n find_num_col,\n otsu_copy_binary,\n put_drop_out_from_only_drop_model,\n putt_bb_of_drop_capitals_of_model_in_patches_in_layout,\n check_any_text_region_in_model_one_is_main_or_header,\n small_textlines_to_parent_adherence2,\n order_of_regions,\n find_number_of_columns_in_document,\n return_boxes_of_images_by_order_of_reading_new)\nfrom .utils.pil_cv2 import check_dpi, pil2cv\nfrom .utils.xml import order_and_id_of_texts\nfrom .plot import EynollahPlotter\nfrom .writer import EynollahXmlWriter\n\nSLOPE_THRESHOLD = 0.13\nRATIO_OF_TWO_MODEL_THRESHOLD = 95.50 #98.45:\nDPI_THRESHOLD = 298\nMAX_SLOPE = 999\nKERNEL = np.ones((5, 5), np.uint8)\n\nclass Eynollah:\n def __init__(\n self,\n dir_models,\n image_filename,\n image_pil=None,\n image_filename_stem=None,\n dir_out=None,\n dir_of_cropped_images=None,\n dir_of_layout=None,\n dir_of_deskewed=None,\n dir_of_all=None,\n enable_plotting=False,\n allow_enhancement=False,\n curved_line=False,\n full_layout=False,\n input_binary=False,\n allow_scaling=False,\n headers_off=False,\n override_dpi=None,\n logger=None,\n pcgts=None,\n ):\n if image_pil:\n self._imgs = self._cache_images(image_pil=image_pil)\n else:\n self._imgs = self._cache_images(image_filename=image_filename)\n if override_dpi:\n self.dpi = override_dpi\n self.image_filename = image_filename\n self.dir_out = dir_out\n self.allow_enhancement = allow_enhancement\n self.curved_line = curved_line\n self.full_layout = full_layout\n self.input_binary = input_binary\n self.allow_scaling = allow_scaling\n self.headers_off = headers_off\n 
self.plotter = None if not enable_plotting else EynollahPlotter(\n dir_of_all=dir_of_all,\n dir_of_deskewed=dir_of_deskewed,\n dir_of_cropped_images=dir_of_cropped_images,\n dir_of_layout=dir_of_layout,\n image_filename_stem=Path(Path(image_filename).name).stem)\n self.writer = EynollahXmlWriter(\n dir_out=self.dir_out,\n image_filename=self.image_filename,\n curved_line=self.curved_line,\n pcgts=pcgts)\n self.logger = logger if logger else getLogger('eynollah')\n self.dir_models = dir_models\n\n self.model_dir_of_enhancement = dir_models + \"/model_enhancement.h5\"\n self.model_dir_of_binarization = dir_models + \"/model_bin_sbb_ens.h5\"\n self.model_dir_of_col_classifier = dir_models + \"/model_scale_classifier.h5\"\n self.model_region_dir_p = dir_models + \"/model_main_covid19_lr5-5_scale_1_1_great.h5\"\n self.model_region_dir_p2 = dir_models + \"/model_main_home_corona3_rot.h5\"\n self.model_region_dir_fully_np = dir_models + \"/model_no_patches_class0_30eopch.h5\"\n self.model_region_dir_fully = dir_models + \"/model_3up_new_good_no_augmentation.h5\"\n self.model_page_dir = dir_models + \"/model_page_mixed_best.h5\"\n self.model_region_dir_p_ens = dir_models + \"/model_ensemble_s.h5\"\n self.model_textline_dir = dir_models + \"/model_textline_newspapers.h5\"\n \n def _cache_images(self, image_filename=None, image_pil=None):\n ret = {}\n if image_filename:\n ret['img'] = cv2.imread(image_filename)\n self.dpi = check_dpi(image_filename)\n else:\n ret['img'] = pil2cv(image_pil)\n self.dpi = check_dpi(image_pil)\n ret['img_grayscale'] = cv2.cvtColor(ret['img'], cv2.COLOR_BGR2GRAY)\n for prefix in ('', '_grayscale'):\n ret[f'img{prefix}_uint8'] = ret[f'img{prefix}'].astype(np.uint8)\n return ret\n\n def imread(self, grayscale=False, uint8=True):\n key = 'img'\n if grayscale:\n key += '_grayscale'\n if uint8:\n key += '_uint8'\n return self._imgs[key].copy()\n \n def isNaN(self, num):\n return num != num\n\n\n def predict_enhancement(self, img):\n self.logger.debug(\"enter predict_enhancement\")\n model_enhancement, session_enhancement = self.start_new_session_and_model(self.model_dir_of_enhancement)\n\n img_height_model = model_enhancement.layers[len(model_enhancement.layers) - 1].output_shape[1]\n img_width_model = model_enhancement.layers[len(model_enhancement.layers) - 1].output_shape[2]\n if img.shape[0] < img_height_model:\n img = cv2.resize(img, (img.shape[1], img_width_model), interpolation=cv2.INTER_NEAREST)\n\n if img.shape[1] < img_width_model:\n img = cv2.resize(img, (img_height_model, img.shape[0]), interpolation=cv2.INTER_NEAREST)\n margin = int(0 * img_width_model)\n width_mid = img_width_model - 2 * margin\n height_mid = img_height_model - 2 * margin\n img = img / float(255.0)\n\n img_h = img.shape[0]\n img_w = img.shape[1]\n\n prediction_true = np.zeros((img_h, img_w, 3))\n nxf = img_w / float(width_mid)\n nyf = img_h / float(height_mid)\n\n nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf)\n nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf)\n\n for i in range(nxf):\n for j in range(nyf):\n if i == 0:\n index_x_d = i * width_mid\n index_x_u = index_x_d + img_width_model\n else:\n index_x_d = i * width_mid\n index_x_u = index_x_d + img_width_model\n if j == 0:\n index_y_d = j * height_mid\n index_y_u = index_y_d + img_height_model\n else:\n index_y_d = j * height_mid\n index_y_u = index_y_d + img_height_model\n\n if index_x_u > img_w:\n index_x_u = img_w\n index_x_d = img_w - img_width_model\n if index_y_u > img_h:\n index_y_u = img_h\n index_y_d = img_h - 
img_height_model\n\n img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :]\n label_p_pred = model_enhancement.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]))\n\n seg = label_p_pred[0, :, :, :]\n seg = seg * 255\n\n if i == 0 and j == 0:\n seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin]\n prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg\n elif i == nxf - 1 and j == nyf - 1:\n seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0]\n prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg\n elif i == 0 and j == nyf - 1:\n seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin]\n prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg\n elif i == nxf - 1 and j == 0:\n seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0]\n prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg\n elif i == 0 and j != 0 and j != nyf - 1:\n seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin]\n prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg\n elif i == nxf - 1 and j != 0 and j != nyf - 1:\n seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0]\n prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg\n elif i != 0 and i != nxf - 1 and j == 0:\n seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin]\n prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg\n elif i != 0 and i != nxf - 1 and j == nyf - 1:\n seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin]\n prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg\n else:\n seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin]\n prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg\n\n prediction_true = prediction_true.astype(int)\n session_enhancement.close()\n del model_enhancement\n del session_enhancement\n gc.collect()\n\n return prediction_true\n\n def calculate_width_height_by_columns(self, img, num_col, width_early, label_p_pred):\n self.logger.debug(\"enter calculate_width_height_by_columns\")\n if num_col == 1 and width_early < 1100:\n img_w_new = 2000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 2000)\n elif num_col == 1 and width_early >= 2500:\n img_w_new = 2000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 2000)\n elif num_col == 1 and width_early >= 1100 and width_early < 2500:\n img_w_new = width_early\n img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early)\n elif num_col == 2 and width_early < 2000:\n img_w_new = 2400\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 2400)\n elif num_col == 2 and width_early >= 3500:\n img_w_new = 2400\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 2400)\n elif num_col == 2 and width_early >= 2000 and width_early < 3500:\n img_w_new = width_early\n img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early)\n elif num_col == 3 and width_early < 2000:\n img_w_new = 3000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 3000)\n elif num_col == 3 and width_early >= 4000:\n img_w_new = 3000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 3000)\n elif num_col == 3 and 
width_early >= 2000 and width_early < 4000:\n img_w_new = width_early\n img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early)\n elif num_col == 4 and width_early < 2500:\n img_w_new = 4000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 4000)\n elif num_col == 4 and width_early >= 5000:\n img_w_new = 4000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 4000)\n elif num_col == 4 and width_early >= 2500 and width_early < 5000:\n img_w_new = width_early\n img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early)\n elif num_col == 5 and width_early < 3700:\n img_w_new = 5000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 5000)\n elif num_col == 5 and width_early >= 7000:\n img_w_new = 5000\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 5000)\n elif num_col == 5 and width_early >= 3700 and width_early < 7000:\n img_w_new = width_early\n img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early)\n elif num_col == 6 and width_early < 4500:\n img_w_new = 6500 # 5400\n img_h_new = int(img.shape[0] / float(img.shape[1]) * 6500)\n else:\n img_w_new = width_early\n img_h_new = int(img.shape[0] / float(img.shape[1]) * width_early)\n\n if label_p_pred[0][int(num_col - 1)] < 0.9 and img_w_new < width_early:\n img_new = np.copy(img)\n num_column_is_classified = False\n else:\n img_new = resize_image(img, img_h_new, img_w_new)\n num_column_is_classified = True\n\n return img_new, num_column_is_classified\n\n def resize_image_with_column_classifier(self, is_image_enhanced, img_bin):\n self.logger.debug(\"enter resize_image_with_column_classifier\")\n if self.input_binary:\n img = np.copy(img_bin)\n else:\n img = self.imread()\n\n _, page_coord = self.early_page_for_num_of_column_classification(img)\n model_num_classifier, session_col_classifier = self.start_new_session_and_model(self.model_dir_of_col_classifier)\n if self.input_binary:\n img_in = np.copy(img)\n img_in = img_in / 255.0\n width_early = img_in.shape[1]\n img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST)\n img_in = img_in.reshape(1, 448, 448, 3)\n else:\n img_1ch = self.imread(grayscale=True, uint8=False)\n width_early = img_1ch.shape[1]\n img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]]\n\n # plt.imshow(img_1ch)\n # plt.show()\n img_1ch = img_1ch / 255.0\n\n img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST)\n\n img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3))\n img_in[0, :, :, 0] = img_1ch[:, :]\n img_in[0, :, :, 1] = img_1ch[:, :]\n img_in[0, :, :, 2] = img_1ch[:, :]\n\n label_p_pred = model_num_classifier.predict(img_in)\n num_col = np.argmax(label_p_pred[0]) + 1\n\n self.logger.info(\"Found %s columns (%s)\", num_col, label_p_pred)\n\n session_col_classifier.close()\n \n del model_num_classifier\n del session_col_classifier\n \n K.clear_session()\n gc.collect()\n\n\n\n img_new, _ = self.calculate_width_height_by_columns(img, num_col, width_early, label_p_pred)\n\n if img_new.shape[1] > img.shape[1]:\n img_new = self.predict_enhancement(img_new)\n is_image_enhanced = True\n\n return img, img_new, is_image_enhanced\n\n def resize_and_enhance_image_with_column_classifier(self):\n self.logger.debug(\"enter resize_and_enhance_image_with_column_classifier\")\n dpi = self.dpi\n self.logger.info(\"Detected %s DPI\", dpi)\n if self.input_binary:\n img = self.imread()\n model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization)\n prediction_bin = self.do_prediction(True, 
img, model_bin)\n \n prediction_bin=prediction_bin[:,:,0]\n prediction_bin = (prediction_bin[:,:]==0)*1\n prediction_bin = prediction_bin*255\n \n prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2)\n\n session_bin.close()\n del model_bin\n del session_bin\n gc.collect()\n \n prediction_bin = prediction_bin.astype(np.uint8)\n img= np.copy(prediction_bin)\n img_bin = np.copy(prediction_bin)\n else:\n img = self.imread()\n img_bin = None\n\n _, page_coord = self.early_page_for_num_of_column_classification(img_bin)\n model_num_classifier, session_col_classifier = self.start_new_session_and_model(self.model_dir_of_col_classifier)\n \n if self.input_binary:\n img_in = np.copy(img)\n width_early = img_in.shape[1]\n img_in = img_in / 255.0\n img_in = cv2.resize(img_in, (448, 448), interpolation=cv2.INTER_NEAREST)\n img_in = img_in.reshape(1, 448, 448, 3)\n else:\n img_1ch = self.imread(grayscale=True)\n width_early = img_1ch.shape[1]\n img_1ch = img_1ch[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]]\n\n img_1ch = img_1ch / 255.0\n img_1ch = cv2.resize(img_1ch, (448, 448), interpolation=cv2.INTER_NEAREST)\n img_in = np.zeros((1, img_1ch.shape[0], img_1ch.shape[1], 3))\n img_in[0, :, :, 0] = img_1ch[:, :]\n img_in[0, :, :, 1] = img_1ch[:, :]\n img_in[0, :, :, 2] = img_1ch[:, :]\n\n\n\n label_p_pred = model_num_classifier.predict(img_in)\n num_col = np.argmax(label_p_pred[0]) + 1\n self.logger.info(\"Found %s columns (%s)\", num_col, label_p_pred)\n session_col_classifier.close()\n K.clear_session()\n\n if dpi < DPI_THRESHOLD:\n img_new, num_column_is_classified = self.calculate_width_height_by_columns(img, num_col, width_early, label_p_pred)\n image_res = self.predict_enhancement(img_new)\n is_image_enhanced = True\n else:\n is_image_enhanced = False\n num_column_is_classified = True\n image_res = np.copy(img)\n \n session_col_classifier.close()\n\n \n self.logger.debug(\"exit resize_and_enhance_image_with_column_classifier\")\n return is_image_enhanced, img, image_res, num_col, num_column_is_classified, img_bin\n\n # pylint: disable=attribute-defined-outside-init\n def get_image_and_scales(self, img_org, img_res, scale):\n self.logger.debug(\"enter get_image_and_scales\")\n self.image = np.copy(img_res)\n self.image_org = np.copy(img_org)\n self.height_org = self.image.shape[0]\n self.width_org = self.image.shape[1]\n\n self.img_hight_int = int(self.image.shape[0] * scale)\n self.img_width_int = int(self.image.shape[1] * scale)\n self.scale_y = self.img_hight_int / float(self.image.shape[0])\n self.scale_x = self.img_width_int / float(self.image.shape[1])\n\n self.image = resize_image(self.image, self.img_hight_int, self.img_width_int)\n\n # Also set for the plotter\n if self.plotter:\n self.plotter.image_org = self.image_org\n self.plotter.scale_y = self.scale_y\n self.plotter.scale_x = self.scale_x\n # Also set for the writer\n self.writer.image_org = self.image_org\n self.writer.scale_y = self.scale_y\n self.writer.scale_x = self.scale_x\n self.writer.height_org = self.height_org\n self.writer.width_org = self.width_org\n\n def get_image_and_scales_after_enhancing(self, img_org, img_res):\n self.logger.debug(\"enter get_image_and_scales_after_enhancing\")\n self.image = np.copy(img_res)\n self.image = self.image.astype(np.uint8)\n self.image_org = np.copy(img_org)\n self.height_org = self.image_org.shape[0]\n self.width_org = self.image_org.shape[1]\n\n self.scale_y = img_res.shape[0] / float(self.image_org.shape[0])\n self.scale_x = img_res.shape[1] / 
float(self.image_org.shape[1])\n\n # Also set for the plotter\n if self.plotter:\n self.plotter.image_org = self.image_org\n self.plotter.scale_y = self.scale_y\n self.plotter.scale_x = self.scale_x\n # Also set for the writer\n self.writer.image_org = self.image_org\n self.writer.scale_y = self.scale_y\n self.writer.scale_x = self.scale_x\n self.writer.height_org = self.height_org\n self.writer.width_org = self.width_org\n\n def start_new_session_and_model_old(self, model_dir):\n self.logger.debug(\"enter start_new_session_and_model (model_dir=%s)\", model_dir)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n session = tf.InteractiveSession()\n model = load_model(model_dir, compile=False)\n\n return model, session\n\n \n def start_new_session_and_model(self, model_dir):\n self.logger.debug(\"enter start_new_session_and_model (model_dir=%s)\", model_dir)\n gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)\n #gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=7.7, allow_growth=True)\n session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))\n model = load_model(model_dir, compile=False)\n\n return model, session\n\n def do_prediction(self, patches, img, model, marginal_of_patch_percent=0.1):\n self.logger.debug(\"enter do_prediction\")\n\n img_height_model = model.layers[len(model.layers) - 1].output_shape[1]\n img_width_model = model.layers[len(model.layers) - 1].output_shape[2]\n\n if not patches:\n img_h_page = img.shape[0]\n img_w_page = img.shape[1]\n img = img / float(255.0)\n img = resize_image(img, img_height_model, img_width_model)\n\n label_p_pred = model.predict(img.reshape(1, img.shape[0], img.shape[1], img.shape[2]))\n\n seg = np.argmax(label_p_pred, axis=3)[0]\n seg_color = np.repeat(seg[:, :, np.newaxis], 3, axis=2)\n prediction_true = resize_image(seg_color, img_h_page, img_w_page)\n prediction_true = prediction_true.astype(np.uint8)\n\n\n else:\n if img.shape[0] < img_height_model:\n img = resize_image(img, img_height_model, img.shape[1])\n\n if img.shape[1] < img_width_model:\n img = resize_image(img, img.shape[0], img_width_model)\n\n self.logger.info(\"Image dimensions: %sx%s\", img_height_model, img_width_model)\n margin = int(marginal_of_patch_percent * img_height_model)\n width_mid = img_width_model - 2 * margin\n height_mid = img_height_model - 2 * margin\n img = img / float(255.0)\n img = img.astype(np.float16)\n img_h = img.shape[0]\n img_w = img.shape[1]\n prediction_true = np.zeros((img_h, img_w, 3))\n mask_true = np.zeros((img_h, img_w))\n nxf = img_w / float(width_mid)\n nyf = img_h / float(height_mid)\n nxf = int(nxf) + 1 if nxf > int(nxf) else int(nxf)\n nyf = int(nyf) + 1 if nyf > int(nyf) else int(nyf)\n\n for i in range(nxf):\n for j in range(nyf):\n if i == 0:\n index_x_d = i * width_mid\n index_x_u = index_x_d + img_width_model\n else:\n index_x_d = i * width_mid\n index_x_u = index_x_d + img_width_model\n if j == 0:\n index_y_d = j * height_mid\n index_y_u = index_y_d + img_height_model\n else:\n index_y_d = j * height_mid\n index_y_u = index_y_d + img_height_model\n if index_x_u > img_w:\n index_x_u = img_w\n index_x_d = img_w - img_width_model\n if index_y_u > img_h:\n index_y_u = img_h\n index_y_d = img_h - img_height_model\n\n img_patch = img[index_y_d:index_y_u, index_x_d:index_x_u, :]\n label_p_pred = model.predict(img_patch.reshape(1, img_patch.shape[0], img_patch.shape[1], img_patch.shape[2]))\n seg = np.argmax(label_p_pred, axis=3)[0]\n seg_color = 
np.repeat(seg[:, :, np.newaxis], 3, axis=2)\n\n if i == 0 and j == 0:\n seg_color = seg_color[0 : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]\n seg = seg[0 : seg.shape[0] - margin, 0 : seg.shape[1] - margin]\n mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg\n prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color\n elif i == nxf - 1 and j == nyf - 1:\n seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - 0, :]\n seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - 0]\n mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0] = seg\n prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - 0, :] = seg_color\n elif i == 0 and j == nyf - 1:\n seg_color = seg_color[margin : seg_color.shape[0] - 0, 0 : seg_color.shape[1] - margin, :]\n seg = seg[margin : seg.shape[0] - 0, 0 : seg.shape[1] - margin]\n mask_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin] = seg\n prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + 0 : index_x_u - margin, :] = seg_color\n elif i == nxf - 1 and j == 0:\n seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]\n seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - 0]\n mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg\n prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color\n elif i == 0 and j != 0 and j != nyf - 1:\n seg_color = seg_color[margin : seg_color.shape[0] - margin, 0 : seg_color.shape[1] - margin, :]\n seg = seg[margin : seg.shape[0] - margin, 0 : seg.shape[1] - margin]\n mask_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin] = seg\n prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + 0 : index_x_u - margin, :] = seg_color\n elif i == nxf - 1 and j != 0 and j != nyf - 1:\n seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - 0, :]\n seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - 0]\n mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0] = seg\n prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - 0, :] = seg_color\n elif i != 0 and i != nxf - 1 and j == 0:\n seg_color = seg_color[0 : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]\n seg = seg[0 : seg.shape[0] - margin, margin : seg.shape[1] - margin]\n mask_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin] = seg\n prediction_true[index_y_d + 0 : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color\n elif i != 0 and i != nxf - 1 and j == nyf - 1:\n seg_color = seg_color[margin : seg_color.shape[0] - 0, margin : seg_color.shape[1] - margin, :]\n seg = seg[margin : seg.shape[0] - 0, margin : seg.shape[1] - margin]\n mask_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin] = seg\n prediction_true[index_y_d + margin : index_y_u - 0, index_x_d + margin : index_x_u - margin, :] = seg_color\n else:\n seg_color = seg_color[margin : seg_color.shape[0] - margin, margin : seg_color.shape[1] - margin, :]\n seg = seg[margin : seg.shape[0] - margin, margin : seg.shape[1] - margin]\n mask_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin] = 
seg\n prediction_true[index_y_d + margin : index_y_u - margin, index_x_d + margin : index_x_u - margin, :] = seg_color\n\n prediction_true = prediction_true.astype(np.uint8)\n del model\n gc.collect()\n return prediction_true\n\n def early_page_for_num_of_column_classification(self,img_bin):\n self.logger.debug(\"enter early_page_for_num_of_column_classification\")\n if self.input_binary:\n img =np.copy(img_bin)\n img = img.astype(np.uint8)\n else:\n img = self.imread()\n model_page, session_page = self.start_new_session_and_model(self.model_page_dir)\n img = cv2.GaussianBlur(img, (5, 5), 0)\n\n img_page_prediction = self.do_prediction(False, img, model_page)\n\n imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY)\n _, thresh = cv2.threshold(imgray, 0, 255, 0)\n thresh = cv2.dilate(thresh, KERNEL, iterations=3)\n contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours)>0:\n cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))])\n cnt = contours[np.argmax(cnt_size)]\n x, y, w, h = cv2.boundingRect(cnt)\n box = [x, y, w, h]\n else:\n box = [0, 0, img.shape[1], img.shape[0]]\n croped_page, page_coord = crop_image_inside_box(box, img)\n session_page.close()\n del model_page\n del session_page\n gc.collect()\n K.clear_session()\n self.logger.debug(\"exit early_page_for_num_of_column_classification\")\n return croped_page, page_coord\n\n def extract_page(self):\n self.logger.debug(\"enter extract_page\")\n cont_page = []\n model_page, session_page = self.start_new_session_and_model(self.model_page_dir)\n img = cv2.GaussianBlur(self.image, (5, 5), 0)\n img_page_prediction = self.do_prediction(False, img, model_page)\n imgray = cv2.cvtColor(img_page_prediction, cv2.COLOR_BGR2GRAY)\n _, thresh = cv2.threshold(imgray, 0, 255, 0)\n thresh = cv2.dilate(thresh, KERNEL, iterations=3)\n contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n if len(contours)>0:\n cnt_size = np.array([cv2.contourArea(contours[j]) for j in range(len(contours))])\n cnt = contours[np.argmax(cnt_size)]\n x, y, w, h = cv2.boundingRect(cnt)\n if x <= 30:\n w += x\n x = 0\n if (self.image.shape[1] - (x + w)) <= 30:\n w = w + (self.image.shape[1] - (x + w))\n if y <= 30:\n h = h + y\n y = 0\n if (self.image.shape[0] - (y + h)) <= 30:\n h = h + (self.image.shape[0] - (y + h))\n\n box = [x, y, w, h]\n else:\n box = [0, 0, img.shape[1], img.shape[0]]\n croped_page, page_coord = crop_image_inside_box(box, self.image)\n cont_page.append(np.array([[page_coord[2], page_coord[0]], [page_coord[3], page_coord[0]], [page_coord[3], page_coord[1]], [page_coord[2], page_coord[1]]]))\n session_page.close()\n del model_page\n del session_page\n gc.collect()\n K.clear_session()\n self.logger.debug(\"exit extract_page\")\n return croped_page, page_coord, cont_page\n\n def extract_text_regions(self, img, patches, cols):\n self.logger.debug(\"enter extract_text_regions\")\n img_height_h = img.shape[0]\n img_width_h = img.shape[1]\n\n model_region, session_region = self.start_new_session_and_model(self.model_region_dir_fully if patches else self.model_region_dir_fully_np)\n\n if not patches:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n prediction_regions2 = None\n else:\n if cols == 1:\n img2 = otsu_copy_binary(img)\n img2 = img2.astype(np.uint8)\n img2 = resize_image(img2, int(img_height_h * 0.7), int(img_width_h * 0.7))\n marginal_of_patch_percent = 0.1\n prediction_regions2 = self.do_prediction(patches, img2, model_region, 
marginal_of_patch_percent)\n prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h)\n\n if cols == 2:\n img2 = otsu_copy_binary(img)\n img2 = img2.astype(np.uint8)\n img2 = resize_image(img2, int(img_height_h * 0.4), int(img_width_h * 0.4))\n marginal_of_patch_percent = 0.1\n prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent)\n prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h)\n\n elif cols > 2:\n img2 = otsu_copy_binary(img)\n img2 = img2.astype(np.uint8)\n img2 = resize_image(img2, int(img_height_h * 0.3), int(img_width_h * 0.3))\n marginal_of_patch_percent = 0.1\n prediction_regions2 = self.do_prediction(patches, img2, model_region, marginal_of_patch_percent)\n prediction_regions2 = resize_image(prediction_regions2, img_height_h, img_width_h)\n\n if cols == 2:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n if img_width_h >= 2000:\n img = resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9))\n img = img.astype(np.uint8)\n\n if cols == 1:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img = resize_image(img, int(img_height_h * 0.5), int(img_width_h * 0.5))\n img = img.astype(np.uint8)\n\n if cols == 3:\n if (self.scale_x == 1 and img_width_h > 3000) or (self.scale_x != 1 and img_width_h > 2800):\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img = resize_image(img, int(img_height_h * 2800 / float(img_width_h)), 2800)\n else:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n\n if cols == 4:\n if (self.scale_x == 1 and img_width_h > 4000) or (self.scale_x != 1 and img_width_h > 3700):\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img= resize_image(img, int(img_height_h * 3700 / float(img_width_h)), 3700)\n else:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9))\n\n if cols == 5:\n if self.scale_x == 1 and img_width_h > 5000:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img= resize_image(img, int(img_height_h * 0.7), int(img_width_h * 0.7))\n else:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9) )\n\n if cols >= 6:\n if img_width_h > 5600:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img= resize_image(img, int(img_height_h * 5600 / float(img_width_h)), 5600)\n else:\n img = otsu_copy_binary(img)\n img = img.astype(np.uint8)\n img= resize_image(img, int(img_height_h * 0.9), int(img_width_h * 0.9))\n\n marginal_of_patch_percent = 0.1\n prediction_regions = self.do_prediction(patches, img, model_region, marginal_of_patch_percent)\n prediction_regions = resize_image(prediction_regions, img_height_h, img_width_h)\n\n session_region.close()\n del model_region\n del session_region\n gc.collect()\n \n self.logger.debug(\"exit extract_text_regions\")\n return prediction_regions, prediction_regions2\n\n def get_slopes_and_deskew_new(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, slope_deskew):\n self.logger.debug(\"enter get_slopes_and_deskew_new\")\n num_cores = cpu_count()\n queue_of_all_params = Queue()\n\n processes = []\n nh = np.linspace(0, len(boxes), num_cores + 1)\n indexes_by_text_con = np.array(range(len(contours_par)))\n for i in range(num_cores):\n boxes_per_process = boxes[int(nh[i]) : int(nh[i + 1])]\n contours_per_process = contours[int(nh[i]) : int(nh[i + 1])]\n 
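            # NOTE (added explanatory comment, not in the original source): the work is
            # sharded across CPU cores. nh = np.linspace(0, len(boxes), num_cores + 1)
            # yields num_cores contiguous index ranges, e.g. 8 boxes on 4 cores ->
            # nh = [0, 2, 4, 6, 8] -> slices [0:2], [2:4], [4:6], [6:8]. Each slice of
            # boxes/contours is handed to one Process running do_work_of_slopes_new,
            # and the per-chunk results are collected below from queue_of_all_params.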
contours_par_per_process = contours_par[int(nh[i]) : int(nh[i + 1])]\n indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])]\n\n processes.append(Process(target=self.do_work_of_slopes_new, args=(queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, contours_par_per_process, indexes_text_con_per_process, image_page_rotated, slope_deskew)))\n for i in range(num_cores):\n processes[i].start()\n\n slopes = []\n all_found_texline_polygons = []\n all_found_text_regions = []\n all_found_text_regions_par = []\n boxes = []\n all_box_coord = []\n all_index_text_con = []\n for i in range(num_cores):\n list_all_par = queue_of_all_params.get(True)\n slopes_for_sub_process = list_all_par[0]\n polys_for_sub_process = list_all_par[1]\n boxes_for_sub_process = list_all_par[2]\n contours_for_subprocess = list_all_par[3]\n contours_par_for_subprocess = list_all_par[4]\n boxes_coord_for_subprocess = list_all_par[5]\n indexes_for_subprocess = list_all_par[6]\n for j in range(len(slopes_for_sub_process)):\n slopes.append(slopes_for_sub_process[j])\n all_found_texline_polygons.append(polys_for_sub_process[j])\n boxes.append(boxes_for_sub_process[j])\n all_found_text_regions.append(contours_for_subprocess[j])\n all_found_text_regions_par.append(contours_par_for_subprocess[j])\n all_box_coord.append(boxes_coord_for_subprocess[j])\n all_index_text_con.append(indexes_for_subprocess[j])\n for i in range(num_cores):\n processes[i].join()\n self.logger.debug('slopes %s', slopes)\n self.logger.debug(\"exit get_slopes_and_deskew_new\")\n return slopes, all_found_texline_polygons, boxes, all_found_text_regions, all_found_text_regions_par, all_box_coord, all_index_text_con\n\n def get_slopes_and_deskew_new_curved(self, contours, contours_par, textline_mask_tot, image_page_rotated, boxes, mask_texts_only, num_col, scale_par, slope_deskew):\n self.logger.debug(\"enter get_slopes_and_deskew_new_curved\")\n num_cores = cpu_count()\n queue_of_all_params = Queue()\n\n processes = []\n nh = np.linspace(0, len(boxes), num_cores + 1)\n indexes_by_text_con = np.array(range(len(contours_par)))\n\n for i in range(num_cores):\n boxes_per_process = boxes[int(nh[i]) : int(nh[i + 1])]\n contours_per_process = contours[int(nh[i]) : int(nh[i + 1])]\n contours_par_per_process = contours_par[int(nh[i]) : int(nh[i + 1])]\n indexes_text_con_per_process = indexes_by_text_con[int(nh[i]) : int(nh[i + 1])]\n\n processes.append(Process(target=self.do_work_of_slopes_new_curved, args=(queue_of_all_params, boxes_per_process, textline_mask_tot, contours_per_process, contours_par_per_process, image_page_rotated, mask_texts_only, num_col, scale_par, indexes_text_con_per_process, slope_deskew)))\n\n for i in range(num_cores):\n processes[i].start()\n\n slopes = []\n all_found_texline_polygons = []\n all_found_text_regions = []\n all_found_text_regions_par = []\n boxes = []\n all_box_coord = []\n all_index_text_con = []\n\n for i in range(num_cores):\n list_all_par = queue_of_all_params.get(True)\n polys_for_sub_process = list_all_par[0]\n boxes_for_sub_process = list_all_par[1]\n contours_for_subprocess = list_all_par[2]\n contours_par_for_subprocess = list_all_par[3]\n boxes_coord_for_subprocess = list_all_par[4]\n indexes_for_subprocess = list_all_par[5]\n slopes_for_sub_process = list_all_par[6]\n for j in range(len(polys_for_sub_process)):\n slopes.append(slopes_for_sub_process[j])\n all_found_texline_polygons.append(polys_for_sub_process[j][::-1])\n boxes.append(boxes_for_sub_process[j])\n 
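                # NOTE (added explanatory comment, not in the original source): chunks are
                # popped from queue_of_all_params in whatever order the worker processes
                # finish, so the per-item indexes saved into all_index_text_con are what
                # later restores the original contour order. Unlike the straight variant,
                # this curved path also reverses each textline polygon's point order via
                # polys_for_sub_process[j][::-1] when appending it above.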
all_found_text_regions.append(contours_for_subprocess[j])\n all_found_text_regions_par.append(contours_par_for_subprocess[j])\n all_box_coord.append(boxes_coord_for_subprocess[j])\n all_index_text_con.append(indexes_for_subprocess[j])\n\n for i in range(num_cores):\n processes[i].join()\n # print(slopes,'slopes')\n return all_found_texline_polygons, boxes, all_found_text_regions, all_found_text_regions_par, all_box_coord, all_index_text_con, slopes\n\n def do_work_of_slopes_new_curved(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, contours_par_per_process, image_page_rotated, mask_texts_only, num_col, scale_par, indexes_r_con_per_pro, slope_deskew):\n self.logger.debug(\"enter do_work_of_slopes_new_curved\")\n slopes_per_each_subprocess = []\n bounding_box_of_textregion_per_each_subprocess = []\n textlines_rectangles_per_each_subprocess = []\n contours_textregion_per_each_subprocess = []\n contours_textregion_par_per_each_subprocess = []\n all_box_coord_per_process = []\n index_by_text_region_contours = []\n\n textline_cnt_separated = np.zeros(textline_mask_tot_ea.shape)\n\n for mv in range(len(boxes_text)):\n\n all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]]\n all_text_region_raw = all_text_region_raw.astype(np.uint8)\n img_int_p = all_text_region_raw[:, :]\n\n # img_int_p=cv2.erode(img_int_p,KERNEL,iterations = 2)\n # plt.imshow(img_int_p)\n # plt.show()\n\n if img_int_p.shape[0] / img_int_p.shape[1] < 0.1:\n slopes_per_each_subprocess.append(0)\n slope_for_all = [slope_deskew][0]\n else:\n try:\n textline_con, hierarchy = return_contours_of_image(img_int_p)\n textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.0008)\n y_diff_mean = find_contours_mean_y_diff(textline_con_fil)\n if self.isNaN(y_diff_mean):\n slope_for_all = MAX_SLOPE\n else:\n sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0)))\n img_int_p[img_int_p > 0] = 1\n slope_for_all = return_deskew_slop(img_int_p, sigma_des, plotter=self.plotter)\n\n if abs(slope_for_all) < 0.5:\n slope_for_all = [slope_deskew][0]\n\n except Exception as why:\n self.logger.error(why)\n slope_for_all = MAX_SLOPE\n\n if slope_for_all == MAX_SLOPE:\n slope_for_all = [slope_deskew][0]\n slopes_per_each_subprocess.append(slope_for_all)\n\n index_by_text_region_contours.append(indexes_r_con_per_pro[mv])\n _, crop_coor = crop_image_inside_box(boxes_text[mv], image_page_rotated)\n\n if abs(slope_for_all) < 45:\n # all_box_coord.append(crop_coor)\n textline_region_in_image = np.zeros(textline_mask_tot_ea.shape)\n cnt_o_t_max = contours_par_per_process[mv]\n x, y, w, h = cv2.boundingRect(cnt_o_t_max)\n mask_biggest = np.zeros(mask_texts_only.shape)\n mask_biggest = cv2.fillPoly(mask_biggest, pts=[cnt_o_t_max], color=(1, 1, 1))\n mask_region_in_patch_region = mask_biggest[y : y + h, x : x + w]\n textline_biggest_region = mask_biggest * textline_mask_tot_ea\n\n # print(slope_for_all,'slope_for_all')\n textline_rotated_separated = separate_lines_new2(textline_biggest_region[y : y + h, x : x + w], 0, num_col, slope_for_all, plotter=self.plotter)\n\n # new line added\n ##print(np.shape(textline_rotated_separated),np.shape(mask_biggest))\n textline_rotated_separated[mask_region_in_patch_region[:, :] != 1] = 0\n # till here\n\n textline_cnt_separated[y : y + h, x : x + w] = textline_rotated_separated\n textline_region_in_image[y : y + h, x : x + w] = 
textline_rotated_separated\n\n # plt.imshow(textline_region_in_image)\n # plt.show()\n # plt.imshow(textline_cnt_separated)\n # plt.show()\n\n pixel_img = 1\n cnt_textlines_in_image = return_contours_of_interested_textline(textline_region_in_image, pixel_img)\n\n textlines_cnt_per_region = []\n for jjjj in range(len(cnt_textlines_in_image)):\n mask_biggest2 = np.zeros(mask_texts_only.shape)\n mask_biggest2 = cv2.fillPoly(mask_biggest2, pts=[cnt_textlines_in_image[jjjj]], color=(1, 1, 1))\n if num_col + 1 == 1:\n mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=5)\n else:\n mask_biggest2 = cv2.dilate(mask_biggest2, KERNEL, iterations=4)\n\n pixel_img = 1\n mask_biggest2 = resize_image(mask_biggest2, int(mask_biggest2.shape[0] * scale_par), int(mask_biggest2.shape[1] * scale_par))\n cnt_textlines_in_image_ind = return_contours_of_interested_textline(mask_biggest2, pixel_img)\n try:\n textlines_cnt_per_region.append(cnt_textlines_in_image_ind[0])\n except Exception as why:\n self.logger.error(why)\n else:\n add_boxes_coor_into_textlines = True\n textlines_cnt_per_region = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv], add_boxes_coor_into_textlines)\n add_boxes_coor_into_textlines = False\n # print(np.shape(textlines_cnt_per_region),'textlines_cnt_per_region')\n\n textlines_rectangles_per_each_subprocess.append(textlines_cnt_per_region)\n bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv])\n contours_textregion_per_each_subprocess.append(contours_per_process[mv])\n contours_textregion_par_per_each_subprocess.append(contours_par_per_process[mv])\n all_box_coord_per_process.append(crop_coor)\n\n queue_of_all_params.put([textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, all_box_coord_per_process, index_by_text_region_contours, slopes_per_each_subprocess])\n\n def do_work_of_slopes_new(self, queue_of_all_params, boxes_text, textline_mask_tot_ea, contours_per_process, contours_par_per_process, indexes_r_con_per_pro, image_page_rotated, slope_deskew):\n self.logger.debug('enter do_work_of_slopes_new')\n slopes_per_each_subprocess = []\n bounding_box_of_textregion_per_each_subprocess = []\n textlines_rectangles_per_each_subprocess = []\n contours_textregion_per_each_subprocess = []\n contours_textregion_par_per_each_subprocess = []\n all_box_coord_per_process = []\n index_by_text_region_contours = []\n for mv in range(len(boxes_text)):\n _, crop_coor = crop_image_inside_box(boxes_text[mv],image_page_rotated)\n mask_textline = np.zeros((textline_mask_tot_ea.shape))\n mask_textline = cv2.fillPoly(mask_textline,pts=[contours_per_process[mv]],color=(1,1,1))\n all_text_region_raw = (textline_mask_tot_ea*mask_textline[:,:])[boxes_text[mv][1]:boxes_text[mv][1]+boxes_text[mv][3] , boxes_text[mv][0]:boxes_text[mv][0]+boxes_text[mv][2] ]\n all_text_region_raw=all_text_region_raw.astype(np.uint8)\n img_int_p=all_text_region_raw[:,:]#self.all_text_region_raw[mv]\n img_int_p=cv2.erode(img_int_p,KERNEL,iterations = 2)\n\n if img_int_p.shape[0]/img_int_p.shape[1]<0.1:\n slopes_per_each_subprocess.append(0)\n slope_for_all = [slope_deskew][0]\n all_text_region_raw = textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]]\n cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, 
contours_par_per_process[mv], boxes_text[mv], 0)\n textlines_rectangles_per_each_subprocess.append(cnt_clean_rot)\n index_by_text_region_contours.append(indexes_r_con_per_pro[mv])\n bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv])\n else:\n try:\n textline_con, hierarchy = return_contours_of_image(img_int_p)\n textline_con_fil = filter_contours_area_of_image(img_int_p, textline_con, hierarchy, max_area=1, min_area=0.00008)\n y_diff_mean = find_contours_mean_y_diff(textline_con_fil)\n if self.isNaN(y_diff_mean):\n slope_for_all = MAX_SLOPE\n else:\n sigma_des = int(y_diff_mean * (4.0 / 40.0))\n if sigma_des < 1:\n sigma_des = 1\n img_int_p[img_int_p > 0] = 1\n slope_for_all = return_deskew_slop(img_int_p, sigma_des, plotter=self.plotter)\n if abs(slope_for_all) <= 0.5:\n slope_for_all = [slope_deskew][0]\n except Exception as why:\n self.logger.error(why)\n slope_for_all = MAX_SLOPE\n if slope_for_all == MAX_SLOPE:\n slope_for_all = [slope_deskew][0]\n slopes_per_each_subprocess.append(slope_for_all)\n mask_only_con_region = np.zeros(textline_mask_tot_ea.shape)\n mask_only_con_region = cv2.fillPoly(mask_only_con_region, pts=[contours_par_per_process[mv]], color=(1, 1, 1))\n\n # plt.imshow(mask_only_con_region)\n # plt.show()\n all_text_region_raw = np.copy(textline_mask_tot_ea[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]])\n mask_only_con_region = mask_only_con_region[boxes_text[mv][1] : boxes_text[mv][1] + boxes_text[mv][3], boxes_text[mv][0] : boxes_text[mv][0] + boxes_text[mv][2]]\n\n ##plt.imshow(textline_mask_tot_ea)\n ##plt.show()\n ##plt.imshow(all_text_region_raw)\n ##plt.show()\n ##plt.imshow(mask_only_con_region)\n ##plt.show()\n\n all_text_region_raw[mask_only_con_region == 0] = 0\n cnt_clean_rot = textline_contours_postprocessing(all_text_region_raw, slope_for_all, contours_par_per_process[mv], boxes_text[mv])\n\n textlines_rectangles_per_each_subprocess.append(cnt_clean_rot)\n index_by_text_region_contours.append(indexes_r_con_per_pro[mv])\n bounding_box_of_textregion_per_each_subprocess.append(boxes_text[mv])\n\n contours_textregion_per_each_subprocess.append(contours_per_process[mv])\n contours_textregion_par_per_each_subprocess.append(contours_par_per_process[mv])\n all_box_coord_per_process.append(crop_coor)\n queue_of_all_params.put([slopes_per_each_subprocess, textlines_rectangles_per_each_subprocess, bounding_box_of_textregion_per_each_subprocess, contours_textregion_per_each_subprocess, contours_textregion_par_per_each_subprocess, all_box_coord_per_process, index_by_text_region_contours])\n\n def textline_contours(self, img, patches, scaler_h, scaler_w):\n self.logger.debug('enter textline_contours')\n\n model_textline, session_textline = self.start_new_session_and_model(self.model_textline_dir if patches else self.model_textline_dir_np)\n img = img.astype(np.uint8)\n img_org = np.copy(img)\n img_h = img_org.shape[0]\n img_w = img_org.shape[1]\n img = resize_image(img_org, int(img_org.shape[0] * scaler_h), int(img_org.shape[1] * scaler_w))\n prediction_textline = self.do_prediction(patches, img, model_textline)\n prediction_textline = resize_image(prediction_textline, img_h, img_w)\n prediction_textline_longshot = self.do_prediction(False, img, model_textline)\n prediction_textline_longshot_true_size = resize_image(prediction_textline_longshot, img_h, img_w)\n\n session_textline.close()\n\n\n return prediction_textline[:, :, 0], prediction_textline_longshot_true_size[:, :, 
0]\n\n def do_work_of_slopes(self, q, poly, box_sub, boxes_per_process, textline_mask_tot, contours_per_process):\n self.logger.debug('enter do_work_of_slopes')\n slope_biggest = 0\n slopes_sub = []\n boxes_sub_new = []\n poly_sub = []\n for mv in range(len(boxes_per_process)):\n crop_img, _ = crop_image_inside_box(boxes_per_process[mv], np.repeat(textline_mask_tot[:, :, np.newaxis], 3, axis=2))\n crop_img = crop_img[:, :, 0]\n crop_img = cv2.erode(crop_img, KERNEL, iterations=2)\n try:\n textline_con, hierarchy = return_contours_of_image(crop_img)\n textline_con_fil = filter_contours_area_of_image(crop_img, textline_con, hierarchy, max_area=1, min_area=0.0008)\n y_diff_mean = find_contours_mean_y_diff(textline_con_fil)\n sigma_des = max(1, int(y_diff_mean * (4.0 / 40.0)))\n crop_img[crop_img > 0] = 1\n slope_corresponding_textregion = return_deskew_slop(crop_img, sigma_des, plotter=self.plotter)\n except Exception as why:\n self.logger.error(why)\n slope_corresponding_textregion = MAX_SLOPE\n\n if slope_corresponding_textregion == MAX_SLOPE:\n slope_corresponding_textregion = slope_biggest\n slopes_sub.append(slope_corresponding_textregion)\n\n cnt_clean_rot = textline_contours_postprocessing(crop_img, slope_corresponding_textregion, contours_per_process[mv], boxes_per_process[mv])\n\n poly_sub.append(cnt_clean_rot)\n boxes_sub_new.append(boxes_per_process[mv])\n\n q.put(slopes_sub)\n poly.put(poly_sub)\n box_sub.put(boxes_sub_new)\n\n def get_regions_from_xy_2models(self,img,is_image_enhanced, num_col_classifier):\n self.logger.debug(\"enter get_regions_from_xy_2models\")\n erosion_hurts = False\n img_org = np.copy(img)\n img_height_h = img_org.shape[0]\n img_width_h = img_org.shape[1]\n\n model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens)\n\n ratio_y=1.3\n ratio_x=1\n\n img = resize_image(img_org, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x))\n\n prediction_regions_org_y = self.do_prediction(True, img, model_region)\n prediction_regions_org_y = resize_image(prediction_regions_org_y, img_height_h, img_width_h )\n\n #plt.imshow(prediction_regions_org_y[:,:,0])\n #plt.show()\n prediction_regions_org_y = prediction_regions_org_y[:,:,0]\n mask_zeros_y = (prediction_regions_org_y[:,:]==0)*1\n \n ##img_only_regions_with_sep = ( (prediction_regions_org_y[:,:] != 3) & (prediction_regions_org_y[:,:] != 0) )*1\n img_only_regions_with_sep = ( prediction_regions_org_y[:,:] == 1 )*1\n img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8)\n \n try:\n img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=20)\n\n _, _ = find_num_col(img_only_regions, multiplier=6.0)\n \n img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]*(1.2 if is_image_enhanced else 1)))\n\n prediction_regions_org = self.do_prediction(True, img, model_region)\n prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h )\n\n ##plt.imshow(prediction_regions_org[:,:,0])\n ##plt.show()\n prediction_regions_org=prediction_regions_org[:,:,0]\n prediction_regions_org[(prediction_regions_org[:,:]==1) & (mask_zeros_y[:,:]==1)]=0\n \n session_region.close()\n del model_region\n del session_region\n gc.collect()\n\n model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p2)\n img = resize_image(img_org, int(img_org.shape[0]), int(img_org.shape[1]))\n prediction_regions_org2 = self.do_prediction(True, img, model_region, 0.2)\n 
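            # NOTE (added explanatory comment, not in the original source): this second
            # region model (model_region_dir_p2) is run with a 20% patch margin; its
            # background and separator-line channels are combined with the first
            # prediction below, and rate_two_models = text_sume_second / text_sume_early
            # * 100 (the share of first-pass text pixels the second model also keeps) is
            # then used to decide whether to keep that combination or to fall back to a
            # re-binarized page.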
prediction_regions_org2=resize_image(prediction_regions_org2, img_height_h, img_width_h )\n\n\n session_region.close()\n del model_region\n del session_region\n gc.collect()\n\n mask_zeros2 = (prediction_regions_org2[:,:,0] == 0)\n mask_lines2 = (prediction_regions_org2[:,:,0] == 3)\n text_sume_early = (prediction_regions_org[:,:] == 1).sum()\n prediction_regions_org_copy = np.copy(prediction_regions_org)\n prediction_regions_org_copy[(prediction_regions_org_copy[:,:]==1) & (mask_zeros2[:,:]==1)] = 0\n text_sume_second = ((prediction_regions_org_copy[:,:]==1)*1).sum()\n\n rate_two_models = text_sume_second / float(text_sume_early) * 100\n\n self.logger.info(\"ratio_of_two_models: %s\", rate_two_models)\n if not(is_image_enhanced and rate_two_models < RATIO_OF_TWO_MODEL_THRESHOLD):\n prediction_regions_org = np.copy(prediction_regions_org_copy)\n \n \n\n prediction_regions_org[(mask_lines2[:,:]==1) & (prediction_regions_org[:,:]==0)]=3\n mask_lines_only=(prediction_regions_org[:,:]==3)*1\n prediction_regions_org = cv2.erode(prediction_regions_org[:,:], KERNEL, iterations=2)\n\n #plt.imshow(text_region2_1st_channel)\n #plt.show()\n\n prediction_regions_org = cv2.dilate(prediction_regions_org[:,:], KERNEL, iterations=2)\n \n \n if rate_two_models<=40:\n if self.input_binary:\n prediction_bin = np.copy(img_org)\n else:\n model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization)\n prediction_bin = self.do_prediction(True, img_org, model_bin)\n prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h )\n \n prediction_bin=prediction_bin[:,:,0]\n prediction_bin = (prediction_bin[:,:]==0)*1\n prediction_bin = prediction_bin*255\n \n prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2)\n\n session_bin.close()\n del model_bin\n del session_bin\n gc.collect()\n \n \n \n model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens)\n ratio_y=1\n ratio_x=1\n\n\n img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x))\n\n prediction_regions_org = self.do_prediction(True, img, model_region)\n prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h )\n prediction_regions_org=prediction_regions_org[:,:,0]\n \n mask_lines_only=(prediction_regions_org[:,:]==3)*1\n session_region.close()\n del model_region\n del session_region\n gc.collect()\n \n \n mask_texts_only=(prediction_regions_org[:,:]==1)*1\n mask_images_only=(prediction_regions_org[:,:]==2)*1\n \n \n \n polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only)\n polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001)\n\n polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only, 1, 0.00001)\n polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only, 1, 0.00001)\n\n text_regions_p_true = np.zeros(prediction_regions_org.shape)\n text_regions_p_true = cv2.fillPoly(text_regions_p_true,pts = polygons_of_only_lines, color=(3, 3, 3))\n text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2\n\n text_regions_p_true=cv2.fillPoly(text_regions_p_true,pts=polygons_of_only_texts, color=(1,1,1))\n\n \n\n K.clear_session()\n return text_regions_p_true, erosion_hurts, polygons_lines_xml\n except:\n \n if self.input_binary:\n prediction_bin = np.copy(img_org)\n else:\n session_region.close()\n del model_region\n del session_region\n 
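                # NOTE (added explanatory comment, not in the original source): this branch
                # is the fallback taken when the column/region analysis in the try block
                # above raises. The first-pass region model and session are torn down, the
                # page is re-binarized with the binarization model (unless the input was
                # already binary, in which case the original image is reused), the ensemble
                # region model is rerun on that binarized image, and erosion_hurts is set
                # to True so later stages skip the aggressive erosion of the region mask.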
gc.collect()\n \n model_bin, session_bin = self.start_new_session_and_model(self.model_dir_of_binarization)\n prediction_bin = self.do_prediction(True, img_org, model_bin)\n prediction_bin = resize_image(prediction_bin, img_height_h, img_width_h )\n prediction_bin=prediction_bin[:,:,0]\n \n prediction_bin = (prediction_bin[:,:]==0)*1\n \n prediction_bin = prediction_bin*255\n \n prediction_bin =np.repeat(prediction_bin[:, :, np.newaxis], 3, axis=2)\n\n \n \n session_bin.close()\n del model_bin\n del session_bin\n gc.collect()\n \n \n \n model_region, session_region = self.start_new_session_and_model(self.model_region_dir_p_ens)\n ratio_y=1\n ratio_x=1\n\n\n img = resize_image(prediction_bin, int(img_org.shape[0]*ratio_y), int(img_org.shape[1]*ratio_x))\n\n prediction_regions_org = self.do_prediction(True, img, model_region)\n prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h )\n prediction_regions_org=prediction_regions_org[:,:,0]\n \n #mask_lines_only=(prediction_regions_org[:,:]==3)*1\n session_region.close()\n del model_region\n del session_region\n gc.collect()\n \n #img = resize_image(img_org, int(img_org.shape[0]*1), int(img_org.shape[1]*1))\n \n #prediction_regions_org = self.do_prediction(True, img, model_region)\n \n #prediction_regions_org = resize_image(prediction_regions_org, img_height_h, img_width_h )\n \n #prediction_regions_org = prediction_regions_org[:,:,0]\n \n #prediction_regions_org[(prediction_regions_org[:,:] == 1) & (mask_zeros_y[:,:] == 1)]=0\n #session_region.close()\n #del model_region\n #del session_region\n #gc.collect()\n \n \n \n \n mask_lines_only = (prediction_regions_org[:,:] ==3)*1\n \n mask_texts_only = (prediction_regions_org[:,:] ==1)*1\n \n mask_images_only=(prediction_regions_org[:,:] ==2)*1\n \n polygons_lines_xml, hir_lines_xml = return_contours_of_image(mask_lines_only)\n polygons_lines_xml = textline_con_fil = filter_contours_area_of_image(mask_lines_only, polygons_lines_xml, hir_lines_xml, max_area=1, min_area=0.00001)\n \n \n polygons_of_only_texts = return_contours_of_interested_region(mask_texts_only,1,0.00001)\n \n polygons_of_only_lines = return_contours_of_interested_region(mask_lines_only,1,0.00001)\n \n \n text_regions_p_true = np.zeros(prediction_regions_org.shape)\n \n text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_lines, color=(3,3,3))\n \n text_regions_p_true[:,:][mask_images_only[:,:] == 1] = 2\n \n text_regions_p_true = cv2.fillPoly(text_regions_p_true, pts = polygons_of_only_texts, color=(1,1,1))\n \n erosion_hurts = True\n K.clear_session()\n return text_regions_p_true, erosion_hurts, polygons_lines_xml\n\n def do_order_of_regions_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot):\n self.logger.debug(\"enter do_order_of_regions_full_layout\")\n cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours(contours_only_text_parent)\n cx_text_only_h, cy_text_only_h, x_min_text_only_h, _, _, _, y_cor_x_min_main_h = find_new_features_of_contours(contours_only_text_parent_h)\n\n try:\n arg_text_con = []\n for ii in range(len(cx_text_only)):\n for jj in range(len(boxes)):\n if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]:\n arg_text_con.append(jj)\n break\n args_contours = np.array(range(len(arg_text_con)))\n arg_text_con_h = []\n for ii in 
range(len(cx_text_only_h)):\n for jj in range(len(boxes)):\n if (x_min_text_only_h[ii] + 80) >= boxes[jj][0] and (x_min_text_only_h[ii] + 80) < boxes[jj][1] and y_cor_x_min_main_h[ii] >= boxes[jj][2] and y_cor_x_min_main_h[ii] < boxes[jj][3]:\n arg_text_con_h.append(jj)\n break\n args_contours_h = np.array(range(len(arg_text_con_h)))\n\n order_by_con_head = np.zeros(len(arg_text_con_h))\n order_by_con_main = np.zeros(len(arg_text_con))\n\n ref_point = 0\n order_of_texts_tot = []\n id_of_texts_tot = []\n for iij in range(len(boxes)):\n\n args_contours_box = args_contours[np.array(arg_text_con) == iij]\n args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij]\n con_inter_box = []\n con_inter_box_h = []\n\n for box in args_contours_box:\n con_inter_box.append(contours_only_text_parent[box])\n\n for box in args_contours_box_h:\n con_inter_box_h.append(contours_only_text_parent_h[box])\n\n indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2])\n\n order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point)\n\n indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1]\n indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1]\n indexes_sorted_head = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2]\n indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2]\n\n for zahler, _ in enumerate(args_contours_box):\n arg_order_v = indexes_sorted_main[zahler]\n order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point\n\n for zahler, _ in enumerate(args_contours_box_h):\n arg_order_v = indexes_sorted_head[zahler]\n order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point\n\n for jji in range(len(id_of_texts)):\n order_of_texts_tot.append(order_of_texts[jji] + ref_point)\n id_of_texts_tot.append(id_of_texts[jji])\n ref_point += len(id_of_texts)\n\n order_of_texts_tot = []\n for tj1 in range(len(contours_only_text_parent)):\n order_of_texts_tot.append(int(order_by_con_main[tj1]))\n\n for tj1 in range(len(contours_only_text_parent_h)):\n order_of_texts_tot.append(int(order_by_con_head[tj1]))\n\n order_text_new = []\n for iii in range(len(order_of_texts_tot)):\n order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0])\n\n except Exception as why:\n self.logger.error(why)\n arg_text_con = []\n for ii in range(len(cx_text_only)):\n for jj in range(len(boxes)):\n if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and cy_text_only[ii] >= boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located\n arg_text_con.append(jj)\n break\n args_contours = np.array(range(len(arg_text_con)))\n\n order_by_con_main = np.zeros(len(arg_text_con))\n\n ############################# head\n\n arg_text_con_h = []\n for ii in range(len(cx_text_only_h)):\n for jj in range(len(boxes)):\n if cx_text_only_h[ii] >= boxes[jj][0] and cx_text_only_h[ii] < boxes[jj][1] and cy_text_only_h[ii] >= boxes[jj][2] and cy_text_only_h[ii] < boxes[jj][3]: # this is valid if the center of region 
identify in which box it is located\n arg_text_con_h.append(jj)\n break\n args_contours_h = np.array(range(len(arg_text_con_h)))\n\n order_by_con_head = np.zeros(len(arg_text_con_h))\n\n ref_point = 0\n order_of_texts_tot = []\n id_of_texts_tot = []\n for iij, _ in enumerate(boxes):\n args_contours_box = args_contours[np.array(arg_text_con) == iij]\n args_contours_box_h = args_contours_h[np.array(arg_text_con_h) == iij]\n con_inter_box = []\n con_inter_box_h = []\n\n for box in args_contours_box:\n con_inter_box.append(contours_only_text_parent[box])\n\n for box in args_contours_box_h:\n con_inter_box_h.append(contours_only_text_parent_h[box])\n\n indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2])\n\n order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point)\n\n indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1]\n indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1]\n indexes_sorted_head = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 2]\n indexes_by_type_head = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 2]\n\n for zahler, _ in enumerate(args_contours_box):\n arg_order_v = indexes_sorted_main[zahler]\n order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point\n\n for zahler, _ in enumerate(args_contours_box_h):\n arg_order_v = indexes_sorted_head[zahler]\n order_by_con_head[args_contours_box_h[indexes_by_type_head[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point\n\n for jji, _ in enumerate(id_of_texts):\n order_of_texts_tot.append(order_of_texts[jji] + ref_point)\n id_of_texts_tot.append(id_of_texts[jji])\n ref_point += len(id_of_texts)\n\n order_of_texts_tot = []\n for tj1 in range(len(contours_only_text_parent)):\n order_of_texts_tot.append(int(order_by_con_main[tj1]))\n\n for tj1 in range(len(contours_only_text_parent_h)):\n order_of_texts_tot.append(int(order_by_con_head[tj1]))\n\n order_text_new = []\n for iii in range(len(order_of_texts_tot)):\n order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0])\n return order_text_new, id_of_texts_tot\n\n def do_order_of_regions_no_full_layout(self, contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot):\n self.logger.debug(\"enter do_order_of_regions_no_full_layout\")\n cx_text_only, cy_text_only, x_min_text_only, _, _, _, y_cor_x_min_main = find_new_features_of_contours(contours_only_text_parent)\n\n try:\n arg_text_con = []\n for ii in range(len(cx_text_only)):\n for jj in range(len(boxes)):\n if (x_min_text_only[ii] + 80) >= boxes[jj][0] and (x_min_text_only[ii] + 80) < boxes[jj][1] and y_cor_x_min_main[ii] >= boxes[jj][2] and y_cor_x_min_main[ii] < boxes[jj][3]:\n arg_text_con.append(jj)\n break\n args_contours = np.array(range(len(arg_text_con)))\n order_by_con_main = np.zeros(len(arg_text_con))\n\n ref_point = 0\n order_of_texts_tot = []\n id_of_texts_tot = []\n for iij in range(len(boxes)):\n args_contours_box = args_contours[np.array(arg_text_con) == iij]\n con_inter_box = []\n con_inter_box_h = []\n for i in range(len(args_contours_box)):\n 
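                    # NOTE (added explanatory comment, not in the original source): gather
                    # the text-region contours assigned to reading-order box iij
                    # (con_inter_box_h stays empty in this no-full-layout path), let
                    # order_of_regions sort them within the box, and offset the resulting
                    # order by ref_point so the numbering keeps running across boxes.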
con_inter_box.append(contours_only_text_parent[args_contours_box[i]])\n\n indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2])\n\n order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point)\n\n indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1]\n indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1]\n\n for zahler, _ in enumerate(args_contours_box):\n arg_order_v = indexes_sorted_main[zahler]\n order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point\n\n for jji, _ in enumerate(id_of_texts):\n order_of_texts_tot.append(order_of_texts[jji] + ref_point)\n id_of_texts_tot.append(id_of_texts[jji])\n ref_point += len(id_of_texts)\n\n order_of_texts_tot = []\n for tj1 in range(len(contours_only_text_parent)):\n order_of_texts_tot.append(int(order_by_con_main[tj1]))\n\n order_text_new = []\n for iii in range(len(order_of_texts_tot)):\n order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0])\n \n except Exception as why:\n self.logger.error(why)\n arg_text_con = []\n for ii in range(len(cx_text_only)):\n for jj in range(len(boxes)):\n if cx_text_only[ii] >= boxes[jj][0] and cx_text_only[ii] < boxes[jj][1] and cy_text_only[ii] >= boxes[jj][2] and cy_text_only[ii] < boxes[jj][3]: # this is valid if the center of region identify in which box it is located\n arg_text_con.append(jj)\n break\n args_contours = np.array(range(len(arg_text_con)))\n\n order_by_con_main = np.zeros(len(arg_text_con))\n\n ref_point = 0\n order_of_texts_tot = []\n id_of_texts_tot = []\n for iij in range(len(boxes)):\n args_contours_box = args_contours[np.array(arg_text_con) == iij]\n con_inter_box = []\n con_inter_box_h = []\n\n for i in range(len(args_contours_box)):\n con_inter_box.append(contours_only_text_parent[args_contours_box[i]])\n\n indexes_sorted, matrix_of_orders, kind_of_texts_sorted, index_by_kind_sorted = order_of_regions(textline_mask_tot[int(boxes[iij][2]) : int(boxes[iij][3]), int(boxes[iij][0]) : int(boxes[iij][1])], con_inter_box, con_inter_box_h, boxes[iij][2])\n\n order_of_texts, id_of_texts = order_and_id_of_texts(con_inter_box, con_inter_box_h, matrix_of_orders, indexes_sorted, index_by_kind_sorted, kind_of_texts_sorted, ref_point)\n\n indexes_sorted_main = np.array(indexes_sorted)[np.array(kind_of_texts_sorted) == 1]\n indexes_by_type_main = np.array(index_by_kind_sorted)[np.array(kind_of_texts_sorted) == 1]\n\n for zahler, _ in enumerate(args_contours_box):\n arg_order_v = indexes_sorted_main[zahler]\n order_by_con_main[args_contours_box[indexes_by_type_main[zahler]]] = np.where(indexes_sorted == arg_order_v)[0][0] + ref_point\n\n for jji, _ in enumerate(id_of_texts):\n order_of_texts_tot.append(order_of_texts[jji] + ref_point)\n id_of_texts_tot.append(id_of_texts[jji])\n ref_point += len(id_of_texts)\n\n order_of_texts_tot = []\n for tj1 in range(len(contours_only_text_parent)):\n order_of_texts_tot.append(int(order_by_con_main[tj1]))\n\n order_text_new = []\n for iii in range(len(order_of_texts_tot)):\n order_text_new.append(np.where(np.array(order_of_texts_tot) == iii)[0][0])\n \n return order_text_new, id_of_texts_tot\n\n def 
do_order_of_regions(self, *args, **kwargs):\n if self.full_layout:\n return self.do_order_of_regions_full_layout(*args, **kwargs)\n return self.do_order_of_regions_no_full_layout(*args, **kwargs)\n\n def run_graphics_and_columns(self, text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts):\n img_g = self.imread(grayscale=True, uint8=True)\n\n img_g3 = np.zeros((img_g.shape[0], img_g.shape[1], 3))\n img_g3 = img_g3.astype(np.uint8)\n img_g3[:, :, 0] = img_g[:, :]\n img_g3[:, :, 1] = img_g[:, :]\n img_g3[:, :, 2] = img_g[:, :]\n\n image_page, page_coord, cont_page = self.extract_page()\n if self.plotter:\n self.plotter.save_page_image(image_page)\n\n text_regions_p_1 = text_regions_p_1[page_coord[0] : page_coord[1], page_coord[2] : page_coord[3]]\n mask_images = (text_regions_p_1[:, :] == 2) * 1\n mask_images = mask_images.astype(np.uint8)\n mask_images = cv2.erode(mask_images[:, :], KERNEL, iterations=10)\n mask_lines = (text_regions_p_1[:, :] == 3) * 1\n mask_lines = mask_lines.astype(np.uint8)\n img_only_regions_with_sep = ((text_regions_p_1[:, :] != 3) & (text_regions_p_1[:, :] != 0)) * 1\n img_only_regions_with_sep = img_only_regions_with_sep.astype(np.uint8)\n \n \n if erosion_hurts:\n img_only_regions = np.copy(img_only_regions_with_sep[:,:])\n else:\n img_only_regions = cv2.erode(img_only_regions_with_sep[:,:], KERNEL, iterations=6)\n \n \n try:\n num_col, _ = find_num_col(img_only_regions, multiplier=6.0)\n num_col = num_col + 1\n if not num_column_is_classified:\n num_col_classifier = num_col + 1\n except Exception as why:\n self.logger.error(why)\n num_col = None\n return num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page\n\n def run_enhancement(self):\n self.logger.info(\"resize and enhance image\")\n is_image_enhanced, img_org, img_res, num_col_classifier, num_column_is_classified, img_bin = self.resize_and_enhance_image_with_column_classifier()\n self.logger.info(\"Image is %senhanced\", '' if is_image_enhanced else 'not ')\n K.clear_session()\n scale = 1\n if is_image_enhanced:\n if self.allow_enhancement:\n img_res = img_res.astype(np.uint8)\n self.get_image_and_scales(img_org, img_res, scale)\n else:\n self.get_image_and_scales_after_enhancing(img_org, img_res)\n else:\n if self.allow_enhancement:\n self.get_image_and_scales(img_org, img_res, scale)\n else:\n self.get_image_and_scales(img_org, img_res, scale)\n if self.allow_scaling:\n img_org, img_res, is_image_enhanced = self.resize_image_with_column_classifier(is_image_enhanced, img_bin)\n self.get_image_and_scales_after_enhancing(img_org, img_res)\n return img_res, is_image_enhanced, num_col_classifier, num_column_is_classified\n\n def run_textline(self, image_page):\n scaler_h_textline = 1 # 1.2#1.2\n scaler_w_textline = 1 # 0.9#1\n textline_mask_tot_ea, _ = self.textline_contours(image_page, True, scaler_h_textline, scaler_w_textline)\n K.clear_session()\n if self.plotter:\n self.plotter.save_plot_of_textlines(textline_mask_tot_ea, image_page)\n return textline_mask_tot_ea\n\n def run_deskew(self, textline_mask_tot_ea):\n sigma = 2\n main_page_deskew = True\n slope_deskew = return_deskew_slop(cv2.erode(textline_mask_tot_ea, KERNEL, iterations=2), sigma, main_page_deskew, plotter=self.plotter)\n slope_first = 0\n\n if self.plotter:\n self.plotter.save_deskewed_image(slope_deskew)\n self.logger.info(\"slope_deskew: %s\", slope_deskew)\n return slope_deskew, slope_first\n\n def run_marginals(self, image_page, 
textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1):\n image_page_rotated, textline_mask_tot = image_page[:, :], textline_mask_tot_ea[:, :]\n textline_mask_tot[mask_images[:, :] == 1] = 0\n\n text_regions_p_1[mask_lines[:, :] == 1] = 3\n text_regions_p = text_regions_p_1[:, :]\n text_regions_p = np.array(text_regions_p)\n\n if num_col_classifier in (1, 2):\n try:\n regions_without_separators = (text_regions_p[:, :] == 1) * 1\n regions_without_separators = regions_without_separators.astype(np.uint8)\n text_regions_p = get_marginals(rotate_image(regions_without_separators, slope_deskew), text_regions_p, num_col_classifier, slope_deskew, kernel=KERNEL)\n except Exception as e:\n self.logger.error(\"exception %s\", e)\n\n if self.plotter:\n self.plotter.save_plot_of_layout_main_all(text_regions_p, image_page)\n self.plotter.save_plot_of_layout_main(text_regions_p, image_page)\n return textline_mask_tot, text_regions_p, image_page_rotated\n\n def run_boxes_no_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, erosion_hurts):\n self.logger.debug('enter run_boxes_no_full_layout')\n if np.abs(slope_deskew) >= SLOPE_THRESHOLD:\n _, textline_mask_tot_d, text_regions_p_1_n = rotation_not_90_func(image_page, textline_mask_tot, text_regions_p, slope_deskew)\n text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1])\n textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1])\n regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1\n regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions)\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n text_regions_p_1_n = None\n textline_mask_tot_d = None\n regions_without_separators_d = None\n pixel_lines = 3\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n _, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)\n\n if np.abs(slope_deskew) >= SLOPE_THRESHOLD:\n _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)\n K.clear_session()\n\n self.logger.info(\"num_col_classifier: %s\", num_col_classifier)\n\n if num_col_classifier >= 3:\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n regions_without_separators = regions_without_separators.astype(np.uint8)\n regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6)\n else:\n regions_without_separators_d = regions_without_separators_d.astype(np.uint8)\n regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6)\n t1 = time.time()\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n boxes = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts)\n boxes_d = None\n self.logger.debug(\"len(boxes): %s\", len(boxes))\n else:\n boxes_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts)\n boxes = None\n self.logger.debug(\"len(boxes): %s\", len(boxes_d))\n\n self.logger.info(\"detecting boxes took 
%ss\", str(time.time() - t1))\n img_revised_tab = text_regions_p[:, :]\n polygons_of_images = return_contours_of_interested_region(img_revised_tab, 2)\n\n # plt.imshow(img_revised_tab)\n # plt.show()\n K.clear_session()\n self.logger.debug('exit run_boxes_no_full_layout')\n return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d\n\n def run_boxes_full_layout(self, image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions):\n self.logger.debug('enter run_boxes_full_layout')\n # set first model with second model\n text_regions_p[:, :][text_regions_p[:, :] == 2] = 5\n text_regions_p[:, :][text_regions_p[:, :] == 3] = 6\n text_regions_p[:, :][text_regions_p[:, :] == 4] = 8\n\n K.clear_session()\n image_page = image_page.astype(np.uint8)\n\n regions_fully, regions_fully_only_drop = self.extract_text_regions(image_page, True, cols=num_col_classifier)\n text_regions_p[:,:][regions_fully[:,:,0]==6]=6\n regions_fully_only_drop = put_drop_out_from_only_drop_model(regions_fully_only_drop, text_regions_p)\n regions_fully[:, :, 0][regions_fully_only_drop[:, :, 0] == 4] = 4\n K.clear_session()\n\n # plt.imshow(regions_fully[:,:,0])\n # plt.show()\n regions_fully = putt_bb_of_drop_capitals_of_model_in_patches_in_layout(regions_fully)\n # plt.imshow(regions_fully[:,:,0])\n # plt.show()\n K.clear_session()\n regions_fully_np, _ = self.extract_text_regions(image_page, False, cols=num_col_classifier)\n # plt.imshow(regions_fully_np[:,:,0])\n # plt.show()\n if num_col_classifier > 2:\n regions_fully_np[:, :, 0][regions_fully_np[:, :, 0] == 4] = 0\n else:\n regions_fully_np = filter_small_drop_capitals_from_no_patch_layout(regions_fully_np, text_regions_p)\n\n # plt.imshow(regions_fully_np[:,:,0])\n # plt.show()\n K.clear_session()\n # plt.imshow(regions_fully[:,:,0])\n # plt.show()\n regions_fully = boosting_headers_by_longshot_region_segmentation(regions_fully, regions_fully_np, img_only_regions)\n # plt.imshow(regions_fully[:,:,0])\n # plt.show()\n text_regions_p[:, :][regions_fully[:, :, 0] == 4] = 4\n text_regions_p[:, :][regions_fully_np[:, :, 0] == 4] = 4\n #plt.imshow(text_regions_p)\n #plt.show()\n\n if np.abs(slope_deskew) >= SLOPE_THRESHOLD:\n _, textline_mask_tot_d, text_regions_p_1_n, regions_fully_n = rotation_not_90_func_full_layout(image_page, textline_mask_tot, text_regions_p, regions_fully, slope_deskew)\n\n text_regions_p_1_n = resize_image(text_regions_p_1_n, text_regions_p.shape[0], text_regions_p.shape[1])\n textline_mask_tot_d = resize_image(textline_mask_tot_d, text_regions_p.shape[0], text_regions_p.shape[1])\n regions_fully_n = resize_image(regions_fully_n, text_regions_p.shape[0], text_regions_p.shape[1])\n regions_without_separators_d = (text_regions_p_1_n[:, :] == 1) * 1\n else:\n text_regions_p_1_n = None\n textline_mask_tot_d = None\n regions_without_separators_d = None\n\n regions_without_separators = (text_regions_p[:, :] == 1) * 1 # ( (text_regions_p[:,:]==1) | (text_regions_p[:,:]==2) )*1 #self.return_regions_without_separators_new(text_regions_p[:,:,0],img_only_regions)\n\n K.clear_session()\n img_revised_tab = np.copy(text_regions_p[:, :])\n polygons_of_images = return_contours_of_interested_region(img_revised_tab, 5)\n self.logger.debug('exit run_boxes_full_layout')\n return polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators\n\n def run(self):\n \"\"\"\n Get image 
and scales, then extract the page of scanned image\n \"\"\"\n self.logger.debug(\"enter run\")\n\n t0 = time.time()\n img_res, is_image_enhanced, num_col_classifier, num_column_is_classified = self.run_enhancement()\n \n self.logger.info(\"Enhancing took %ss \", str(time.time() - t0))\n\n t1 = time.time()\n text_regions_p_1 ,erosion_hurts, polygons_lines_xml = self.get_regions_from_xy_2models(img_res, is_image_enhanced, num_col_classifier)\n self.logger.info(\"Textregion detection took %ss \", str(time.time() - t1))\n\n t1 = time.time()\n num_col, num_col_classifier, img_only_regions, page_coord, image_page, mask_images, mask_lines, text_regions_p_1, cont_page = \\\n self.run_graphics_and_columns(text_regions_p_1, num_col_classifier, num_column_is_classified, erosion_hurts)\n self.logger.info(\"Graphics detection took %ss \", str(time.time() - t1))\n self.logger.info('cont_page %s', cont_page)\n\n if not num_col:\n self.logger.info(\"No columns detected, outputting an empty PAGE-XML\")\n pcgts = self.writer.build_pagexml_no_full_layout([], page_coord, [], [], [], [], [], [], [], [], [], [], cont_page, [])\n self.logger.info(\"Job done in %ss\", str(time.time() - t1))\n return pcgts\n\n t1 = time.time()\n textline_mask_tot_ea = self.run_textline(image_page)\n self.logger.info(\"textline detection took %ss\", str(time.time() - t1))\n\n t1 = time.time()\n slope_deskew, slope_first = self.run_deskew(textline_mask_tot_ea)\n self.logger.info(\"deskewing took %ss\", str(time.time() - t1))\n t1 = time.time()\n\n textline_mask_tot, text_regions_p, image_page_rotated = self.run_marginals(image_page, textline_mask_tot_ea, mask_images, mask_lines, num_col_classifier, slope_deskew, text_regions_p_1)\n self.logger.info(\"detection of marginals took %ss\", str(time.time() - t1))\n t1 = time.time()\n\n if not self.full_layout:\n polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, boxes, boxes_d = self.run_boxes_no_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, erosion_hurts)\n\n pixel_img = 4\n min_area_mar = 0.00001\n polygons_of_marginals = return_contours_of_interested_region(text_regions_p, pixel_img, min_area_mar)\n \n if self.full_layout:\n polygons_of_images, img_revised_tab, text_regions_p_1_n, textline_mask_tot_d, regions_without_separators_d, regions_fully, regions_without_separators = self.run_boxes_full_layout(image_page, textline_mask_tot, text_regions_p, slope_deskew, num_col_classifier, img_only_regions)\n\n text_only = ((img_revised_tab[:, :] == 1)) * 1\n if np.abs(slope_deskew) >= SLOPE_THRESHOLD:\n text_only_d = ((text_regions_p_1_n[:, :] == 1)) * 1\n\n min_con_area = 0.000005\n if np.abs(slope_deskew) >= SLOPE_THRESHOLD:\n contours_only_text, hir_on_text = return_contours_of_image(text_only)\n contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text)\n \n if len(contours_only_text_parent) > 0:\n areas_cnt_text = np.array([cv2.contourArea(contours_only_text_parent[j]) for j in range(len(contours_only_text_parent))])\n areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1])\n self.logger.info('areas_cnt_text %s', areas_cnt_text)\n contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)]\n contours_only_text_parent = [contours_only_text_parent[jz] for jz in range(len(contours_only_text_parent)) if areas_cnt_text[jz] > min_con_area]\n areas_cnt_text_parent = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if 
areas_cnt_text[jz] > min_con_area]\n\n index_con_parents = np.argsort(areas_cnt_text_parent)\n contours_only_text_parent = list(np.array(contours_only_text_parent)[index_con_parents])\n areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents])\n\n cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest])\n cx_bigest, cy_biggest, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent)\n\n contours_only_text_d, hir_on_text_d = return_contours_of_image(text_only_d)\n contours_only_text_parent_d = return_parent_contours(contours_only_text_d, hir_on_text_d)\n\n areas_cnt_text_d = np.array([cv2.contourArea(contours_only_text_parent_d[j]) for j in range(len(contours_only_text_parent_d))])\n areas_cnt_text_d = areas_cnt_text_d / float(text_only_d.shape[0] * text_only_d.shape[1])\n \n if len(areas_cnt_text_d)>0:\n contours_biggest_d = contours_only_text_parent_d[np.argmax(areas_cnt_text_d)]\n index_con_parents_d=np.argsort(areas_cnt_text_d)\n contours_only_text_parent_d=list(np.array(contours_only_text_parent_d)[index_con_parents_d] )\n areas_cnt_text_d=list(np.array(areas_cnt_text_d)[index_con_parents_d] )\n\n cx_bigest_d_big, cy_biggest_d_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest_d])\n cx_bigest_d, cy_biggest_d, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent_d)\n try:\n if len(cx_bigest_d) >= 5:\n cx_bigest_d_last5 = cx_bigest_d[-5:]\n cy_biggest_d_last5 = cy_biggest_d[-5:]\n dists_d = [math.sqrt((cx_bigest_big[0] - cx_bigest_d_last5[j]) ** 2 + (cy_biggest_big[0] - cy_biggest_d_last5[j]) ** 2) for j in range(len(cy_biggest_d_last5))]\n ind_largest = len(cx_bigest_d) -5 + np.argmin(dists_d)\n else:\n cx_bigest_d_last5 = cx_bigest_d[-len(cx_bigest_d):]\n cy_biggest_d_last5 = cy_biggest_d[-len(cx_bigest_d):]\n dists_d = [math.sqrt((cx_bigest_big[0]-cx_bigest_d_last5[j])**2 + (cy_biggest_big[0]-cy_biggest_d_last5[j])**2) for j in range(len(cy_biggest_d_last5))]\n ind_largest = len(cx_bigest_d) - len(cx_bigest_d) + np.argmin(dists_d)\n \n cx_bigest_d_big[0] = cx_bigest_d[ind_largest]\n cy_biggest_d_big[0] = cy_biggest_d[ind_largest]\n except Exception as why:\n self.logger.error(why)\n\n (h, w) = text_only.shape[:2]\n center = (w // 2.0, h // 2.0)\n M = cv2.getRotationMatrix2D(center, slope_deskew, 1.0)\n M_22 = np.array(M)[:2, :2]\n p_big = np.dot(M_22, [cx_bigest_big, cy_biggest_big])\n x_diff = p_big[0] - cx_bigest_d_big\n y_diff = p_big[1] - cy_biggest_d_big\n\n contours_only_text_parent_d_ordered = []\n for i in range(len(contours_only_text_parent)):\n p = np.dot(M_22, [cx_bigest[i], cy_biggest[i]])\n p[0] = p[0] - x_diff[0]\n p[1] = p[1] - y_diff[0]\n dists = [math.sqrt((p[0] - cx_bigest_d[j]) ** 2 + (p[1] - cy_biggest_d[j]) ** 2) for j in range(len(cx_bigest_d))]\n contours_only_text_parent_d_ordered.append(contours_only_text_parent_d[np.argmin(dists)])\n # img2=np.zeros((text_only.shape[0],text_only.shape[1],3))\n # img2=cv2.fillPoly(img2,pts=[contours_only_text_parent_d[np.argmin(dists)]] ,color=(1,1,1))\n # plt.imshow(img2[:,:,0])\n # plt.show()\n else:\n contours_only_text_parent_d_ordered = []\n contours_only_text_parent_d = []\n contours_only_text_parent = []\n \n else:\n contours_only_text_parent_d_ordered = []\n contours_only_text_parent_d = []\n contours_only_text_parent = []\n \n else:\n contours_only_text, hir_on_text = return_contours_of_image(text_only)\n contours_only_text_parent = return_parent_contours(contours_only_text, hir_on_text)\n \n if 
len(contours_only_text_parent) > 0:\n areas_cnt_text = np.array([cv2.contourArea(contours_only_text_parent[j]) for j in range(len(contours_only_text_parent))])\n areas_cnt_text = areas_cnt_text / float(text_only.shape[0] * text_only.shape[1])\n\n contours_biggest = contours_only_text_parent[np.argmax(areas_cnt_text)]\n contours_only_text_parent = [contours_only_text_parent[jz] for jz in range(len(contours_only_text_parent)) if areas_cnt_text[jz] > min_con_area]\n areas_cnt_text_parent = [areas_cnt_text[jz] for jz in range(len(areas_cnt_text)) if areas_cnt_text[jz] > min_con_area]\n\n index_con_parents = np.argsort(areas_cnt_text_parent)\n contours_only_text_parent = list(np.array(contours_only_text_parent)[index_con_parents])\n areas_cnt_text_parent = list(np.array(areas_cnt_text_parent)[index_con_parents])\n\n cx_bigest_big, cy_biggest_big, _, _, _, _, _ = find_new_features_of_contours([contours_biggest])\n cx_bigest, cy_biggest, _, _, _, _, _ = find_new_features_of_contours(contours_only_text_parent)\n self.logger.debug('areas_cnt_text_parent %s', areas_cnt_text_parent)\n # self.logger.debug('areas_cnt_text_parent_d %s', areas_cnt_text_parent_d)\n # self.logger.debug('len(contours_only_text_parent) %s', len(contours_only_text_parent_d))\n else:\n pass\n txt_con_org = get_textregion_contours_in_org_image(contours_only_text_parent, self.image, slope_first)\n boxes_text, _ = get_text_region_boxes_by_given_contours(contours_only_text_parent)\n boxes_marginals, _ = get_text_region_boxes_by_given_contours(polygons_of_marginals)\n \n if not self.curved_line:\n slopes, all_found_texline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con = self.get_slopes_and_deskew_new(txt_con_org, contours_only_text_parent, textline_mask_tot_ea, image_page_rotated, boxes_text, slope_deskew)\n slopes_marginals, all_found_texline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _ = self.get_slopes_and_deskew_new(polygons_of_marginals, polygons_of_marginals, textline_mask_tot_ea, image_page_rotated, boxes_marginals, slope_deskew)\n else:\n \n scale_param = 1\n all_found_texline_polygons, boxes_text, txt_con_org, contours_only_text_parent, all_box_coord, index_by_text_par_con, slopes = self.get_slopes_and_deskew_new_curved(txt_con_org, contours_only_text_parent, cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, boxes_text, text_only, num_col_classifier, scale_param, slope_deskew)\n all_found_texline_polygons = small_textlines_to_parent_adherence2(all_found_texline_polygons, textline_mask_tot_ea, num_col_classifier)\n all_found_texline_polygons_marginals, boxes_marginals, _, polygons_of_marginals, all_box_coord_marginals, _, slopes_marginals = self.get_slopes_and_deskew_new_curved(polygons_of_marginals, polygons_of_marginals, cv2.erode(textline_mask_tot_ea, kernel=KERNEL, iterations=1), image_page_rotated, boxes_marginals, text_only, num_col_classifier, scale_param, slope_deskew)\n all_found_texline_polygons_marginals = small_textlines_to_parent_adherence2(all_found_texline_polygons_marginals, textline_mask_tot_ea, num_col_classifier)\n K.clear_session()\n if self.full_layout:\n if np.abs(slope_deskew) >= SLOPE_THRESHOLD:\n contours_only_text_parent_d_ordered = list(np.array(contours_only_text_parent_d_ordered)[index_by_text_par_con])\n text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_texline_polygons, all_found_texline_polygons_h, 
slopes, _, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_texline_polygons, slopes, contours_only_text_parent_d_ordered)\n else:\n contours_only_text_parent_d_ordered = None\n text_regions_p, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_texline_polygons, all_found_texline_polygons_h, slopes, _, contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered = check_any_text_region_in_model_one_is_main_or_header(text_regions_p, regions_fully, contours_only_text_parent, all_box_coord, all_found_texline_polygons, slopes, contours_only_text_parent_d_ordered)\n\n if self.plotter:\n self.plotter.save_plot_of_layout(text_regions_p, image_page)\n self.plotter.save_plot_of_layout_all(text_regions_p, image_page)\n\n K.clear_session()\n\n polygons_of_tabels = []\n pixel_img = 4\n polygons_of_drop_capitals = return_contours_of_interested_region_by_min_size(text_regions_p, pixel_img)\n all_found_texline_polygons = adhere_drop_capital_region_into_corresponding_textline(text_regions_p, polygons_of_drop_capitals, contours_only_text_parent, contours_only_text_parent_h, all_box_coord, all_box_coord_h, all_found_texline_polygons, all_found_texline_polygons_h, kernel=KERNEL, curved_line=self.curved_line)\n\n # print(len(contours_only_text_parent_h),len(contours_only_text_parent_h_d_ordered),'contours_only_text_parent_h')\n pixel_lines = 6\n\n if not self.headers_off:\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines, contours_only_text_parent_h)\n else:\n _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines, contours_only_text_parent_h_d_ordered)\n elif self.headers_off:\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n num_col, _, matrix_of_lines_ch, splitter_y_new, _ = find_number_of_columns_in_document(np.repeat(text_regions_p[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)\n else:\n _, _, matrix_of_lines_ch_d, splitter_y_new_d, _ = find_number_of_columns_in_document(np.repeat(text_regions_p_1_n[:, :, np.newaxis], 3, axis=2), num_col_classifier, pixel_lines)\n\n # print(peaks_neg_fin,peaks_neg_fin_d,'num_col2')\n # print(splitter_y_new,splitter_y_new_d,'num_col_classifier')\n # print(matrix_of_lines_ch.shape,matrix_of_lines_ch_d.shape,'matrix_of_lines_ch')\n\n if num_col_classifier >= 3:\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n regions_without_separators = regions_without_separators.astype(np.uint8)\n regions_without_separators = cv2.erode(regions_without_separators[:, :], KERNEL, iterations=6)\n random_pixels_for_image = np.random.randn(regions_without_separators.shape[0], regions_without_separators.shape[1])\n random_pixels_for_image[random_pixels_for_image < -0.5] = 0\n random_pixels_for_image[random_pixels_for_image != 0] = 1\n regions_without_separators[(random_pixels_for_image[:, :] == 1) & (text_regions_p[:, :] == 5)] = 1\n else:\n regions_without_separators_d = regions_without_separators_d.astype(np.uint8)\n regions_without_separators_d = cv2.erode(regions_without_separators_d[:, :], KERNEL, iterations=6)\n random_pixels_for_image = 
np.random.randn(regions_without_separators_d.shape[0], regions_without_separators_d.shape[1])\n random_pixels_for_image[random_pixels_for_image < -0.5] = 0\n random_pixels_for_image[random_pixels_for_image != 0] = 1\n regions_without_separators_d[(random_pixels_for_image[:, :] == 1) & (text_regions_p_1_n[:, :] == 5)] = 1\n\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n boxes = return_boxes_of_images_by_order_of_reading_new(splitter_y_new, regions_without_separators, matrix_of_lines_ch, num_col_classifier, erosion_hurts)\n else:\n boxes_d = return_boxes_of_images_by_order_of_reading_new(splitter_y_new_d, regions_without_separators_d, matrix_of_lines_ch_d, num_col_classifier, erosion_hurts)\n\n if self.plotter:\n self.plotter.write_images_into_directory(polygons_of_images, image_page)\n\n if self.full_layout:\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot)\n else:\n order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h_d_ordered, boxes_d, textline_mask_tot_d)\n\n pcgts = self.writer.build_pagexml_full_layout(contours_only_text_parent, contours_only_text_parent_h, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_found_texline_polygons_h, all_box_coord, all_box_coord_h, polygons_of_images, polygons_of_tabels, polygons_of_drop_capitals, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml)\n self.logger.info(\"Job done in %ss\", str(time.time() - t0))\n return pcgts\n else:\n contours_only_text_parent_h = None\n if np.abs(slope_deskew) < SLOPE_THRESHOLD:\n order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent, contours_only_text_parent_h, boxes, textline_mask_tot)\n else:\n contours_only_text_parent_d_ordered = list(np.array(contours_only_text_parent_d_ordered)[index_by_text_par_con])\n order_text_new, id_of_texts_tot = self.do_order_of_regions(contours_only_text_parent_d_ordered, contours_only_text_parent_h, boxes_d, textline_mask_tot_d)\n pcgts = self.writer.build_pagexml_no_full_layout(txt_con_org, page_coord, order_text_new, id_of_texts_tot, all_found_texline_polygons, all_box_coord, polygons_of_images, polygons_of_marginals, all_found_texline_polygons_marginals, all_box_coord_marginals, slopes, slopes_marginals, cont_page, polygons_lines_xml)\n self.logger.info(\"Job done in %ss\", str(time.time() - t0))\n return pcgts\n" ]
[ [ "numpy.dot", "tensorflow.compat.v1.ConfigProto", "tensorflow.InteractiveSession", "tensorflow.compat.v1.GPUOptions", "numpy.abs", "tensorflow.get_logger", "numpy.ones", "tensorflow.ConfigProto", "numpy.copy", "numpy.argmax", "numpy.random.randn", "numpy.argmin", "numpy.argsort", "numpy.repeat", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sanggggg/incubator-nemo
[ "1f75e778313c6da06401a9c12850a557891c373e" ]
[ "bin/metric-parser.py" ]
[ "#!/usr/bin/env python3\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport numpy as np\nimport os\nimport sys\n\n\ndef main():\n try:\n filepath = sys.argv[1]\n except IndexError:\n print(\"Please provide the file path for the metric log file.\")\n else:\n if not os.path.isfile(filepath):\n print(\"File path {} does not exist. Exiting...\".format(filepath))\n sys.exit()\n\n metricDictionary = dict()\n vertexToMetricDict = dict()\n with open(filepath, 'r') as fp:\n for line in fp:\n metricInJson = json.loads(line)\n metricKey = metricInJson[\"computationUnitId\"]\n metricDictionary[metricKey] = metricInJson[\"metricList\"]\n if metricKey.find('Task-vertex-') != -1: # Vertex metric\n vertexIdSuffix = metricKey.split('Task-vertex-')[1]\n if vertexIdSuffix.find('_') != -1: # physical level metric\n vertexId = 'vertex-' + vertexIdSuffix.split('_')[0]\n metricDictList = metricDictionary[metricKey]\n if isinstance(metricDictList, dict):\n metricDictList = [metricDictList]\n for metricDict in metricDictList:\n for key, value in metricDict.items():\n if (key != 'EndTime') & (key != 'StartTime'):\n vertexMetricDict = vertexToMetricDict.get(vertexId, dict())\n vertexMetricDictValueList = vertexMetricDict.get(key, [])\n vertexMetricDictValueList.append(value)\n vertexMetricDict[key] = vertexMetricDictValueList\n vertexToMetricDict[vertexId] = vertexMetricDict\n\n query_metric = True\n while (query_metric):\n user_input = input(\"1 - View metric for a computation unit, 2 - View metric for all IR vertices, 3 - exit: \")\n if user_input == \"1\":\n computationUnitId = input(\"Enter computation unit ID: \")\n for metric in metricDictionary[computationUnitId]:\n print(metric)\n elif user_input == \"2\":\n for vertexId, metricDict in sorted(vertexToMetricDict.items()):\n print(vertexId)\n metricKeys, valuesMin, valuesMedian, valuesMax, valuesMean, valuesSum = ['Metric'], ['Min'], ['Median'], [\n 'Max'], ['Mean'], ['Total']\n for metricKey, metricValues in metricDict.items():\n metricKeys.append(metricKey)\n valuesMin.append(str(np.min(metricValues)))\n valuesMedian.append(str(np.median(metricValues)))\n valuesMax.append(str(np.max(metricValues)))\n valuesMean.append(str(np.mean(metricValues)))\n valuesSum.append(str(np.sum(metricValues)))\n padding = 1\n widthKey, widthMin, widthMedian, widthMax, widthMean, widthSum = map(lambda x: len(max(x, key=len)) + padding,\n [metricKeys, valuesMin, valuesMedian,\n valuesMax, valuesMean, valuesSum])\n templete = '{:<%s} {:<%s} {:<%s} {:<%s} {:<%s} {:<%s}' % (\n widthKey, widthMin, widthMedian, widthMax, widthMean, widthSum)\n for metricKey, valueMin, valueMedian, valueMax, valueMean, valueSum in zip(metricKeys, valuesMin,\n valuesMedian, valuesMax,\n valuesMean, valuesSum):\n 
print(templete.format(metricKey, valueMin, valueMedian, valueMax, valueMean, valueSum))\n else:\n print(\"Exiting metric parser\")\n query_metric = False\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.min", "numpy.median", "numpy.max", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liangsheng02/Locating-Language-Specific-Information-in-Contextualized-Embeddings
[ "1209a50eefde8545f8c26c063c3080bbcd97edc7", "1209a50eefde8545f8c26c063c3080bbcd97edc7" ]
[ "LI_figure_appendix.py", "LI_figure2.py" ]
[ "import random\r\nimport os\r\nimport numpy as np\r\nimport argparse\r\nimport pickle\r\nimport itertools\r\nimport torch\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom densray import *\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\n\r\ndef go(dim=10, cross=False):\r\n #first x dims\r\n model = LogisticRegression(random_state=0)\r\n model.fit(x_train[:,:dim], y_train)\r\n a = model.score(x_test[:,:dim], y_test)\r\n\r\n #[x:2x]\r\n model.fit(x_train[:,dim:2*dim], y_train)\r\n b = model.score(x_test[:,dim:2*dim], y_test)\r\n\r\n #randomly choose x dims from [x:]\r\n if cross:\r\n idx = random.sample(range(dim,768-dim), 5)\r\n score = 0\r\n for i in range(5):\r\n model.fit(x_train[:,idx[i]:idx[i]+dim], y_train)\r\n score += model.score(x_test[:,idx[i]:idx[i]+dim], y_test)\r\n c = score/5\r\n else:\r\n idx = random.sample(range(dim, 768 - dim), 1)\r\n score = 0\r\n model.fit(x_train[:, idx[0]:idx[0] + dim], y_train)\r\n score += model.score(x_test[:, idx[0]:idx[0] + dim], y_test)\r\n c = score\r\n return a,b,c\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--seed', default=0, type=int)\r\n args = parser.parse_args()\r\n\r\n all_langs = 'af, sq, ar, an, hy, ast, az, ba, eu, bar, be, bn, bpy, bs, br, bg, my, ca, ceb, ce, zh, zh_classical, ' \\\r\n 'cv, hr, cs, da, nl, en, et, fi, fr, gl, ka, de, el, gu, ht, he, hi, hu, is, io, id, ga, it, ja, jv, kn, ' \\\r\n 'kk, ky, ko, la, lv, lt, lmo, nds, lb, mk, mg, ms, ml, mr, min, ne, new, no, nn, oc, fa, pms, pl, pt, ' \\\r\n 'pa, ro, ru, sco, sr, sh, scn, sk, sl, azb, es, su, sw, sv, tl, tg, ta, tt, te, tr, uk, ur, uz, vi, vo, ' \\\r\n 'war, cy, fy, pnb, yo, th, mn'.split(', ')\r\n Austronesian = 'ceb,id,jv,mg,ms,min,su,tl,vi,war'.split(',')\r\n Italic = 'an,ca,fr,gl,ht,it,la,pms,scn,es'.split(',')\r\n Germanic = 'af,bar,nl,en,de,is,nds,lb,sco,fy'.split(',')\r\n cls_model = LogisticRegression(random_state=args.seed)\r\n\r\n random.seed(args.seed)\r\n # Q for all langs\r\n idx = random.sample(range(12, 768 - 2), 5)\r\n for family in [Austronesian,Italic,Germanic,random.sample(all_langs, 10)]:\r\n embs = [torch.load('/mounts/work/language_subspace/mwiki_emb_2/token/12/' + i + '.pt')[:10000] for i in family]\r\n dsr = DensRay(embs)\r\n dsr.fit()\r\n #print(\"Q\")\r\n\r\n # CLS pairwise\r\n dims = list(range(0, 15+1, 1))\r\n acc_a, acc_b, acc_c = np.empty((0, len(dims))), np.empty((0, len(dims))), np.empty((0, len(dims)))\r\n for pair in list(itertools.combinations(family, 2)):\r\n # X\r\n emb = torch.Tensor(()).cpu()\r\n for i in pair:\r\n e = torch.load('/mounts/work/language_subspace/mwiki_emb_2/token/12/' + i + '.pt')[-10000:]\r\n eid = random.sample(list(range(len(e))), 10000)\r\n emb = torch.cat((emb, e[eid]))\r\n emb = torch.mm(emb, dsr.eigvecs)\r\n emb = emb.cpu().detach().numpy()\r\n #print(\"X\")\r\n # Y\r\n y = []\r\n for i in range(2):\r\n y.extend([i] * 10000)\r\n y = np.array(y)\r\n # split\r\n x_train, x_test, y_train, y_test = train_test_split(emb, y, random_state=0, train_size=0.8)\r\n # train\r\n a, b, c = np.array([]), np.array([]), np.array([])\r\n #print(\"Y\")\r\n for dim in dims:\r\n cls_model.fit(x_train[:, dim:dim+2], y_train)\r\n aa = cls_model.score(x_test[:, dim:dim+2], y_test)\r\n a = np.concatenate((a, [aa]))\r\n # random baseline\r\n score = 0\r\n for i in range(5):\r\n cls_model.fit(x_train[:, 
idx[i]:(idx[i] + 2)], y_train)\r\n score += cls_model.score(x_test[:, idx[i]:(idx[i] + 2)], y_test)\r\n cc = score / 5\r\n # pairwise summary: diff\r\n acc_a = np.vstack((acc_a, a))\r\n #summary = [(acc.mean(axis=0),acc.std(axis=0)) for acc in [acc_a,acc_b,acc_c]]\r\n print([round(x-cc, 4) for x in acc_a.mean(axis=0)], sep=\"=\")\r\n\r\n", "import random\r\nimport os\r\nimport numpy as np\r\nimport argparse\r\nimport pickle\r\nimport itertools\r\nimport torch\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport joblib\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.model_selection import train_test_split\r\nfrom densray import *\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--seed', type=int, default=42)\r\n parser.add_argument('--mode', type=str, default='token')\r\n parser.add_argument('--lda', action=\"store_true\", default=False)\r\n parser.add_argument('--xlmr', action=\"store_true\", default=False)\r\n parser.add_argument('--svc', action=\"store_true\", default=False)\r\n args = parser.parse_args()\r\n\r\n all_langs = 'af,am,ar,as,az,be,bg,bn,bn_rom,br,bs,ca,cs,cy,da,de,el,en,eo,es,et,eu,fa,fi,fr,fy,ga,gd,gl,gu,ha,he,' \\\r\n 'hi,hi_rom,hr,hu,hy,id,is,it,ja,jv,ka,kk,km,kn,ko,ku,ky,la,lo,lt,lv,mg,mk,ml,mn,mr,ms,my,my_zaw,ne,' \\\r\n 'nl,no,om,or,pa,pl,ps,pt,ro,ru,sa,sd,si,sk,sl,so,sq,sr,su,sv,sw,ta,ta_rom,te,te_rom,th,tl,tr,ug,uk,' \\\r\n 'ur,ur_rom,uz,vi,xh,yi,zh,zh_classical'.split(',') if args.xlmr \\\r\n else 'af,sq,ar,an,hy,ast,az,ba,eu,bar,be,bn,bpy,bs,br,bg,my,ca,ceb,ce,zh,zh_classical,cv,hr,cs,da,nl,en,et,' \\\r\n 'fi,fr,gl,ka,de,el,gu,ht,he,hi,hu,is,io,id,ga,it,ja,jv,kn,kk,ky,ko,la,lv,lt,lmo,nds,lb,mk,mg,ms,ml,mr,' \\\r\n 'min,ne,new,no,nn,oc,fa,pms,pl,pt,pa,ro,ru,sco,sr,sh,scn,sk,sl,azb,es,su,sw,sv,tl,tg,ta,tt,te,tr,uk,ur,' \\\r\n 'uz,vi,vo,war,cy,fy,pnb,yo,th,mn'.split(',')\r\n random.seed(args.seed)\r\n n_langs = len(all_langs)\r\n n_samples = 5000\r\n ids = random.sample(range(0, 10000), n_samples)\r\n cls_model = LinearSVC(random_state=args.seed) if args.svc else LogisticRegression(random_state=args.seed)\r\n emb_path = 'cc-100_emb_2' if args.xlmr else 'mwiki_emb_2'\r\n\r\n for layer in range(1,13):\r\n if not args.lda:\r\n densray_path = '/mounts/work/language_subspace/' + emb_path + '/' + args.mode + '/' + str(layer) + '/Q_' + str(n_langs) + '.pt'\r\n if not os.path.exists(densray_path):\r\n embs = [torch.load('/mounts/work/language_subspace/'+emb_path+'/'+args.mode+'/'+str(layer)+'/'+all_langs[i]+'.pt')[ids,:] for i in range(len(all_langs))]\r\n dsr = DensRay(embs)\r\n dsr.fit()\r\n Q = dsr.eigvecs\r\n torch.save(Q, densray_path, _use_new_zipfile_serialization=False)\r\n torch.save(dsr.eigvals, '/mounts/work/language_subspace/'+emb_path+'/' + args.mode + '/'+str(layer)+'/Eigvals_'+str(n_langs)+'.pt',\r\n _use_new_zipfile_serialization=False)\r\n else:\r\n Q = torch.load(densray_path)\r\n else:\r\n lda_pth = '/mounts/work/language_subspace/' + emb_path + '/' + args.mode + '/' + str(layer) + '/lda_' + str(n_langs) + '.model'\r\n if not os.path.exists(lda_pth):\r\n embs = torch.tensor(())\r\n labels = []\r\n for i in range(len(all_langs)):\r\n embs = torch.cat((embs, torch.load('/mounts/work/language_subspace/'+emb_path+'/'+args.mode+'/'+str(layer)+'/'+all_langs[i]+'.pt')[ids, :]))\r\n labels.extend([i] * n_samples)\r\n lda = LinearDiscriminantAnalysis()\r\n 
lda.fit(embs.numpy(), labels)\r\n joblib.dump(lda, lda_pth)\r\n else:\r\n lda = joblib.load(lda_pth)\r\n\r\n # pairwise langid classification\r\n dims = []\r\n dims.extend(list(range(0, 200+1, 10))[1:])\r\n acc_a = np.empty((0, len(dims)))\r\n for pair in random.sample(list(itertools.combinations(all_langs, 2)), 10):\r\n # X\r\n emb = torch.Tensor(()).cpu()\r\n for i in pair:\r\n e = torch.load('/mounts/work/language_subspace/'+emb_path+'/' + args.mode + '/'+str(layer)+'/' + i + '.pt')[-10000:]\r\n eid = random.sample(list(range(len(e))), n_samples)\r\n emb = torch.cat((emb, e[eid]))\r\n if not args.lda:\r\n emb = torch.mm(emb, Q)\r\n emb = emb.cpu().detach().numpy()\r\n else:\r\n emb = emb.numpy()\r\n emb2 = lda.transform(emb)\r\n emb = np.hstack((emb2[:, :103], emb[:, :]))\r\n # Y\r\n y = []\r\n for i in range(2):\r\n y.extend([i] * n_samples)\r\n y = np.array(y)\r\n # split\r\n x_train, x_test, y_train, y_test = train_test_split(emb, y, random_state=args.seed, train_size=0.8)\r\n # train\r\n a = np.array([])\r\n # every 10 dims\r\n for dim in dims:\r\n cls_model.fit(x_train[:, (dim-10):dim], y_train)\r\n aa = cls_model.score(x_test[:, (dim-10):dim], y_test)\r\n a = np.concatenate((a, [aa]))\r\n # random baseline\r\n idx = random.sample(range(105, 768 - 10), 5)\r\n score = 0\r\n for i in range(5):\r\n cls_model.fit(x_train[:, idx[i]:(idx[i] + 10)], y_train)\r\n score += cls_model.score(x_test[:, idx[i]:(idx[i] + 10)], y_test)\r\n cc = score / 5\r\n # pairwise summary: diff\r\n acc_a = np.vstack((acc_a, a))\r\n print('layer_'+str(layer), [round(x-cc, 4) for x in acc_a.mean(axis=0)], sep=\"=\")\r\n\r\n" ]
[ [ "torch.mm", "sklearn.linear_model.LogisticRegression", "torch.Tensor", "torch.load", "torch.cat", "matplotlib.use", "sklearn.model_selection.train_test_split", "numpy.concatenate", "numpy.array", "numpy.vstack" ], [ "numpy.hstack", "torch.mm", "sklearn.linear_model.LogisticRegression", "torch.Tensor", "torch.load", "torch.cat", "sklearn.model_selection.train_test_split", "torch.tensor", "numpy.concatenate", "sklearn.svm.LinearSVC", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "numpy.array", "numpy.vstack", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mad-lab-fau/tpcp
[ "ef906afec4c58338dd286f37d62996fc83964404", "ef906afec4c58338dd286f37d62996fc83964404" ]
[ "examples/algorithms/_01_algorithms_qrs_detection.py", "tpcp/_utils/_general.py" ]
[ "r\"\"\"\n\n.. _custom_algorithms_qrs_detection:\n\nAlgorithms - A real world example: QRS-Detection\n================================================\n\nIn this example we will implement a custom algorithm and discuss, when you might want to use an algorithm class over\njust pipelines.\n\nSpecifically we will implement a simple algorithm, designed to identify individual QRS complexes from a continuous\nECG signal.\nIf you have no idea what this all means, don't worry about it.\nSimply we want to find peaks in a continuous signal that has some artifacts.\n\n.. warning:: The algorithm we design is **not** a good algorithms! There are way better and properly evaluated\n algorithms to do this job! Don't use this algorithms for anything :)\n\nWhen should you use custom algorithms?\n--------------------------------------\nAlgorithms are a completely optional feature of tpcp and in many cases not required.\nHowever, algorithm subclasses provide a structured way to implement new algorithms when you don't have any better\nstructure to follow.\nFurther they allow the setting of nested parameters (e.g. when used as parameters to pipelines) and can benefit from\nother tooling in tpcp (e.g. cloning).\nFor more general information have a look at the general documentation page :ref:`datasets_algorithms_pipelines`.\n\nImplementing QRS-Detection\n--------------------------\nIn general our QRS-Detection will have two steps:\n\n1. High-pass filter the data to remove baseline drift. We will use a Butterworth filter for that.\n2. Apply a peak finding strategy to find the (hopefully dominant) R-peaks.\n We will use :func:`~scipy.signal.find_peaks` with a couple of parameters for that.\n\nAs all algorithms, our algorithm needs to inherit from `tpcp.Algorithm` and implement an action method.\nIn our case we will call the action method `detect`, as it makes sense based on what the algorithm does.\nThis `detect` method will first do the filtering and then the peak search, which we will split into two methods to keep\nthings easier to understand.\n\nIf you just want the final implementation, without all the explanation, check\n:ref:`custom_algorithms_qrs_detection_final`.\n\nOk that is still a bunch of code... But let's focus on the aspects that are important in general:\n\n1. We inherit from `Algorithm`\n2. We get and define all parameters in the init without modification\n3. We define the name of out action method using `_action_method = \"detect\"`\n4. After we do the computations, we set the results on the instance\n5. We return self\n6. 
(Optionally) we applied the :func:`~tpcp.make_action_safe` decorator to our action method, which makes some runtimes\n checks to ensure our implementation follows the tpcp spec.\n\n\"\"\"\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import signal\n\nfrom tpcp import Algorithm, Parameter, make_action_safe\n\n\nclass QRSDetector(Algorithm):\n _action_methods = \"detect\"\n\n # Input Parameters\n high_pass_filter_cutoff_hz: Parameter[float]\n max_heart_rate_bpm: Parameter[float]\n min_r_peak_height_over_baseline: Parameter[float]\n\n # Results\n r_peak_positions_: pd.Series\n\n # Some internal constants\n _HIGH_PASS_FILTER_ORDER: int = 4\n\n def __init__(\n self,\n max_heart_rate_bpm: float = 200.0,\n min_r_peak_height_over_baseline: float = 1.0,\n high_pass_filter_cutoff_hz: float = 0.5,\n ):\n self.max_heart_rate_bpm = max_heart_rate_bpm\n self.min_r_peak_height_over_baseline = min_r_peak_height_over_baseline\n self.high_pass_filter_cutoff_hz = high_pass_filter_cutoff_hz\n\n @make_action_safe\n def detect(self, single_channel_ecg: pd.Series, sampling_rate_hz: float):\n ecg = single_channel_ecg.to_numpy().flatten()\n\n filtered_signal = self._filter(ecg, sampling_rate_hz)\n peak_positions = self._search_strategy(filtered_signal, sampling_rate_hz)\n\n self.r_peak_positions_ = pd.Series(peak_positions)\n return self\n\n def _search_strategy(\n self, filtered_signal: np.ndarray, sampling_rate_hz: float, use_height: bool = True\n ) -> np.ndarray:\n # Calculate the minimal distance based on the expected heart rate\n min_distance_between_peaks = 1 / (self.max_heart_rate_bpm / 60) * sampling_rate_hz\n\n height = None\n if use_height:\n height = self.min_r_peak_height_over_baseline\n peaks, _ = signal.find_peaks(filtered_signal, distance=min_distance_between_peaks, height=height)\n return peaks\n\n def _filter(self, ecg_signal: np.ndarray, sampling_rate_hz: float) -> np.ndarray:\n sos = signal.butter(\n btype=\"high\",\n N=self._HIGH_PASS_FILTER_ORDER,\n Wn=self.high_pass_filter_cutoff_hz,\n output=\"sos\",\n fs=sampling_rate_hz,\n )\n return signal.sosfiltfilt(sos, ecg_signal)\n\n\n# %%\n# Testing the implementation\n# --------------------------\n# To test the implementation, we load our example ECG data using the dataset created in a previous example.\n#\n# Based on the simple test we can see that our algorithm works (at least for this piece of data).\nfrom pathlib import Path\n\nfrom examples.datasets.datasets_final_ecg import ECGExampleData\n\n# Loading the data\ntry:\n HERE = Path(__file__).parent\nexcept NameError:\n HERE = Path(\".\").resolve()\ndata_path = HERE.parent.parent / \"example_data/ecg_mit_bih_arrhythmia/data\"\nexample_data = ECGExampleData(data_path)\necg_data = example_data[0].data[\"ecg\"]\n\n# Initialize the algorithm\nalgorithm = QRSDetector()\nalgorithm = algorithm.detect(ecg_data, example_data.sampling_rate_hz)\n\n# Visualize the results\nimport matplotlib.pyplot as plt\n\nplt.figure()\nplt.plot(ecg_data[:5000])\nsubset_peaks = algorithm.r_peak_positions_[algorithm.r_peak_positions_ < 5000.0]\nplt.plot(subset_peaks, ecg_data[subset_peaks], \"s\")\nplt.show()\n\n# %%\n# Making the algorithm trainable\n# ------------------------------\n# The implementation so far heavily depends on the value of the `min_r_peak_height_over_baseline` parameter.\n# If this is set incorrectly, everything will go wrong.\n# This parameter describes the minimal expected value of the filtered signal at the position of an R-peak.\n# Without looking at the filtered 
data, this value is hard to guess.\n# The value will depend on potential preprocessing applied to the data and the measurement conditions.\n# But we should be able to calculate a suitable value based on some training data (with R-peak annotations) recorded\n# under similar conditions.\n#\n# Therefore, we will create a second implementation of our algorithm that is *trainable*.\n# Meaning, we will implement a method (`self_optimize`) that is able to estimate a suitable value for our cutoff\n# based on some training data.\n# Note, that we do not provide a generic base class for optimizable algorithms.\n# If you need one, create your own class with a call signature for `self_optimize` that makes sense for the group of\n# algorithms you are trying to implement.\n#\n# From an implementation perspective, this means that we need to do the following things:\n#\n# 1. Implement a `self_optimize` method that takes the data of multiple recordings including the reference labels to\n# calculate a suitable threshold. This method should modify only parameters marked as `OptimizableParameter` and then\n# return `self`.\n# 2. We need to mark the parameters that we want to optimize as `OptimizableParameter` using the type annotations on\n# the class level.\n# 3. We introduce a new parameter called `r_peak_match_tolerance_s` that is used by our `self_optimize` method.\n# Changing it, changes the output of our optimization.\n# Therefore, it is a Hyper-Parameter of our method.\n# We mark it as such using the type-hints on class level.\n# 5. (Optional) Wrap the `self_optimize` method with the :func:`~tpcp.make_optimize_safe` decorator. It will perform\n# some runtime checks and inform us, if we did not implement `self_optimize` as expected.\n#\n# .. note:: The process required to implement an optimizable algorith will always be very similar to what we did\n# here.\n# It doesn't matter, if the optimization only optimizes a threshold or trains a neuronal network.\n# The structure will be very similar.\n#\n# From a scientific perspective, we optimize our parameter by trying to find all R-peaks without a height restriction\n# first.\n# Based on the detected R-peaks, we determine, which of them are actually correctly detected, by checking if they are\n# within the threshold `r_peak_match_tolerance_s` of a reference R-peak.\n# Then we find the best height threshold to maximise our predictive power within these preliminary detected peaks.\n#\n# Again, there are probably better ways to do it... 
But this is just an example, and we already have way too much code\n# that is not relevant for you to understand the basics of Algorithms.\nfrom sklearn.metrics import roc_curve\n\nfrom examples.algorithms.algorithms_qrs_detection_final import match_events_with_reference\nfrom tpcp import HyperParameter, OptimizableParameter, make_optimize_safe\n\n\nclass OptimizableQrsDetector(QRSDetector):\n min_r_peak_height_over_baseline: OptimizableParameter[float]\n r_peak_match_tolerance_s: HyperParameter[float]\n\n def __init__(\n self,\n max_heart_rate_bpm: float = 200.0,\n min_r_peak_height_over_baseline: float = 1.0,\n r_peak_match_tolerance_s: float = 0.01,\n high_pass_filter_cutoff_hz: float = 1,\n ):\n self.r_peak_match_tolerance_s = r_peak_match_tolerance_s\n super().__init__(\n max_heart_rate_bpm=max_heart_rate_bpm,\n min_r_peak_height_over_baseline=min_r_peak_height_over_baseline,\n high_pass_filter_cutoff_hz=high_pass_filter_cutoff_hz,\n )\n\n @make_optimize_safe\n def self_optimize(self, ecg_data: List[pd.Series], r_peaks: List[pd.Series], sampling_rate_hz: float):\n all_labels = []\n all_peak_heights = []\n for d, p in zip(ecg_data, r_peaks):\n filtered = self._filter(d.to_numpy().flatten(), sampling_rate_hz)\n # Find all potential peaks without the height threshold\n potential_peaks = self._search_strategy(filtered, sampling_rate_hz, use_height=False)\n # Determine the label for each peak, by matching them with our ground truth\n labels = np.zeros(potential_peaks.shape)\n matches, _ = match_events_with_reference(\n events=potential_peaks,\n reference=p.to_numpy().astype(int),\n tolerance=self.r_peak_match_tolerance_s * sampling_rate_hz,\n one_to_one=True,\n )\n labels[matches] = 1\n labels = labels.astype(bool)\n all_labels.append(labels)\n all_peak_heights.append(filtered[potential_peaks])\n all_labels = np.hstack(all_labels)\n all_peak_heights = np.hstack(all_peak_heights)\n # We \"brute-force\" a good cutoff by testing a bunch of thresholds and then calculating the Youden Index for\n # each.\n fpr, tpr, thresholds = roc_curve(all_labels, all_peak_heights)\n youden_index = tpr - fpr\n # The best Youden index gives us a balance between sensitivity and specificity.\n self.min_r_peak_height_over_baseline = thresholds[np.argmax(youden_index)]\n return self\n\n\n# %%\n# Testing the implementation\n# --------------------------\n# To test the trainable implementation, we need a train and a test set.\n# In this case we simply use the first two recordings as train set and a third recording as test set.\n#\n# Then we first call `self_optimize` with the train data.\n\ntrain_data = example_data[:2]\ntrain_ecg_data = [d.data[\"ecg\"] for d in train_data]\ntrain_r_peaks = [d.r_peak_positions_[\"r_peak_position\"] for d in train_data]\n\nalgorithm = OptimizableQrsDetector()\nalgorithm = algorithm.self_optimize(train_ecg_data, train_r_peaks, train_data.sampling_rate_hz)\n\n# %%\n# After the optimization, we can access the modified parameters.\nprint(\n \"The optimized value of the threshold `min_r_peak_height_over_baseline` is:\",\n algorithm.min_r_peak_height_over_baseline,\n)\n\n# %%\n# Then we can apply the algorithm to our test set.\n# And again, we can see that the algorithm works fine on the piece of data we are inspecting here.\ntest_data = example_data[3]\ntest_ecg_data = test_data.data[\"ecg\"]\n\nalgorithm = algorithm.detect(test_ecg_data, test_data.sampling_rate_hz)\n\n# Visualize the results\nplt.figure()\nplt.plot(test_ecg_data[:5000])\nsubset_peaks = 
algorithm.r_peak_positions_[algorithm.r_peak_positions_ < 5000.0]\nplt.plot(subset_peaks, test_ecg_data[subset_peaks], \"s\")\nplt.show()\n", "\"\"\"Some helper to work with the format the results of GridSearches and CVs.\"\"\"\nfrom __future__ import annotations\n\nimport copy\nimport numbers\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple\n\nimport numpy as np\n\nif TYPE_CHECKING:\n pass\n\n\ndef _aggregate_final_results(results: List) -> Dict:\n \"\"\"Aggregate the list of dict to dict of np ndarray/list.\n\n Modified based on sklearn.model_selection._validation._aggregate_score_dicts\n\n\n Parameters\n ----------\n results : list of dict\n List of dicts of the results for all scorers. This is a flat list,\n assumed originally to be of row major order.\n\n Example\n -------\n >>> results = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3}, {'a': 10, 'b': 10}]\n >>>\n >>> _aggregate_final_results(results) # doctest: +SKIP\n {'a': array([1, 2, 3, 10]),\n 'b': array([10, 2, 3, 10])}\n\n \"\"\"\n return {\n key: np.asarray([score[key] for score in results])\n if isinstance(results[0][key], numbers.Number)\n else [score[key] for score in results]\n for key in results[0]\n }\n\n\ndef _normalize_score_results(scores: List, prefix=\"\", single_score_key=\"score\"):\n \"\"\"Create a scoring dictionary based on the type of `scores`.\"\"\"\n if isinstance(scores[0], dict):\n # multimetric scoring\n return {prefix + k: v for k, v in _aggregate_final_results(scores).items()}\n # single\n return {prefix + single_score_key: scores}\n\n\ndef _prefix_para_dict(params_dict: Optional[Dict], prefix=\"pipeline__\") -> Optional[Dict]:\n \"\"\"Add a prefix to all parameter names in the dictionary.\n\n This can be helpful to adjust a parameter grid that was originally created for a pipeline to work on a wrapper like\n `Optimize` using the `__` naming convention for nested objects.\n \"\"\"\n if not params_dict:\n return None\n return {prefix + k: v for k, v in params_dict.items()}\n\n\ndef _get_nested_paras(param_dict: Optional[Dict], nested_object_name=\"pipeline\") -> Dict:\n \"\"\"Get the parameters belonging to a nested object and remove the suffix.\n\n If the parameter of a double nested object are required, use `level_1__level_1`.\n \"\"\"\n if not param_dict:\n return {}\n return {k.split(\"__\", 1)[1]: v for k, v in param_dict.items() if k.startswith(f\"{nested_object_name}__\")}\n\n\ndef _split_hyper_and_pure_parameters(\n param_dict: List[Dict], pure_parameters: Optional[List[str]]\n) -> List[Tuple[Optional[Dict], Optional[Dict]]]:\n \"\"\"Split a list of parameters in hyperparameters and pure parameters.\n\n For each dictionary in the list, this separates the pure parameters (names provided in input) from all\n hyperparameters (remaining parameters).\n If either the none of the pure parameters is present in a parameter dict or all parameters are pure parameters,\n the pure or the hyperparameters are `None`.\n\n Returns\n -------\n split_parameters\n List of tuples `(hyper, pure)` for each of the para dicts in the input list.\n\n \"\"\"\n if pure_parameters is None:\n return [(c, None) for c in param_dict]\n split_param_dict = []\n for c in param_dict:\n c = copy.copy(c) # Otherwise, we remove elements from the actual parameter list that is passed as input.\n tmp = {}\n for k in list(c.keys()):\n if k in pure_parameters:\n tmp[k] = c.pop(k)\n split_param_dict.append((c or None, tmp or None))\n return split_param_dict\n" ]
[ [ "numpy.hstack", "scipy.signal.find_peaks", "pandas.Series", "sklearn.metrics.roc_curve", "matplotlib.pyplot.plot", "scipy.signal.butter", "numpy.argmax", "scipy.signal.sosfiltfilt", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mdornseif/fastface
[ "72772db1fae4af17e829cd5479c4848fe5eb8948", "72772db1fae4af17e829cd5479c4848fe5eb8948" ]
[ "fastface/utils/cluster.py", "doc_samples/testing.py" ]
[ "import math\nimport random\n\nimport torch\n\n\nclass KMeans:\n \"\"\"Test\"\"\"\n\n def __init__(self, k: int, distance_fn=None, dim_size: int = 2, nstart: int = 2):\n # TODO use nstart\n assert distance_fn is not None, \"please provide a distance function\"\n\n self._params = torch.empty(k, dim_size, dtype=torch.float32)\n self._distance_fn = distance_fn\n self._best_distance_score = math.inf\n\n def fit(self, points: torch.Tensor):\n assert (\n len(points.shape) == 2\n ), \"shape length of the points \\\n must be 2 but found {}\".format(\n len(points.shape)\n )\n assert isinstance(\n points, torch.Tensor\n ), \"points must be torch.tensor but found {}\".format(type(points))\n sample_size = points.size(0)\n k = self._params.size(0)\n\n self._params = points[random.sample(range(sample_size), k=k), :]\n # self._params: torch.Tensor(k, dim_size)\n\n latest_cluster = torch.zeros(sample_size, dtype=torch.long)\n # latest_cluster: torch.Tensor(sample_size)\n\n while 1:\n # points: torch.Tensor(sample_size, dim_size)\n # self._params: torch.Tensor(k, dim_size)\n dists = self._distance_fn(points, self._params)\n # dists: torch.Tensor(sample_size, k)\n\n assigned_clusters = torch.argmin(dists, dim=1)\n # assigned_clusters: torch.Tensor(sample_size)\n\n if (latest_cluster == assigned_clusters).all():\n # break if converged\n break\n\n for i in range(k):\n self._params[i] = points[assigned_clusters == i, :].median(dim=0)[0]\n\n latest_cluster = assigned_clusters\n", "import pytorch_lightning as pl\nimport torch\n\nimport fastface as ff\n\n# checkout available pretrained models\nprint(ff.list_pretrained_models())\n# [\"lffd_slim\", \"lffd_original\"]\n\n# build pl.LightningModule using pretrained weights\nmodel = ff.FaceDetector.from_pretrained(\"lffd_slim\")\n\n# set model to eval mode\nmodel.eval()\n\n# build transforms\ntransforms = ff.transforms.Compose(\n ff.transforms.Interpolate(target_size=480),\n ff.transforms.Padding(target_size=(480, 480)),\n)\n\n# build torch.utils.data.Dataset\nds = ff.dataset.FDDBDataset(phase=\"test\", transforms=transforms)\n\n# build torch.utils.data.DataLoader\ndl = ds.get_dataloader(batch_size=1, num_workers=0)\n\n# add average precision pl.metrics.Metric to the model\nmodel.add_metric(\"average_precision\", ff.metric.AveragePrecision(iou_threshold=0.5))\n\n# define pl.Trainer for testing\ntrainer = pl.Trainer(\n benchmark=True,\n logger=False,\n checkpoint_callback=False,\n gpus=1 if torch.cuda.is_available() else 0,\n precision=32,\n)\n\n# run test\ntrainer.test(model, test_dataloaders=[dl])\n\"\"\"\nDATALOADER:0 TEST RESULTS\n{'average_precision': 0.9459084272384644}\n\"\"\"\n" ]
[ [ "torch.argmin", "torch.empty", "torch.zeros" ], [ "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arita37/pic-recon
[ "703f80eb6d191f68441ce71bc0f388556cb3e1bc", "703f80eb6d191f68441ce71bc0f388556cb3e1bc" ]
[ "pic_recon/src/recon_picgm_extended.py", "pic_recon/tfwavelets/nodes.py" ]
[ "\"\"\" Copyright (c) 2021, Varun A. Kelkar, Computational Imaging Science Lab @ UIUC.\n\nThis work is made available under the MIT License.\nContact: [email protected]\n\"\"\"\n\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--data_type\", type=str, default='brain', \n help=\"Data type on which the network is trained: knee/knee-ood/brain/brain-ood/cpx/cpx-ood\")\nparser.add_argument(\"--mask_type\", type=str, default='cartesian_4x', help=\"MRI subsampling rate\")\nparser.add_argument(\"--savedir\", type=str, default='../results/', help=\"Folder to save the results\")\nparser.add_argument(\"--network_path\", type=str, default='')\nparser.add_argument(\"--logfile\", type=str, default='', help=\"Path to logfile\")\nparser.add_argument(\"--step\", type=float, default=1.e-3, help=\"Step size\")\nparser.add_argument(\"--lamda\", type=float, default=0., help=\"Lamda value\")\nparser.add_argument(\"--lamda_w\", type=float, default=0., help=\"Lamda value for w\")\nparser.add_argument(\"--alpha\", type=float, default=0., help=\"Alpha value for PIC reg.\")\nparser.add_argument(\"--tv\", type=float, default=0, help=\"TV regularization parameter\")\nparser.add_argument(\"--niter\", type=int, default=20000, help=\"niter\")\nparser.add_argument(\"--sampling_seed\", type=int, default=0, help=\"Seed for sampling the ground truth for the inverse crime case\")\nparser.add_argument(\"--mode\", type=str, default='inverse_crime', help=\"Whether or not to do an inverse crime study.\")\nparser.add_argument(\"--ablation\", action='store_true')\nparser.add_argument(\"--input_shape\", type=int, nargs='+', default=[1,256,256,1], help=\"Shape of x\")\nparser.add_argument(\"--num_points\", default=8, type=int, help=\"Number of points during parameter sweep\")\nparser.add_argument(\"--snr\", type=float, default=20., help=\"SNR\")\nparser.add_argument(\"--cutoff_levels\", type=int, default=[0,14], nargs='+', help=\"Cutoff levels at which to do style mixing\")\nparser.add_argument(\"--optim_varnames\", type=str, nargs='+', default=['w'], help=\"Variables over which to optimize (can be w or z and/or zn)\")\n# parser.add_argument(\"--pic_method\", type=str, default='extended', help=\"Use PIC recon solver 1 or 2 or 3 or noinv\")\nparser.add_argument(\"--gt_filename\", type=str, default='', help=\"path to the ground truth x\")\nparser.add_argument(\"--pi_filename\", type=str, default='', help=\"path to the prior image x\")\nparser.add_argument(\"--extend\", action='store_true', help=\"Extend w space\")\nparser.add_argument(\"--smart_init\", action='store_true', help=\"Smart initialization based on the value of the loss function\")\nparser.add_argument(\"--fileroot\", type=str, default='', help=\"file root\")\nargs = parser.parse_args()\n\nlogfile = args.logfile\nclass Logger(object):\n def __init__(self):\n self.terminal = sys.stdout\n self.log = open(logfile, \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message) \n\n def flush(self):\n #this flush method is needed for python 3 compatibility.\n #this handles the flush command by doing nothing.\n #you might want to specify some extra behavior here.\n pass \n\nsys.stdout = Logger()\nprint(args, flush=True)\n\n# ---\n\nlogdir = args.network_path\nprint(logdir)\n\n# ---\n\nimport importlib\nsys.path.append('../../')\nif 'stylegan2-ada' in args.network_path:\n sys.path.append('../../stylegan2-ada')\n import model_oop as sgan\nelse:\n sys.path.append('../../stylegan2')\n import stylegan2.model_oop 
as sgan\nimport importlib\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nimport os\nfrom forward_models import *\nfrom inverse_problems import *\nimport utils\nimport scipy.linalg as la\nimport imageio as io\n\n# load the generative model\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = sess = tf.Session(config=config).__enter__()\ngen = sgan.StyleGAN(logdir, sess=sess)\n\nDIM = args.input_shape[1]\n\n# ground truth\nif args.mode == 'inverse_crime':\n gt_basename = os.path.splitext(os.path.basename(args.gt_filename))[0][1:]\n xnp = np.load(args.gt_filename)\n xgt_path = os.path.join(args.savedir, 'x'+gt_basename)\n np.save(xgt_path+'.npy', xnp); io.imsave(xgt_path+'.png', np.squeeze(xnp))\n\n latents_name = os.path.split(args.gt_filename)\n latents_name = os.path.join( latents_name[0], 'l' + latents_name[1][1:])\n latents = np.load(latents_name, allow_pickle=True).item()\n wnp = latents['w']; znnp = latents['zn'] \n latents_path = os.path.join(args.savedir, 'l'+gt_basename)\n np.save(latents_path+'.npy', latents)\n\nelif args.mode == 'simulation':\n gt_basename = os.path.splitext(os.path.basename(args.gt_filename))[0][1:]\n xnp = np.load(args.gt_filename)\n xgt_path = os.path.join(args.savedir, 'x'+gt_basename)\n np.save(xgt_path+'.npy', xnp); io.imsave(xgt_path+'.png', np.squeeze(xnp))\n\npi_basename = os.path.splitext(os.path.basename(args.pi_filename))[0][1:]\nx_prior = np.load(args.pi_filename)\nlatents_name = os.path.split(args.pi_filename)\nlatents_name = os.path.join( latents_name[0], 'l' + latents_name[1][1:])\nlatents = np.load(latents_name, allow_pickle=True).item()\nw_prior = latents['w']; zn_prior = latents['zn']\nx_prior_path = os.path.join(args.savedir, 'x'+pi_basename)\nx_prior_g = gen(w_prior, zn_prior, Numpy=True, use_latent='w', dim_out=DIM)\nnp.save(x_prior_path+'.npy', x_prior_g); io.imsave(x_prior_path+'.png', np.squeeze(x_prior_g))\nlatents_path = os.path.join(args.savedir, 'l'+pi_basename)\nnp.save(latents_path+'.npy', latents)\n\nrnd = np.random.RandomState(seed=1234)\n# z_init = np.load('zinit_csgm.npy')\n\n# Forward model\n# Gaussian\nif 'gaussian' in args.mask_type:\n samp = float(args.mask_type.split('_')[1])\n shapex = args.input_shape\n shapey = [1, int(np.prod(shapex)*samp)]\n fwd_op = GaussianSensor(shapey, shapex, assign_on_the_fly=(samp>=0.2))\n data_dtype = tf.float32\nelif 'mask' in args.mask_type:\n # # Variable disc Poisson sampler\n shapey = [1,256,256]\n center_fractions = [0.1]\n accelerations = [8] # center_fractions and accelerations dont matter for poisson disc sampling\n fwd_op = MRISubsampler(shapey, args.input_shape, center_fractions, accelerations, loadfile=f'../masks_mri/{args.mask_type}.npy', is_complex=(args.data_type=='experimental'))\n data_dtype = tf.complex64\n\n# solver\nsolver = PICGMSolver(gen, fwd_op,\n optim_varnames = args.optim_varnames,\n regularization_types = {'w': 'l2w'},\n dim_out = DIM,\n data_dtype = data_dtype)\n\n# Get measurement\ny_meas = fwd_op._np(xnp)\n# NOTE: CAUTION: I AM CURRENTLY USING THE ORTHO MODE IN FFT HENCE CAN GET AWAY WITH CALCULATING POWER OF xnp \npower_adjustment = 10.*np.log10(la.norm(xnp)**2/la.norm(y_meas)**2)\nSNR = args.snr - power_adjustment * ('mask' in args.mask_type)\nnoise_mode = 'complex' if 'mask' in args.mask_type else 'gaussian'\ny_meas = utils.add_noise(y_meas, SNR, mode=noise_mode, seed=42)\nnp.save(os.path.join(args.savedir, f'y_meas_{SNR}SNR'+ gt_basename[2:] +'.npy'), y_meas)\n\n# Initialization\nif args.mode == 'inverse_crime':\n zn_init = 
gen.latent_coeffs_to_array(zn_prior)\nelif args.mode == 'simulation':\n _,_,_,zn_init = gen.sample(temp=0)\nw_init = np.zeros(gen.shapew); # w_init[:,:] = gen.wavg\nw_init = w_prior.copy()\n# w_init[:,7:9] = np.load('brats_t2_latent.npy', allow_pickle=True).item()['w'][:,7:9]\n# w_init[:,3:5] = gen.wavg\n\nif args.smart_init:\n # if os.path.exists(os.path.join(args.savedir, 'w_best_init.npy')): pass # w_init = np.load(os.path.join(args.savedir, 'w_best_init.npy'))\n # else:\n _,wrand = gen.sample_w(batch_size=100, temp=1, seed=789)\n wrand = np.concatenate([wrand, w_prior], axis=0)\n ww = w_init.copy()\n losses = []\n for i,wr in enumerate(wrand):\n ww[:, args.cutoff_levels[0] : args.cutoff_levels[1] ] = wr[ args.cutoff_levels[0] : args.cutoff_levels[1] ]\n x_init = gen(ww, zn_init, use_latent='w', Numpy=True, dim_out=DIM)\n losses.append( la.norm(y_meas - fwd_op._np(x_init)) )\n print(i, losses[-1])\n losses = np.array(losses)\n best_init_idx = np.where(losses==losses.min())[0]\n print(f\"Picking {best_init_idx}\")\n w_init[:, args.cutoff_levels[0] : args.cutoff_levels[1] ] = wrand[best_init_idx, args.cutoff_levels[0] : args.cutoff_levels[1] ]\n np.save(os.path.join(args.savedir, 'w_best_init.npy'), w_init) \n\n\nzn_init = gen.latent_coeffs_to_array(zn_init)*0\n\nif args.ablation:\n lamda_ws = list(10**(4.*np.linspace(0.,1.,args.num_points) + np.log10(args.lamda_w))) \n basename = f'{args.mask_type}_picgm_level{args.cutoff_levels[0]}-{args.cutoff_levels[1]}_lam{args.lamda}_lamw{{}}_tv{args.tv}_step{args.step}_{args.niter}_{args.snr}SNR'\n basenames = [basename.format(l) for l in lamda_ws]\nelse:\n lamda_ws = [args.lamda_w]\n basenames = [args.fileroot]\n\nfor basename, lamda_w in zip(basenames, lamda_ws):\n xestname = os.path.join(args.savedir, 'xest_'+basename+'.npy')\n lestname = os.path.join(args.savedir, 'lest_'+basename+'.npy')\n imgname = os.path.join(args.savedir, 'xest_'+basename+'.png')\n print(xestname)\n xest,west,znst = solver.fit(y_meas,\n step = args.step,\n lamdas = {'w':lamda_w, 'zn': args.lamda},\n inits = {'w': w_init, 'zn': zn_init},\n n_iter = args.niter,\n scheduling = 'adam',\n w_prior_image = w_prior,\n cutoff_levels = args.cutoff_levels,\n step_schedule_params = {'beta1':0.9, 'beta2':0.999, 'epsil':1.e-8},\n extend = args.extend,\n check_recon_error = True,\n ground_truth = xnp)\n\n np.save(xestname, np.clip(xest, -1,1))\n np.save(lestname, {'w': west, 'zn': znst})\n io.imsave(imgname, np.squeeze(xest))", "\"\"\"\nThe 'nodes' module contains methods to construct TF subgraphs computing the 1D or 2D DWT\nor IDWT. 
Intended to be used if you need a DWT in your own TF graph.\n\"\"\"\n\nimport tensorflow as tf\n\n\ndef cyclic_conv1d(input_node, filter_):\n \"\"\"\n Cyclic convolution\n\n Args:\n input_node: Input signal (3-tensor [batch, width, in_channels])\n filter_: Filter\n\n Returns:\n Tensor with the result of a periodic convolution\n \"\"\"\n # Create shorthands for TF nodes\n kernel_node = filter_.coeffs\n tl_node, tr_node, bl_node, br_node = filter_.edge_matrices\n\n # Do inner convolution\n inner = tf.nn.conv1d(input_node, kernel_node[::-1], stride=1, padding='VALID')\n\n # Create shorthands for shapes\n input_shape = tf.shape(input_node)\n tl_shape = tf.shape(tl_node)\n tr_shape = tf.shape(tr_node)\n bl_shape = tf.shape(bl_node)\n br_shape = tf.shape(br_node)\n\n # Slices of the input signal corresponding to the corners\n tl_slice = tf.slice(input_node,\n [0, 0, 0],\n [-1, tl_shape[2], -1])\n tr_slice = tf.slice(input_node,\n [0, input_shape[1] - tr_shape[2], 0],\n [-1, tr_shape[2], -1])\n bl_slice = tf.slice(input_node,\n [0, 0, 0],\n [-1, bl_shape[2], -1])\n br_slice = tf.slice(input_node,\n [0, input_shape[1] - br_shape[2], 0],\n [-1, br_shape[2], -1])\n\n # TODO: It just werks (It's the magic of the algorithm). i.e. Why do we have to transpose?\n tl = tl_node @ tf.transpose(tl_slice, perm=[2, 1, 0])\n tr = tr_node @ tf.transpose(tr_slice, perm=[2, 1, 0])\n bl = bl_node @ tf.transpose(bl_slice, perm=[2, 1, 0])\n br = br_node @ tf.transpose(br_slice, perm=[2, 1, 0])\n\n head = tf.transpose(tl + tr, perm=[2, 1, 0])\n tail = tf.transpose(bl + br, perm=[2, 1, 0])\n\n return tf.concat((head, inner, tail), axis=1)\n\n\ndef cyclic_conv1d_alt(input_node, filter_):\n \"\"\"\n Alternative cyclic convolution. Uses more memory than cyclic_conv1d.\n\n Args:\n input_node: Input signal\n filter_ (Filter): Filter object\n\n Returns:\n Tensor with the result of a periodic convolution.\n \"\"\"\n c = int(input_node.shape[2])\n kernel_node = filter_.coeffs\n\n N = int(input_node.shape[1])\n\n start = N - filter_.num_neg()\n end = filter_.num_pos() - 1\n\n # Perodically extend input signal\n input_new = tf.concat(\n (input_node[:, start:, :], input_node, input_node[:, 0:end, :]),\n axis=1\n )\n\n # Convolve with periodic extension\n result = tf.nn.conv1d(input_new, kernel_node[::-1], stride=1, padding=\"VALID\")\n\n return result\n\n\ndef upsample(input_node, odd=False):\n \"\"\"Upsamples. Doubles the length of the input, filling with zeros\n\n Args:\n input_node: 3-tensor [batch, spatial dim, channels] to be upsampled\n odd: Bool, optional. If True, content of input_node will be\n placed on the odd indeces of the output. Otherwise, the\n content will be places on the even indeces. This is the\n default behaviour.\n\n Returns:\n The upsampled output Tensor.\n \"\"\"\n\n columns = []\n for col in tf.unstack(input_node, axis=1):\n columns.extend([col, tf.zeros_like(col)])\n\n if odd:\n # https://stackoverflow.com/questions/30097512/how-to-perform-a-pairwise-swap-of-a-list\n # TODO: Understand\n # Rounds down to even number\n l = len(columns) & -2\n columns[1:l:2], columns[:l:2] = columns[:l:2], columns[1:l:2]\n\n # TODO: Should we actually expand the dimension?\n return tf.expand_dims(tf.concat(columns, 1), -1)\n\n\ndef dwt1d(input_node, wavelet, levels=1):\n \"\"\"\n Constructs a TF computational graph computing the 1D DWT of an input signal.\n\n Args:\n input_node: A 3D tensor containing the signal. 
The dimensions should be\n [batch, signal, channels].\n wavelet: Wavelet object\n levels: Number of levels.\n\n Returns:\n The output node of the DWT graph.\n \"\"\"\n # TODO: Check that level is a reasonable number\n # TODO: Check types\n\n coeffs = [None] * (levels + 1)\n\n last_level = input_node\n\n for level in range(levels):\n lp_res = cyclic_conv1d_alt(last_level, wavelet.decomp_lp)[:, ::2, :]\n hp_res = cyclic_conv1d_alt(last_level, wavelet.decomp_hp)[:, 1::2, :]\n\n last_level = lp_res\n coeffs[levels - level] = hp_res\n\n coeffs[0] = last_level\n return tf.concat(coeffs, axis=1)\n\n\ndef dwt2d(input_node, wavelet, levels=1):\n \"\"\"\n Constructs a TF computational graph computing the 2D DWT of an input signal.\n\n Args:\n input_node: A 3D tensor containing the signal. The dimensions should be\n [rows, cols, channels].\n wavelet: Wavelet object.\n levels: Number of levels.\n\n Returns:\n The output node of the DWT graph.\n \"\"\"\n c = int(input_node.shape[2])\n results = []\n for i in range(c):\n results.append(\n dwt2d_singlechannel(input_node[:,:,i:i+1], wavelet, levels=levels)\n )\n\n return tf.concat(results, axis=-1) \n\n\ndef dwt2d_singlechannel(input_node, wavelet, levels=1):\n \"\"\"\n Constructs a TF computational graph computing the 2D DWT of an input signal.\n\n Args:\n input_node: A 3D tensor containing the signal. The dimensions should be\n [rows, cols, 1].\n wavelet: Wavelet object.\n levels: Number of levels.\n\n Returns:\n The output node of the DWT graph.\n \"\"\"\n # TODO: Check that level is a reasonable number\n # TODO: Check types\n\n coeffs = [None] * levels\n\n last_level = input_node\n m, n = int(input_node.shape[0]), int(input_node.shape[1])\n c = int(input_node.shape[2])\n\n for level in range(levels):\n local_m, local_n = m // (2 ** level), n // (2 ** level)\n\n first_pass = dwt1d(last_level, wavelet, 1)\n second_pass = tf.transpose(\n dwt1d(\n tf.transpose(first_pass, perm=[1, 0, 2]),\n wavelet,\n 1\n ),\n perm=[1, 0, 2]\n )\n\n last_level = tf.slice(second_pass, [0, 0, 0], [local_m // 2, local_n // 2, c])\n coeffs[level] = [\n tf.slice(second_pass, [local_m // 2, 0, 0], [local_m // 2, local_n // 2, c]),\n tf.slice(second_pass, [0, local_n // 2, 0], [local_m // 2, local_n // 2, c]),\n tf.slice(second_pass, [local_m // 2, local_n // 2, 0],\n [local_m // 2, local_n // 2, c])\n ]\n\n for level in range(levels - 1, -1, -1):\n upper_half = tf.concat([last_level, coeffs[level][0]], 0)\n lower_half = tf.concat([coeffs[level][1], coeffs[level][2]], 0)\n\n last_level = tf.concat([upper_half, lower_half], 1)\n\n return last_level\n\n\ndef idwt1d(input_node, wavelet, levels=1):\n \"\"\"\n Constructs a TF graph that computes the 1D inverse DWT for a given wavelet.\n\n Args:\n input_node (tf.placeholder): Input signal. 
A 3D tensor with dimensions\n as [batch, signal, channels]\n wavelet (tfwavelets.dwtcoeffs.Wavelet): Wavelet object.\n levels (int): Number of levels.\n\n Returns:\n Output node of IDWT graph.\n \"\"\"\n m, n = int(input_node.shape[0]), int(input_node.shape[1])\n\n first_n = n // (2 ** levels)\n last_level = tf.slice(input_node, [0, 0, 0], [m, first_n, 1])\n\n for level in range(levels - 1, -1 , -1):\n local_n = n // (2 ** level)\n\n detail = tf.slice(input_node, [0, local_n//2, 0], [m, local_n//2, 1])\n\n lowres_padded = upsample(last_level, odd=False)\n detail_padded = upsample(detail, odd=True)\n\n lowres_filtered = cyclic_conv1d_alt(lowres_padded, wavelet.recon_lp)\n detail_filtered = cyclic_conv1d_alt(detail_padded, wavelet.recon_hp)\n\n last_level = lowres_filtered + detail_filtered\n\n return last_level\n\ndef idwt2d(input_node, wavelet, levels=1):\n \"\"\"\n Constructs a TF graph that computes the 2D inverse DWT for a given wavelet.\n\n Args:\n input_node (tf.placeholder): Input signal. A 3D tensor with dimensions\n as [rows, cols, channels]\n wavelet (tfwavelets.dwtcoeffs.Wavelet): Wavelet object.\n levels (int): Number of levels.\n\n Returns:\n Output node of IDWT graph.\n \"\"\"\n c = int(input_node.shape[2])\n results = []\n for i in range(c):\n results.append(\n idwt2d_singlechannel(input_node[:,:,i:i+1], wavelet, levels=levels)\n )\n\n return tf.concat(results, axis=-1) \n\n\ndef idwt2d_singlechannel(input_node, wavelet, levels=1):\n \"\"\"\n Constructs a TF graph that computes the 2D inverse DWT for a given wavelet.\n\n Args:\n input_node (tf.placeholder): Input signal. A 3D tensor with dimensions\n as [rows, cols, 1]\n wavelet (tfwavelets.dwtcoeffs.Wavelet): Wavelet object.\n levels (int): Number of levels.\n\n Returns:\n Output node of IDWT graph.\n \"\"\"\n m, n = int(input_node.shape[0]), int(input_node.shape[1])\n first_m, first_n = m // (2 ** levels), n // (2 ** levels)\n\n last_level = tf.slice(input_node, [0, 0, 0], [first_m, first_n, 1])\n\n for level in range(levels - 1, -1, -1):\n local_m, local_n = m // (2 ** level), n // (2 ** level)\n\n # Extract detail spaces\n detail_tr = tf.slice(input_node, [local_m // 2, 0, 0],\n [local_n // 2, local_m // 2, 1])\n detail_bl = tf.slice(input_node, [0, local_n // 2, 0],\n [local_n // 2, local_m // 2, 1])\n detail_br = tf.slice(input_node, [local_n // 2, local_m // 2, 0],\n [local_n // 2, local_m // 2, 1])\n\n # Construct image of this DWT level\n upper_half = tf.concat([last_level, detail_tr], 0)\n lower_half = tf.concat([detail_bl, detail_br], 0)\n\n this_level = tf.concat([upper_half, lower_half], 1)\n\n # First pass, corresponding to second pass in dwt2d\n first_pass = tf.transpose(\n idwt1d(\n tf.transpose(this_level, perm=[1, 0, 2]),\n wavelet,\n 1\n ),\n perm=[1, 0, 2]\n )\n # Second pass, corresponding to first pass in dwt2d\n second_pass = idwt1d(first_pass, wavelet, 1)\n\n last_level = second_pass\n\n return last_level\n" ]
[ [ "numpy.linspace", "numpy.clip", "numpy.squeeze", "numpy.save", "tensorflow.ConfigProto", "numpy.concatenate", "numpy.log10", "tensorflow.Session", "numpy.prod", "scipy.linalg.norm", "numpy.load", "numpy.array", "numpy.random.RandomState", "numpy.zeros" ], [ "tensorflow.concat", "tensorflow.transpose", "tensorflow.unstack", "tensorflow.shape", "tensorflow.slice", "tensorflow.zeros_like", "tensorflow.nn.conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
heitor57/poi-rss
[ "12990af118f19595be01bf80e26a7ee93f9d05d8", "12990af118f19595be01bf80e26a7ee93f9d05d8", "12990af118f19595be01bf80e26a7ee93f9d05d8" ]
[ "algorithms/library/areamanager.py", "algorithms/library/methods/Pm2.py", "algorithms/library/methods/pgc/GeoDivPropensity.py" ]
[ "import math\nimport numpy as np\ndef delimiter_area(case):\n if case == \"lasvegas\":\n print('Area selected: Las Vegas')\n city = \"LasVegas\"\n area = {}\n area['city'] = city.lower()\n area['final_latitude'] = 36.389326\n area['initial_latitude'] = 36.123935\n area['initial_longitude'] = -115.427600\n area['final_longitude'] = -115.048827\n elif case == \"phoenix\": \n print('Area selected: Phoenix')\n city = \"Phoenix\"\n area = {}\n area['city'] = city.lower()\n area['final_latitude'] = 34.003012\n area['initial_latitude'] = 33.006796\n area['initial_longitude'] = -112.606674\n area['final_longitude'] = -111.381699\n #34.995653, -81.034521 35.400766, -80.651372\n elif case == \"charlotte\": \n print('Area selected: Charlotte')\n city = \"Charlotte\"\n area = {}\n area['city'] = city.lower()\n area['final_latitude'] = 35.400766\n area['initial_latitude'] = 34.995653\n area['initial_longitude'] = -81.034521\n area['final_longitude'] = -80.651372\n elif case == \"madison\":\n print('Area selected: Madison')\n city = \"Madison\"\n area = {}\n area['city'] = city.lower()\n area['final_latitude'] = 43.215156\n area['initial_latitude'] = 42.936791\n area['initial_longitude'] = -89.608990\n area['final_longitude'] = -89.179837\n elif case == \"montreal\":\n print('Area selected: Montreal')\n city = \"Montreal\"\n area = {}\n area['city'] = city.lower()\n area['initial_latitude'] = 45.349779\n area['initial_longitude'] = -74.024676\n area['final_latitude'] = 45.817165\n area['final_longitude'] = -73.339345\n elif case == \"pittsburgh\":\n print('Area selected: %s' % (case))\n city = case\n area = {}\n area['city'] = city.lower()\n area['initial_latitude'] = 40.359118\n area['initial_longitude'] = -80.102733\n area['final_latitude'] = 40.502851\n area['final_longitude'] = -79.854168\n\n return area\ndef poi_in_area(area,poi):\n if (poi['latitude']>=area['initial_latitude']) and\\\n (poi['latitude']<=area['final_latitude'])and\\\n (poi['longitude']>=area['initial_longitude']) and\\\n (poi['longitude']<=area['final_longitude']):\n return True\n return False\ndef pois_in_area(area,df_business):\n return df_business[(df_business['latitude']>=area['initial_latitude']) &\\\n (df_business['latitude']<=area['final_latitude'])&\\\n (df_business['longitude']>=area['initial_longitude']) &\\\n (df_business['longitude']<=area['final_longitude'])]\ndef poi_set_subarea(area,df_business_in_area,distance_km):\n '''\n area: is for example a city like phoenix, las vegas, new york, etc.\n df_business_in_area: is a dataframe of business filtered in a area\n distance_km: is the distance of subareas or area in km^2\n '''\n longitude_delta = abs(area['final_longitude']-area['initial_longitude'])\n latitude_delta = abs(area['final_latitude']-area['initial_latitude'])\n avg_latitude = (area['initial_latitude']+area['final_latitude'])/2.0\n LAT_DEGREE_KM_EQUATOR=111.0 # 110.57\n LONG_DEGREE_KM_EQUATOR=111.321 # 111.32\n # define step degree in latitude\n subarea_latitude_delta_degree = distance_km/LAT_DEGREE_KM_EQUATOR\n # define step degree in longitude\n subarea_longitude_delta_degree = distance_km/(LONG_DEGREE_KM_EQUATOR * math.cos(avg_latitude * math.pi/180))\n # number of subareas\n num_subareas = math.ceil(longitude_delta/subarea_longitude_delta_degree) * math.ceil(latitude_delta/subarea_latitude_delta_degree)\n \n df_business_in_area['subarea_id']=\\\n np.abs(np.ceil((df_business_in_area['longitude']-area['final_longitude'])/subarea_longitude_delta_degree))+\\\n 
(np.abs(np.ceil((df_business_in_area['latitude']-area['initial_latitude'])/subarea_latitude_delta_degree))-1)\\\n * (np.ceil(longitude_delta/subarea_longitude_delta_degree))\n# Code that explains the above logic \n#\n# for index,row in df_business_in_area.iterrows():\n# latitude_poi_in_subarea = (row['latitude']-area['initial_latitude'])/subarea_latitude_delta_degree\n# longitude_poi_in_subarea = (row['longitude']-area['final_longitude'])/subarea_longitude_delta_degree\n# line = abs(math.ceil(latitude_poi_in_subarea)) \n# column = abs(math.ceil(longitude_poi_in_subarea))\n# subarea_id = column + (line -1) * (math.ceil(longitude_delta/subarea_longitude_delta_degree))\n# row['subarea_id']=subarea_id\n return df_business_in_area\n", "import numpy as np\nimport scipy.special\nimport scipy.stats\nfrom collections import defaultdict\nfrom time import time\nfrom tqdm import tqdm\n\nclass Pm2:\n def __init__(self,training_matrix,poi_cats,main_quotient_weight):\n self.training_matrix=training_matrix\n self.num_users = self.training_matrix.shape[0]\n self.num_items = self.training_matrix.shape[1]\n self.poi_cats=poi_cats\n cats = set()\n for catss in list(poi_cats.values()):\n cats.update(catss)\n self.cats = list(cats)\n self.num_cats = len(cats)\n self.main_quotient_weight = main_quotient_weight\n\n\n self.cat_to_id = dict()\n for i, cat in enumerate(self.cats):\n self.cat_to_id[cat] = i\n \n self.visits_cats_matrix = np.zeros((self.num_users,self.num_cats))\n print(\"Creating categories visits matrix...\")\n for uid in range(self.num_users):\n lids = np.nonzero(self.training_matrix[uid])[0]\n for lid in lids:\n cats = poi_cats[lid]\n for cat in cats:\n cat_index = self.cat_to_id[cat]\n self.visits_cats_matrix[uid,cat_index] += 1\n print(\"Categories visits matrix created.\")\n\n\n # self.items_cats_matrix = np.zeros((self.num_items,self.num_cats))\n # print(\"Creating pois x categories (relevance in cat) matrix...\")\n # for lid in range(self.num_items):\n # for cat in poi_cats[lid]:\n # cat_index = self.cat_to_id[cat]\n # self.items_cats_matrix[lid,cat_index] += 1\n # for cid in range(self.num_cats):\n # self.items_cats_matrix[:,cid] = self.items_cats_matrix[:,cid]/np.count_nonzero(self.items_cats_matrix[:,cid])\n # print(\"pois x categories (relevance in cat) matrix created.\")\n \n \n\n def getUserCategoryProbability(self,uid):\n sum_value = np.sum(self.visits_cats_matrix[uid])\n if sum_value == 0:\n print(f\"User {uid} without categories visits\")\n if sum_value == 0:\n prob = np.zeros(self.num_cats)\n prob = prob+1/self.num_cats\n else:\n prob = self.visits_cats_matrix[uid]/sum_value\n\n return prob\n\n @staticmethod\n @np.vectorize\n def sainteLagueQuotient(v, s):\n return v/(2*s + 1)\n\n def objective_function(self, candidate_id, score, quotient, i_star):\n sub_sum = 0\n main_sum = 0\n for cat in self.poi_cats[candidate_id]:\n cat_id = self.cat_to_id[cat]\n if cat_id != i_star:\n sub_sum += score * quotient[cat_id]\n else:\n main_sum += score * quotient[i_star]\n #sub_sum = np.sum(quotient*score,where=list(range(self.num_cats))!=i_star)\n return self.main_quotient_weight*main_sum+(1-self.main_quotient_weight)*sub_sum\n \n def pm2(self,uid,tmp_rec_list,tmp_score_list,K):\n # from pudb import set_trace; set_trace()\n #sainteLagueQuotient = np.vectorize(self.sainteLagueQuotient)\n quotient = np.zeros(self.num_cats)\n # multiply probability with rec list size\n v = self.getUserCategoryProbability(uid)*K\n\n s = np.zeros(self.num_cats)\n rec_list=[]\n final_scores=[]\n\n for i in range(K):\n 
max_quotient = 0\n quotient = self.sainteLagueQuotient(v,s)\n # category with max value\n i_star = np.argmax(quotient)\n num_cur_candidates = len(tmp_rec_list)\n\n poi_to_insert = None\n max_objective_value = -200\n\n for j in range(num_cur_candidates):\n candidate_poi_id = tmp_rec_list[j]\n candidate_score = tmp_score_list[j]\n objective_value = self.objective_function(candidate_poi_id,candidate_score,\n quotient, i_star)\n if objective_value > max_objective_value:\n max_objective_value=objective_value\n poi_to_insert=candidate_poi_id\n old_score = candidate_score\n\n if poi_to_insert is not None:\n rm_idx=tmp_rec_list.index(poi_to_insert)\n tmp_rec_list.pop(rm_idx)\n tmp_score_list.pop(rm_idx)\n rec_list.append(poi_to_insert)\n final_scores.append(max_objective_value)\n poi_num_cats = len(self.poi_cats[poi_to_insert])\n if poi_num_cats != 0:\n if old_score != 0:\n for cat in self.poi_cats[poi_to_insert]:\n cat_id = self.cat_to_id[cat]\n s[cat_id] += old_score/(old_score*poi_num_cats)\n # else:\n # print('PM2 selected poi with old score = 0 ?!?!?')\n # else:\n # print('PM2 selected poi with no cats ?!?!?')\n return rec_list,final_scores\n", "import numpy as np\nfrom sklearn.cluster import DBSCAN\n# import os\n# import sys\n# module_path = os.path.abspath(os.path.join('..'))\n# if module_path not in sys.path:\n# sys.path.append(module_path)\n\nimport library.geo_utils as geo_utils\nimport collections\nimport scipy\nfrom concurrent.futures import ProcessPoolExecutor\n\nfrom parallel_util import run_parallel\n\nimport library.cat_utils as cat_utils\nimport library.geo_utils as geo_utils\nimport metrics\n# (0.6118019290456039, 3.8000000000000003, 5), DBSCAN SILHOUETTE\n\nclass GeoDivPropensity():\n CHKS = 50 # chunk size for parallel pool executor\n _instance = None\n METHODS = ['walk'# ,'num_poi','num_cat','visits','walk_raw'\n # ,'ildg'\n # ,'inverse_walk'\n ,# 'dbscan','inv_dbscan','inv_wcluster','wcluster',\n # 'perfect'\n ]\n GEO_DIV_PROPENSITY_METHODS_PRETTY_NAME = {\n 'walk': 'Mean radius of visited POIs',\n 'num_poi': 'Number of visited POIs',\n 'ildg': 'Geographical ILD',\n 'inverse_walk': 'Inverse of mean radius of visited POIs',\n 'dbscan': 'Using clusters of visited pois',\n 'inv_dbscan': 'Inverse of dbscan',\n 'inv_wcluster': 'Inverse weighted clustering',\n 'wcluster': 'Weighted clustering',\n }\n\n @classmethod\n def getInstance(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance=cls(*args,**kwargs)\n elif len(args) > 0 or len(kwargs) > 0:\n cls._instance.__init__(*args,**kwargs)\n return cls._instance\n\n def __init__(self,training_matrix,poi_coos,poi_cats,undirected_category_tree,geo_div_method):\n self.training_matrix=training_matrix\n self.poi_coos=poi_coos\n self.poi_cats=poi_cats\n self.undirected_category_tree = undirected_category_tree\n self.users_categories_visits = cat_utils.get_users_cat_visits(self.training_matrix,\n self.poi_cats)\n self.mean_walk=self.cmean_dist_pois()\n self.users_mean_walk=self.cmean_dist_users()\n\n # import scipy.stats\n # print('mean walk',self.mean_walk)\n # print(scipy.stats.describe(self.users_mean_walk))\n import matplotlib.pyplot as plt\n num_bins = 50\n heights, bins, _ = plt.hist(self.users_mean_walk,bins=num_bins,color='k')\n bin_width = np.diff(bins)[0]\n bin_pos = bins[:-1] + bin_width / 2\n mask = (bin_pos <= self.mean_walk)\n fig, ax = plt.subplots(1,1)\n ax.bar(bin_pos[mask], heights[mask], width=bin_width, color='red')\n ax.bar(bin_pos[~mask], heights[~mask], width=bin_width, color='black')\n 
ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_xticks([])\n ax.set_yticks([])\n fig.savefig('resultadotemp.png',bbox_inches='tight')\n fig.savefig('resultadotemp.eps',bbox_inches='tight')\n raise SystemExit\n \n self.geo_div_method = geo_div_method\n self.GEO_METHODS = {\n \"walk\": self.geo_div_walk,\n \"num_poi\": self.geo_div_num_poi,\n \"num_cat\": self.geo_div_num_cat,\n \"visits\": self.geo_div_visits,\n \"walk_raw\": self.geo_div_walk_raw,\n \"ildg\": self.geo_div_ildg,\n \"inverse_walk\": self.geo_div_inverse_walk,\n \"dbscan\": self.geo_div_dbscan,\n \"inv_dbscan\": self.geo_div_inv_dbscan,\n \"inv_wcluster\": self.geo_div_inv_wcluster,\n \"wcluster\": self.geo_div_wcluster,\n }\n\n\n self.max_user_visits = self.training_matrix.sum(axis=1).max()\n \n self.geo_div_propensity=None\n\n\n def cmean_dist_pois(self):\n lats=np.array([])\n longs=np.array([])\n for i,j in self.poi_coos.items():\n lats=np.append(lats,j[0])\n longs=np.append(longs,j[1])\n lat,lon = np.mean(lats),np.mean(longs)\n md=0\n for i in range(len(lats)):\n md+=geo_utils.dist((lat,lon),(lats[i],longs[i]))\n return md/len(lats)\n\n def cmean_dist_users(self):\n users_cmean=list()\n for i in range(self.training_matrix.shape[0]):\n lids=self.training_matrix[i].nonzero()[0]\n lats=np.array([])\n longs=np.array([])\n for lid in lids:\n lats=np.append(lats,self.poi_coos[lid][0])\n longs=np.append(longs,self.poi_coos[lid][1])\n lat,lon = np.mean(lats),np.mean(longs)\n md=0\n \n for i in range(len(lats)):\n md+=geo_utils.dist((lat,lon),(lats[i],longs[i]))\n users_cmean.append(md/len(lats))\n return users_cmean\n\n def cmedian_dist_pois(self):\n lats=np.array([])\n longs=np.array([])\n for i,j in self.poi_coos.items():\n lats=np.append(lats,j[0])\n longs=np.append(longs,j[1])\n lat,lon = np.median(lats),np.median(longs)\n md=0\n for i in range(len(lats)):\n md+=geo_utils.dist((lat,lon),(lats[i],longs[i]))\n return md/len(lats)\n\n def cmedian_dist_users(self):\n users_cmean=list()\n for i in range(self.training_matrix.shape[0]):\n lids=self.training_matrix[i].nonzero()[0]\n lats=np.array([])\n longs=np.array([])\n for lid in lids:\n lats=np.append(lats,self.poi_coos[lid][0])\n longs=np.append(longs,self.poi_coos[lid][1])\n lat,lon = np.median(lats),np.median(longs)\n md=0\n \n for i in range(len(lats)):\n md+=geo_utils.dist((lat,lon),(lats[i],longs[i]))\n users_cmean.append(md/len(lats))\n return users_cmean\n\n @classmethod\n def geo_div_walk(cls,uid):\n self = cls.getInstance()\n norm_prop=min((self.users_mean_walk[uid]/self.mean_walk),1)\n # self.geo_div_propensity=norm_prop\n return norm_prop\n\n @classmethod\n def geo_div_walk_raw(cls,uid):\n self = cls.getInstance()\n norm_prop=self.mean_walk.copy()\n # self.geo_div_propensity=norm_prop\n return norm_prop\n\n @classmethod\n def geo_div_num_cat(cls, uid):\n self = cls.getInstance()\n cats_visits = self.users_categories_visits[uid]\n return len(cats_visits)/(len(self.undirected_category_tree)-1)\n\n @classmethod\n def geo_div_num_poi(cls, uid):\n self = cls.getInstance()\n lids = self.training_matrix[uid].nonzero()[0]\n return len(lids)/self.training_matrix.shape[1]\n\n @classmethod\n def geo_div_ildg(cls, uid):\n self = cls.getInstance()\n lids = self.training_matrix[uid].nonzero()[0]\n ildg = metrics.ildgk(lids,self.poi_coos)\n return ildg\n\n @classmethod\n def geo_div_visits(cls, uid):\n self = 
cls.getInstance()\n lids = self.training_matrix[uid].nonzero()[0]\n visits = self.training_matrix[uid,lids].sum()\n return visits/self.max_user_visits\n\n @classmethod\n def geo_div_inverse_walk(cls, uid):\n self = cls.getInstance()\n norm_prop=min((self.users_mean_walk[uid]/self.mean_walk),1)\n # self.geo_div_propensity=norm_prop\n return 1-norm_prop\n\n @classmethod\n def geo_div_dbscan(cls, uid):\n self = cls.getInstance()\n km = 3.8\n min_samples = 5\n \n points = [self.poi_coos[lid]\n for lid in np.nonzero(self.training_matrix[uid])[0]\n # for _ in range(self.training_matrix[uid,lid])\n ]\n db = DBSCAN(eps=geo_utils.km_to_lat(km), min_samples=min_samples).fit(points)\n labels = db.labels_\n # Number of clusters in labels, ignoring noise if present.\n a = n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n b = n_noise_ = list(labels).count(-1)\n\n return ((b - a)/(max(a,b)) + 1)/2\n\n\n @classmethod\n def geo_div_inv_dbscan(cls, uid):\n self = cls.getInstance()\n km = 3.8\n min_samples = 5\n \n points = [self.poi_coos[lid]\n for lid in np.nonzero(self.training_matrix[uid])[0]\n # for _ in range(self.training_matrix[uid,lid])\n ]\n db = DBSCAN(eps=geo_utils.km_to_lat(km), min_samples=min_samples).fit(points)\n labels = db.labels_\n # Number of clusters in labels, ignoring noise if present.\n a = n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n b = n_noise_ = list(labels).count(-1)\n\n return 1-((b - a)/(max(a,b)) + 1)/2\n\n @classmethod\n def geo_div_inv_wcluster(cls, uid):\n self = cls.getInstance()\n km = 3.8\n min_samples = 5\n \n points = [self.poi_coos[lid]\n for lid in np.nonzero(self.training_matrix[uid])[0]\n # for _ in range(self.training_matrix[uid,lid])\n ]\n db = DBSCAN(eps=geo_utils.km_to_lat(km), min_samples=min_samples).fit(points)\n labels = db.labels_\n # Number of clusters in labels, ignoring noise if present.\n unique, counts = np.unique(labels, return_counts=True)\n u_c = dict(zip(unique, counts))\n wa = 0\n wb = 0\n for lab, amount in u_c.items():\n if lab != -1:\n wa += amount\n else:\n wb += amount\n a = n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n b = n_noise_ = list(labels).count(-1)\n a*= wa\n b*= wb\n\n return 1-((b - a)/(max(a,b)) + 1)/2\n\n\n @classmethod\n def geo_div_wcluster(cls, uid):\n self = cls.getInstance()\n km = 3.8\n min_samples = 5\n \n points = [self.poi_coos[lid]\n for lid in np.nonzero(self.training_matrix[uid])[0]\n # for _ in range(self.training_matrix[uid,lid])\n ]\n db = DBSCAN(eps=geo_utils.km_to_lat(km), min_samples=min_samples).fit(points)\n labels = db.labels_\n # Number of clusters in labels, ignoring noise if present.\n unique, counts = np.unique(labels, return_counts=True)\n u_c = dict(zip(unique, counts))\n wa = 0\n wb = 0\n for lab, amount in u_c.items():\n if lab != -1:\n wa += amount\n else:\n wb += amount\n a = n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n b = n_noise_ = list(labels).count(-1)\n a*= wa\n b*= wb\n\n return ((b - a)/(max(a,b)) + 1)/2\n\n def compute_div_propensity(self):\n func = self.GEO_METHODS.get(self.geo_div_method,\n lambda: \"Invalid method\")\n # self.geo_div_propensity = func()\n args=[(uid,) for uid in range(self.training_matrix.shape[0])]\n self.geo_div_propensity = run_parallel(func, args, self.CHKS)\n if self.geo_div_method == 'ildg':\n self.geo_div_propensity = self.geo_div_propensity/np.max(self.geo_div_propensity)\n self.geo_div_propensity = np.array(self.geo_div_propensity)\n\n # bins = np.append(np.arange(0,1,1/(3-1)),1)\n # centers = 
(bins[1:]+bins[:-1])/2\n # self.geo_div_propensity = bins[np.digitize(self.geo_div_propensity, centers)]\n\n # if self.geo_div_method == 'dbscan':\n # self.geo_div_propensity[self.geo_div_propensity>=0.5] = 1\n # self.geo_div_propensity[(self.geo_div_propensity<0.5) & (self.geo_div_propensity>=0.3)] = 0.5\n # self.geo_div_propensity[self.geo_div_propensity<0.3] = 0\n return self.geo_div_propensity\n" ]
[ [ "numpy.ceil" ], [ "numpy.argmax", "numpy.zeros", "numpy.sum", "numpy.nonzero" ], [ "numpy.nonzero", "numpy.unique", "numpy.median", "matplotlib.pyplot.subplots", "numpy.max", "numpy.append", "numpy.diff", "numpy.mean", "numpy.array", "matplotlib.pyplot.hist" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GOAL-Robots/real_robots
[ "6dd5b70bad14426483e2d3ee29b3d8708d34e1ba" ]
[ "tests/test_actions.py" ]
[ "import real_robots\nimport numpy as np\nimport gym\nimport matplotlib.pyplot as plt\nimport types\nimport pybullet\nimport sys\nimport os\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef generate_plan(point_1, point_2):\n\n n_timesteps = 100\n home = np.zeros(9)\n home2 = np.zeros(9)\n home2[5] = np.pi / 2\n home2[6] = np.pi / 2\n\n def goToPosXY(coords):\n desiredOrientation = pybullet.getQuaternionFromEuler([0, 3.14, -1.57])\n action = pybullet.calculateInverseKinematics(0, 7, coords,\n desiredOrientation,\n maxNumIterations=1000,\n residualThreshold=0.001)\n return action[:9]\n\n def interpolate3D(p1, p2, steps):\n p1 = np.array(p1)\n p2 = np.array(p2)\n dist = np.linalg.norm(p2 - p1)\n pieces = int(dist / 0.05) + 1\n pieces = min(pieces, steps)\n coords = np.linspace(p1, p2, pieces + 1)\n joints = np.zeros((steps, 9))\n chunk = int(steps/pieces)\n for i, coord in enumerate(coords[1:]):\n print(\"debug\", coord)\n joints[i*chunk:, :] = goToPosXY(coord)\n return joints\n\n point_1_h = goToPosXY(np.hstack([point_1, 0.6]))\n point_1_l = goToPosXY(np.hstack([point_1, 0.46]))\n point_2_h = goToPosXY(np.hstack([point_2, 0.6]))\n point_2_l = goToPosXY(np.hstack([point_2, 0.46]))\n\n actionsParts = []\n actionsParts += [np.tile(home2, (100, 1))]\n actionsParts += [np.tile(point_1_h, (100, 1))]\n actionsParts += [np.tile(point_1_l, (50, 1))]\n actionsParts += [interpolate3D(np.hstack([point_1, 0.46]), np.hstack([point_2, 0.46]), 500)]\n actionsParts += [np.tile(point_2_h, (50, 1))]\n actionsParts += [np.tile(home2, (100, 1))]\n actionsParts += [np.tile(home, (100, 1))]\n\n raw_actions = np.vstack(actionsParts)\n\n\n xy_parts = []\n xy_parts += [np.linspace(np.array([-0.41, 0.0, 1.14]), np.array([-0.41, 0.0, 1.14]), n_timesteps)]\n xy_parts += [np.linspace(np.hstack([point_1, 0.6]), np.hstack([point_1, 0.6]), n_timesteps)]\n xy_parts += [np.linspace(np.hstack([point_1, 0.46]), np.hstack([point_1, 0.46]), 50)]\n xy_parts += [np.linspace(np.hstack([point_2, 0.46]), np.hstack([point_2, 0.46]), 500)]\n xy_parts += [np.linspace(np.hstack([point_2, 0.6]), np.hstack([point_2, 0.6]), 50)]\n xy_parts += [np.linspace(np.array([-0.41, 0.0, 1.14]), np.array([-0.41, 0.0, 1.14]), n_timesteps)]\n xy_parts += [np.linspace(np.array([-0.55, 0.0, 1.27]), np.array([-0.55, 0.0, 1.27]), n_timesteps)]\n raw_xy = np.vstack(xy_parts)\n\n checktimes = [199, 249, 749, 849, 999]\n# checkpoints = [point_1_h, point_1_l, point_2_l, point_2_h, home]\n checkpoints = [raw_xy[z] for z in checktimes]\n\n\n checklabels = [(100, 'home2'), (200, 'point_1_h'), \n (250, 'point_1_l'), (750, 'point_2_l'), \n (800, 'point_2_h'),\n (900, 'home2'), (1000, 'home')]\n\n return raw_actions, checktimes, checkpoints, raw_xy, checklabels\n\nn_obj = 3\nenvString = 'REALRobot2020-R1J{}-v0'.format(n_obj)\nenv = gym.make(envString)\nenv.render(\"human\")\nobs = env.reset()\n\ndef drawPoint():\n x = np.random.rand()*0.8-0.4\n y = np.random.rand()*0.27-0.29\n return (y, x)\n\nrender = False\n\nfor obj in env.robot.used_objects[1:]:\n env.robot.object_poses[obj][0] += 0.3\n env.robot.object_poses[obj][2] += 0.3\n env.robot.reset_object(obj)\n\nreplay = False\n\nx = [-0.5, 0, 0.5]\ny = [-0.25, 0.05]\nz = [0.46, 0.60]\n\n#Note varie:\n#1) lo shelf può andare un paio di centimetri più indietro (ingrandire plane)\n\nperimeter = np.array(np.meshgrid(x, y, z)).T.reshape(-1, 3)\n\nperimeter = np.array(np.meshgrid(y, x)).T.reshape(-1, 2)\n\n\nallCombinations = []\n\nfor p1 in perimeter:\n for p2 in perimeter:\n allCombinations += [(p1,p2)]\n\n\nwhile True:\n 
printThis = False\n if not replay:\n point_1 = drawPoint()\n point_2 = drawPoint()\n point_1 = perimeter[np.random.choice(len(perimeter))]\n point_2 = perimeter[np.random.choice(len(perimeter))]\n if len(allCombinations) > 0:\n point_1, point_2 = allCombinations.pop()\n else:\n break\n render = False\n else:\n render = True\n\n raw_actions, checktimes, checkpoints, raw_xy, labels = generate_plan(point_1, point_2)\n print(\"{:.3f} {:.3f} {:.3f} {:.3f}\".format(*point_1, *point_2)) \n record = np.zeros(len(raw_actions))\n record_xy = np.zeros(raw_xy.shape)\n record_xy_diff = np.zeros(len(raw_xy))\n for i in range(len(raw_actions)):\n obs, _, _, _ = env.step({'joint_command': raw_actions[i], 'render': render})\n joints_now = obs['joint_positions']\n pos_now = env.robot.parts['base'].get_position()\n record[i] = np.linalg.norm(joints_now - raw_actions[i])\n record_xy_diff[i] = np.linalg.norm(pos_now - raw_xy[i])\n record_xy[i] = pos_now\n if i in checktimes:\n check = checkpoints[checktimes.index(i)]\n diff = np.linalg.norm(pos_now - np.array(check)) \n if diff > 0.01:\n print(\"Failed!\", i, diff)\n printThis = True\n\n if printThis:\n print(\"Printing failed action\")\n plt.plot(record)\n for tl, label in labels:\n plt.annotate(label, # this is the text\n (tl-1, record[tl-1]), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=(0,10), # distance from text to points (x,y)\n ha='center') # horizontal alignment can be left, right or center\n plt.title(\"Joints diff plot\")\n plt.figure()\n plt.plot(record_xy_diff)\n for tl, label in labels:\n plt.annotate(label, # this is the text\n (tl-1, record_xy_diff[tl-1]), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=(0,10), # distance from text to points (x,y)\n ha='center') # horizontal alignment can be left, right or center\n plt.title(\"Cartesian diff plot\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(record_xy[:, 0], record_xy[:, 1], record_xy[:, 2])\n for check in checkpoints:\n # draw sphere\n radius = 0.01\n center = check\n u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]\n x = np.cos(u)*np.sin(v)*radius+center[0]\n y = np.sin(u)*np.sin(v)*radius+center[1]\n z = np.cos(v)*radius+center[2]\n ax.plot_wireframe(x, y, z, color=\"r\")\n\n X = record_xy[:, 0]\n Y = record_xy[:, 1]\n Z = record_xy[:, 2]\n # Create cubic bounding box to simulate equal aspect ratio\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max()\n Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())\n Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())\n Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())\n # Comment or uncomment following both lines to test the fake bounding box:\n for xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n \n plt.show()\n replay = not replay\n\n for i in range(100):\n obs, _, _, _ = env.step({'joint_command': np.zeros(9), 'render': render})\n\nprint(\"All perimeter combinations tested.\")\n" ]
[ [ "numpy.hstack", "numpy.linspace", "matplotlib.pyplot.title", "numpy.meshgrid", "matplotlib.pyplot.annotate", "numpy.linalg.norm", "numpy.tile", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "numpy.cos", "numpy.sin", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.vstack", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gordon-frost-hwu/autonomous-learning-library
[ "42f25149277a13d325587f1f7d579da8392fcc38", "42f25149277a13d325587f1f7d579da8392fcc38" ]
[ "all/policies/gaussian_test.py", "all/policies/stochastic.py" ]
[ "import unittest\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom gym.spaces import Box\nfrom all.environments import State\nfrom all.policies import GaussianPolicy\n\nSTATE_DIM = 2\nACTION_DIM = 3\n\nclass TestGaussian(unittest.TestCase):\n def setUp(self):\n\n torch.manual_seed(2)\n self.space = Box(np.array([-1, -1, -1]), np.array([1, 1, 1]))\n self.model = nn.Sequential(\n nn.Linear(STATE_DIM, ACTION_DIM * 2)\n )\n optimizer = torch.optim.RMSprop(self.model.parameters(), lr=0.01)\n self.policy = GaussianPolicy(self.model, optimizer, self.space)\n\n def test_output_shape(self):\n state = State(torch.randn(1, STATE_DIM))\n action = self.policy(state).sample()\n self.assertEqual(action.shape, (1, ACTION_DIM))\n state = State(torch.randn(5, STATE_DIM))\n action = self.policy(state).sample()\n self.assertEqual(action.shape, (5, ACTION_DIM))\n\n def test_reinforce_one(self):\n state = State(torch.randn(1, STATE_DIM))\n dist = self.policy(state)\n action = dist.sample()\n log_prob1 = dist.log_prob(action)\n loss = -log_prob1.mean()\n self.policy.reinforce(loss)\n\n dist = self.policy(state)\n log_prob2 = dist.log_prob(action)\n\n self.assertGreater(log_prob2.item(), log_prob1.item())\n\n def test_converge(self):\n state = State(torch.randn(1, STATE_DIM))\n target = torch.tensor([1., 2., -1.])\n\n for _ in range(0, 1000):\n dist = self.policy(state)\n action = dist.sample()\n log_prob = dist.log_prob(action)\n error = ((target - action) ** 2).mean()\n loss = (error * log_prob).mean()\n self.policy.reinforce(loss)\n\n self.assertTrue(error < 1)\n\nif __name__ == '__main__':\n unittest.main()\n", "import torch\nfrom all.approximation import Approximation\n\n\nclass StochasticPolicy(Approximation):\n def __init__(\n self,\n model,\n optimizer,\n distribution,\n name='policy',\n entropy_loss_scaling=0,\n **kwargs\n ):\n super().__init__(\n model,\n optimizer,\n name=name,\n **kwargs\n )\n self.distribution = distribution\n self._entropy_loss_scaling = entropy_loss_scaling\n self._log_probs = []\n self._entropy = []\n\n def __call__(self, state, action=None, log_prob=False):\n outputs = self.model(state)\n distribution = self.distribution(outputs)\n if action is None:\n action = distribution.sample()\n if log_prob:\n _log_prob = distribution.log_prob(action)\n _log_prob.entropy = distribution.entropy()\n return action, _log_prob\n return action\n _log_prob = distribution.log_prob(action)\n _log_prob.entropy = distribution.entropy()\n return _log_prob\n\n def eval(self, state, action=None, log_prob=False):\n with torch.no_grad():\n return self(state, action=action, log_prob=log_prob)\n\n def reinforce(self, log_probs, policy_loss, retain_graph=False):\n entropy_loss = -log_probs.entropy.mean()\n loss = policy_loss + self._entropy_loss_scaling * entropy_loss\n self._writer.add_loss(self._name, loss)\n self._writer.add_loss(self._name + '/pg', policy_loss)\n self._writer.add_loss(self._name + '/entropy', entropy_loss)\n loss.backward(retain_graph=retain_graph)\n self.step()\n" ]
[ [ "torch.manual_seed", "torch.randn", "torch.tensor", "torch.nn.Linear", "numpy.array" ], [ "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chao1224/pria_lifechem
[ "1fd892505a45695c6197f8d711a8a37589cd7097" ]
[ "pria_lifechem/analysis/prospective_screening_analysis_support.py" ]
[ "from __future__ import print_function\n\nimport pandas as pd\nimport numpy as np\nimport os\nfrom collections import OrderedDict\nfrom pria_lifechem.function import *\nfrom prospective_screening_model_names import *\nfrom prospective_screening_metric_names import *\n\n\ndef clean_excel():\n dataframe = pd.read_excel('../../output/stage_2_predictions/Keck_LC4_backup.xlsx')\n dataframe = dataframe.drop(dataframe.index[[8779]])\n dataframe.to_excel('../../output/stage_2_predictions/Keck_LC4_export.xlsx', index=None)\n\n\ndef merge_prediction():\n dataframe = pd.read_csv('../../dataset/fixed_dataset/pria_prospective.csv.gz')\n molecule_ids = dataframe['Molecule'].tolist()\n actual_labels = dataframe['Keck_Pria_AS_Retest'].tolist()\n inhibits = dataframe['Keck_Pria_Continuous'].tolist()\n\n complete_df = pd.DataFrame({'molecule': molecule_ids, 'label': actual_labels, 'inhibition': inhibits})\n\n column_names = ['molecule', 'label', 'inhibition']\n complete_df = complete_df[column_names]\n\n dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'\n model_names = []\n\n for model_name in model_name_mapping.keys():\n file_path = '{}/{}.npz'.format(dir_, model_name)\n if not os.path.exists(file_path):\n continue\n print('model: {} exists'.format(model_name))\n data = np.load(file_path)\n file_path, '\\t', data.keys()\n\n y_pred = data['y_pred_on_test'][:, 0]\n if y_pred.ndim == 2:\n y_pred = y_pred[:, 0]\n\n model_name = model_name_mapping[model_name]\n model_names.append(model_name)\n complete_df[model_name] = y_pred\n\n print()\n\n model_names = sorted(model_names)\n column_names.extend(model_names)\n\n complete_df = complete_df[column_names]\n print(complete_df.shape)\n complete_df.to_csv('{}/complete_prediction.csv'.format(dir_), index=None)\n\n return\n\n\ndef merge_prediction_old():\n dataframe = pd.read_excel('../../output/stage_2_predictions/Keck_LC4_export.xlsx')\n\n molecule_name_list = dataframe['Molecule Name'].tolist()\n supplier_id = dataframe['Supplier ID'].tolist()\n failed_id = ['F0401-0050', 'F2964-1411', 'F2964-1523']\n inhibits = dataframe[\n 'PriA-SSB AS, normalized for plate and edge effects, correct plate map: % inhibition Alpha, normalized (%)'].tolist()\n neo_dataframe = pd.read_csv('../../output/stage_2_predictions/pria_lc4_retest_may18.csv')\n failed_molecule_names = neo_dataframe[neo_dataframe['Active'] == 0]['Row Labels'].tolist()\n failed_molecule_names += ['SMSSF-0044356', 'SMSSF-0030688']\n\n positive_enumerate = filter(lambda x: x[1] >= 35 and supplier_id[x[0]] not in failed_id and molecule_name_list[x[0]] not in failed_molecule_names, enumerate(inhibits))\n positive_idx = map(lambda x: x[0], positive_enumerate)\n actual_label = map(lambda x: 1 if x in positive_idx else 0, range(len(supplier_id)))\n actual_label = np.array(actual_label)\n\n complete_df = pd.DataFrame({'molecule name': molecule_name_list, 'molecule id': supplier_id, 'label': actual_label, 'inhibition': inhibits})\n\n column_names = ['molecule name', 'molecule id', 'label', 'inhibition']\n complete_df = complete_df[column_names]\n\n test_data_df = pd.read_csv('../../dataset/keck_lc4.csv.gz')\n test_data_df = test_data_df[['Molecule', 'SMILES', 'Fingerprints']]\n complete_df = complete_df.merge(test_data_df, how='left', left_on='molecule id', right_on='Molecule', sort=False)\n complete_df.to_csv('LC4_complete.csv', index=None)\n dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'\n\n file_path = '{}/{}.npz'.format(dir_, 'vanilla_lstm_19')\n data = np.load(file_path)\n molecule_id = 
data['molecule_id']\n\n model_names = []\n special_models = ['irv', 'random_forest', 'dockscore', 'consensus', 'baseline']\n\n for model_name in model_name_mapping.keys():\n file_path = '{}/{}.npz'.format(dir_, model_name)\n if not os.path.exists(file_path):\n continue\n print('model: {} exists'.format(model_name))\n data = np.load(file_path)\n\n if any(x in model_name for x in special_models):\n y_pred = data['y_pred_on_test']\n else:\n y_pred = data['y_pred']\n if y_pred.ndim == 2:\n y_pred = y_pred[:, 0]\n\n temp_df = pd.DataFrame({'molecule id': molecule_id,\n model_name_mapping[model_name]: y_pred})\n\n model_names.append(model_name_mapping[model_name])\n complete_df = complete_df.join(temp_df.set_index('molecule id'), on='molecule id')\n\n print()\n\n model_names = sorted(model_names)\n column_names.extend(model_names)\n\n complete_df = complete_df[column_names]\n print(complete_df.shape)\n complete_df.to_csv('{}/complete_prediction.csv'.format(dir_), index=None)\n\n\ndef merge_rank():\n dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'\n complete_df = pd.read_csv('{}/complete_prediction.csv'.format(dir_))\n model_names = complete_df.columns[3:]\n rank_df = complete_df[['molecule', 'label', 'inhibition']]\n for (idx, model_name) in enumerate(model_names):\n order = complete_df[model_name].rank(ascending=False, method='max').tolist()\n order = np.array(order)\n order = order.astype(np.int)\n rank_df[model_name] = order\n\n ensemble_model_names_pairs = OrderedDict()\n\n for ensemble_name, ensemble_model_names in ensemble_model_names_pairs.items():\n ensemble_orders = []\n for (idx, model_name) in enumerate(model_names):\n order = complete_df[model_name].rank(ascending=False, method='max').tolist()\n order = np.array(order)\n order = order.astype(np.int)\n if model_name in ensemble_model_names:\n ensemble_orders.append(order)\n ensemble_orders = np.vstack(ensemble_orders)\n ensemble_order = np.zeros((ensemble_orders.shape[1]))\n for i in range(ensemble_orders.shape[1]):\n ensemble_order[i] = np.min(ensemble_orders[:, i])\n ensemble_order = ensemble_order.astype(int)\n\n temp_df = pd.DataFrame()\n temp_df[ensemble_name] = ensemble_order\n\n # Rank the simple ensemble\n order = temp_df[ensemble_name].rank(method='max').as_matrix()\n order = np.array(order)\n order = order.astype(int)\n rank_df[ensemble_name] = order\n\n rank_df.to_csv('{}/complete_rank.csv'.format(dir_), index=None)\n\n\ndef merge_evaluation():\n dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'\n complete_df = pd.read_csv('{}/complete_prediction.csv'.format(dir_))\n model_names = complete_df.columns[3:]\n\n metric_df = pd.DataFrame({'Model': model_names})\n\n actual_oracle = complete_df['label'].as_matrix()\n actual_oracle = reshape_data_into_2_dim(actual_oracle)\n\n for (metric_name, metric_) in metric_name_mapping.iteritems():\n metric_values = []\n for model_name in model_names:\n pred = complete_df[model_name].as_matrix()\n pred = reshape_data_into_2_dim(pred)\n\n actual, pred = collectively_drop_nan(actual_oracle, pred)\n\n value = metric_['function'](actual, pred, **metric_['argument'])\n metric_values.append(value)\n print(metric_name, '\\t', model_name, '\\t', value)\n metric_df[metric_name] = metric_values\n print()\n\n print('saving to {}/complete_evaluation.csv'.format(dir_))\n metric_df.to_csv('{}/complete_evaluation.csv'.format(dir_), index=None)\n\n\ndef filter_model_name(model_name):\n model_name = model_name.replace('SingleClassification', 'STC')\n model_name = 
model_name.replace('SingleRegression', 'STR')\n model_name = model_name.replace('MultiClassification', 'MTC')\n model_name = model_name.replace('RandomForest', 'RF')\n model_name = model_name.replace('ConsensusDocking', 'ConDock')\n model_name = model_name.replace('Docking', 'Dock')\n return model_name\n\n\nif __name__ == '__main__':\n # clean_excel()\n merge_prediction()\n merge_rank()\n merge_evaluation()\n" ]
[ [ "pandas.read_excel", "pandas.read_csv", "numpy.min", "pandas.DataFrame", "numpy.load", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
AaltoML/trieste
[ "6b2bb0e73649debaac81157f0f9fdb8d3fdfef5b" ]
[ "tests/unit/acquisition/test_rule.py" ]
[ "# Copyright 2020 The Trieste Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import annotations\n\nimport copy\nfrom collections.abc import Mapping\nfrom typing import Callable, Optional\n\nimport gpflow\nimport numpy.testing as npt\nimport pytest\nimport tensorflow as tf\n\nfrom tests.util.misc import empty_dataset, quadratic, random_seed\nfrom tests.util.models.gpflow.models import (\n QuadraticMeanAndRBFKernel,\n QuadraticMeanAndRBFKernelWithSamplers,\n)\nfrom trieste.acquisition import (\n AcquisitionFunction,\n AcquisitionFunctionBuilder,\n NegativeLowerConfidenceBound,\n SingleModelAcquisitionBuilder,\n SingleModelGreedyAcquisitionBuilder,\n)\nfrom trieste.acquisition.optimizer import AcquisitionOptimizer\nfrom trieste.acquisition.rule import (\n AcquisitionRule,\n AsynchronousGreedy,\n AsynchronousOptimization,\n AsynchronousRuleState,\n DiscreteThompsonSampling,\n EfficientGlobalOptimization,\n TrustRegion,\n)\nfrom trieste.acquisition.sampler import (\n ExactThompsonSampler,\n GumbelSampler,\n ThompsonSampler,\n ThompsonSamplerFromTrajectory,\n)\nfrom trieste.data import Dataset\nfrom trieste.models import ProbabilisticModel\nfrom trieste.observer import OBJECTIVE\nfrom trieste.space import Box\nfrom trieste.types import State, TensorType\n\n\ndef _line_search_maximize(\n search_space: Box, f: AcquisitionFunction, num_query_points: int = 1\n) -> TensorType:\n if num_query_points != 1:\n raise ValueError(\"_line_search_maximizer only defined for batches of size 1\")\n if len(search_space.lower) != 1:\n raise ValueError(\"_line_search_maximizer only defined for search spaces of dimension 1\")\n xs = tf.linspace(search_space.lower, search_space.upper, 10 ** 6)\n return xs[tf.squeeze(tf.argmax(f(tf.expand_dims(xs, 1)))), None]\n\n\[email protected](\n \"num_search_space_samples, num_query_points\",\n [\n (0, 50),\n (-2, 50),\n (10, 0),\n (10, -2),\n ],\n)\ndef test_discrete_thompson_sampling_raises_for_invalid_init_params(\n num_search_space_samples: int, num_query_points: int\n) -> None:\n with pytest.raises(ValueError):\n DiscreteThompsonSampling(num_search_space_samples, num_query_points)\n\n\[email protected](\n \"models\",\n [\n {},\n {\"foo\": QuadraticMeanAndRBFKernel()},\n {\"foo\": QuadraticMeanAndRBFKernel(), OBJECTIVE: QuadraticMeanAndRBFKernel()},\n ],\n)\[email protected](\"datasets\", [{}, {OBJECTIVE: empty_dataset([1], [1])}])\ndef test_discrete_thompson_sampling_raises_for_invalid_models_keys(\n datasets: dict[str, Dataset], models: dict[str, ProbabilisticModel]\n) -> None:\n search_space = Box([-1], [1])\n rule = DiscreteThompsonSampling(100, 10)\n with pytest.raises(ValueError):\n rule.acquire(search_space, models, datasets=datasets)\n\n\[email protected](\"models\", [{}, {OBJECTIVE: QuadraticMeanAndRBFKernel()}])\[email protected](\n \"datasets\",\n [\n {},\n {\"foo\": empty_dataset([1], [1])},\n {\"foo\": empty_dataset([1], [1]), OBJECTIVE: empty_dataset([1], [1])},\n ],\n)\ndef 
test_discrete_thompson_sampling_raises_for_invalid_dataset_keys(\n datasets: dict[str, Dataset], models: dict[str, ProbabilisticModel]\n) -> None:\n search_space = Box([-1], [1])\n rule = DiscreteThompsonSampling(10, 100)\n with pytest.raises(ValueError):\n rule.acquire(search_space, models, datasets=datasets)\n\n\[email protected](\n \"sampler\",\n [\n ExactThompsonSampler(sample_min_value=True),\n ThompsonSamplerFromTrajectory(sample_min_value=True),\n ],\n)\ndef test_discrete_thompson_sampling_raises_if_passed_sampler_with_sample_min_value_True(\n sampler: ThompsonSampler,\n) -> None:\n with pytest.raises(ValueError):\n DiscreteThompsonSampling(100, 10, thompson_sampler=sampler)\n\n\[email protected](\n \"thompson_sampler\",\n [\n ExactThompsonSampler(sample_min_value=False),\n ThompsonSamplerFromTrajectory(sample_min_value=False),\n ],\n)\ndef test_discrete_thompson_sampling_initialized_with_correct_sampler(\n thompson_sampler: ThompsonSampler,\n) -> None:\n ts = DiscreteThompsonSampling(100, 10, thompson_sampler=thompson_sampler)\n assert ts._thompson_sampler == thompson_sampler\n\n\ndef test_discrete_thompson_sampling_raises_if_use_fourier_features_with_incorrect_model() -> None:\n search_space = Box([-2.2, -1.0], [1.3, 3.3])\n ts = DiscreteThompsonSampling(\n 100, 10, thompson_sampler=ThompsonSamplerFromTrajectory(sample_min_value=False)\n )\n dataset = Dataset(tf.zeros([1, 2], dtype=tf.float64), tf.zeros([1, 1], dtype=tf.float64))\n model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))\n with pytest.raises(ValueError):\n ts.acquire_single(search_space, model, dataset=dataset)\n\n\ndef test_discrete_thompson_sampling_raises_for_gumbel_sampler() -> None:\n with pytest.raises(ValueError):\n DiscreteThompsonSampling(100, 10, thompson_sampler=GumbelSampler(sample_min_value=False))\n\n\[email protected](\n \"thompson_sampler\",\n [\n ExactThompsonSampler(sample_min_value=False),\n ThompsonSamplerFromTrajectory(sample_min_value=False),\n ],\n)\[email protected](\"num_query_points\", [1, 10])\ndef test_discrete_thompson_sampling_acquire_returns_correct_shape(\n thompson_sampler: ThompsonSampler, num_query_points: int\n) -> None:\n search_space = Box([-2.2, -1.0], [1.3, 3.3])\n ts = DiscreteThompsonSampling(100, num_query_points, thompson_sampler=thompson_sampler)\n dataset = Dataset(tf.zeros([1, 2], dtype=tf.float64), tf.zeros([1, 1], dtype=tf.float64))\n model = QuadraticMeanAndRBFKernelWithSamplers(\n dataset=dataset, noise_variance=tf.constant(1.0, dtype=tf.float64)\n )\n model.kernel = (\n gpflow.kernels.RBF()\n ) # need a gpflow kernel object for random feature decompositions\n query_points = ts.acquire_single(search_space, model, dataset=dataset)\n\n npt.assert_array_equal(query_points.shape, tf.constant([num_query_points, 2]))\n\n\ndef test_efficient_global_optimization_raises_for_no_query_points() -> None:\n with pytest.raises(ValueError):\n EfficientGlobalOptimization(num_query_points=0)\n\n\ndef test_efficient_global_optimization_raises_for_no_batch_fn_with_many_query_points() -> None:\n with pytest.raises(ValueError):\n EfficientGlobalOptimization(num_query_points=2)\n\n\[email protected](\"optimizer\", [_line_search_maximize, None])\ndef test_efficient_global_optimization(optimizer: AcquisitionOptimizer[Box]) -> None:\n class NegQuadratic(SingleModelAcquisitionBuilder[ProbabilisticModel]):\n def __init__(self) -> None:\n self._updated = False\n\n def prepare_acquisition_function(\n self,\n model: ProbabilisticModel,\n dataset: Optional[Dataset] = 
None,\n ) -> AcquisitionFunction:\n return lambda x: -quadratic(tf.squeeze(x, -2) - 1)\n\n def update_acquisition_function(\n self,\n function: AcquisitionFunction,\n model: ProbabilisticModel,\n dataset: Optional[Dataset] = None,\n ) -> AcquisitionFunction:\n self._updated = True\n return function\n\n function = NegQuadratic()\n search_space = Box([-10], [10])\n ego = EfficientGlobalOptimization(function, optimizer)\n data, model = empty_dataset([1], [1]), QuadraticMeanAndRBFKernel(x_shift=1)\n query_point = ego.acquire_single(search_space, model, dataset=data)\n npt.assert_allclose(query_point, [[1]], rtol=1e-4)\n assert not function._updated\n query_point = ego.acquire(search_space, {OBJECTIVE: model})\n npt.assert_allclose(query_point, [[1]], rtol=1e-4)\n assert function._updated\n\n\nclass _JointBatchModelMinusMeanMaximumSingleBuilder(AcquisitionFunctionBuilder[ProbabilisticModel]):\n def prepare_acquisition_function(\n self,\n models: Mapping[str, ProbabilisticModel],\n datasets: Optional[Mapping[str, Dataset]] = None,\n ) -> AcquisitionFunction:\n return lambda at: -tf.reduce_max(models[OBJECTIVE].predict(at)[0], axis=-2)\n\n\n@random_seed\[email protected](\n \"rule_fn\",\n [\n lambda acq, batch_size: EfficientGlobalOptimization(acq, num_query_points=batch_size),\n lambda acq, batch_size: AsynchronousOptimization(acq, num_query_points=batch_size),\n ],\n)\n# As a side effect, this test ensures and EGO and AsynchronousOptimization\n# behave similarly in sync mode\ndef test_joint_batch_acquisition_rule_acquire(\n rule_fn: Callable[\n # callable input type(s)\n [_JointBatchModelMinusMeanMaximumSingleBuilder, int],\n # callable output type\n AcquisitionRule[TensorType, Box]\n | AcquisitionRule[State[TensorType, AsynchronousRuleState], Box],\n ]\n) -> None:\n search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))\n num_query_points = 4\n acq = _JointBatchModelMinusMeanMaximumSingleBuilder()\n acq_rule: AcquisitionRule[TensorType, Box] | AcquisitionRule[\n State[TensorType, AsynchronousRuleState], Box\n ] = rule_fn(acq, num_query_points)\n\n dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))\n points_or_stateful = acq_rule.acquire_single(\n search_space, QuadraticMeanAndRBFKernel(), dataset=dataset\n )\n if callable(points_or_stateful):\n _, query_point = points_or_stateful(None)\n else:\n query_point = points_or_stateful\n npt.assert_allclose(query_point, [[0.0, 0.0]] * num_query_points, atol=1e-3)\n\n\nclass _GreedyBatchModelMinusMeanMaximumSingleBuilder(\n SingleModelGreedyAcquisitionBuilder[ProbabilisticModel]\n):\n def __init__(self) -> None:\n self._update_count = 0\n\n def prepare_acquisition_function(\n self,\n model: ProbabilisticModel,\n dataset: Optional[Dataset] = None,\n pending_points: TensorType = None,\n ) -> AcquisitionFunction:\n if pending_points is None:\n return lambda at: -tf.reduce_max(model.predict(at)[0], axis=-2)\n else:\n best_pending_score = tf.reduce_max(model.predict(pending_points)[0])\n return lambda at: -tf.math.maximum(\n tf.reduce_max(model.predict(at)[0], axis=-2), best_pending_score\n )\n\n def update_acquisition_function(\n self,\n function: Optional[AcquisitionFunction],\n model: ProbabilisticModel,\n dataset: Optional[Dataset] = None,\n pending_points: Optional[TensorType] = None,\n new_optimization_step: bool = True,\n ) -> AcquisitionFunction:\n self._update_count += 1\n return self.prepare_acquisition_function(\n model, dataset=dataset, pending_points=pending_points\n )\n\n\n@random_seed\[email protected](\n \"rule_fn\",\n [\n 
lambda acq, batch_size: EfficientGlobalOptimization(acq, num_query_points=batch_size),\n lambda acq, batch_size: AsynchronousGreedy(acq, num_query_points=batch_size),\n ],\n)\n# As a side effect, this test ensures and EGO and AsynchronousGreedy\n# behave similarly in sync mode\ndef test_greedy_batch_acquisition_rule_acquire(\n rule_fn: Callable[\n # callable input type(s)\n [_GreedyBatchModelMinusMeanMaximumSingleBuilder, int],\n # callable output type\n AcquisitionRule[TensorType, Box]\n | AcquisitionRule[State[TensorType, AsynchronousRuleState], Box],\n ]\n) -> None:\n search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))\n num_query_points = 4\n acq = _GreedyBatchModelMinusMeanMaximumSingleBuilder()\n assert acq._update_count == 0\n acq_rule: AcquisitionRule[TensorType, Box] | AcquisitionRule[\n State[TensorType, AsynchronousRuleState], Box\n ] = rule_fn(acq, num_query_points)\n dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))\n points_or_stateful = acq_rule.acquire_single(\n search_space, QuadraticMeanAndRBFKernel(), dataset=dataset\n )\n if callable(points_or_stateful):\n _, query_points = points_or_stateful(None)\n else:\n query_points = points_or_stateful\n assert acq._update_count == num_query_points - 1\n npt.assert_allclose(query_points, [[0.0, 0.0]] * num_query_points, atol=1e-3)\n\n points_or_stateful = acq_rule.acquire_single(\n search_space, QuadraticMeanAndRBFKernel(), dataset=dataset\n )\n if callable(points_or_stateful):\n _, query_points = points_or_stateful(None)\n else:\n query_points = points_or_stateful\n npt.assert_allclose(query_points, [[0.0, 0.0]] * num_query_points, atol=1e-3)\n assert acq._update_count == 2 * num_query_points - 1\n\n\ndef test_async_greedy_raises_for_non_greedy_function() -> None:\n non_greedy_function_builder = NegativeLowerConfidenceBound()\n with pytest.raises(NotImplementedError):\n # we are deliberately passing in wrong object\n # hence type ignore\n AsynchronousGreedy(non_greedy_function_builder) # type: ignore\n\n\ndef test_async_optimization_raises_for_incorrect_query_points() -> None:\n with pytest.raises(ValueError):\n AsynchronousOptimization(num_query_points=0)\n\n with pytest.raises(ValueError):\n AsynchronousOptimization(num_query_points=-5)\n\n\ndef test_async_greedy_raises_for_incorrect_query_points() -> None:\n with pytest.raises(ValueError):\n AsynchronousGreedy(\n builder=_GreedyBatchModelMinusMeanMaximumSingleBuilder(), num_query_points=0\n )\n\n with pytest.raises(ValueError):\n AsynchronousGreedy(\n builder=_GreedyBatchModelMinusMeanMaximumSingleBuilder(), num_query_points=-5\n )\n\n\n@random_seed\[email protected](\n \"async_rule\",\n [\n AsynchronousOptimization(_JointBatchModelMinusMeanMaximumSingleBuilder()),\n AsynchronousGreedy(_GreedyBatchModelMinusMeanMaximumSingleBuilder()),\n ],\n)\ndef test_async_keeps_track_of_pending_points(\n async_rule: AcquisitionRule[State[TensorType, AsynchronousRuleState], Box]\n) -> None:\n search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))\n dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))\n\n state_fn = async_rule.acquire_single(search_space, QuadraticMeanAndRBFKernel(), dataset=dataset)\n state, point1 = state_fn(None)\n state, point2 = state_fn(state)\n\n assert state is not None\n assert len(state.pending_points) == 2\n\n # pretend we saw observation for the first point\n new_observations = Dataset(\n query_points=point1,\n observations=tf.constant([[1]], dtype=tf.float32),\n )\n state_fn = async_rule.acquire_single(\n search_space,\n 
QuadraticMeanAndRBFKernel(),\n dataset=dataset + new_observations,\n )\n state, point3 = state_fn(state)\n\n assert state is not None\n assert len(state.pending_points) == 2\n\n # we saw first point, so pendings points are\n # second point and new third point\n npt.assert_allclose(state.pending_points, tf.concat([point2, point3], axis=0))\n\n\[email protected](\"datasets\", [{}, {\"foo\": empty_dataset([1], [1])}])\[email protected](\n \"models\", [{}, {\"foo\": QuadraticMeanAndRBFKernel()}, {OBJECTIVE: QuadraticMeanAndRBFKernel()}]\n)\ndef test_trust_region_raises_for_missing_datasets_key(\n datasets: dict[str, Dataset], models: dict[str, ProbabilisticModel]\n) -> None:\n search_space = Box([-1], [1])\n rule = TrustRegion()\n with pytest.raises(ValueError):\n rule.acquire(search_space, models, datasets=datasets)\n\n\nclass _Midpoint(AcquisitionRule[TensorType, Box]):\n def acquire(\n self,\n search_space: Box,\n models: Mapping[str, ProbabilisticModel],\n datasets: Optional[Mapping[str, Dataset]] = None,\n ) -> TensorType:\n return (search_space.upper[None] + search_space.lower[None]) / 2\n\n\[email protected](\n \"rule, expected_query_point\",\n [\n (EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)), [[0.0, 0.0]]),\n (_Midpoint(), [[-0.45, 1.15]]),\n ],\n)\ndef test_trust_region_for_default_state(\n rule: AcquisitionRule[TensorType, Box], expected_query_point: TensorType\n) -> None:\n tr = TrustRegion(rule)\n dataset = Dataset(tf.constant([[0.1, 0.2]]), tf.constant([[0.012]]))\n lower_bound = tf.constant([-2.2, -1.0])\n upper_bound = tf.constant([1.3, 3.3])\n search_space = Box(lower_bound, upper_bound)\n\n state, query_point = tr.acquire_single(\n search_space, QuadraticMeanAndRBFKernel(), dataset=dataset\n )(None)\n\n assert state is not None\n npt.assert_array_almost_equal(query_point, expected_query_point, 5)\n npt.assert_array_almost_equal(state.acquisition_space.lower, lower_bound)\n npt.assert_array_almost_equal(state.acquisition_space.upper, upper_bound)\n npt.assert_array_almost_equal(state.y_min, [0.012])\n assert state.is_global\n\n\[email protected](\n \"rule, expected_query_point\",\n [\n (EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)), [[0.0, 0.0]]),\n (_Midpoint(), [[-0.45, 1.15]]),\n ],\n)\ndef test_trust_region_successful_global_to_global_trust_region_unchanged(\n rule: AcquisitionRule[TensorType, Box], expected_query_point: TensorType\n) -> None:\n tr = TrustRegion(rule)\n dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))\n lower_bound = tf.constant([-2.2, -1.0])\n upper_bound = tf.constant([1.3, 3.3])\n search_space = Box(lower_bound, upper_bound)\n\n eps = 0.5 * (search_space.upper - search_space.lower) / 10\n previous_y_min = dataset.observations[0]\n is_global = True\n previous_state = TrustRegion.State(search_space, eps, previous_y_min, is_global)\n\n current_state, query_point = tr.acquire(\n search_space,\n {OBJECTIVE: QuadraticMeanAndRBFKernel()},\n datasets={OBJECTIVE: dataset},\n )(previous_state)\n\n assert current_state is not None\n npt.assert_array_almost_equal(current_state.eps, previous_state.eps)\n assert current_state.is_global\n npt.assert_array_almost_equal(query_point, expected_query_point, 5)\n npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)\n npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)\n\n\[email protected](\n \"rule\",\n [\n EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)),\n _Midpoint(),\n ],\n)\ndef 
test_trust_region_for_unsuccessful_global_to_local_trust_region_unchanged(\n rule: AcquisitionRule[TensorType, Box]\n) -> None:\n tr = TrustRegion(rule)\n dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.5]]))\n lower_bound = tf.constant([-2.2, -1.0])\n upper_bound = tf.constant([1.3, 3.3])\n search_space = Box(lower_bound, upper_bound)\n\n eps = 0.5 * (search_space.upper - search_space.lower) / 10\n previous_y_min = dataset.observations[0]\n is_global = True\n acquisition_space = search_space\n previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)\n\n current_state, query_point = tr.acquire(\n search_space,\n {OBJECTIVE: QuadraticMeanAndRBFKernel()},\n datasets={OBJECTIVE: dataset},\n )(previous_state)\n\n assert current_state is not None\n npt.assert_array_almost_equal(current_state.eps, previous_state.eps)\n assert not current_state.is_global\n npt.assert_array_less(lower_bound, current_state.acquisition_space.lower)\n npt.assert_array_less(current_state.acquisition_space.upper, upper_bound)\n assert query_point[0] in current_state.acquisition_space\n\n\[email protected](\n \"rule\",\n [\n EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)),\n _Midpoint(),\n ],\n)\ndef test_trust_region_for_successful_local_to_global_trust_region_increased(\n rule: AcquisitionRule[TensorType, Box]\n) -> None:\n tr = TrustRegion(rule)\n dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))\n lower_bound = tf.constant([-2.2, -1.0])\n upper_bound = tf.constant([1.3, 3.3])\n search_space = Box(lower_bound, upper_bound)\n\n eps = 0.5 * (search_space.upper - search_space.lower) / 10\n previous_y_min = dataset.observations[0]\n is_global = False\n acquisition_space = Box(dataset.query_points[0] - eps, dataset.query_points[0] + eps)\n previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)\n\n current_state, _ = tr.acquire(\n search_space,\n {OBJECTIVE: QuadraticMeanAndRBFKernel()},\n datasets={OBJECTIVE: dataset},\n )(previous_state)\n\n assert current_state is not None\n npt.assert_array_less(previous_state.eps, current_state.eps) # current TR larger than previous\n assert current_state.is_global\n npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)\n npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)\n\n\[email protected](\n \"rule\",\n [\n EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)),\n _Midpoint(),\n ],\n)\ndef test_trust_region_for_unsuccessful_local_to_global_trust_region_reduced(\n rule: AcquisitionRule[TensorType, Box]\n) -> None:\n tr = TrustRegion(rule)\n dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.5]]))\n lower_bound = tf.constant([-2.2, -1.0])\n upper_bound = tf.constant([1.3, 3.3])\n search_space = Box(lower_bound, upper_bound)\n\n eps = 0.5 * (search_space.upper - search_space.lower) / 10\n previous_y_min = dataset.observations[0]\n is_global = False\n acquisition_space = Box(dataset.query_points[0] - eps, dataset.query_points[0] + eps)\n previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)\n\n current_state, _ = tr.acquire(\n search_space,\n {OBJECTIVE: QuadraticMeanAndRBFKernel()},\n datasets={OBJECTIVE: dataset},\n )(previous_state)\n\n assert current_state is not None\n npt.assert_array_less(current_state.eps, previous_state.eps) # current TR smaller than previous\n assert current_state.is_global\n 
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)\n\n\ndef test_trust_region_state_deepcopy() -> None:\n tr_state = TrustRegion.State(\n Box(tf.constant([1.2]), tf.constant([3.4])), tf.constant(5.6), tf.constant(7.8), False\n )\n tr_state_copy = copy.deepcopy(tr_state)\n npt.assert_allclose(tr_state_copy.acquisition_space.lower, tr_state.acquisition_space.lower)\n npt.assert_allclose(tr_state_copy.acquisition_space.upper, tr_state.acquisition_space.upper)\n npt.assert_allclose(tr_state_copy.eps, tr_state.eps)\n npt.assert_allclose(tr_state_copy.y_min, tr_state.y_min)\n assert tr_state_copy.is_global == tr_state.is_global\n\n\ndef test_asynchronous_rule_state_pending_points() -> None:\n pending_points = tf.constant([[1], [2], [3]])\n\n state = AsynchronousRuleState(pending_points)\n npt.assert_array_equal(pending_points, state.pending_points)\n\n\ndef test_asynchronous_rule_state_raises_incorrect_shape() -> None:\n with pytest.raises(ValueError):\n AsynchronousRuleState(tf.constant([1, 2]))\n\n with pytest.raises(ValueError):\n AsynchronousRuleState(tf.constant([[[1], [2]]]))\n\n\ndef test_asynchronous_rule_state_has_pending_points() -> None:\n state = AsynchronousRuleState(None)\n assert not state.has_pending_points\n\n state = AsynchronousRuleState(tf.zeros([0, 2]))\n assert not state.has_pending_points\n\n pending_points = tf.constant([[1], [2], [3]])\n state = AsynchronousRuleState(pending_points)\n assert state.has_pending_points\n\n\ndef test_asynchronous_rule_remove_points_raises_shape_mismatch() -> None:\n state = AsynchronousRuleState(tf.constant([[1], [2], [3]]))\n with pytest.raises(ValueError):\n state.remove_points(tf.constant([[1, 1]]))\n\n state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))\n with pytest.raises(ValueError):\n state.remove_points(tf.constant([[1]]))\n\n state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))\n with pytest.raises(ValueError):\n state.remove_points(tf.constant([[[1, 1], [2, 2]]]))\n\n\ndef test_asynchronous_rule_state_remove_points() -> None:\n # brace yourself, there are many test cases here\n\n pending_points = tf.constant([[1], [2], [3]])\n\n # first\n state = AsynchronousRuleState(pending_points)\n state = state.remove_points(tf.constant([[1]]))\n npt.assert_array_equal(state.pending_points, [[2], [3]])\n\n # neither first nor last\n state = AsynchronousRuleState(pending_points)\n state = state.remove_points(tf.constant([[2]]))\n npt.assert_array_equal(state.pending_points, [[1], [3]])\n\n # last\n state = AsynchronousRuleState(pending_points)\n state = state.remove_points(tf.constant([[3]]))\n npt.assert_array_equal(state.pending_points, [[1], [2]])\n\n # unknown point, nothing to remove\n state = AsynchronousRuleState(pending_points)\n state = state.remove_points(tf.constant([[4]]))\n npt.assert_array_equal(state.pending_points, [[1], [2], [3]])\n\n # duplicated pending points - only remove one occurence\n state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))\n state = state.remove_points(tf.constant([[2]]))\n npt.assert_array_equal(state.pending_points, [[1], [3], [2]])\n\n # duplicated pending points - remove a dupe and not a dupe\n state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))\n state = state.remove_points(tf.constant([[2], [3]]))\n npt.assert_array_equal(state.pending_points, [[1], [2]])\n\n # duplicated pending points - remove both dupes\n state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))\n state = state.remove_points(tf.constant([[2], [2]]))\n 
npt.assert_array_equal(state.pending_points, [[1], [3]])\n\n # duplicated pending points - dupe, not a dupe, unknown point\n state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))\n state = state.remove_points(tf.constant([[2], [3], [4]]))\n npt.assert_array_equal(state.pending_points, [[1], [2]])\n\n # remove from empty\n state = AsynchronousRuleState(None)\n state = state.remove_points(tf.constant([[2]]))\n assert not state.has_pending_points\n\n # remove all\n state = AsynchronousRuleState(pending_points)\n state = state.remove_points(pending_points)\n assert not state.has_pending_points\n\n # bigger last dimension\n state = AsynchronousRuleState(tf.constant([[1, 1], [2, 3]]))\n state = state.remove_points(tf.constant([[1, 1], [2, 2], [3, 3], [1, 2]]))\n npt.assert_array_equal(state.pending_points, [[2, 3]])\n\n\ndef test_asynchronous_rule_add_pending_points_raises_shape_mismatch() -> None:\n state = AsynchronousRuleState(tf.constant([[1], [2], [3]]))\n with pytest.raises(ValueError):\n state.add_pending_points(tf.constant([[1, 1]]))\n\n state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))\n with pytest.raises(ValueError):\n state.add_pending_points(tf.constant([[1]]))\n\n state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))\n with pytest.raises(ValueError):\n state.add_pending_points(tf.constant([[[1, 1], [2, 2]]]))\n\n\ndef test_asynchronous_rule_add_pending_points() -> None:\n state = AsynchronousRuleState(None)\n state = state.add_pending_points(tf.constant([[1]]))\n npt.assert_array_equal(state.pending_points, [[1]])\n\n state = AsynchronousRuleState(tf.constant([[1], [2]]))\n state = state.add_pending_points(tf.constant([[1]]))\n npt.assert_array_equal(state.pending_points, [[1], [2], [1]])\n\n state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))\n state = state.add_pending_points(tf.constant([[3, 3], [4, 4]]))\n npt.assert_array_equal(state.pending_points, [[1, 1], [2, 2], [3, 3], [4, 4]])\n" ]
[ [ "tensorflow.constant", "tensorflow.concat", "tensorflow.zeros", "tensorflow.expand_dims", "tensorflow.squeeze", "numpy.testing.assert_array_less", "numpy.testing.assert_array_equal", "numpy.testing.assert_allclose", "tensorflow.linspace", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
slowy07/tensorflow-model-research
[ "48ba4ba6240452eb3e3350fe7099f2b045acc530" ]
[ "advesarial_text/gen_vocab.py" ]
[ "\"\"\"Generates vocabulary and term frequency files for datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom six import iteritems\n\nfrom collections import defaultdict\n\n# Dependency imports\n\nimport tensorflow as tf\n\nfrom data import data_utils\nfrom data import document_generators\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# Flags controlling input are in document_generators.py\n\nflags.DEFINE_string(\"output_dir\", \"\", \"Path to save vocab.txt and vocab_freq.txt.\")\n\nflags.DEFINE_boolean(\n \"use_unlabeled\",\n True,\n \"Whether to use the \" \"unlabeled sentiment dataset in the vocabulary.\",\n)\nflags.DEFINE_boolean(\n \"include_validation\",\n False,\n \"Whether to include the \" \"validation set in the vocabulary.\",\n)\nflags.DEFINE_integer(\n \"doc_count_threshold\",\n 1,\n \"The minimum number of \"\n \"documents a word or bigram should occur in to keep \"\n \"it in the vocabulary.\",\n)\n\nMAX_VOCAB_SIZE = 100 * 1000\n\n\ndef fill_vocab_from_doc(doc, vocab_freqs, doc_counts):\n \"\"\"Fills vocabulary and doc counts with tokens from doc.\n Args:\n doc: Document to read tokens from.\n vocab_freqs: dict<token, frequency count>\n doc_counts: dict<token, document count>\n Returns:\n None\n \"\"\"\n doc_seen = set()\n\n for token in document_generators.tokens(doc):\n if doc.add_tokens or token in vocab_freqs:\n vocab_freqs[token] += 1\n if token not in doc_seen:\n doc_counts[token] += 1\n doc_seen.add(token)\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n vocab_freqs = defaultdict(int)\n doc_counts = defaultdict(int)\n\n # Fill vocabulary frequencies map and document counts map\n for doc in document_generators.documents(\n dataset=\"train\",\n include_unlabeled=FLAGS.use_unlabeled,\n include_validation=FLAGS.include_validation,\n ):\n fill_vocab_from_doc(doc, vocab_freqs, doc_counts)\n\n # Filter out low-occurring terms\n vocab_freqs = dict(\n (term, freq)\n for term, freq in iteritems(vocab_freqs)\n if doc_counts[term] > FLAGS.doc_count_threshold\n )\n\n # Sort by frequency\n ordered_vocab_freqs = data_utils.sort_vocab_by_frequency(vocab_freqs)\n\n # Limit vocab size\n ordered_vocab_freqs = ordered_vocab_freqs[:MAX_VOCAB_SIZE]\n\n # Add EOS token\n ordered_vocab_freqs.append((data_utils.EOS_TOKEN, 1))\n\n # Write\n tf.gfile.MakeDirs(FLAGS.output_dir)\n data_utils.write_vocab_and_frequency(ordered_vocab_freqs, FLAGS.output_dir)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.gfile.MakeDirs", "tensorflow.logging.set_verbosity", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
hamiz-ahmed/Wasserstein-GAN
[ "f8f813569715229954b5dced12b830c07c547ce1" ]
[ "wgan.py" ]
[ "import os\nimport time\nimport argparse\nimport importlib\nimport tensorflow as tf\nimport tensorflow.contrib as tc\n\nfrom visualize import *\nfrom scipy.misc import imsave\n\n\nclass WassersteinGAN(object):\n def __init__(self, g_net, d_net, x_sampler, z_sampler, data, model):\n self.model = model\n self.data = data\n self.g_net = g_net # generator\n self.d_net = d_net # discriminator\n self.x_sampler = x_sampler # image sampler\n self.z_sampler = z_sampler # random noise distribution\n self.x_dim = self.d_net.x_dim # 784\n self.z_dim = self.g_net.z_dim #100 noise distribtution for input to generator\n self.x = tf.placeholder(tf.float32, [None, self.x_dim], name='x') # original image\n self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z') # distribution random\n\n self.x_ = self.g_net(self.z) # returns a convolution of shape (?, 784), generated image on noise\n\n self.d = self.d_net(self.x, reuse=False)\n self.d_ = self.d_net(self.x_)\n\n self.g_loss = tf.reduce_mean(self.d_) # predicted image mean\n self.d_loss = tf.reduce_mean(self.d) - tf.reduce_mean(self.d_) # Gw # W distance\n\n # w in algorithm is discriminator parameters\n\n #self.reg = tc.layers.apply_regularization(\n # tc.layers.l1_regularizer(2.5e-5),\n # weights_list=[var for var in tf.global_variables() if 'weights' in var.name]\n #)\n self.g_loss_reg = self.g_loss\n self.d_loss_reg = self.d_loss\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n self.d_rmsprop = tf.train.RMSPropOptimizer(learning_rate=1e-4)\\\n .minimize(self.d_loss_reg, var_list=self.d_net.vars)\n self.g_rmsprop = tf.train.RMSPropOptimizer(learning_rate=1e-4)\\\n .minimize(self.g_loss_reg, var_list=self.g_net.vars)\n\n self.d_clip = [v.assign(tf.clip_by_value(v, -0.01, 0.01)) for v in self.d_net.vars]\n gpu_options = tf.GPUOptions(allow_growth=True)\n self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n def train(self, batch_size=64, num_batches=100000):\n plt.ion()\n self.sess.run(tf.global_variables_initializer())\n start_time = time.time()\n for t in range(0, num_batches):\n d_iters = 5\n #if t % 500 == 0 or t < 25:\n # d_iters = 100\n\n for _ in range(0, d_iters):\n bx = self.x_sampler(batch_size)\n bz = self.z_sampler(batch_size, self.z_dim)\n\n self.sess.run(self.d_rmsprop, feed_dict={self.x: bx, self.z: bz})\n self.sess.run(self.d_clip)\n\n bz = self.z_sampler(batch_size, self.z_dim)\n self.sess.run(self.g_rmsprop, feed_dict={self.z: bz, self.x: bx})\n\n if t % 100 == 0:\n bx = self.x_sampler(batch_size)\n bz = self.z_sampler(batch_size, self.z_dim)\n\n d_loss = self.sess.run(\n self.d_loss, feed_dict={self.x: bx, self.z: bz}\n )\n g_loss = self.sess.run(\n self.g_loss, feed_dict={self.z: bz, self.x: bx}\n )\n print('Iter [%8d] Time [%5.4f] wasserstein distance [%.4f] g_loss [%.4f]' %\n (t, time.time() - start_time, d_loss, g_loss))\n\n with open('logs/w_dist/reading.txt', 'a') as txt_file:\n txt_file.write(str(d_loss) + '\\n')\n\n if t % 100 == 0:\n bz = self.z_sampler(batch_size, self.z_dim)\n bx = self.sess.run(self.x_, feed_dict={self.z: bz})\n bx = xs.data2img(bx)\n #fig = plt.figure(self.data + '.' 
+ self.model)\n #grid_show(fig, bx, xs.shape)\n #fig.savefig('logs/{}/{}.png'.format(self.data, t/100))\n bx = grid_transform(bx, xs.shape)\n imsave('logs_4/{}/{}.png'.format(self.data, t / 100), bx)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('')\n parser.add_argument('--data', type=str, default='mnist')\n parser.add_argument('--model', type=str, default='dcgan')\n parser.add_argument('--gpus', type=str, default='0')\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus\n\n data = importlib.import_module(args.data)\n model = importlib.import_module(args.data + '.' + args.model)\n\n xs = data.DataSampler()\n zs = data.NoiseSampler()\n\n d_net = model.Discriminator()\n g_net = model.Generator()\n wgan = WassersteinGAN(g_net, d_net, xs, zs, args.data, args.model)\n wgan.train()\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.train.RMSPropOptimizer", "tensorflow.reduce_mean", "tensorflow.get_collection", "tensorflow.placeholder", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.GPUOptions" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
visionshao/exp_translation
[ "55b8699c323e8618de38a71ec646b011275186d0" ]
[ "fairseq/models/transformer/transformer_decoder.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Dict, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom fairseq import utils\nfrom fairseq.distributed import fsdp_wrap\nfrom fairseq.models import FairseqIncrementalDecoder\nfrom fairseq.models.transformer import TransformerConfig\nfrom fairseq.modules import (\n AdaptiveSoftmax,\n BaseLayer,\n FairseqDropout,\n LayerDropModuleList,\n LayerNorm,\n MultiheadAttention,\n PositionalEmbedding,\n SinusoidalPositionalEmbedding,\n)\nfrom fairseq.modules import transformer_layer\nfrom fairseq.modules.checkpoint_activations import checkpoint_wrapper\nfrom fairseq.modules.quant_noise import quant_noise as apply_quant_noise_\nfrom torch import Tensor\nfrom torch.nn.functional import softmax\n\n\n# rewrite name for backward compatibility in `make_generation_fast_`\ndef module_name_fordropout(module_name: str) -> str:\n if module_name == 'TransformerDecoderBase':\n return 'TransformerDecoder'\n else:\n return module_name\n\n\nclass TransformerDecoderBase(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(\n self,\n cfg,\n dictionary,\n embed_tokens,\n no_encoder_attn=False,\n output_projection=None,\n ):\n self.cfg = cfg\n super().__init__(dictionary)\n self.register_buffer(\"version\", torch.Tensor([3]))\n self._future_mask = torch.empty(0)\n\n self.dropout_module = FairseqDropout(\n cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)\n )\n self.decoder_layerdrop = cfg.decoder.layerdrop\n self.share_input_output_embed = cfg.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = cfg.decoder.embed_dim\n self.embed_dim = embed_dim\n self.output_embed_dim = cfg.decoder.output_dim\n\n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = cfg.max_target_positions\n\n self.embed_tokens = embed_tokens\n\n self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)\n\n if not cfg.adaptive_input and cfg.quant_noise.pq > 0:\n self.quant_noise = apply_quant_noise_(\n nn.Linear(embed_dim, embed_dim, bias=False),\n cfg.quant_noise.pq,\n cfg.quant_noise.pq_block_size,\n )\n else:\n self.quant_noise = None\n\n self.project_in_dim = (\n Linear(input_embed_dim, embed_dim, bias=False)\n if embed_dim != input_embed_dim\n else None\n )\n self.embed_positions = (\n PositionalEmbedding(\n self.max_target_positions,\n embed_dim,\n self.padding_idx,\n learned=cfg.decoder.learned_pos,\n )\n if not cfg.no_token_positional_embeddings\n else None\n )\n if cfg.layernorm_embedding:\n self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)\n else:\n self.layernorm_embedding = None\n\n self.cross_self_attention = cfg.cross_self_attention\n\n if self.decoder_layerdrop > 0.0:\n self.layers = LayerDropModuleList(p=self.decoder_layerdrop)\n else:\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [\n self.build_decoder_layer(cfg, no_encoder_attn)\n for _ in range(cfg.decoder.layers)\n ]\n )\n 
self.num_layers = len(self.layers)\n\n if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm:\n self.layer_norm = LayerNorm(embed_dim, export=cfg.export)\n else:\n self.layer_norm = None\n \n ##################### here #################################\n self.segment_layer = self.build_segment_layer(cfg, no_encoder_attn)\n # self.segment_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)\n ##################### above ###############################\n self.project_out_dim = (\n Linear(embed_dim, self.output_embed_dim, bias=False)\n if embed_dim != self.output_embed_dim and not cfg.tie_adaptive_weights\n else None\n )\n\n self.adaptive_softmax = None\n self.output_projection = output_projection\n if self.output_projection is None:\n self.build_output_projection(cfg, dictionary, embed_tokens)\n\n ################### below ###############################\n # def build_segment_attention(self, embed_dim, cfg):\n # return MultiheadAttention(\n # embed_dim,\n # cfg.decoder.attention_heads,\n # kdim=cfg.decoder.embed_dim,\n # vdim=cfg.decoder.embed_dim,\n # dropout=cfg.attention_dropout,\n # encoder_decoder_attention=True,\n # #q_noise=self.quant_noise,\n # #qn_block_size=cfg.quant_noise.pq_block_size,\n # )\n ################## above ##############################\n \n def build_output_projection(self, cfg, dictionary, embed_tokens):\n if cfg.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int),\n dropout=cfg.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None,\n factor=cfg.adaptive_softmax_factor,\n tie_proj=cfg.tie_adaptive_proj,\n )\n elif self.share_input_output_embed:\n self.output_projection = nn.Linear(\n self.embed_tokens.weight.shape[1],\n self.embed_tokens.weight.shape[0],\n bias=False,\n )\n self.output_projection.weight = self.embed_tokens.weight\n else:\n self.output_projection = nn.Linear(\n self.output_embed_dim, len(dictionary), bias=False\n )\n nn.init.normal_(\n self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5\n )\n num_base_layers = cfg.base_layers\n for i in range(num_base_layers):\n self.layers.insert(\n ((i + 1) * cfg.decoder.layers) // (num_base_layers + 1),\n BaseLayer(cfg),\n )\n\n def build_decoder_layer(self, cfg, no_encoder_attn=False):\n layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn)\n checkpoint = cfg.checkpoint_activations\n if checkpoint:\n offload_to_cpu = cfg.offload_activations\n layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)\n # if we are checkpointing, enforce that FSDP always wraps the\n # checkpointed layer, regardless of layer size\n min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0\n layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)\n return layer\n\n def build_segment_layer(self, cfg, no_encoder_attn=False):\n layer = transformer_layer.SegTransformerDecoderLayerBase(cfg, no_encoder_attn)\n checkpoint = cfg.checkpoint_activations\n if checkpoint:\n offload_to_cpu = cfg.offload_activations\n layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)\n # if we are checkpointing, enforce that FSDP always wraps the\n # checkpointed layer, regardless of layer size\n min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0\n layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)\n return layer\n\n def forward_embedding(\n self, tokens, 
token_embedding: Optional[torch.Tensor] = None\n ):\n # embed tokens and positions\n if token_embedding is None:\n token_embedding = self.embed_tokens(tokens)\n x = embed = self.embed_scale * token_embedding\n if self.embed_positions is not None:\n x = embed + self.embed_positions(tokens)\n if self.layernorm_embedding is not None:\n x = self.layernorm_embedding(x)\n x = self.dropout_module(x)\n if self.quant_noise is not None:\n x = self.quant_noise(x)\n return x, embed\n \n def segment_forward(\n self,\n x,\n encoder_out,\n prev_output_tokens,\n incremental_state,\n ngram=3):\n\n x = x.transpose(0, 1)\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if incremental_state is None:\n self_attn_mask = self.buffered_future_mask(x)\n else:\n self_attn_mask = None\n bs, tlen = prev_output_tokens.size()\n src_tokens: Optional[Tensor] = None\n if encoder_out is not None and len(encoder_out[\"src_tokens\"]) > 0:\n src_tokens = encoder_out[\"src_tokens\"][0]\n\n head_tokens = torch.ones(src_tokens.size(0), ngram-1).fill_(self.dictionary.pad()).type_as(src_tokens)\n start_head_tokens = torch.ones(src_tokens.size(0), 1).fill_(self.dictionary.bos()).type_as(src_tokens)\n\n # B x (n-1 + S-1)\n head_src_tokens = torch.cat((head_tokens, src_tokens[:, :-1]), dim=1)\n # B x (n-1 + S-1 + T-1)\n context_tokens = torch.cat((head_src_tokens, prev_output_tokens[:, 1:]), dim=1)\n # B x (n-1 + S-1 + T-1)\n context_mask = (1 - context_tokens.eq(self.padding_idx).to(torch.long)).to(x)\n context_mask = context_mask.unsqueeze(-1)\n # B x (n-1 + S-1 + T-1) x Dim\n context_enc, _ = self.forward_embedding(context_tokens)\n\n # B x (1 + S-1 + T-1) compute the p(z|c_t)\n prob_context_tokens = torch.cat((start_head_tokens, src_tokens[:, :-1], prev_output_tokens[:, 1:]), dim=1)\n # B x (1 + S-1 + T-1) x Dim\n prob_context_enc, _ = self.forward_embedding(prob_context_tokens)\n # B x (1 + S-1 + T-1)\n context_attn_padding_mask = prob_context_tokens.eq(self.padding_idx).to(torch.bool)\n # context_attn_padding_mask = prob_context_mask.unsqueeze(1).unsqueeze(2)\n # T x (1 + S-1 + T-1)\n context_attn_mask = torch.cat((torch.zeros([tlen, context_attn_padding_mask.size(-1)-tlen+1]), torch.triu(utils.fill_with_neg_inf(torch.zeros([tlen, tlen-1])), 0)), dim=1).to(x)\n # from bs x len x dim to len x bs x dim\n\n self_attn_padding_mask: Optional[Tensor] = None\n if prev_output_tokens.eq(self.padding_idx).any():\n self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)\n\n seg_x, weights, _ = self.segment_layer(\n x=x,\n encoder_out=prob_context_enc.transpose(0, 1),\n encoder_padding_mask=context_attn_padding_mask,\n context_attn_mask=context_attn_mask,\n context_attn_padding_mask=context_attn_padding_mask,\n incremental_state=incremental_state,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask,\n need_attn=True\n )\n seg_x = seg_x.transpose(0, 1)\n # bs x T x (1 + S-1 + T-1)\n p_z_c = weights\n # print(p_z_c.size())\n # embedding segment\n dim1 = context_tokens.size(1) - ngram + 1\n dim2 = context_tokens.size(1)\n a= torch.triu(torch.ones([dim1, dim2]), 0).cuda()\n # print(\"a: \", a.size())\n t = torch.tril(torch.ones([dim1, dim2]), ngram-1).cuda()\n # print(\"t: \", t.size())\n seg_mask = a * t\n seg_mask = seg_mask.to(x)\n # print(seg_mask.size())\n # print(seg_mask)\n # (S-1 + T-1) segments, each segment correspondong to a mask whose dim is the same as context_tokens(add n-1 pad tokens to the head)\n # seg_mask: (S-1 + T-1) x (S-1 + T-1 + n-1)\n # 
context_enc : # B x (n-1 + S-1 + T-1) x Dim\n z_features = torch.matmul(seg_mask, context_enc * context_mask)\n # print(z_features.size())\n # print(z_features.size())\n # z_features: # B x (S-1 + T-1) x dim\n # print(z_features.size())\n return z_features, p_z_c, seg_x\n\n\n\n def forward(\n self,\n prev_output_tokens,\n encoder_out: Optional[Dict[str, List[Tensor]]] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n features_only: bool = False,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n src_lengths: Optional[Any] = None,\n return_all_hiddens: bool = False,\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (optional): output from the encoder, used for\n encoder-side attention, should be of size T x B x C\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n features_only (bool, optional): only return features without\n applying output layer (default: False).\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n ngram = 3\n x, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n incremental_state=incremental_state,\n full_context_alignment=full_context_alignment,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n )\n\n # x: bs x len x dim\n z_features, segment_prob, x = self.segment_forward(\n x,\n encoder_out,\n prev_output_tokens,\n incremental_state,\n ngram=ngram\n )\n\n if not features_only:\n full_prob = self.output_layer(x)\n\n segment_out = self.output_layer(z_features)\n logits = full_prob\n # B x T x N_s B x N_s x V , origin\n # B x T x N_s B x T x N_s x V , enhance1\n segment_prob = utils.softmax(segment_prob, dim=-1, onnx_trace=self.onnx_trace)\n segment_out = utils.softmax(segment_out, dim=-1, onnx_trace=self.onnx_trace)\n # B x T x V origin\n segment_out_prob = torch.matmul(segment_prob[:, :, 1:], segment_out)\n # B x T x V\n full_out_prob = utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)\n seg_full_out_prob = full_out_prob * segment_prob[:, :, 1].unsqueeze(-1)\n\n full_prob = seg_full_out_prob + segment_out_prob\n penal_prob = segment_prob[:, :, 1]\n #penal_full_prob = full_out_prob / torch.exp(segment_prob[:, :, 1].unsqueeze(-1)+1e-7)\n return full_prob, extra, penal_prob\n\n def extract_features(\n self,\n prev_output_tokens,\n encoder_out: Optional[Dict[str, List[Tensor]]],\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n ):\n return self.extract_features_scriptable(\n prev_output_tokens,\n encoder_out,\n incremental_state,\n full_context_alignment,\n alignment_layer,\n alignment_heads,\n )\n\n \"\"\"\n A scriptable subclass of this class has an extract_features method and calls\n super().extract_features, but super() is not supported in torchscript. 
A copy of\n this function is made to be used in the subclass instead.\n \"\"\"\n\n def extract_features_scriptable(\n self,\n prev_output_tokens,\n encoder_out: Optional[Dict[str, List[Tensor]]],\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n ):\n \"\"\"\n Similar to *forward* but only return features.\n\n Includes several features from \"Jointly Learning to Align and\n Translate with Transformer Models\" (Garg et al., EMNLP 2019).\n\n Args:\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n alignment_layer (int, optional): return mean alignment over\n heads at this layer (default: last layer).\n alignment_heads (int, optional): only average alignment over\n this many heads (default: all heads).\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n bs, slen = prev_output_tokens.size()\n if alignment_layer is None:\n alignment_layer = self.num_layers - 1\n\n enc: Optional[Tensor] = None\n padding_mask: Optional[Tensor] = None\n if encoder_out is not None and len(encoder_out[\"encoder_out\"]) > 0:\n enc = encoder_out[\"encoder_out\"][0]\n assert (\n enc.size()[1] == bs\n ), f\"Expected enc.shape == (t, {bs}, c) got {enc.shape}\"\n if encoder_out is not None and len(encoder_out[\"encoder_padding_mask\"]) > 0:\n padding_mask = encoder_out[\"encoder_padding_mask\"][0]\n\n # embed positions\n positions = None\n if self.embed_positions is not None:\n positions = self.embed_positions(\n prev_output_tokens, incremental_state=incremental_state\n )\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n if self.quant_noise is not None:\n x = self.quant_noise(x)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n\n if self.layernorm_embedding is not None:\n x = self.layernorm_embedding(x)\n\n x = self.dropout_module(x)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n self_attn_padding_mask: Optional[Tensor] = None\n if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():\n self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)\n\n # decoder layers\n attn: Optional[Tensor] = None\n inner_states: List[Optional[Tensor]] = [x]\n for idx, layer in enumerate(self.layers):\n if incremental_state is None and not full_context_alignment:\n self_attn_mask = self.buffered_future_mask(x)\n else:\n self_attn_mask = None\n x, layer_attn, _ = layer(\n x,\n enc,\n padding_mask,\n incremental_state,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask,\n need_attn=bool((idx == alignment_layer)),\n need_head_weights=bool((idx == alignment_layer)),\n )\n inner_states.append(x)\n if layer_attn is not None and idx == alignment_layer:\n attn = layer_attn.float().to(x)\n\n if attn is not None:\n if alignment_heads is not None:\n attn = attn[:alignment_heads]\n\n # average probabilities over heads\n attn = attn.mean(dim=0)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = 
self.project_out_dim(x)\n\n return x, {\"attn\": [attn], \"inner_states\": inner_states}\n \n def get_normalized_probs(self, net_output, log_probs, sample):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n\n if hasattr(self, \"adaptive_softmax\") and self.adaptive_softmax is not None:\n if sample is not None:\n assert \"target\" in sample\n target = sample[\"target\"]\n else:\n target = None\n out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)\n return out.exp_() if not log_probs else out\n\n prob = net_output[0]\n penal_prob = net_output[2]\n # print(full_out_prob.size())\n # print(segment_out_prob.size())\n # print(seg_full_out_prob.size())\n if log_probs:\n return torch.log(prob + 1e-7), penal_prob\n else:\n return prob, penal_prob\n\n def output_layer(self, features):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n # project back to size of vocabulary\n return self.output_projection(features)\n else:\n return features\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.\n if (\n self._future_mask.size(0) == 0\n or (not self._future_mask.device == tensor.device)\n or self._future_mask.size(0) < dim\n ):\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1\n )\n self._future_mask = self._future_mask.to(tensor)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n if f\"{name}.output_projection.weight\" not in state_dict:\n if self.share_input_output_embed:\n embed_out_key = f\"{name}.embed_tokens.weight\"\n else:\n embed_out_key = f\"{name}.embed_out\"\n if embed_out_key in state_dict:\n state_dict[f\"{name}.output_projection.weight\"] = state_dict[\n embed_out_key\n ]\n if not self.share_input_output_embed:\n del state_dict[embed_out_key]\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.0)\n return m\n\n\nclass 
TransformerDecoder(TransformerDecoderBase):\n def __init__(\n self,\n args,\n dictionary,\n embed_tokens,\n no_encoder_attn=False,\n output_projection=None,\n ):\n self.args = args\n super().__init__(\n TransformerConfig.from_namespace(args),\n dictionary,\n embed_tokens,\n no_encoder_attn=no_encoder_attn,\n output_projection=output_projection,\n )\n\n def build_output_projection(self, args, dictionary, embed_tokens):\n super().build_output_projection(\n TransformerConfig.from_namespace(args), dictionary, embed_tokens\n )\n\n def build_decoder_layer(self, args, no_encoder_attn=False):\n return super().build_decoder_layer(\n TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn\n )\n" ]
[ [ "torch.ones", "torch.empty", "torch.Tensor", "torch.cat", "torch.nn.init.constant_", "torch.zeros", "torch.nn.ModuleList", "torch.matmul", "torch.nn.Linear", "torch.FloatTensor", "torch.nn.init.normal_", "torch.log", "torch.nn.init.xavier_uniform_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adiojha629/JIRP_LRM
[ "a06e3725a8f4f406a100d2a4c2c69d4e9450a2d3" ]
[ "active16/src/baselines/feature_proxy.py" ]
[ "import numpy as np\n\nclass FeatureProxy:\n def __init__(self, num_features, num_states, is_tabular):\n self.num_features = num_features\n self.num_states = num_states\n self.is_tabular = is_tabular\n\n def get_num_features(self):\n if self.is_tabular:\n return self.num_states * self.num_features\n else:\n return self.num_states + self.num_features\n\n def add_state_features(self, s, u_i):\n if self.is_tabular:\n ret = np.zeros((self.num_states, self.num_features))\n ret[u_i,:] = s\n ret = ret.ravel() # to 1D\n else:\n ret = np.concatenate((s,self._get_one_hot_vector(u_i))) # adding the DFA state to the features\n return ret\n\n def _get_one_hot_vector(self, u_i):\n one_hot = np.zeros((self.num_states), dtype=np.float64)\n one_hot[u_i] = 1.0\n return one_hot" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mwunderl/aws-panorama-samples
[ "0dd246bb0b02f6cd11a60d2a1b5e9ca614d7cd8e" ]
[ "FallDetection/Lambda/fall_detector.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\n\nimport panoramasdk\nimport numpy as np\nimport time\nfrom utils import update_x, update_y, update_ids, crop_resize_normalize, preprocess, upscale_bbox_fn, \\\n detector_to_simple_pose, get_max_pred, heatmap_to_coord, reset_counts, reset_tracker, fall_detection\n\n\nclass fall_detector(panoramasdk.base):\n\n def interface(self):\n return {\n \"parameters\":\n (\n (\"float\", \"conf_thresh\", \"Detection threshold\", 0.10),\n (\"model\", \"object_detector\", \"Model for detecting people\", \"ssd-coco\"),\n (\"model\", \"pose_model\", \"Model for pose estimation\", \"pose-net2\"),\n (\"int\", \"batch_size\", \"Model batch size\", 1),\n\n (\"int\", \"img_size\", \"img size\", 512),\n (\"float\", \"person_index\", \"person index based on dataset used\", 0),\n (\"int\", \"box_size_thresh\", \"min bbox dimension\", 20),\n\n (\"int\", \"min_non_dets\", \"reset trackers after no detection\", 50),\n (\"int\", \"anks_shdr_thresh\", \"min ankle-shoulder dist\", 10),\n (\"int\", \"dist_count\", \"min frame count for low anks-shdr distance\", 5),\n (\"int\", \"dist_hist\", \"min ankle-shoulder dist\", 50),\n (\"int\", \"fall_interval\", \"number of frames to skip, to detect next fall\", 100)\n ),\n \"inputs\":\n (\n (\"media[]\", \"video_in\", \"Camera input stream\"),\n ),\n \"outputs\":\n (\n (\"media[video_in]\", \"video_out\", \"Camera output stream\"),\n )\n }\n\n def init(self, parameters, inputs, outputs):\n try:\n ### data parameters\n self.img_size = (parameters.img_size, parameters.img_size)\n\n # Detection probability threshold.\n self.conf_thresh = parameters.conf_thresh\n # Number of People\n self.number_people = 0\n # Person Index for Model\n self.person_index = parameters.person_index\n\n ### Fall parameters\n self.min_non_dets = parameters.min_non_dets\n self.box_size_thresh = (parameters.box_size_thresh, parameters.box_size_thresh)\n self.anks_shdr_thresh = parameters.anks_shdr_thresh\n self.dist_hist = parameters.dist_hist\n self.dist_count = parameters.dist_count\n self.fall_interval = parameters.fall_interval\n self.fall_time = -1\n\n # Load model from the specified directory.\n print(\"loading the model...\")\n self.model = panoramasdk.model()\n self.model.open(parameters.object_detector, 1)\n print(\"Detector loaded\")\n self.pose_model = panoramasdk.model()\n self.pose_model.open(parameters.pose_model, 1)\n\n print(\"Pose model loaded\")\n # Create input and output arrays.\n class_info = self.model.get_output(0)\n prob_info = self.model.get_output(1)\n rect_info = self.model.get_output(2)\n\n # Create pose output arrays\n heatmap_info = self.pose_model.get_output(0)\n\n self.class_array = np.empty(class_info.get_dims(), dtype=class_info.get_type())\n self.prob_array = np.empty(prob_info.get_dims(), dtype=prob_info.get_type())\n self.rect_array = np.empty(rect_info.get_dims(), dtype=rect_info.get_type())\n self.heatmaps_array = np.empty(heatmap_info.get_dims(), dtype=heatmap_info.get_type())\n\n # Fall tracking variables\n self.xpart_tracker, self.ypart_tracker = reset_tracker()\n self.frame_num, self.frame_prev, self.frame_curr, self.zero_dets = reset_counts()\n self.fall_idx = -1\n\n self.master_idx = -1\n\n return True\n\n except Exception as e:\n print(\"Exception: {}\".format(e))\n return False\n\n def get_person_data(self, class_data, prob_data, rect_data):\n # get indices of people detections in class data\n person_indices = [i for i in range(len(class_data)) if int(class_data[i]) == 
self.person_index]\n # filter detections below the confidence threshold\n prob_person_indices = [i for i in person_indices if prob_data[i] >= self.conf_thresh]\n return prob_person_indices, class_data[person_indices], prob_data[person_indices], rect_data[person_indices]\n\n def entry(self, inputs, outputs):\n self.master_idx += 1\n\n for i in range(len(inputs.video_in)):\n\n stream = inputs.video_in[i]\n person_image = stream.image\n x1, orig_img = preprocess(person_image, output_size=self.img_size)\n\n # Do inference on the new frame.\n self.model.batch(0, x1)\n self.model.flush()\n\n # Get the results.\n resultBatchSet = self.model.get_result()\n class_batch = resultBatchSet.get(0)\n prob_batch = resultBatchSet.get(1)\n rect_batch = resultBatchSet.get(2)\n class_batch.get(0, self.class_array)\n prob_batch.get(0, self.prob_array)\n rect_batch.get(0, self.rect_array)\n class_data = self.class_array[0]\n prob_data = self.prob_array[0]\n rect_data = self.rect_array[0]\n\n self.model.release_result(resultBatchSet)\n x_min, y_min = rect_data[0][0], rect_data[0][1]\n x_max, y_max = rect_data[0][2], rect_data[0][3]\n w, h = x_max - x_min, y_max - y_min\n try:\n # Filter predictions of only \"person-class\"\n person_indices, class_data, prob_data, rect_data = self.get_person_data(class_data, prob_data,\n rect_data)\n except Exception as e:\n print(\"Exception: {}\".format(e))\n try:\n self.number_people = len(person_indices)\n except:\n self.number_people = 0\n\n # Draw Bounding Boxes\n if (self.number_people > 0) and (w > self.box_size_thresh[0]) and (h > self.box_size_thresh[1]):\n try:\n # Crop the bbox area from detector output from original image, transform it for pose model\n pose_input, upscale_bbox = detector_to_simple_pose(orig_img, class_data[None, :, :][:, 0:1, :],\n prob_data[None, :, :][:, 0:1, :],\n rect_data[None, :, :][:, 0:1, :],\n person_index=self.person_index,\n thr=self.conf_thresh)\n if len(pose_input) > 0:\n self.pose_model.batch(0, pose_input)\n self.pose_model.flush()\n # Get the results.\n PresultBatchSet = self.pose_model.get_result()\n heatmaps_batch = PresultBatchSet.get(0)\n heatmaps_batch.get(0, self.heatmaps_array)\n predicted_heatmap = self.heatmaps_array[0]\n self.pose_model.release_result(PresultBatchSet)\n # process pose model output to get key point coordinates\n pred_coords, confidence = heatmap_to_coord(predicted_heatmap[None, :, :, :], upscale_bbox)\n pred_coords = np.round(pred_coords, 3)\n\n self.xpart_tracker = update_x(pred_coords[0][:, 0], self.xpart_tracker)\n self.ypart_tracker = update_y(pred_coords[0][:, 1], self.ypart_tracker)\n\n result = fall_detection(self.ypart_tracker, self.anks_shdr_thresh, self.dist_hist,\n self.dist_count)\n\n if result:\n # Flag next fall after fall_interval frames\n if self.fall_idx == -1 or (self.master_idx - self.fall_idx) >= (self.fall_interval):\n self.fall_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(time.time()))\n print('Fall Detected at : {}'.format(self.fall_time))\n self.fall_idx = self.master_idx\n\n except Exception as e:\n print('Pose model exception')\n print(\"Exception: {}\".format(e))\n\n else:\n ### Reset tracker if no person is detected for more than `min_non_dets` continuous frames\n if self.zero_dets > self.min_non_dets:\n self.xpart_tracker, self.ypart_tracker = reset_tracker()\n self.frame_num, self.frame_prev, self.frame_curr, self.zero_dets = reset_counts()\n\n outputs.video_out[i] = stream\n continue\n\n # Track consecutive non detections\n self.frame_prev, self.frame_curr = 
self.frame_curr, self.frame_num\n if self.frame_curr - self.frame_prev == 1:\n self.zero_dets += 1\n else:\n self.zero_dets = 0\n\n if len(person_indices) > 0:\n # currently single person fall detector, choosing the top prediction.\n index = 0\n left = np.clip(rect_data[index][0] / np.float(512), 0, 1)\n top = np.clip(rect_data[index][1] / np.float(512), 0, 1)\n right = np.clip(rect_data[index][2] / np.float(512), 0, 1)\n bottom = np.clip(rect_data[index][3] / np.float(512), 0, 1)\n\n stream.add_rect(left, top, right, bottom)\n stream.add_label(str(prob_data[index][0]), left, top)\n stream.add_label('Current Frame : ' + str(self.master_idx), 0.1, 0.1)\n stream.add_label('Fall frame : ' + str(self.fall_idx), 0.1, 0.15)\n stream.add_label('Last Fall at : ' + str(self.fall_time), 0.1, 0.2)\n\n outputs.video_out[i] = stream\n self.frame_num += 1\n\n return True\n\n\ndef main():\n fall_detector().run()\n\n\nmain()" ]
[ [ "numpy.round", "numpy.float" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rebecca-palmer/pandas
[ "7c94949dc89c62cae1bc647acd87266d6c3a0468" ]
[ "pandas/core/window/rolling.py" ]
[ "\"\"\"\nProvide a generic structure to support window functions,\nsimilar to how we have a Groupby object.\n\"\"\"\nfrom datetime import timedelta\nfrom functools import partial\nimport inspect\nfrom textwrap import dedent\nfrom typing import Callable, Dict, List, Optional, Set, Tuple, Union\n\nimport numpy as np\n\nimport pandas._libs.window.aggregations as window_aggregations\nfrom pandas._typing import Axis, FrameOrSeries, Scalar\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\n\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n is_bool,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_scalar,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCDateOffset,\n ABCDatetimeIndex,\n ABCPeriodIndex,\n ABCSeries,\n ABCTimedeltaIndex,\n)\n\nfrom pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin\nimport pandas.core.common as com\nfrom pandas.core.indexes.api import Index, ensure_index\nfrom pandas.core.window.common import (\n WindowGroupByMixin,\n _doc_template,\n _flex_binary_moment,\n _shared_docs,\n calculate_center_offset,\n calculate_min_periods,\n get_weighted_roll_func,\n zsqrt,\n)\nfrom pandas.core.window.indexers import (\n BaseIndexer,\n FixedWindowIndexer,\n VariableWindowIndexer,\n)\nfrom pandas.core.window.numba_ import generate_numba_apply_func\n\n\nclass _Window(PandasObject, ShallowMixin, SelectionMixin):\n _attributes: List[str] = [\n \"window\",\n \"min_periods\",\n \"center\",\n \"win_type\",\n \"axis\",\n \"on\",\n \"closed\",\n ]\n exclusions: Set[str] = set()\n\n def __init__(\n self,\n obj,\n window=None,\n min_periods: Optional[int] = None,\n center: Optional[bool] = False,\n win_type: Optional[str] = None,\n axis: Axis = 0,\n on: Optional[Union[str, Index]] = None,\n closed: Optional[str] = None,\n **kwargs,\n ):\n\n self.__dict__.update(kwargs)\n self.obj = obj\n self.on = on\n self.closed = closed\n self.window = window\n self.min_periods = min_periods\n self.center = center\n self.win_type = win_type\n self.win_freq = None\n self.axis = obj._get_axis_number(axis) if axis is not None else None\n self.validate()\n self._numba_func_cache: Dict[Optional[str], Callable] = dict()\n\n @property\n def _constructor(self):\n return Window\n\n @property\n def is_datetimelike(self) -> Optional[bool]:\n return None\n\n @property\n def _on(self):\n return None\n\n @property\n def is_freq_type(self) -> bool:\n return self.win_type == \"freq\"\n\n def validate(self) -> None:\n if self.center is not None and not is_bool(self.center):\n raise ValueError(\"center must be a boolean\")\n if self.min_periods is not None and not is_integer(self.min_periods):\n raise ValueError(\"min_periods must be an integer\")\n if self.closed is not None and self.closed not in [\n \"right\",\n \"both\",\n \"left\",\n \"neither\",\n ]:\n raise ValueError(\"closed must be 'right', 'left', 'both' or 'neither'\")\n if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):\n raise TypeError(f\"invalid type: {type(self)}\")\n if isinstance(self.window, BaseIndexer):\n self._validate_get_window_bounds_signature(self.window)\n\n @staticmethod\n def _validate_get_window_bounds_signature(window: BaseIndexer) -> None:\n \"\"\"\n Validate that the passed BaseIndexer subclass has\n a get_window_bounds with the correct signature.\n \"\"\"\n get_window_bounds_signature = 
inspect.signature(\n window.get_window_bounds\n ).parameters.keys()\n expected_signature = inspect.signature(\n BaseIndexer().get_window_bounds\n ).parameters.keys()\n if get_window_bounds_signature != expected_signature:\n raise ValueError(\n f\"{type(window).__name__} does not implement the correct signature for \"\n f\"get_window_bounds\"\n )\n\n def _create_blocks(self):\n \"\"\"\n Split data into blocks & return conformed data.\n \"\"\"\n\n obj = self._selected_obj\n\n # filter out the on from the object\n if self.on is not None and not isinstance(self.on, Index):\n if obj.ndim == 2:\n obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)\n blocks = obj._to_dict_of_blocks(copy=False).values()\n\n return blocks, obj\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : str / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n\n # create a new object to prevent aliasing\n if subset is None:\n subset = self.obj\n self = self._shallow_copy(subset)\n self._reset_cache()\n if subset.ndim == 2:\n if is_scalar(key) and key in subset or is_list_like(key):\n self._selection = key\n return self\n\n def __getattr__(self, attr: str):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n\n raise AttributeError(\n f\"'{type(self).__name__}' object has no attribute '{attr}'\"\n )\n\n def _dir_additions(self):\n return self.obj._dir_additions()\n\n def _get_win_type(self, kwargs: Dict):\n \"\"\"\n Exists for compatibility, overriden by subclass Window.\n\n Parameters\n ----------\n kwargs : dict\n ignored, exists for compatibility\n\n Returns\n -------\n None\n \"\"\"\n return None\n\n def _get_window(self, other=None, win_type: Optional[str] = None) -> int:\n \"\"\"\n Return window length.\n\n Parameters\n ----------\n other :\n ignored, exists for compatibility\n win_type :\n ignored, exists for compatibility\n\n Returns\n -------\n window : int\n \"\"\"\n if isinstance(self.window, BaseIndexer):\n return self.min_periods or 0\n return self.window\n\n @property\n def _window_type(self) -> str:\n return type(self).__name__\n\n def __repr__(self) -> str:\n \"\"\"\n Provide a nice str repr of our rolling object.\n \"\"\"\n\n attrs_list = (\n f\"{attr_name}={getattr(self, attr_name)}\"\n for attr_name in self._attributes\n if getattr(self, attr_name, None) is not None\n )\n attrs = \",\".join(attrs_list)\n return f\"{self._window_type} [{attrs}]\"\n\n def __iter__(self):\n url = \"https://github.com/pandas-dev/pandas/issues/11704\"\n raise NotImplementedError(f\"See issue #11704 {url}\")\n\n def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:\n \"\"\"Convert input to numpy arrays for Cython routines\"\"\"\n if values is None:\n values = getattr(self._selected_obj, \"values\", self._selected_obj)\n\n # GH #12373 : rolling functions error on float32 data\n # make sure the data is coerced to float64\n if is_float_dtype(values.dtype):\n values = ensure_float64(values)\n elif is_integer_dtype(values.dtype):\n values = ensure_float64(values)\n elif needs_i8_conversion(values.dtype):\n raise NotImplementedError(\n f\"ops for {self._window_type} for this \"\n f\"dtype {values.dtype} are not implemented\"\n )\n else:\n try:\n values = ensure_float64(values)\n except (ValueError, TypeError):\n raise TypeError(f\"cannot handle this type -> 
{values.dtype}\")\n\n # Convert inf to nan for C funcs\n inf = np.isinf(values)\n if inf.any():\n values = np.where(inf, np.nan, values)\n\n return values\n\n def _wrap_result(self, result, block=None, obj=None):\n \"\"\"\n Wrap a single result.\n \"\"\"\n\n if obj is None:\n obj = self._selected_obj\n index = obj.index\n\n if isinstance(result, np.ndarray):\n\n if result.ndim == 1:\n from pandas import Series\n\n return Series(result, index, name=obj.name)\n\n return type(obj)(result, index=index, columns=block.columns)\n return result\n\n def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:\n \"\"\"\n Wrap the results.\n\n Parameters\n ----------\n results : list of ndarrays\n blocks : list of blocks\n obj : conformed data (may be resampled)\n exclude: list of columns to exclude, default to None\n \"\"\"\n\n from pandas import Series, concat\n\n final = []\n for result, block in zip(results, blocks):\n\n result = self._wrap_result(result, block=block, obj=obj)\n if result.ndim == 1:\n return result\n final.append(result)\n\n # if we have an 'on' column\n # we want to put it back into the results\n # in the same location\n columns = self._selected_obj.columns\n if self.on is not None and not self._on.equals(obj.index):\n\n name = self._on.name\n final.append(Series(self._on, index=obj.index, name=name))\n\n if self._selection is not None:\n\n selection = ensure_index(self._selection)\n\n # need to reorder to include original location of\n # the on column (if its not already there)\n if name not in selection:\n columns = self.obj.columns\n indexer = columns.get_indexer(selection.tolist() + [name])\n columns = columns.take(sorted(indexer))\n\n # exclude nuisance columns so that they are not reindexed\n if exclude is not None and exclude:\n columns = [c for c in columns if c not in exclude]\n\n if not columns:\n raise DataError(\"No numeric types to aggregate\")\n\n if not len(final):\n return obj.astype(\"float64\")\n return concat(final, axis=1).reindex(columns=columns, copy=False)\n\n def _center_window(self, result, window) -> np.ndarray:\n \"\"\"\n Center the result in the window.\n \"\"\"\n if self.axis > result.ndim - 1:\n raise ValueError(\"Requested axis is larger then no. 
of argument dimensions\")\n\n offset = calculate_center_offset(window)\n if offset > 0:\n lead_indexer = [slice(None)] * result.ndim\n lead_indexer[self.axis] = slice(offset, None)\n result = np.copy(result[tuple(lead_indexer)])\n return result\n\n def _get_roll_func(self, func_name: str) -> Callable:\n \"\"\"\n Wrap rolling function to check values passed.\n\n Parameters\n ----------\n func_name : str\n Cython function used to calculate rolling statistics\n\n Returns\n -------\n func : callable\n \"\"\"\n window_func = getattr(window_aggregations, func_name, None)\n if window_func is None:\n raise ValueError(\n f\"we do not support this function in window_aggregations.{func_name}\"\n )\n return window_func\n\n def _get_cython_func_type(self, func: str) -> Callable:\n \"\"\"\n Return a variable or fixed cython function type.\n\n Variable algorithms do not use window while fixed do.\n \"\"\"\n if self.is_freq_type or isinstance(self.window, BaseIndexer):\n return self._get_roll_func(f\"{func}_variable\")\n return partial(self._get_roll_func(f\"{func}_fixed\"), win=self._get_window())\n\n def _get_window_indexer(self, window: int) -> BaseIndexer:\n \"\"\"\n Return an indexer class that will compute the window start and end bounds\n \"\"\"\n if isinstance(self.window, BaseIndexer):\n return self.window\n if self.is_freq_type:\n return VariableWindowIndexer(index_array=self._on.asi8, window_size=window)\n return FixedWindowIndexer(window_size=window)\n\n def _apply(\n self,\n func: Callable,\n center: bool,\n require_min_periods: int = 0,\n floor: int = 1,\n is_weighted: bool = False,\n name: Optional[str] = None,\n use_numba_cache: bool = False,\n **kwargs,\n ):\n \"\"\"\n Rolling statistical measure using supplied function.\n\n Designed to be used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : callable function to apply\n center : bool\n require_min_periods : int\n floor : int\n is_weighted : bool\n name : str,\n compatibility with groupby.rolling\n use_numba_cache : bool\n whether to cache a numba compiled function. 
Only available for numba\n enabled methods (so far only apply)\n **kwargs\n additional arguments for rolling function and window function\n\n Returns\n -------\n y : type of input\n \"\"\"\n win_type = self._get_win_type(kwargs)\n window = self._get_window(win_type=win_type)\n\n blocks, obj = self._create_blocks()\n block_list = list(blocks)\n window_indexer = self._get_window_indexer(window)\n\n results = []\n exclude: List[Scalar] = []\n for i, b in enumerate(blocks):\n try:\n values = self._prep_values(b.values)\n\n except (TypeError, NotImplementedError):\n if isinstance(obj, ABCDataFrame):\n exclude.extend(b.columns)\n del block_list[i]\n continue\n else:\n raise DataError(\"No numeric types to aggregate\")\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n # calculation function\n offset = calculate_center_offset(window) if center else 0\n additional_nans = np.array([np.nan] * offset)\n\n if not is_weighted:\n\n def calc(x):\n x = np.concatenate((x, additional_nans))\n if not isinstance(window, BaseIndexer):\n min_periods = calculate_min_periods(\n window, self.min_periods, len(x), require_min_periods, floor\n )\n else:\n min_periods = calculate_min_periods(\n self.min_periods or 1,\n self.min_periods,\n len(x),\n require_min_periods,\n floor,\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x),\n min_periods=self.min_periods,\n center=self.center,\n closed=self.closed,\n )\n return func(x, start, end, min_periods)\n\n else:\n\n def calc(x):\n x = np.concatenate((x, additional_nans))\n return func(x, window, self.min_periods)\n\n with np.errstate(all=\"ignore\"):\n if values.ndim > 1:\n result = np.apply_along_axis(calc, self.axis, values)\n else:\n result = calc(values)\n result = np.asarray(result)\n\n if use_numba_cache:\n self._numba_func_cache[name] = func\n\n if center:\n result = self._center_window(result, window)\n\n results.append(result)\n\n return self._wrap_results(results, block_list, obj, exclude)\n\n def aggregate(self, func, *args, **kwargs):\n result, how = self._aggregate(func, *args, **kwargs)\n if result is None:\n return self.apply(func, raw=False, args=args, kwargs=kwargs)\n return result\n\n agg = aggregate\n\n _shared_docs[\"sum\"] = dedent(\n \"\"\"\n Calculate %(name)s sum of given DataFrame or Series.\n\n Parameters\n ----------\n *args, **kwargs\n For compatibility with other %(name)s methods. 
Has no effect\n on the computed value.\n\n Returns\n -------\n Series or DataFrame\n Same type as the input, with the same index, containing the\n %(name)s sum.\n\n See Also\n --------\n Series.sum : Reducing sum for Series.\n DataFrame.sum : Reducing sum for DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.rolling(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 9.0\n 4 12.0\n dtype: float64\n\n >>> s.expanding(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 10.0\n 4 15.0\n dtype: float64\n\n >>> s.rolling(3, center=True).sum()\n 0 NaN\n 1 6.0\n 2 9.0\n 3 12.0\n 4 NaN\n dtype: float64\n\n For DataFrame, each %(name)s sum is computed column-wise.\n\n >>> df = pd.DataFrame({\"A\": s, \"B\": s ** 2})\n >>> df\n A B\n 0 1 1\n 1 2 4\n 2 3 9\n 3 4 16\n 4 5 25\n\n >>> df.rolling(3).sum()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 6.0 14.0\n 3 9.0 29.0\n 4 12.0 50.0\n \"\"\"\n )\n\n _shared_docs[\"mean\"] = dedent(\n \"\"\"\n Calculate the %(name)s mean of the values.\n\n Parameters\n ----------\n *args\n Under Review.\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.mean : Equivalent method for Series.\n DataFrame.mean : Equivalent method for DataFrame.\n\n Examples\n --------\n The below examples will show rolling mean calculations with window sizes of\n two and three, respectively.\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).mean()\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n\n >>> s.rolling(3).mean()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\"\n )\n\n _shared_docs[\"var\"] = dedent(\n \"\"\"\n Calculate unbiased %(name)s variance.\n %(versionadded)s\n Normalized by N-1 by default. This can be changed using the `ddof`\n argument.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n *args, **kwargs\n For NumPy compatibility. No additional arguments are used.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller of the %(name)s calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.var : Equivalent method for Series.\n DataFrame.var : Equivalent method for DataFrame.\n numpy.var : Equivalent method for Numpy array.\n\n Notes\n -----\n The default `ddof` of 1 used in :meth:`Series.var` is different than the\n default `ddof` of 0 in :func:`numpy.var`.\n\n A minimum of 1 period is required for the rolling calculation.\n\n Examples\n --------\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 1.000000\n 4 1.000000\n 5 1.333333\n 6 0.000000\n dtype: float64\n\n >>> s.expanding(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 0.916667\n 4 0.800000\n 5 0.700000\n 6 0.619048\n dtype: float64\n \"\"\"\n )\n\n _shared_docs[\"std\"] = dedent(\n \"\"\"\n Calculate %(name)s standard deviation.\n %(versionadded)s\n Normalized by N-1 by default. This can be changed using the `ddof`\n argument.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n *args, **kwargs\n For NumPy compatibility. No additional arguments are used.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller of the %(name)s calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.std : Equivalent method for Series.\n DataFrame.std : Equivalent method for DataFrame.\n numpy.std : Equivalent method for Numpy array.\n\n Notes\n -----\n The default `ddof` of 1 used in Series.std is different than the default\n `ddof` of 0 in numpy.std.\n\n A minimum of one period is required for the rolling calculation.\n\n Examples\n --------\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 1.000000\n 4 1.000000\n 5 1.154701\n 6 0.000000\n dtype: float64\n\n >>> s.expanding(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 0.957427\n 4 0.894427\n 5 0.836660\n 6 0.786796\n dtype: float64\n \"\"\"\n )\n\n\nclass Window(_Window):\n \"\"\"\n Provide rolling window calculations.\n\n Parameters\n ----------\n window : int, offset, or BaseIndexer subclass\n Size of the moving window. This is the number of observations used for\n calculating the statistic. Each window will be a fixed size.\n\n If its an offset then this will be the time period of each window. Each\n window will be a variable sized based on the observations included in\n the time-period. This is only valid for datetimelike indexes.\n\n If a BaseIndexer subclass is passed, calculates the window boundaries\n based on the defined ``get_window_bounds`` method. Additional rolling\n keyword arguments, namely `min_periods`, `center`, and\n `closed` will be passed to `get_window_bounds`.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). For a window that is specified by an offset,\n `min_periods` will default to 1. Otherwise, `min_periods` will default\n to the size of the window.\n center : bool, default False\n Set the labels at the center of the window.\n win_type : str, default None\n Provide a window type. If ``None``, all points are evenly weighted.\n See the notes below for further information.\n on : str, optional\n For a DataFrame, a datetime-like column or MultiIndex level on which\n to calculate the rolling window, rather than the DataFrame's index.\n Provided integer column is ignored and excluded from result since\n an integer index is not used to calculate the rolling window.\n axis : int or str, default 0\n closed : str, default None\n Make the interval closed on the 'right', 'left', 'both' or\n 'neither' endpoints.\n For offset-based windows, it defaults to 'right'.\n For fixed windows, defaults to 'both'. Remaining cases not implemented\n for fixed windows.\n\n Returns\n -------\n a Window or Rolling sub-classed for the particular operation\n\n See Also\n --------\n expanding : Provides expanding transformations.\n ewm : Provides exponential weighted functions.\n\n Notes\n -----\n By default, the result is set to the right edge of the window. 
This can be\n changed to the center of the window by setting ``center=True``.\n\n To learn more about the offsets & frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n The recognized win_types are:\n\n * ``boxcar``\n * ``triang``\n * ``blackman``\n * ``hamming``\n * ``bartlett``\n * ``parzen``\n * ``bohman``\n * ``blackmanharris``\n * ``nuttall``\n * ``barthann``\n * ``kaiser`` (needs beta)\n * ``gaussian`` (needs std)\n * ``general_gaussian`` (needs power, width)\n * ``slepian`` (needs width)\n * ``exponential`` (needs tau), center is set to None.\n\n If ``win_type=None`` all points are evenly weighted. To learn more about\n different window types see `scipy.signal window functions\n <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n Rolling sum with a window length of 2, using the 'triang'\n window type.\n\n >>> df.rolling(2, win_type='triang').sum()\n B\n 0 NaN\n 1 0.5\n 2 1.5\n 3 NaN\n 4 NaN\n\n Rolling sum with a window length of 2, using the 'gaussian'\n window type (note how we need to specify std).\n\n >>> df.rolling(2, win_type='gaussian').sum(std=3)\n B\n 0 NaN\n 1 0.986207\n 2 2.958621\n 3 NaN\n 4 NaN\n\n Rolling sum with a window length of 2, min_periods defaults\n to the window length.\n\n >>> df.rolling(2).sum()\n B\n 0 NaN\n 1 1.0\n 2 3.0\n 3 NaN\n 4 NaN\n\n Same as above, but explicitly set the min_periods\n\n >>> df.rolling(2, min_periods=1).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 2.0\n 4 4.0\n\n A ragged (meaning not-a-regular frequency), time-indexed DataFrame\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},\n ... index = [pd.Timestamp('20130101 09:00:00'),\n ... pd.Timestamp('20130101 09:00:02'),\n ... pd.Timestamp('20130101 09:00:03'),\n ... pd.Timestamp('20130101 09:00:05'),\n ... 
pd.Timestamp('20130101 09:00:06')])\n\n >>> df\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 2.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n Contrasting to an integer rolling window, this will roll a variable\n length window corresponding to the time period.\n The default for min_periods is 1.\n\n >>> df.rolling('2s').sum()\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 3.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n \"\"\"\n\n def validate(self):\n super().validate()\n\n window = self.window\n if isinstance(window, BaseIndexer):\n raise NotImplementedError(\n \"BaseIndexer subclasses not implemented with win_types.\"\n )\n elif isinstance(window, (list, tuple, np.ndarray)):\n pass\n elif is_integer(window):\n if window <= 0:\n raise ValueError(\"window must be > 0 \")\n import_optional_dependency(\n \"scipy\", extra=\"Scipy is required to generate window weight.\"\n )\n import scipy.signal as sig\n\n if not isinstance(self.win_type, str):\n raise ValueError(f\"Invalid win_type {self.win_type}\")\n if getattr(sig, self.win_type, None) is None:\n raise ValueError(f\"Invalid win_type {self.win_type}\")\n else:\n raise ValueError(f\"Invalid window {window}\")\n\n def _get_win_type(self, kwargs: Dict) -> Union[str, Tuple]:\n \"\"\"\n Extract arguments for the window type, provide validation for it\n and return the validated window type.\n\n Parameters\n ----------\n kwargs : dict\n\n Returns\n -------\n win_type : str, or tuple\n \"\"\"\n # the below may pop from kwargs\n def _validate_win_type(win_type, kwargs):\n arg_map = {\n \"kaiser\": [\"beta\"],\n \"gaussian\": [\"std\"],\n \"general_gaussian\": [\"power\", \"width\"],\n \"slepian\": [\"width\"],\n \"exponential\": [\"tau\"],\n }\n\n if win_type in arg_map:\n win_args = _pop_args(win_type, arg_map[win_type], kwargs)\n if win_type == \"exponential\":\n # exponential window requires the first arg (center)\n # to be set to None (necessary for symmetric window)\n win_args.insert(0, None)\n\n return tuple([win_type] + win_args)\n\n return win_type\n\n def _pop_args(win_type, arg_names, kwargs):\n all_args = []\n for n in arg_names:\n if n not in kwargs:\n raise ValueError(f\"{win_type} window requires {n}\")\n all_args.append(kwargs.pop(n))\n return all_args\n\n return _validate_win_type(self.win_type, kwargs)\n\n def _get_window(\n self, other=None, win_type: Optional[Union[str, Tuple]] = None\n ) -> np.ndarray:\n \"\"\"\n Get the window, weights.\n\n Parameters\n ----------\n other :\n ignored, exists for compatibility\n win_type : str, or tuple\n type of window to create\n\n Returns\n -------\n window : ndarray\n the window, weights\n \"\"\"\n\n window = self.window\n if isinstance(window, (list, tuple, np.ndarray)):\n return com.asarray_tuplesafe(window).astype(float)\n elif is_integer(window):\n import scipy.signal as sig\n\n # GH #15662. 
`False` makes symmetric window, rather than periodic.\n return sig.get_window(win_type, window, False).astype(float)\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n pandas.DataFrame.rolling.aggregate\n pandas.DataFrame.aggregate\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.004295 0.905829 -0.954544\n 2 0.735167 -0.165272 -1.619346\n 3 -0.702657 -1.340923 -0.706334\n 4 -0.246845 0.211596 -0.901819\n 5 2.463718 3.157577 -1.380906\n 6 -1.142255 2.340594 -0.039875\n 7 1.396598 -1.647453 1.677227\n 8 -0.543425 1.761277 -0.220481\n 9 -0.640505 0.289374 -1.550670\n\n >>> df.rolling(3, win_type='boxcar').agg('mean')\n A B C\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 -0.885035 0.212600 -0.711689\n 3 -0.323928 -0.200122 -1.093408\n 4 -0.071445 -0.431533 -1.075833\n 5 0.504739 0.676083 -0.996353\n 6 0.358206 1.903256 -0.774200\n 7 0.906020 1.283573 0.085482\n 8 -0.096361 0.818139 0.472290\n 9 0.070889 0.134399 -0.031308\n \"\"\"\n )\n\n @Substitution(\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n versionadded=\"\",\n klass=\"Series/DataFrame\",\n axis=\"\",\n )\n @Appender(_shared_docs[\"aggregate\"])\n def aggregate(self, func, *args, **kwargs):\n result, how = self._aggregate(func, *args, **kwargs)\n if result is None:\n\n # these must apply directly\n result = func(self)\n\n return result\n\n agg = aggregate\n\n @Substitution(name=\"window\")\n @Appender(_shared_docs[\"sum\"])\n def sum(self, *args, **kwargs):\n nv.validate_window_func(\"sum\", args, kwargs)\n window_func = self._get_roll_func(\"roll_weighted_sum\")\n window_func = get_weighted_roll_func(window_func)\n return self._apply(\n window_func, center=self.center, is_weighted=True, name=\"sum\", **kwargs\n )\n\n @Substitution(name=\"window\")\n @Appender(_shared_docs[\"mean\"])\n def mean(self, *args, **kwargs):\n nv.validate_window_func(\"mean\", args, kwargs)\n window_func = self._get_roll_func(\"roll_weighted_mean\")\n window_func = get_weighted_roll_func(window_func)\n return self._apply(\n window_func, center=self.center, is_weighted=True, name=\"mean\", **kwargs\n )\n\n @Substitution(name=\"window\", versionadded=\"\\n.. versionadded:: 1.0.0\\n\")\n @Appender(_shared_docs[\"var\"])\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"var\", args, kwargs)\n window_func = partial(self._get_roll_func(\"roll_weighted_var\"), ddof=ddof)\n window_func = get_weighted_roll_func(window_func)\n kwargs.pop(\"name\", None)\n return self._apply(\n window_func, center=self.center, is_weighted=True, name=\"var\", **kwargs\n )\n\n @Substitution(name=\"window\", versionadded=\"\\n.. 
versionadded:: 1.0.0\\n\")\n @Appender(_shared_docs[\"std\"])\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"std\", args, kwargs)\n return zsqrt(self.var(ddof=ddof, name=\"std\", **kwargs))\n\n\nclass _Rolling(_Window):\n @property\n def _constructor(self):\n return Rolling\n\n\nclass _Rolling_and_Expanding(_Rolling):\n\n _shared_docs[\"count\"] = dedent(\n r\"\"\"\n The %(name)s count of any non-NaN observations inside the window.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n DataFrame.count : Count of the full DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([2, 3, np.nan, 10])\n >>> s.rolling(2).count()\n 0 1.0\n 1 2.0\n 2 1.0\n 3 1.0\n dtype: float64\n >>> s.rolling(3).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 2.0\n dtype: float64\n >>> s.rolling(4).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\"\n )\n\n def count(self):\n\n blocks, obj = self._create_blocks()\n\n window = self._get_window()\n window = min(window, len(obj)) if not self.center else window\n\n results = []\n for b in blocks:\n result = b.notna().astype(int)\n result = self._constructor(\n result,\n window=window,\n min_periods=0,\n center=self.center,\n axis=self.axis,\n closed=self.closed,\n ).sum()\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\n _shared_docs[\"apply\"] = dedent(\n r\"\"\"\n Apply an arbitrary function to each %(name)s window.\n\n Parameters\n ----------\n func : function\n Must produce a single value from an ndarray input if ``raw=True``\n or a single value from a Series if ``raw=False``. Can also accept a\n Numba JIT function with ``engine='numba'`` specified.\n\n .. versionchanged:: 1.0.0\n\n raw : bool, default None\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray\n objects instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n engine : str, default 'cython'\n * ``'cython'`` : Runs rolling apply through C-extensions from cython.\n * ``'numba'`` : Runs rolling apply through JIT compiled code from numba.\n Only available when ``raw`` is set to ``True``.\n\n .. versionadded:: 1.0.0\n\n engine_kwargs : dict, default None\n * For ``'cython'`` engine, there are no accepted ``engine_kwargs``\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be\n applied to both the ``func`` and the ``apply`` rolling aggregation.\n\n .. 
versionadded:: 1.0.0\n\n args : tuple, default None\n Positional arguments to be passed into func.\n kwargs : dict, default None\n Keyword arguments to be passed into func.\n\n Returns\n -------\n Series or DataFrame\n Return type is determined by the caller.\n\n See Also\n --------\n Series.%(name)s : Series %(name)s.\n DataFrame.%(name)s : DataFrame %(name)s.\n\n Notes\n -----\n See :ref:`stats.rolling_apply` for extended documentation and performance\n considerations for the Numba engine.\n \"\"\"\n )\n\n def apply(\n self,\n func,\n raw: bool = False,\n engine: str = \"cython\",\n engine_kwargs: Optional[Dict] = None,\n args: Optional[Tuple] = None,\n kwargs: Optional[Dict] = None,\n ):\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n kwargs.pop(\"_level\", None)\n kwargs.pop(\"floor\", None)\n window = self._get_window()\n offset = calculate_center_offset(window) if self.center else 0\n if not is_bool(raw):\n raise ValueError(\"raw parameter must be `True` or `False`\")\n\n if engine == \"cython\":\n if engine_kwargs is not None:\n raise ValueError(\"cython engine does not accept engine_kwargs\")\n apply_func = self._generate_cython_apply_func(\n args, kwargs, raw, offset, func\n )\n elif engine == \"numba\":\n if raw is False:\n raise ValueError(\"raw must be `True` when using the numba engine\")\n if func in self._numba_func_cache:\n # Return an already compiled version of roll_apply if available\n apply_func = self._numba_func_cache[func]\n else:\n apply_func = generate_numba_apply_func(\n args, kwargs, func, engine_kwargs\n )\n else:\n raise ValueError(\"engine must be either 'numba' or 'cython'\")\n\n # TODO: Why do we always pass center=False?\n # name=func for WindowGroupByMixin._apply\n return self._apply(\n apply_func,\n center=False,\n floor=0,\n name=func,\n use_numba_cache=engine == \"numba\",\n )\n\n def _generate_cython_apply_func(self, args, kwargs, raw, offset, func):\n from pandas import Series\n\n window_func = partial(\n self._get_cython_func_type(\"roll_generic\"),\n args=args,\n kwargs=kwargs,\n raw=raw,\n offset=offset,\n func=func,\n )\n\n def apply_func(values, begin, end, min_periods, raw=raw):\n if not raw:\n values = Series(values, index=self.obj.index)\n return window_func(values, begin, end, min_periods)\n\n return apply_func\n\n def sum(self, *args, **kwargs):\n nv.validate_window_func(\"sum\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_sum\")\n kwargs.pop(\"floor\", None)\n return self._apply(\n window_func, center=self.center, floor=0, name=\"sum\", **kwargs\n )\n\n _shared_docs[\"max\"] = dedent(\n \"\"\"\n Calculate the %(name)s maximum.\n\n Parameters\n ----------\n *args, **kwargs\n Arguments and keyword arguments to be passed into func.\n \"\"\"\n )\n\n def max(self, *args, **kwargs):\n nv.validate_window_func(\"max\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_max\")\n return self._apply(window_func, center=self.center, name=\"max\", **kwargs)\n\n _shared_docs[\"min\"] = dedent(\n \"\"\"\n Calculate the %(name)s minimum.\n\n Parameters\n ----------\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with a Series.\n DataFrame.%(name)s : Calling object with a DataFrame.\n Series.min : Similar method for Series.\n DataFrame.min : Similar method for DataFrame.\n\n Examples\n --------\n Performing a rolling minimum with a window size 
of 3.\n\n >>> s = pd.Series([4, 3, 5, 2, 6])\n >>> s.rolling(3).min()\n 0 NaN\n 1 NaN\n 2 3.0\n 3 2.0\n 4 2.0\n dtype: float64\n \"\"\"\n )\n\n def min(self, *args, **kwargs):\n nv.validate_window_func(\"min\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_min\")\n return self._apply(window_func, center=self.center, name=\"min\", **kwargs)\n\n def mean(self, *args, **kwargs):\n nv.validate_window_func(\"mean\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_mean\")\n return self._apply(window_func, center=self.center, name=\"mean\", **kwargs)\n\n _shared_docs[\"median\"] = dedent(\n \"\"\"\n Calculate the %(name)s median.\n\n Parameters\n ----------\n **kwargs\n For compatibility with other %(name)s methods. Has no effect\n on the computed median.\n\n Returns\n -------\n Series or DataFrame\n Returned type is the same as the original object.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.median : Equivalent method for Series.\n DataFrame.median : Equivalent method for DataFrame.\n\n Examples\n --------\n Compute the rolling median of a series with a window size of 3.\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.rolling(3).median()\n 0 NaN\n 1 NaN\n 2 1.0\n 3 2.0\n 4 3.0\n dtype: float64\n \"\"\"\n )\n\n def median(self, **kwargs):\n window_func = self._get_roll_func(\"roll_median_c\")\n window_func = partial(window_func, win=self._get_window())\n return self._apply(window_func, center=self.center, name=\"median\", **kwargs)\n\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"std\", args, kwargs)\n kwargs.pop(\"require_min_periods\", None)\n window_func = self._get_cython_func_type(\"roll_var\")\n\n def zsqrt_func(values, begin, end, min_periods):\n return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof))\n\n # ddof passed again for compat with groupby.rolling\n return self._apply(\n zsqrt_func,\n center=self.center,\n require_min_periods=1,\n name=\"std\",\n ddof=ddof,\n **kwargs,\n )\n\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"var\", args, kwargs)\n kwargs.pop(\"require_min_periods\", None)\n window_func = partial(self._get_cython_func_type(\"roll_var\"), ddof=ddof)\n # ddof passed again for compat with groupby.rolling\n return self._apply(\n window_func,\n center=self.center,\n require_min_periods=1,\n name=\"var\",\n ddof=ddof,\n **kwargs,\n )\n\n _shared_docs[\n \"skew\"\n ] = \"\"\"\n Unbiased %(name)s skewness.\n\n Parameters\n ----------\n **kwargs\n Keyword arguments to be passed into func.\n \"\"\"\n\n def skew(self, **kwargs):\n window_func = self._get_cython_func_type(\"roll_skew\")\n kwargs.pop(\"require_min_periods\", None)\n return self._apply(\n window_func,\n center=self.center,\n require_min_periods=3,\n name=\"skew\",\n **kwargs,\n )\n\n _shared_docs[\"kurt\"] = dedent(\n \"\"\"\n Calculate unbiased %(name)s kurtosis.\n\n This function uses Fisher's definition of kurtosis without bias.\n\n Parameters\n ----------\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.kurt : Equivalent method for Series.\n DataFrame.kurt : Equivalent method for DataFrame.\n scipy.stats.skew : Third moment of a probability density.\n scipy.stats.kurtosis : Reference 
SciPy method.\n\n Notes\n -----\n A minimum of 4 periods is required for the %(name)s calculation.\n \"\"\"\n )\n\n def kurt(self, **kwargs):\n window_func = self._get_cython_func_type(\"roll_kurt\")\n kwargs.pop(\"require_min_periods\", None)\n return self._apply(\n window_func,\n center=self.center,\n require_min_periods=4,\n name=\"kurt\",\n **kwargs,\n )\n\n _shared_docs[\"quantile\"] = dedent(\n \"\"\"\n Calculate the %(name)s quantile.\n\n Parameters\n ----------\n quantile : float\n Quantile to compute. 0 <= quantile <= 1.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n .. versionadded:: 0.23.0\n\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n **kwargs\n For compatibility with other %(name)s methods. Has no effect on\n the result.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n Series.quantile : Computes value at the given quantile over all data\n in Series.\n DataFrame.quantile : Computes values at the given quantile over\n requested axis in DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).quantile(.4, interpolation='lower')\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n >>> s.rolling(2).quantile(.4, interpolation='midpoint')\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n \"\"\"\n )\n\n def quantile(self, quantile, interpolation=\"linear\", **kwargs):\n if quantile == 1.0:\n window_func = self._get_cython_func_type(\"roll_max\")\n elif quantile == 0.0:\n window_func = self._get_cython_func_type(\"roll_min\")\n else:\n window_func = partial(\n self._get_roll_func(\"roll_quantile\"),\n win=self._get_window(),\n quantile=quantile,\n interpolation=interpolation,\n )\n\n # Pass through for groupby.rolling\n kwargs[\"quantile\"] = quantile\n kwargs[\"interpolation\"] = interpolation\n return self._apply(window_func, center=self.center, name=\"quantile\", **kwargs)\n\n _shared_docs[\n \"cov\"\n ] = \"\"\"\n Calculate the %(name)s sample covariance.\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n **kwargs\n Keyword arguments to be passed into func.\n \"\"\"\n\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n\n # GH 16058: offset window\n if self.is_freq_type:\n window = self.win_freq\n else:\n window = self._get_window(other)\n\n def _get_cov(X, Y):\n # GH #12373 : rolling functions error on float32 data\n # to avoid potential overflow, cast the data to float64\n X = X.astype(\"float64\")\n Y = Y.astype(\"float64\")\n mean = lambda x: x.rolling(\n window, self.min_periods, center=self.center\n ).mean(**kwargs)\n count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)\n bias_adj = count / (count - ddof)\n return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj\n\n return _flex_binary_moment(\n self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)\n )\n\n _shared_docs[\"corr\"] = dedent(\n \"\"\"\n Calculate %(name)s correlation.\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n If not supplied then will default to self.\n pairwise : bool, default None\n Calculate pairwise combinations of columns within a\n DataFrame. If `other` is not specified, defaults to `True`,\n otherwise defaults to `False`.\n Not relevant for :class:`~pandas.Series`.\n **kwargs\n Unused.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the\n %(name)s calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.corr : Equivalent method for Series.\n DataFrame.corr : Equivalent method for DataFrame.\n %(name)s.cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n\n Notes\n -----\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\n\n Examples\n --------\n The below example shows a rolling calculation with a window size of\n four matching the equivalent function call using :meth:`numpy.corrcoef`.\n\n >>> v1 = [3, 3, 3, 5, 8]\n >>> v2 = [3, 4, 4, 4, 8]\n >>> # numpy returns a 2X2 array, the correlation coefficient\n >>> # is the number at entry [0][1]\n >>> print(f\"{np.corrcoef(v1[:-1], v2[:-1])[0][1]:.6f}\")\n 0.333333\n >>> print(f\"{np.corrcoef(v1[1:], v2[1:])[0][1]:.6f}\")\n 0.916949\n >>> s1 = pd.Series(v1)\n >>> s2 = pd.Series(v2)\n >>> s1.rolling(4).corr(s2)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 0.333333\n 4 0.916949\n dtype: float64\n\n The below example shows a similar rolling calculation on a\n DataFrame using the pairwise option.\n\n >>> matrix = 
np.array([[51., 35.], [49., 30.], [47., 32.],\\\n [46., 31.], [50., 36.]])\n >>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))\n [[1. 0.6263001]\n [0.6263001 1. ]]\n >>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))\n [[1. 0.5553681]\n [0.5553681 1. ]]\n >>> df = pd.DataFrame(matrix, columns=['X','Y'])\n >>> df\n X Y\n 0 51.0 35.0\n 1 49.0 30.0\n 2 47.0 32.0\n 3 46.0 31.0\n 4 50.0 36.0\n >>> df.rolling(4).corr(pairwise=True)\n X Y\n 0 X NaN NaN\n Y NaN NaN\n 1 X NaN NaN\n Y NaN NaN\n 2 X NaN NaN\n Y NaN NaN\n 3 X 1.000000 0.626300\n Y 0.626300 1.000000\n 4 X 1.000000 0.555368\n Y 0.555368 1.000000\n \"\"\"\n )\n\n def corr(self, other=None, pairwise=None, **kwargs):\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n window = self._get_window(other)\n\n def _get_corr(a, b):\n a = a.rolling(\n window=window, min_periods=self.min_periods, center=self.center\n )\n b = b.rolling(\n window=window, min_periods=self.min_periods, center=self.center\n )\n\n return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))\n\n return _flex_binary_moment(\n self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)\n )\n\n\nclass Rolling(_Rolling_and_Expanding):\n @cache_readonly\n def is_datetimelike(self) -> bool:\n return isinstance(\n self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)\n )\n\n @cache_readonly\n def _on(self) -> Index:\n if self.on is None:\n if self.axis == 0:\n return self.obj.index\n else:\n # i.e. self.axis == 1\n return self.obj.columns\n elif isinstance(self.on, Index):\n return self.on\n elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:\n return Index(self.obj[self.on])\n else:\n raise ValueError(\n f\"invalid on specified as {self.on}, \"\n \"must be a column (of DataFrame), an Index or None\"\n )\n\n def validate(self):\n super().validate()\n\n # we allow rolling on a datetimelike index\n if (self.obj.empty or self.is_datetimelike) and isinstance(\n self.window, (str, ABCDateOffset, timedelta)\n ):\n\n self._validate_monotonic()\n freq = self._validate_freq()\n\n # we don't allow center\n if self.center:\n raise NotImplementedError(\n \"center is not implemented for \"\n \"datetimelike and offset based windows\"\n )\n\n # this will raise ValueError on non-fixed freqs\n self.win_freq = self.window\n self.window = freq.nanos\n self.win_type = \"freq\"\n\n # min_periods must be an integer\n if self.min_periods is None:\n self.min_periods = 1\n\n elif isinstance(self.window, BaseIndexer):\n # Passed BaseIndexer subclass should handle all other rolling kwargs\n return\n elif not is_integer(self.window):\n raise ValueError(\"window must be an integer\")\n elif self.window < 0:\n raise ValueError(\"window must be non-negative\")\n\n if not self.is_datetimelike and self.closed is not None:\n raise ValueError(\n \"closed only implemented for datetimelike and offset based windows\"\n )\n\n def _validate_monotonic(self):\n \"\"\"\n Validate monotonic (increasing or decreasing).\n \"\"\"\n if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing):\n formatted = self.on\n if self.on is None:\n formatted = \"index\"\n raise ValueError(f\"{formatted} must be monotonic\")\n\n def _validate_freq(self):\n \"\"\"\n Validate & return window frequency.\n \"\"\"\n from pandas.tseries.frequencies import to_offset\n\n try:\n return to_offset(self.window)\n except (TypeError, ValueError):\n raise 
ValueError(\n f\"passed window {self.window} is not \"\n \"compatible with a datetimelike index\"\n )\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n Series.rolling\n DataFrame.rolling\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.004295 0.905829 -0.954544\n 2 0.735167 -0.165272 -1.619346\n 3 -0.702657 -1.340923 -0.706334\n 4 -0.246845 0.211596 -0.901819\n 5 2.463718 3.157577 -1.380906\n 6 -1.142255 2.340594 -0.039875\n 7 1.396598 -1.647453 1.677227\n 8 -0.543425 1.761277 -0.220481\n 9 -0.640505 0.289374 -1.550670\n\n >>> df.rolling(3).sum()\n A B C\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 -2.655105 0.637799 -2.135068\n 3 -0.971785 -0.600366 -3.280224\n 4 -0.214334 -1.294599 -3.227500\n 5 1.514216 2.028250 -2.989060\n 6 1.074618 5.709767 -2.322600\n 7 2.718061 3.850718 0.256446\n 8 -0.289082 2.454418 1.416871\n 9 0.212668 0.403198 -0.093924\n\n >>> df.rolling(3).agg({'A':'sum', 'B':'min'})\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 -2.655105 -0.165272\n 3 -0.971785 -1.340923\n 4 -0.214334 -1.340923\n 5 1.514216 -1.340923\n 6 1.074618 0.211596\n 7 2.718061 -1.647453\n 8 -0.289082 -1.647453\n 9 0.212668 -1.647453\n \"\"\"\n )\n\n @Substitution(\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n versionadded=\"\",\n klass=\"Series/Dataframe\",\n axis=\"\",\n )\n @Appender(_shared_docs[\"aggregate\"])\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"count\"])\n def count(self):\n\n # different impl for freq counting\n if self.is_freq_type:\n window_func = self._get_roll_func(\"roll_count\")\n return self._apply(window_func, center=self.center, name=\"count\")\n\n return super().count()\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"apply\"])\n def apply(\n self,\n func,\n raw=False,\n engine=\"cython\",\n engine_kwargs=None,\n args=None,\n kwargs=None,\n ):\n return super().apply(\n func,\n raw=raw,\n engine=engine,\n engine_kwargs=engine_kwargs,\n args=args,\n kwargs=kwargs,\n )\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"sum\"])\n def sum(self, *args, **kwargs):\n nv.validate_rolling_func(\"sum\", args, kwargs)\n return super().sum(*args, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"max\"])\n def max(self, *args, **kwargs):\n nv.validate_rolling_func(\"max\", args, kwargs)\n return super().max(*args, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"min\"])\n def min(self, *args, **kwargs):\n nv.validate_rolling_func(\"min\", args, kwargs)\n return super().min(*args, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"mean\"])\n def mean(self, *args, **kwargs):\n nv.validate_rolling_func(\"mean\", args, kwargs)\n return super().mean(*args, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"median\"])\n def median(self, **kwargs):\n return super().median(**kwargs)\n\n @Substitution(name=\"rolling\", versionadded=\"\")\n @Appender(_shared_docs[\"std\"])\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_rolling_func(\"std\", args, kwargs)\n return super().std(ddof=ddof, **kwargs)\n\n @Substitution(name=\"rolling\", versionadded=\"\")\n @Appender(_shared_docs[\"var\"])\n def var(self, ddof=1, *args, **kwargs):\n 
nv.validate_rolling_func(\"var\", args, kwargs)\n return super().var(ddof=ddof, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"skew\"])\n def skew(self, **kwargs):\n return super().skew(**kwargs)\n\n _agg_doc = dedent(\n \"\"\"\n Examples\n --------\n\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> print(f\"{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}\")\n -1.200000\n >>> print(f\"{scipy.stats.kurtosis(arr[1:], bias=False):.6f}\")\n 3.999946\n >>> s = pd.Series(arr)\n >>> s.rolling(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 3.999946\n dtype: float64\n \"\"\"\n )\n\n @Appender(_agg_doc)\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"kurt\"])\n def kurt(self, **kwargs):\n return super().kurt(**kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"quantile\"])\n def quantile(self, quantile, interpolation=\"linear\", **kwargs):\n return super().quantile(\n quantile=quantile, interpolation=interpolation, **kwargs\n )\n\n @Substitution(name=\"rolling\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"cov\"])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"corr\"])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super().corr(other=other, pairwise=pairwise, **kwargs)\n\n\nRolling.__doc__ = Window.__doc__\n\n\nclass RollingGroupby(WindowGroupByMixin, Rolling):\n \"\"\"\n Provide a rolling groupby implementation.\n \"\"\"\n\n @property\n def _constructor(self):\n return Rolling\n\n def _gotitem(self, key, ndim, subset=None):\n\n # we are setting the index on the actual object\n # here so our index is carried thru to the selected obj\n # when we do the splitting for the groupby\n if self.on is not None:\n self._groupby.obj = self._groupby.obj.set_index(self._on)\n self.on = None\n return super()._gotitem(key, ndim, subset=subset)\n\n def _validate_monotonic(self):\n \"\"\"\n Validate that on is monotonic;\n we don't care for groupby.rolling\n because we have already validated at a higher\n level.\n \"\"\"\n pass\n" ]
[ [ "pandas.tseries.frequencies.to_offset", "pandas.Series", "scipy.signal.get_window", "numpy.asarray", "pandas.core.window.indexers.FixedWindowIndexer", "numpy.concatenate", "pandas.compat.numpy.function.validate_rolling_func", "numpy.where", "pandas.util._decorators.Substitution", "pandas.core.dtypes.common.ensure_float64", "pandas.compat.numpy.function.validate_window_func", "pandas.core.common.asarray_tuplesafe", "pandas.compat._optional.import_optional_dependency", "numpy.apply_along_axis", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_list_like", "pandas.util._decorators.Appender", "pandas.core.base.DataError", "pandas.concat", "pandas.core.window.common.get_weighted_roll_func", "pandas.core.window.indexers.BaseIndexer", "pandas.core.window.numba_.generate_numba_apply_func", "pandas.core.indexes.api.Index", "numpy.errstate", "numpy.array", "pandas.core.dtypes.common.needs_i8_conversion", "pandas.core.dtypes.common.is_bool", "pandas.core.window.indexers.VariableWindowIndexer", "pandas.core.window.common.calculate_center_offset", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common.is_integer", "pandas.core.indexes.api.ensure_index", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
LSchultebraucks/support-vector-machines-blog-post
[ "090694340579cbf0a74f339ad19b853c022fadd4" ]
[ "iris_svc.py" ]
[ "#\n# iris data set\n# 150 total entries\n# features are: sepal length in cm, sepal width in cm, petal length in cm, petal width in cm\\n\n# labels names: setosa, versicolor, virginica\n#\n# used algorithm: SVC (C-Support Vector Classifiction)\n#\n# accuracy ~100%\n#\nfrom time import time\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score\n\n\ndef main():\n data_set = load_iris()\n\n features, labels = split_features_labels(data_set)\n\n train_features, train_labels, test_features, test_labels = split_train_test(features, labels,\n 0.18)\n\n print(len(train_features), \" \", len(test_features))\n\n clf = svm.SVC()\n\n print(\"Start training...\")\n t_start = time()\n clf.fit(train_features, train_labels)\n print(\"Training time: \", round(time() - t_start, 3), \"s\")\n\n print(\"Accuracy: \", accuracy_score(clf.predict(test_features), test_labels))\n\n\ndef split_train_test(features, labels, test_size):\n total_test_size = int(len(features) * test_size)\n np.random.seed(2)\n indices = np.random.permutation(len(features))\n train_features = features[indices[:-total_test_size]]\n train_labels = labels[indices[:-total_test_size]]\n test_features = features[indices[-total_test_size:]]\n test_labels = labels[indices[-total_test_size:]]\n return train_features, train_labels, test_features, test_labels\n\n\ndef split_features_labels(data_set):\n features = data_set.data\n labels = data_set.target\n return features, labels\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "sklearn.datasets.load_iris", "numpy.random.seed", "sklearn.svm.SVC" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tufts-ml/fNIRS-mental-workload-classifiers
[ "b5199d6184e659152d1fe650db48eba53a221186" ]
[ "synthesizing_results/domain_adaptation/synthesize_all_subjects.py" ]
[ "import os\nimport numpy as np\nimport csv\nimport argparse\nimport pandas as pd\n\ndef main(experiment_dir, summary_save_dir):\n \n AllSubject_summary_filename = os.path.join(summary_save_dir, 'AllSubjects_summary.csv')\n \n with open(AllSubject_summary_filename, mode='w') as csv_file:\n \n fieldnames = ['subject_id', 'bucket', 'max_validation_accuracy', 'corresponding_test_accuracy', 'performance_string', 'experiment_folder']\n \n fileEmpty = os.stat(AllSubject_summary_filename).st_size == 0\n \n writer = csv.DictWriter(csv_file, fieldnames = fieldnames)\n \n if fileEmpty:\n writer.writeheader()\n \n buckets = ['TestBucket1', 'TestBucket2', 'TestBucket3', 'TestBucket4', 'TestBucket5', 'TestBucket6', 'TestBucket7', 'TestBucket8', 'TestBucket9', 'TestBucket10', 'TestBucket11', 'TestBucket12', 'TestBucket13', 'TestBucket14', 'TestBucket15', 'TestBucket16', 'TestBucket17']\n \n \n for bucket in buckets:\n subject_this_bucket_list = os.listdir(os.path.join(experiment_dir, bucket))\n \n for subject_id in subject_this_bucket_list:\n this_subject_summary_csv_path = os.path.join(experiment_dir, bucket, str(subject_id), 'hypersearch_summary/hypersearch_summary.csv')\n\n this_subject_summary_df = pd.read_csv(this_subject_summary_csv_path)\n\n this_subject_selected_setting = this_subject_summary_df.sort_values(by=['validation_accuracy'], ascending=False).iloc[0]\n\n this_subject_dict = {}\n this_subject_max_validation_accuracy = this_subject_selected_setting.validation_accuracy\n this_subject_corresponding_test_accuracy = this_subject_selected_setting.test_accuracy\n this_subject_performance_string = this_subject_selected_setting.performance_string\n this_subject_experiment_folder = this_subject_selected_setting.experiment_folder\n\n this_subject_dict.update(subject_id=subject_id, bucket=bucket, max_validation_accuracy=this_subject_max_validation_accuracy, corresponding_test_accuracy=this_subject_corresponding_test_accuracy, performance_string=this_subject_performance_string, experiment_folder=this_subject_experiment_folder)\n\n writer.writerow(this_subject_dict)\n \n\nif __name__==\"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--experiment_dir')\n \n #parse args\n args = parser.parse_args()\n \n experiment_dir = args.experiment_dir\n summary_save_dir = experiment_dir + '_summary'\n \n if not os.path.exists(summary_save_dir):\n os.makedirs(summary_save_dir)\n \n main(experiment_dir, summary_save_dir)\n \n \n \n \n \n \n " ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
JohannesBuchner/gammapy
[ "48769519f04b7df7b3e4580ebb61396445790bc3", "015206d2418b1d254f1c9d3ea819ab0c5ece99e9", "48769519f04b7df7b3e4580ebb61396445790bc3", "015206d2418b1d254f1c9d3ea819ab0c5ece99e9", "48769519f04b7df7b3e4580ebb61396445790bc3", "48769519f04b7df7b3e4580ebb61396445790bc3", "015206d2418b1d254f1c9d3ea819ab0c5ece99e9", "48769519f04b7df7b3e4580ebb61396445790bc3" ]
[ "gammapy/modeling/models/spatial.py", "gammapy/stats/tests/test_variability.py", "gammapy/makers/background/reflected.py", "gammapy/irf/tests/test_io.py", "gammapy/utils/array.py", "gammapy/catalog/tests/test_fermi.py", "gammapy/estimators/excess_profile.py", "docs/makers/make_rectangular_reflected_background.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Spatial models.\"\"\"\nimport logging\nimport numpy as np\nimport scipy.integrate\nimport scipy.special\nimport astropy.units as u\nfrom astropy.coordinates import Angle, SkyCoord\nfrom astropy.coordinates.angle_utilities import angular_separation, position_angle\nfrom astropy.utils import lazyproperty\nfrom regions import (\n CircleAnnulusSkyRegion,\n CircleSkyRegion,\n EllipseSkyRegion,\n PointSkyRegion,\n PolygonSkyRegion,\n)\nfrom gammapy.maps import Map, WcsGeom\nfrom gammapy.modeling import Parameter\nfrom gammapy.utils.gauss import Gauss2DPDF\nfrom gammapy.utils.scripts import make_path\nfrom .core import Model\n\nlog = logging.getLogger(__name__)\n\n\ndef compute_sigma_eff(lon_0, lat_0, lon, lat, phi, major_axis, e):\n \"\"\"Effective radius, used for the evaluation of elongated models\"\"\"\n phi_0 = position_angle(lon_0, lat_0, lon, lat)\n d_phi = phi - phi_0\n minor_axis = Angle(major_axis * np.sqrt(1 - e ** 2))\n\n a2 = (major_axis * np.sin(d_phi)) ** 2\n b2 = (minor_axis * np.cos(d_phi)) ** 2\n denominator = np.sqrt(a2 + b2)\n sigma_eff = major_axis * minor_axis / denominator\n return minor_axis, sigma_eff\n\n\nclass SpatialModel(Model):\n \"\"\"Spatial model base class.\"\"\"\n\n _type = \"spatial\"\n\n def __init__(self, **kwargs):\n frame = kwargs.pop(\"frame\", \"icrs\")\n super().__init__(**kwargs)\n if not hasattr(self, \"frame\"):\n self.frame = frame\n\n def __call__(self, lon, lat, energy=None):\n \"\"\"Call evaluate method\"\"\"\n kwargs = {par.name: par.quantity for par in self.parameters}\n\n if energy is None and self.is_energy_dependent:\n raise ValueError(\"Missing energy value for evaluation\")\n\n if energy is not None:\n kwargs[\"energy\"] = energy\n\n return self.evaluate(lon, lat, **kwargs)\n\n # TODO: make this a hard-coded class attribute?\n @lazyproperty\n def is_energy_dependent(self):\n varnames = self.evaluate.__code__.co_varnames\n return \"energy\" in varnames\n\n @property\n def position(self):\n \"\"\"Spatial model center position\"\"\"\n lon = self.lon_0.quantity\n lat = self.lat_0.quantity\n return SkyCoord(lon, lat, frame=self.frame)\n\n @position.setter\n def position(self, skycoord):\n \"\"\"Spatial model center position\"\"\"\n coord = skycoord.transform_to(self.frame)\n self.lon_0.quantity = coord.data.lon\n self.lat_0.quantity = coord.data.lat\n\n # TODO: get rid of this!\n _phi_0 = 0.0\n\n @property\n def phi_0(self):\n return self._phi_0\n\n @phi_0.setter\n def phi_0(self, phi_0=0.0):\n self._phi_0 = phi_0\n\n @property\n def position_error(self):\n \"\"\"Get 95% containment position error as (`~regions.EllipseSkyRegion`)\"\"\"\n if self.covariance is None:\n return EllipseSkyRegion(\n center=self.position,\n height=np.nan * u.deg,\n width=np.nan * u.deg,\n angle=np.nan * u.deg,\n )\n\n pars = self.parameters\n sub_covar = self.covariance.get_subcovariance([\"lon_0\", \"lat_0\"]).data.copy()\n cos_lat = np.cos(self.lat_0.quantity.to_value(\"rad\"))\n sub_covar[0, 0] *= cos_lat ** 2.0\n sub_covar[0, 1] *= cos_lat\n sub_covar[1, 0] *= cos_lat\n eig_vals, eig_vecs = np.linalg.eig(sub_covar)\n lon_err, lat_err = np.sqrt(eig_vals)\n y_vec = eig_vecs[:, 0]\n phi = (np.arctan2(y_vec[1], y_vec[0]) * u.rad).to(\"deg\") + self.phi_0\n err = np.sort([lon_err, lat_err])\n scale_r95 = Gauss2DPDF(sigma=1).containment_radius(0.95)\n err *= scale_r95\n if err[1] == lon_err * scale_r95:\n phi += 90 * u.deg\n height = 2 * err[1] * pars[\"lon_0\"].unit\n width = 2 * err[0] * 
pars[\"lat_0\"].unit\n else:\n height = 2 * err[1] * pars[\"lat_0\"].unit\n width = 2 * err[0] * pars[\"lon_0\"].unit\n\n return EllipseSkyRegion(\n center=self.position, height=height, width=width, angle=phi\n )\n\n def evaluate_geom(self, geom):\n coords = geom.to_image().get_coord(frame=self.frame)\n\n if self.is_energy_dependent:\n energy = geom.axes[\"energy_true\"].center\n return self(coords.lon, coords.lat, energy[:, np.newaxis, np.newaxis])\n else:\n return self(coords.lon, coords.lat)\n\n def integrate_geom(self, geom):\n \"\"\"Integrate model on `~gammapy.maps.Geom` or `~gammapy.maps.RegionGeom`.\n \n Parameters\n ----------\n geom : `~gammapy.maps.WcsGeom` or `~gammapy.maps.RegionGeom`\n\n Returns\n ---------\n `~gammapy.maps.Map` or `gammapy.maps.RegionNDMap`, containing\n the integral value in each spatial bin.\n \"\"\"\n if geom.is_region:\n wcs_geom = geom.to_wcs_geom().to_image()\n mask = geom.contains(wcs_geom.get_coord())\n values = self.evaluate_geom(wcs_geom)\n data = ((values * wcs_geom.solid_angle())[mask]).sum()\n else:\n values = self.evaluate_geom(geom)\n data = values * geom.solid_angle()\n\n return Map.from_geom(geom=geom, data=data.value, unit=data.unit)\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n data = super().to_dict(full_output)\n data[\"frame\"] = self.frame\n data[\"parameters\"] = data.pop(\"parameters\")\n return data\n\n def _get_plot_map(self, geom):\n if self.evaluation_radius is None and geom is None:\n raise ValueError(\n f\"{self.__class__.__name__} requires geom to be defined for plotting.\"\n )\n\n if geom is None:\n width = 2 * max(self.evaluation_radius, 0.1 * u.deg)\n geom = WcsGeom.create(\n skydir=self.position, frame=self.frame, width=width, binsz=0.02\n )\n data = self.evaluate_geom(geom)\n return Map.from_geom(geom, data=data.value, unit=data.unit)\n\n def plot(self, ax=None, geom=None, **kwargs):\n \"\"\"Plot spatial model.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n geom : `~gammapy.maps.WcsGeom`, optional\n Geom to use for plotting.\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n m = self._get_plot_map(geom)\n if not m.geom.is_flat:\n raise TypeError(\n \"Use .plot_interactive() or .plot_grid() for Map dimension > 2\"\n )\n _, ax, _ = m.plot(ax=ax, **kwargs)\n return ax\n\n def plot_interative(self, ax=None, geom=None, **kwargs):\n \"\"\"Plot spatial model.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n geom : `~gammapy.maps.WcsGeom`, optional\n Geom to use for plotting.\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n\n m = self._get_plot_map(geom)\n if m.geom.is_image:\n raise TypeError(\"Use .plot() for 2D Maps\")\n m.plot_interactive(ax=ax, **kwargs)\n\n def plot_error(self, ax=None, **kwargs):\n \"\"\"Plot position error\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n import matplotlib.pyplot as plt\n\n # plot center position\n lon, lat = self.lon_0.value, self.lat_0.value\n\n ax = plt.gca() if ax is None else ax\n\n kwargs.setdefault(\"marker\", \"x\")\n kwargs.setdefault(\"color\", \"red\")\n 
kwargs.setdefault(\"label\", \"position\")\n\n ax.scatter(lon, lat, transform=ax.get_transform(self.frame), **kwargs)\n\n # plot position error\n if not np.all(self.covariance.data == 0):\n region = self.position_error.to_pixel(ax.wcs)\n artist = region.as_artist(facecolor=\"none\", edgecolor=kwargs[\"color\"])\n ax.add_artist(artist)\n\n return ax\n\n def plot_grid(self, geom=None, **kwargs):\n \"\"\"Plot spatial model energy slices in a grid.\n\n Parameters\n ----------\n geom : `~gammapy.maps.WcsGeom`, optional\n Geom to use for plotting.\n **kwargs : dict\n Keyword arguments passed to `~gammapy.maps.WcsMap.plot()`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`, optional\n Axis\n \"\"\"\n\n if (geom is None) or geom.is_image:\n raise TypeError(\"Use .plot() for 2D Maps\")\n m = self._get_plot_map(geom)\n m.plot_grid(**kwargs)\n\n @classmethod\n def from_position(cls, position, **kwargs):\n \"\"\"Define the position of the model using a sky coord\n\n Parameters\n ----------\n position : `SkyCoord`\n Position\n\n Returns\n -------\n model : `SpatialModel`\n Spatial model\n \"\"\"\n lon_0, lat_0 = position.data.lon, position.data.lat\n return cls(lon_0=lon_0, lat_0=lat_0, frame=position.frame, **kwargs)\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius\"\"\"\n return None\n\n @property\n def evaluation_region(self):\n \"\"\"Evaluation region\"\"\"\n\n if hasattr(self, \"to_region\"):\n return self.to_region()\n elif self.evaluation_radius is not None:\n return CircleSkyRegion(center=self.position, radius=self.evaluation_radius,)\n else:\n return None\n\n\nclass PointSpatialModel(SpatialModel):\n r\"\"\"Point Source.\n\n For more information see :ref:`point-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"PointSpatialModel\", \"point\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n is_energy_dependent = False\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set as zero degrees.\n \"\"\"\n return 0 * u.deg\n\n @staticmethod\n def _grid_weights(x, y, x0, y0):\n \"\"\"Compute 4-pixel weights such that centroid is preserved.\"\"\"\n dx = np.abs(x - x0)\n dx = np.where(dx < 1, 1 - dx, 0)\n\n dy = np.abs(y - y0)\n dy = np.where(dy < 1, 1 - dy, 0)\n\n return dx * dy\n\n def evaluate_geom(self, geom):\n \"\"\"Evaluate model on `~gammapy.maps.Geom`.\"\"\"\n values = self.integrate_geom(geom).data\n return values / geom.solid_angle()\n\n def integrate_geom(self, geom):\n \"\"\"Integrate model on `~gammapy.maps.Geom`\n\n Parameters\n ----------\n geom : `Geom`\n Map geometry\n\n Returns\n -------\n flux : `Map`\n Predicted flux map\n \"\"\"\n geom_image = geom.to_image()\n x, y = geom_image.get_pix()\n x0, y0 = self.position.to_pixel(geom.wcs)\n data = self._grid_weights(x, y, x0, y0)\n return Map.from_geom(geom=geom_image, data=data, unit=\"\")\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.PointSkyRegion`).\"\"\"\n return PointSkyRegion(center=self.position, **kwargs)\n\n\nclass GaussianSpatialModel(SpatialModel):\n r\"\"\"Two-dimensional Gaussian model.\n\n For more information see :ref:`gaussian-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n sigma : `~astropy.coordinates.Angle`\n Length of the major semiaxis of the Gaussian, in 
angular units.\n e : `float`\n Eccentricity of the Gaussian (:math:`0< e< 1`).\n phi : `~astropy.coordinates.Angle`\n Rotation angle :math:`\\phi`: of the major semiaxis.\n Increases counter-clockwise from the North direction.\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"GaussianSpatialModel\", \"gauss\"]\n\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n sigma = Parameter(\"sigma\", \"1 deg\", min=0)\n e = Parameter(\"e\", 0, min=0, max=1, frozen=True)\n phi = Parameter(\"phi\", \"0 deg\", frozen=True)\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set as :math:`5\\sigma`.\n \"\"\"\n return 5 * self.parameters[\"sigma\"].quantity\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, sigma, e, phi):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n\n if e == 0:\n a = 1.0 - np.cos(sigma)\n norm = (1 / (4 * np.pi * a * (1.0 - np.exp(-1.0 / a)))).value\n else:\n minor_axis, sigma_eff = compute_sigma_eff(\n lon_0, lat_0, lon, lat, phi, sigma, e\n )\n a = 1.0 - np.cos(sigma_eff)\n norm = (1 / (2 * np.pi * sigma * minor_axis)).to_value(\"sr-1\")\n\n exponent = -0.5 * ((1 - np.cos(sep)) / a)\n return u.Quantity(norm * np.exp(exponent).value, \"sr-1\", copy=False)\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n minor_axis = Angle(self.sigma.quantity * np.sqrt(1 - self.e.quantity ** 2))\n return EllipseSkyRegion(\n center=self.position,\n height=2 * self.sigma.quantity,\n width=2 * minor_axis,\n angle=self.phi.quantity,\n **kwargs,\n )\n\n @property\n def evaluation_region(self):\n \"\"\"Evaluation region\"\"\"\n region = self.to_region()\n region.height = 5 * region.height # consistent with evaluation radius\n region.width = 5 * region.width\n return region\n\n\nclass GeneralizedGaussianSpatialModel(SpatialModel):\n r\"\"\"Two-dimensional Generealized Gaussian model.\n\n For more information see :ref:`generalized-gaussian-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n r_0 : `~astropy.coordinates.Angle`\n Length of the major semiaxis, in angular units.\n eta : `float`\n Shape parameter whitin (0, 1]. 
Special cases for disk: ->0, Gaussian: 0.5, Laplace:1\n e : `float`\n Eccentricity (:math:`0< e< 1`).\n phi : `~astropy.coordinates.Angle`\n Rotation angle :math:`\\phi`: of the major semiaxis.\n Increases counter-clockwise from the North direction.\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"GeneralizedGaussianSpatialModel\", \"gauss-general\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\")\n eta = Parameter(\"eta\", 0.5, min=0.01, max=1.0)\n e = Parameter(\"e\", 0.0, min=0.0, max=1.0, frozen=True)\n phi = Parameter(\"phi\", \"0 deg\", frozen=True)\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0, eta, e, phi):\n sep = angular_separation(lon, lat, lon_0, lat_0)\n if isinstance(eta, u.Quantity):\n eta = eta.value # gamma function does not allow quantities\n minor_axis, r_eff = compute_sigma_eff(lon_0, lat_0, lon, lat, phi, r_0, e)\n z = sep / r_eff\n norm = 1 / (2 * np.pi * minor_axis * r_0 * eta * scipy.special.gamma(2 * eta))\n return (norm * np.exp(-(z ** (1 / eta)))).to(\"sr-1\")\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n The evaluation radius is defined as r_eval = r_0*(1+8*eta) so it verifies:\n r_eval -> r_0 if eta -> 0 \n r_eval = 5*r_0 = 5*sigma_gauss if eta=0.5\n r_eval = 9*r_0 > 5*sigma_laplace = 5*sqrt(2)*r_0 ~ 7*r_0 if eta = 1\n r_eval -> inf if eta -> inf\n \"\"\"\n return self.r_0.quantity * (1 + 8 * self.eta.value)\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n minor_axis = Angle(self.r_0.quantity * np.sqrt(1 - self.e.quantity ** 2))\n return EllipseSkyRegion(\n center=self.position,\n height=2 * self.r_0.quantity,\n width=2 * minor_axis,\n angle=self.phi.quantity,\n **kwargs,\n )\n\n @property\n def evaluation_region(self):\n \"\"\"Evaluation region\"\"\"\n region = self.to_region()\n scale = self.evaluation_radius / self.r_0.quantity\n # scale to be consistent with evaluation radius\n region.height = scale * region.height\n region.width = scale * region.width\n return region\n\n\nclass DiskSpatialModel(SpatialModel):\n r\"\"\"Constant disk model.\n\n For more information see :ref:`disk-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n r_0 : `~astropy.coordinates.Angle`\n :math:`a`: length of the major semiaxis, in angular units.\n e : `float`\n Eccentricity of the ellipse (:math:`0< e< 1`).\n phi : `~astropy.coordinates.Angle`\n Rotation angle :math:`\\phi`: of the major semiaxis.\n Increases counter-clockwise from the North direction.\n edge : `~astropy.coordinates.Angle`\n Width of the edge. 
The width is defined as the range within the\n smooth edges of the model drops from 95% to 5% of its amplitude.\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n \"\"\"\n\n tag = [\"DiskSpatialModel\", \"disk\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\", min=0)\n e = Parameter(\"e\", 0, min=0, max=1, frozen=True)\n phi = Parameter(\"phi\", \"0 deg\", frozen=True)\n edge = Parameter(\"edge\", \"0.01 deg\", frozen=True)\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n \n Set to the length of the semi-major axis.\n \"\"\"\n return self.r_0.quantity + self.edge.quantity\n\n @staticmethod\n def _evaluate_norm_factor(r_0, e):\n \"\"\"Compute the normalization factor.\"\"\"\n semi_minor = r_0 * np.sqrt(1 - e ** 2)\n\n def integral_fcn(x, a, b):\n A = 1 / np.sin(a) ** 2\n B = 1 / np.sin(b) ** 2\n C = A - B\n cs2 = np.cos(x) ** 2\n\n return 1 - np.sqrt(1 - 1 / (B + C * cs2))\n\n return (\n 2\n * scipy.integrate.quad(\n lambda x: integral_fcn(x, r_0, semi_minor), 0, np.pi\n )[0]\n ) ** -1\n\n @staticmethod\n def _evaluate_smooth_edge(x, width):\n value = (x / width).to_value(\"\")\n edge_width_95 = 2.326174307353347\n return 0.5 * (1 - scipy.special.erf(value * edge_width_95))\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0, e, phi, edge):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n\n if e == 0:\n sigma_eff = r_0\n else:\n sigma_eff = compute_sigma_eff(lon_0, lat_0, lon, lat, phi, r_0, e)[1]\n\n norm = DiskSpatialModel._evaluate_norm_factor(r_0, e)\n\n in_ellipse = DiskSpatialModel._evaluate_smooth_edge(sep - sigma_eff, edge)\n return u.Quantity(norm * in_ellipse, \"sr-1\", copy=False)\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n minor_axis = Angle(self.r_0.quantity * np.sqrt(1 - self.e.quantity ** 2))\n return EllipseSkyRegion(\n center=self.position,\n height=2 * self.r_0.quantity,\n width=2 * minor_axis,\n angle=self.phi.quantity,\n **kwargs,\n )\n\n\nclass ShellSpatialModel(SpatialModel):\n r\"\"\"Shell model.\n\n For more information see :ref:`shell-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n radius : `~astropy.coordinates.Angle`\n Inner radius, :math:`r_{in}`\n width : `~astropy.coordinates.Angle`\n Shell width\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n See Also\n --------\n Shell2SpatialModel\n \"\"\"\n\n tag = [\"ShellSpatialModel\", \"shell\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n radius = Parameter(\"radius\", \"1 deg\")\n width = Parameter(\"width\", \"0.2 deg\")\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set to :math:`r_\\text{out}`.\n \"\"\"\n return self.radius.quantity + self.width.quantity\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, radius, width):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n radius_out = radius + width\n\n norm = 3 / (2 * np.pi * (radius_out ** 3 - radius ** 3))\n\n with np.errstate(invalid=\"ignore\"):\n # np.where and np.select do not work with quantities, so we use the\n # workaround with indexing\n value = np.sqrt(radius_out ** 2 - sep ** 2)\n mask = sep < radius\n value[mask] = (value - np.sqrt(radius ** 2 - sep ** 
2))[mask]\n value[sep > radius_out] = 0\n\n return norm * value\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.CircleAnnulusSkyRegion`).\"\"\"\n return CircleAnnulusSkyRegion(\n center=self.position,\n inner_radius=self.radius.quantity,\n outer_radius=self.radius.quantity + self.width.quantity,\n **kwargs,\n )\n\n\nclass Shell2SpatialModel(SpatialModel):\n r\"\"\"Shell model with outer radius and relative width parametrization\n\n For more information see :ref:`shell2-spatial-model`.\n\n Parameters\n ----------\n lon_0, lat_0 : `~astropy.coordinates.Angle`\n Center position\n r_0 : `~astropy.coordinates.Angle`\n Outer radius, :math:`r_{out}`\n eta : float\n Shell width relative to outer radius, r_0, should be within (0,1]\n frame : {\"icrs\", \"galactic\"}\n Center position coordinate frame\n\n See Also\n --------\n ShellSpatialModel\n \"\"\"\n\n tag = [\"Shell2SpatialModel\", \"shell2\"]\n lon_0 = Parameter(\"lon_0\", \"0 deg\")\n lat_0 = Parameter(\"lat_0\", \"0 deg\", min=-90, max=90)\n r_0 = Parameter(\"r_0\", \"1 deg\")\n eta = Parameter(\"eta\", 0.2, min=0.02, max=1)\n\n @property\n def evaluation_radius(self):\n r\"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set to :math:`r_\\text{out}`.\n \"\"\"\n return self.r_0.quantity\n\n @property\n def r_in(self):\n return (1 - self.eta.quantity) * self.r_0.quantity\n\n @staticmethod\n def evaluate(lon, lat, lon_0, lat_0, r_0, eta):\n \"\"\"Evaluate model.\"\"\"\n sep = angular_separation(lon, lat, lon_0, lat_0)\n r_in = (1 - eta) * r_0\n\n norm = 3 / (2 * np.pi * (r_0 ** 3 - r_in ** 3))\n\n with np.errstate(invalid=\"ignore\"):\n # np.where and np.select do not work with quantities, so we use the\n # workaround with indexing\n value = np.sqrt(r_0 ** 2 - sep ** 2)\n mask = sep < r_in\n value[mask] = (value - np.sqrt(r_in ** 2 - sep ** 2))[mask]\n value[sep > r_0] = 0\n\n return norm * value\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.CircleAnnulusSkyRegion`).\"\"\"\n return CircleAnnulusSkyRegion(\n center=self.position,\n inner_radius=self.r_in,\n outer_radius=self.r_0.quantity,\n **kwargs,\n )\n\n\nclass ConstantSpatialModel(SpatialModel):\n \"\"\"Spatially constant (isotropic) spatial model.\n\n For more information see :ref:`constant-spatial-model`.\n\n Parameters\n ----------\n value : `~astropy.units.Quantity`\n Value\n \"\"\"\n\n tag = [\"ConstantSpatialModel\", \"const\"]\n value = Parameter(\"value\", \"1 sr-1\", frozen=True)\n\n frame = \"icrs\"\n evaluation_radius = None\n position = None\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n data = super().to_dict(full_output)\n data.pop(\"frame\")\n data[\"parameters\"] = data.pop(\"parameters\")\n return data\n\n @staticmethod\n def evaluate(lon, lat, value):\n \"\"\"Evaluate model.\"\"\"\n return value\n\n @staticmethod\n def to_region(**kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n return EllipseSkyRegion(\n center=SkyCoord(np.nan * u.deg, np.nan * u.deg),\n height=np.nan * u.deg,\n width=np.nan * u.deg,\n angle=np.nan * u.deg,\n **kwargs,\n )\n\n\nclass ConstantFluxSpatialModel(SpatialModel):\n \"\"\"Spatially constant flux spatial model.\n\n For more information see :ref:`constant-spatial-model`.\n\n \"\"\"\n\n tag = [\"ConstantFluxSpatialModel\", \"const-flux\"]\n\n frame = \"icrs\"\n evaluation_radius = None\n position = None\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML 
serilisation\"\"\"\n # redefined to ignore frame attribute from parent class\n data = super().to_dict(full_output)\n data.pop(\"frame\")\n return data\n\n @staticmethod\n def evaluate_geom(geom):\n \"\"\"Evaluate model.\"\"\"\n return 1 / geom.solid_angle()\n\n @staticmethod\n def integrate_geom(geom):\n \"\"\"Evaluate model.\"\"\"\n return Map.from_geom(geom=geom, data=1)\n\n @staticmethod\n def to_region(**kwargs):\n \"\"\"Model outline (`~regions.EllipseSkyRegion`).\"\"\"\n return EllipseSkyRegion(\n center=SkyCoord(np.nan * u.deg, np.nan * u.deg),\n height=np.nan * u.deg,\n width=np.nan * u.deg,\n angle=np.nan * u.deg,\n **kwargs,\n )\n\n\nclass TemplateSpatialModel(SpatialModel):\n \"\"\"Spatial sky map template model.\n\n For more information see :ref:`template-spatial-model`.\n\n Parameters\n ----------\n map : `~gammapy.maps.Map`\n Map template.\n meta : dict, optional\n Meta information, meta['filename'] will be used for serialization\n normalize : bool\n Normalize the input map so that it integrates to unity.\n interp_kwargs : dict\n Interpolation keyword arguments passed to `gammapy.maps.Map.interp_by_coord`.\n Default arguments are {'interp': 'linear', 'fill_value': 0}.\n \"\"\"\n\n tag = [\"TemplateSpatialModel\", \"template\"]\n\n def __init__(\n self, map, meta=None, normalize=True, interp_kwargs=None, filename=None,\n ):\n if (map.data < 0).any():\n log.warning(\"Diffuse map has negative values. Check and fix this!\")\n\n if filename is not None:\n filename = str(make_path(filename))\n\n self.normalize = normalize\n\n if normalize:\n # Normalize the diffuse map model so that it integrates to unity\n if map.geom.is_image:\n data_sum = map.data.sum()\n else:\n # Normalize in each energy bin\n data_sum = map.data.sum(axis=(1, 2)).reshape((-1, 1, 1))\n\n data = map.data / data_sum\n data /= map.geom.solid_angle().to_value(\"sr\")\n map = map.copy(data=data, unit=\"sr-1\")\n\n if map.unit.is_equivalent(\"\"):\n map = map.copy(unit=\"sr-1\")\n log.warning(\"Missing spatial template unit, assuming sr^-1\")\n\n self.map = map\n\n self.meta = dict() if meta is None else meta\n interp_kwargs = {} if interp_kwargs is None else interp_kwargs\n interp_kwargs.setdefault(\"method\", \"linear\")\n interp_kwargs.setdefault(\"fill_value\", 0)\n self._interp_kwargs = interp_kwargs\n self.filename = filename\n super().__init__()\n\n @property\n def is_energy_dependent(self):\n return \"energy_true\" in self.map.geom.axes.names\n\n @property\n def evaluation_radius(self):\n \"\"\"Evaluation radius (`~astropy.coordinates.Angle`).\n\n Set to half of the maximal dimension of the map.\n \"\"\"\n return np.max(self.map.geom.width) / 2.0\n\n @classmethod\n def read(cls, filename, normalize=True, **kwargs):\n \"\"\"Read spatial template model from FITS image.\n If unit is not given in the FITS header the default is ``sr-1``.\n\n Parameters\n ----------\n filename : str\n FITS image filename.\n normalize : bool\n Normalize the input map so that it integrates to unity.\n kwargs : dict\n Keyword arguments passed to `Map.read()`.\n \"\"\"\n m = Map.read(filename, **kwargs)\n return cls(m, normalize=normalize, filename=filename)\n\n def evaluate(self, lon, lat, energy=None):\n coord = {\n \"lon\": lon.to_value(\"deg\"),\n \"lat\": lat.to_value(\"deg\"),\n }\n if energy is not None:\n coord[\"energy_true\"] = energy\n\n val = self.map.interp_by_coord(coord, **self._interp_kwargs)\n return u.Quantity(val, self.map.unit, copy=False)\n\n @property\n def position(self):\n 
\"\"\"`~astropy.coordinates.SkyCoord`\"\"\"\n return self.map.geom.center_skydir\n\n @property\n def frame(self):\n return self.position.frame.name\n\n @classmethod\n def from_dict(cls, data):\n filename = data[\"filename\"]\n normalize = data.get(\"normalize\", True)\n m = Map.read(filename)\n return cls(m, normalize=normalize, filename=filename)\n\n def to_dict(self, full_output=False):\n \"\"\"Create dict for YAML serilisation\"\"\"\n data = super().to_dict(full_output)\n data[\"filename\"] = self.filename\n data[\"normalize\"] = self.normalize\n data[\"unit\"] = str(self.map.unit)\n return data\n\n def to_region(self, **kwargs):\n \"\"\"Model outline (`~regions.PolygonSkyRegion`).\"\"\"\n footprint = self.map.geom.wcs.calc_footprint()\n return PolygonSkyRegion(\n vertices=SkyCoord(footprint, unit=\"deg\", frame=self.frame, **kwargs)\n )\n\n def plot(self, ax=None, geom=None, **kwargs):\n if geom is None:\n geom = self.map.geom\n super().plot(ax=ax, geom=geom, **kwargs)\n\n def plot_interative(self, ax=None, geom=None, **kwargs):\n if geom is None:\n geom = self.map.geom\n super().plot_interative(ax=ax, geom=geom, **kwargs)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.table import Column, Table\nfrom astropy.time import Time\nfrom gammapy.estimators import LightCurve\nfrom gammapy.stats.variability import compute_chisq, compute_fvar\nfrom gammapy.utils.testing import assert_quantity_allclose\n\n\[email protected](scope=\"session\")\ndef lc():\n meta = dict(TIMESYS=\"utc\")\n\n table = Table(\n meta=meta,\n data=[\n Column(Time([\"2010-01-01\", \"2010-01-03\"]).mjd, \"time_min\"),\n Column(Time([\"2010-01-03\", \"2010-01-10\"]).mjd, \"time_max\"),\n Column([1e-11, 3e-11], \"flux\", unit=\"cm-2 s-1\"),\n Column([0.1e-11, 0.3e-11], \"flux_err\", unit=\"cm-2 s-1\"),\n Column([np.nan, 3.6e-11], \"flux_ul\", unit=\"cm-2 s-1\"),\n Column([False, True], \"is_ul\"),\n ],\n )\n\n return LightCurve(table=table)\n\n\ndef test_lightcurve_fvar(lc):\n flux = lc.table[\"flux\"].astype(\"float64\")\n flux_err = lc.table[\"flux_err\"].astype(\"float64\")\n fvar, fvar_err = compute_fvar(flux, flux_err)\n assert_allclose(fvar, 0.6982120021884471)\n # Note: the following tolerance is very low in the next assert,\n # because results differ by ~ 1e-3 between different machines\n assert_allclose(fvar_err, 0.07905694150420949, rtol=1e-2)\n\n\ndef test_lightcurve_chisq(lc):\n flux = lc.table[\"flux\"].astype(\"float64\")\n chi2, pval = compute_chisq(flux)\n assert_quantity_allclose(chi2, 1.0000000000000001e-11)\n assert_quantity_allclose(pval, 0.999997476867478)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import Angle\nfrom regions import PixCoord\nfrom gammapy.datasets import SpectrumDatasetOnOff\nfrom gammapy.maps import RegionGeom, RegionNDMap, WcsNDMap\nfrom gammapy.utils.regions import list_to_compound_region\nfrom ..core import Maker\n\n__all__ = [\"ReflectedRegionsFinder\", \"ReflectedRegionsBackgroundMaker\"]\n\nlog = logging.getLogger(__name__)\n\n\nclass ReflectedRegionsFinder:\n \"\"\"Find reflected regions.\n\n This class is responsible for placing :ref:`region_reflected` for a given\n input region and pointing position. 
It converts to pixel coordinates\n internally assuming a tangent projection at center position.\n\n If the center lies inside the input region, no reflected regions\n can be found.\n\n If you want to make a\n background estimate for an IACT observation using the reflected regions\n method, see also `~gammapy.makers.ReflectedRegionsBackgroundMaker`\n\n Parameters\n ----------\n region : `~regions.SkyRegion`\n Region to rotate\n center : `~astropy.coordinates.SkyCoord`\n Rotation point\n angle_increment : `~astropy.coordinates.Angle`, optional\n Rotation angle applied when a region falls in an excluded region.\n min_distance : `~astropy.coordinates.Angle`, optional\n Minimal distance between two consecutive reflected regions\n min_distance_input : `~astropy.coordinates.Angle`, optional\n Minimal distance from input region\n max_region_number : int, optional\n Maximum number of regions to use\n exclusion_mask : `~gammapy.maps.WcsNDMap`, optional\n Exclusion mask\n binsz : `~astropy.coordinates.Angle`\n Bin size of the reference map used for region finding.\n\n Examples\n --------\n >>> from astropy.coordinates import SkyCoord, Angle\n >>> from regions import CircleSkyRegion\n >>> from gammapy.makers import ReflectedRegionsFinder\n >>> pointing = SkyCoord(83.2, 22.7, unit='deg', frame='icrs')\n >>> target_position = SkyCoord(80.2, 23.5, unit='deg', frame='icrs')\n >>> theta = Angle(0.4, 'deg')\n >>> on_region = CircleSkyRegion(target_position, theta)\n >>> finder = ReflectedRegionsFinder(min_distance_input='1 rad', region=on_region, center=pointing)\n >>> finder.run()\n >>> print(finder.reflected_regions[0])\n Region: CircleSkyRegion\n center: <SkyCoord (ICRS): (ra, dec) in deg\n (83.19879005, 25.57300957)>\n radius: 0.39953342830756855 deg\n \"\"\"\n\n def __init__(\n self,\n region,\n center,\n angle_increment=\"0.1 rad\",\n min_distance=\"0 rad\",\n min_distance_input=\"0.1 rad\",\n max_region_number=10000,\n exclusion_mask=None,\n binsz=\"0.01 deg\",\n ):\n self.region = region\n self.center = center\n\n self.angle_increment = Angle(angle_increment)\n if self.angle_increment <= Angle(0, \"deg\"):\n raise ValueError(\"angle_increment is too small\")\n\n self.min_distance = Angle(min_distance)\n self.min_distance_input = Angle(min_distance_input)\n self.exclusion_mask = exclusion_mask\n self.max_region_number = max_region_number\n self.reflected_regions = None\n self.reference_map = None\n self.binsz = Angle(binsz)\n\n def run(self):\n \"\"\"Run all steps.\n \"\"\"\n self.reference_map = self.make_reference_map(\n self.region, self.center, self.binsz\n )\n if self.exclusion_mask:\n coords = self.reference_map.geom.get_coord()\n vals = self.exclusion_mask.get_by_coord(coords)\n self.reference_map.data += vals\n else:\n self.reference_map.data += 1\n\n # Check if center is contained in region\n if self.region.contains(self.center, self.reference_map.geom.wcs):\n self.reflected_regions = []\n else:\n self.setup()\n self.find_regions()\n\n @staticmethod\n def make_reference_map(region, center, binsz=\"0.01 deg\", min_width=\"0.3 deg\"):\n \"\"\"Create empty reference map.\n\n The size of the map is chosen such that all reflected regions are\n contained on the image.\n To do so, the reference map width is taken to be 4 times the distance between\n the target region center and the rotation point. This distance is larger than\n the typical dimension of the region itself (otherwise the rotation point would\n lie inside the region). 
A minimal width value is added by default in case the\n region center and the rotation center are too close.\n\n The WCS of the map is the TAN projection at the `center` in the coordinate\n system used by the `region` center.\n\n Parameters\n ----------\n region : `~regions.SkyRegion`\n Region to rotate\n center : `~astropy.coordinates.SkyCoord`\n Rotation point\n binsz : `~astropy.coordinates.Angle`\n Reference map bin size.\n min_width : `~astropy.coordinates.Angle`\n Minimal map width.\n\n Returns\n -------\n reference_map : `~gammapy.maps.WcsNDMap`\n Map containing the region\n \"\"\"\n frame = region.center.frame.name\n\n # width is the full width of an image (not the radius)\n width = 4 * region.center.separation(center) + Angle(min_width)\n\n return WcsNDMap.create(\n skydir=center, binsz=binsz, width=width, frame=frame, proj=\"TAN\"\n )\n\n @staticmethod\n def _region_angular_size(pixels, center):\n \"\"\"Compute maximum angular size of a group of pixels as seen from center.\n\n This assumes that the center lies outside the group of pixel\n\n Parameters\n ----------\n pixels : `~astropy.regions.PixCoord`\n the pixels coordinates\n center : `~astropy.regions.PixCoord`\n the center coordinate in pixels\n\n Returns\n -------\n angular_size : `~astropy.coordinates.Angle`\n the maximum angular size\n \"\"\"\n newX, newY = center.x - pixels.x, center.y - pixels.y\n angles = Angle(np.arctan2(newX, newY), \"rad\")\n angular_size = np.max(angles) - np.min(angles)\n\n if angular_size.value > np.pi:\n angular_size = np.max(angles.wrap_at(0 * u.rad)) - np.min(\n angles.wrap_at(0 * u.rad)\n )\n\n return angular_size\n\n def setup(self):\n \"\"\"Compute parameters for reflected regions algorithm.\"\"\"\n geom = self.reference_map.geom\n self._pix_region = self.region.to_pixel(geom.wcs)\n self._pix_center = PixCoord.from_sky(self.center, geom.wcs)\n\n # Make the ON reference map\n mask = geom.region_mask([self.region]).data\n # on_reference_map = WcsNDMap(geom=geom, data=mask)\n\n # Extract all pixcoords in the geom\n X, Y = geom.get_pix()\n ONpixels = PixCoord(X[mask], Y[mask])\n\n # find excluded PixCoords\n mask = self.reference_map.data == 0\n self.excluded_pixcoords = PixCoord(X[mask], Y[mask])\n\n # Minimum angle a region has to be moved to not overlap with previous one\n min_ang = self._region_angular_size(ONpixels, self._pix_center)\n\n # Add required minimal distance between two off regions\n self._min_ang = min_ang + self.min_distance\n\n # Maximum possible angle before regions is reached again\n self._max_angle = Angle(\"360deg\") - self._min_ang - self.min_distance_input\n\n def find_regions(self):\n \"\"\"Find reflected regions.\"\"\"\n curr_angle = self._min_ang + self.min_distance_input\n reflected_regions = []\n\n while curr_angle < self._max_angle:\n test_reg = self._pix_region.rotate(self._pix_center, curr_angle)\n if not np.any(test_reg.contains(self.excluded_pixcoords)):\n region = test_reg.to_sky(self.reference_map.geom.wcs)\n reflected_regions.append(region)\n\n curr_angle += self._min_ang\n if self.max_region_number <= len(reflected_regions):\n break\n else:\n curr_angle = curr_angle + self.angle_increment\n\n self.reflected_regions = reflected_regions\n\n def plot(self, fig=None, ax=None):\n \"\"\"Standard debug plot.\n\n See example here: :ref:'regions_reflected'.\n \"\"\"\n fig, ax, cbar = self.reference_map.plot(\n fig=fig, ax=ax, cmap=\"gray\", vmin=0, vmax=1\n )\n wcs = self.reference_map.geom.wcs\n\n on_patch = 
self.region.to_pixel(wcs=wcs).as_artist(edgecolor=\"red\", alpha=0.6)\n ax.add_patch(on_patch)\n\n for off in self.reflected_regions:\n tmp = off.to_pixel(wcs=wcs)\n off_patch = tmp.as_artist(edgecolor=\"blue\", alpha=0.6)\n ax.add_patch(off_patch)\n\n xx, yy = self.center.to_pixel(wcs)\n ax.plot(xx, yy, marker=\"+\", color=\"green\", markersize=20, linewidth=5)\n\n return fig, ax\n\n\nclass ReflectedRegionsBackgroundMaker(Maker):\n \"\"\"Reflected regions background maker.\n\n Parameters\n ----------\n angle_increment : `~astropy.coordinates.Angle`, optional\n Rotation angle applied when a region falls in an excluded region.\n min_distance : `~astropy.coordinates.Angle`, optional\n Minimal distance between two consecutive reflected regions\n min_distance_input : `~astropy.coordinates.Angle`, optional\n Minimal distance from input region\n max_region_number : int, optional\n Maximum number of regions to use\n exclusion_mask : `~gammapy.maps.WcsNDMap`, optional\n Exclusion mask\n binsz : `~astropy.coordinates.Angle`\n Bin size of the reference map used for region finding.\n \"\"\"\n\n tag = \"ReflectedRegionsBackgroundMaker\"\n\n def __init__(\n self,\n angle_increment=\"0.1 rad\",\n min_distance=\"0 rad\",\n min_distance_input=\"0.1 rad\",\n max_region_number=10000,\n exclusion_mask=None,\n binsz=\"0.01 deg\",\n ):\n self.binsz = binsz\n self.exclusion_mask = exclusion_mask\n self.angle_increment = Angle(angle_increment)\n self.min_distance = Angle(min_distance)\n self.min_distance_input = Angle(min_distance_input)\n self.max_region_number = max_region_number\n\n def _get_finder(self, dataset, observation):\n return ReflectedRegionsFinder(\n binsz=self.binsz,\n exclusion_mask=self.exclusion_mask,\n center=observation.pointing_radec,\n region=dataset.counts.geom.region,\n min_distance=self.min_distance,\n min_distance_input=self.min_distance_input,\n max_region_number=self.max_region_number,\n angle_increment=self.angle_increment,\n )\n\n def make_counts_off(self, dataset, observation):\n \"\"\"Make off counts.\n\n Parameters\n ----------\n dataset : `SpectrumDataset`\n Spectrum dataset.\n observation : `DatastoreObservation`\n Data store observation.\n\n\n Returns\n -------\n counts_off : `RegionNDMap`\n Off counts.\n \"\"\"\n finder = self._get_finder(dataset, observation)\n finder.run()\n\n energy_axis = dataset.counts.geom.axes[\"energy\"]\n\n if len(finder.reflected_regions) > 0:\n region_union = list_to_compound_region(finder.reflected_regions)\n wcs = finder.reference_map.geom.wcs\n geom = RegionGeom.create(region=region_union, axes=[energy_axis], wcs=wcs)\n counts_off = RegionNDMap.from_geom(geom=geom)\n counts_off.fill_events(observation.events)\n acceptance_off = len(finder.reflected_regions)\n else:\n # if no OFF regions are found, off is set to None and acceptance_off to zero\n log.warning(\n f\"ReflectedRegionsBackgroundMaker failed. 
No OFF region found outside exclusion mask for {dataset.name}.\"\n )\n\n counts_off = None\n acceptance_off = 0\n return counts_off, acceptance_off\n\n def run(self, dataset, observation):\n \"\"\"Run reflected regions background maker\n\n Parameters\n ----------\n dataset : `SpectrumDataset`\n Spectrum dataset.\n observation : `DatastoreObservation`\n Data store observation.\n\n Returns\n -------\n dataset_on_off : `SpectrumDatasetOnOff`\n On off dataset.\n \"\"\"\n counts_off, acceptance_off = self.make_counts_off(dataset, observation)\n\n return SpectrumDatasetOnOff.from_spectrum_dataset(\n dataset=dataset,\n acceptance=1,\n acceptance_off=acceptance_off,\n counts_off=counts_off,\n )\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.units import Quantity\nimport astropy.units as u\nfrom astropy.io import fits\nfrom gammapy.irf import load_cta_irfs\nfrom gammapy.utils.testing import requires_data\nfrom gammapy.irf import Background3D, EffectiveAreaTable2D, EnergyDispersion2D\nfrom gammapy.maps import MapAxis\n\n\n@requires_data()\ndef test_cta_irf():\n \"\"\"Test that CTA IRFs can be loaded and evaluated.\"\"\"\n irf = load_cta_irfs(\n \"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits\"\n )\n\n energy = Quantity(1, \"TeV\")\n offset = Quantity(3, \"deg\")\n\n val = irf[\"aeff\"].evaluate(energy_true=energy, offset=offset)\n assert_allclose(val.value, 545269.4675, rtol=1e-5)\n assert val.unit == \"m2\"\n\n val = irf[\"edisp\"].evaluate(offset=offset, energy_true=energy, migra=1)\n assert_allclose(val.value, 3183.6882, rtol=1e-5)\n assert val.unit == \"\"\n\n val = irf[\"psf\"].evaluate(rad=Quantity(0.1, \"deg\"), energy_true=energy, offset=offset)\n assert_allclose(val, 3.56989 * u.Unit(\"deg-2\"), rtol=1e-5)\n\n val = irf[\"bkg\"].evaluate(energy=energy, fov_lon=offset, fov_lat=\"0 deg\")\n assert_allclose(val.value, 9.400071e-05, rtol=1e-5)\n assert val.unit == \"1 / (MeV s sr)\"\n\n\nclass TestIRFWrite:\n def setup(self):\n self.energy_lo = np.logspace(0, 1, 10)[:-1] * u.TeV\n self.energy_hi = np.logspace(0, 1, 10)[1:] * u.TeV\n self.energy_axis_true = MapAxis.from_energy_bounds(\n \"1 TeV\", \"10 TeV\", nbin=9, name=\"energy_true\"\n )\n\n self.offset_lo = np.linspace(0, 1, 4)[:-1] * u.deg\n self.offset_hi = np.linspace(0, 1, 4)[1:] * u.deg\n\n self.offset_axis = MapAxis.from_bounds(\n 0, 1, nbin=3, unit=\"deg\", name=\"offset\", node_type=\"edges\"\n )\n self.migra_lo = np.linspace(0, 3, 4)[:-1]\n self.migra_hi = np.linspace(0, 3, 4)[1:]\n self.migra_axis = MapAxis.from_bounds(\n 0, 3, nbin=3, name=\"migra\", node_type=\"edges\"\n )\n self.fov_lon_lo = np.linspace(-6, 6, 11)[:-1] * u.deg\n self.fov_lon_hi = np.linspace(-6, 6, 11)[1:] * u.deg\n self.fov_lon_axis = MapAxis.from_bounds(-6, 6, nbin=10, name=\"fov_lon\")\n\n self.fov_lat_lo = np.linspace(-6, 6, 11)[:-1] * u.deg\n self.fov_lat_hi = np.linspace(-6, 6, 11)[1:] * u.deg\n self.fov_lat_axis = MapAxis.from_bounds(-6, 6, nbin=10, name=\"fov_lat\")\n\n self.aeff_data = np.random.rand(9, 3) * u.cm * u.cm\n self.edisp_data = np.random.rand(9, 3, 3)\n self.bkg_data = np.random.rand(9, 10, 10) / u.MeV / u.s / u.sr\n\n self.aeff = EffectiveAreaTable2D(\n axes=[self.energy_axis_true, self.offset_axis],\n data=self.aeff_data.value,\n unit=self.aeff_data.unit\n )\n self.edisp = EnergyDispersion2D(axes=[\n self.energy_axis_true, self.migra_axis, self.offset_axis,\n ],\n data=self.edisp_data,\n )\n axes = 
[self.energy_axis_true.copy(name=\"energy\"), self.fov_lon_axis, self.fov_lat_axis]\n self.bkg = Background3D(axes=axes, data=self.bkg_data.value, unit=self.bkg_data.unit)\n\n def test_array_to_container(self):\n assert_allclose(self.aeff.quantity, self.aeff_data)\n assert_allclose(self.edisp.quantity, self.edisp_data)\n assert_allclose(self.bkg.quantity, self.bkg_data)\n\n def test_container_to_table(self):\n assert_allclose(self.aeff.to_table()[\"ENERG_LO\"].quantity[0], self.energy_lo)\n assert_allclose(self.edisp.to_table()[\"ENERG_LO\"].quantity[0], self.energy_lo)\n assert_allclose(self.bkg.to_table()[\"ENERG_LO\"].quantity[0], self.energy_lo)\n\n assert_allclose(self.aeff.to_table()[\"EFFAREA\"].quantity[0].T, self.aeff_data)\n assert_allclose(self.edisp.to_table()[\"MATRIX\"].quantity[0].T, self.edisp_data)\n assert_allclose(self.bkg.to_table()[\"BKG\"].quantity[0].T, self.bkg_data)\n\n assert self.aeff.to_table()[\"EFFAREA\"].quantity[0].unit == self.aeff_data.unit\n assert self.bkg.to_table()[\"BKG\"].quantity[0].unit == self.bkg_data.unit\n\n def test_container_to_fits(self):\n assert_allclose(self.aeff.to_table()[\"ENERG_LO\"].quantity[0], self.energy_lo)\n\n assert self.aeff.to_table_hdu().header[\"EXTNAME\"] == \"EFFECTIVE AREA\"\n assert self.edisp.to_table_hdu().header[\"EXTNAME\"] == \"ENERGY DISPERSION\"\n assert self.bkg.to_table_hdu().header[\"EXTNAME\"] == \"BACKGROUND\"\n\n hdu = self.aeff.to_table_hdu()\n assert_allclose(\n hdu.data[hdu.header[\"TTYPE1\"]][0], self.aeff.axes[0].edges[:-1].value\n )\n hdu = self.aeff.to_table_hdu()\n assert_allclose(hdu.data[hdu.header[\"TTYPE5\"]][0].T, self.aeff.data)\n\n hdu = self.edisp.to_table_hdu()\n assert_allclose(\n hdu.data[hdu.header[\"TTYPE1\"]][0], self.edisp.axes[0].edges[:-1].value\n )\n hdu = self.edisp.to_table_hdu()\n assert_allclose(hdu.data[hdu.header[\"TTYPE7\"]][0].T, self.edisp.data)\n\n hdu = self.bkg.to_table_hdu()\n assert_allclose(\n hdu.data[hdu.header[\"TTYPE1\"]][0], self.bkg.axes[0].edges[:-1].value\n )\n hdu = self.bkg.to_table_hdu()\n assert_allclose(hdu.data[hdu.header[\"TTYPE7\"]][0].T, self.bkg.data)\n\n def test_writeread(self, tmp_path):\n path = tmp_path / \"tmp.fits\"\n fits.HDUList(\n [\n fits.PrimaryHDU(),\n self.aeff.to_table_hdu(),\n self.edisp.to_table_hdu(),\n self.bkg.to_table_hdu(),\n ]\n ).writeto(path)\n\n read_aeff = EffectiveAreaTable2D.read(path, hdu=\"EFFECTIVE AREA\")\n assert_allclose(read_aeff.quantity, self.aeff_data)\n\n read_edisp = EnergyDispersion2D.read(path, hdu=\"ENERGY DISPERSION\")\n assert_allclose(read_edisp.quantity, self.edisp_data)\n\n read_bkg = Background3D.read(path, hdu=\"BACKGROUND\")\n assert_allclose(read_bkg.quantity, self.bkg_data)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utility functions to deal with arrays and quantities.\"\"\"\nimport numpy as np\nimport scipy.ndimage\nimport scipy.signal\nfrom astropy.convolution import Gaussian2DKernel\n\n__all__ = [\n \"array_stats_str\",\n \"shape_2N\",\n \"shape_divisible_by\",\n \"symmetric_crop_pad_width\",\n]\n\n\ndef array_stats_str(x, label=\"\"):\n \"\"\"Make a string summarising some stats for an array.\n\n Parameters\n ----------\n x : array-like\n Array\n label : str, optional\n Label\n\n Returns\n -------\n stats_str : str\n String with array stats\n \"\"\"\n x = np.asanyarray(x)\n\n ss = \"\"\n if label:\n ss += f\"{label:15s}: \"\n\n min = x.min()\n max = x.max()\n size = x.size\n\n fmt = \"size = {size:5d}, min = {min:6.3f}, max = {max:6.3f}\\n\"\n ss += 
fmt.format(**locals())\n\n return ss\n\n\ndef shape_2N(shape, N=3):\n \"\"\"\n Round a given shape to values that are divisible by 2^N.\n\n Parameters\n ----------\n shape : tuple\n Input shape.\n N : int (default = 3), optional\n Exponent of two.\n\n Returns\n -------\n new_shape : Tuple\n New shape extended to integers divisible by 2^N\n \"\"\"\n shape = np.array(shape)\n new_shape = shape + (2 ** N - np.mod(shape, 2 ** N))\n return tuple(new_shape)\n\n\ndef shape_divisible_by(shape, factor):\n \"\"\"\n Round a given shape to values that are divisible by factor.\n\n Parameters\n ----------\n shape : tuple\n Input shape.\n factor : int\n Divisor.\n\n Returns\n -------\n new_shape : Tuple\n New shape extended to integers divisible by factor\n \"\"\"\n shape = np.array(shape)\n new_shape = shape + (shape % factor)\n return tuple(new_shape)\n\n\ndef round_up_to_odd(f):\n \"\"\"Round float to odd integer\n\n Parameters\n ----------\n f : float\n Float value\n\n Returns\n -------\n int : int\n Odd integer\n \"\"\"\n return (np.ceil(f) // 2 * 2 + 1).astype(int)\n\n\ndef symmetric_crop_pad_width(shape, new_shape):\n \"\"\"\n Compute symmetric crop or pad width.\n\n To obtain a new shape from a given old shape of an array.\n\n Parameters\n ----------\n shape : tuple\n Old shape\n new_shape : tuple or str\n New shape\n \"\"\"\n xdiff = abs(shape[1] - new_shape[1])\n ydiff = abs(shape[0] - new_shape[0])\n\n if (np.array([xdiff, ydiff]) % 2).any():\n raise ValueError(\n \"For symmetric crop / pad width, difference to new shape \"\n \"must be even in all axes.\"\n )\n\n ywidth = (ydiff // 2, ydiff // 2)\n xwidth = (xdiff // 2, xdiff // 2)\n return ywidth, xwidth\n\n\ndef _fftconvolve_wrap(kernel, data):\n # wrap gaussian filter as a special case, because the gain in\n # performance is factor ~100\n if isinstance(kernel, Gaussian2DKernel):\n width = kernel.model.x_stddev.value\n norm = kernel.array.sum()\n return norm * scipy.ndimage.gaussian_filter(data, width)\n else:\n return scipy.signal.fftconvolve(\n data.astype(np.float32), kernel.array, mode=\"same\"\n )\n\n\ndef scale_cube(data, kernels):\n \"\"\"\n Compute scale space cube.\n\n Compute scale space cube by convolving the data with a set of kernels and\n stack the resulting images along the third axis.\n\n Parameters\n ----------\n data : `~numpy.ndarray`\n Input data.\n kernels: list of `~astropy.convolution.Kernel`\n List of convolution kernels.\n\n Returns\n -------\n cube : `~numpy.ndarray`\n Array of the shape (len(kernels), data.shape)\n \"\"\"\n return np.dstack(list([_fftconvolve_wrap(kernel, data) for kernel in kernels]))\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy import units as u\nfrom astropy.time import Time\nfrom astropy.utils.data import get_pkg_data_filename\nfrom gammapy.catalog import (\n SourceCatalog2FHL,\n SourceCatalog3FGL,\n SourceCatalog3FHL,\n SourceCatalog4FGL,\n)\nfrom gammapy.modeling.models import (\n ExpCutoffPowerLaw3FGLSpectralModel,\n LogParabolaSpectralModel,\n PowerLaw2SpectralModel,\n PowerLawSpectralModel,\n SuperExpCutoffPowerLaw3FGLSpectralModel,\n SuperExpCutoffPowerLaw4FGLSpectralModel,\n)\nfrom gammapy.utils.gauss import Gauss2DPDF\nfrom gammapy.utils.testing import (\n assert_quantity_allclose,\n assert_time_allclose,\n requires_data,\n)\n\nSOURCES_4FGL = [\n dict(\n idx=0,\n name=\"4FGL J0000.3-7355\",\n str_ref_file=\"data/4fgl_J0000.3-7355.txt\",\n spec_type=PowerLawSpectralModel,\n 
dnde=u.Quantity(2.9476e-11, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(5.3318e-12, \"cm-2 s-1 GeV-1\"),\n ),\n dict(\n idx=3,\n name=\"4FGL J0001.5+2113\",\n str_ref_file=\"data/4fgl_J0001.5+2113.txt\",\n spec_type=LogParabolaSpectralModel,\n dnde=u.Quantity(2.8545e-8, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(1.3324e-9, \"cm-2 s-1 GeV-1\"),\n ),\n dict(\n idx=7,\n name=\"4FGL J0002.8+6217\",\n str_ref_file=\"data/4fgl_J0002.8+6217.txt\",\n spec_type=SuperExpCutoffPowerLaw4FGLSpectralModel,\n dnde=u.Quantity(2.084e-09, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(1.0885e-10, \"cm-2 s-1 GeV-1\"),\n ),\n dict(\n idx=2718,\n name=\"4FGL J1409.1-6121e\",\n str_ref_file=\"data/4fgl_J1409.1-6121e.txt\",\n spec_type=LogParabolaSpectralModel,\n dnde=u.Quantity(1.3237202133031811e-12, \"cm-2 s-1 MeV-1\"),\n dnde_err=u.Quantity(4.513233455580648e-14, \"cm-2 s-1 MeV-1\"),\n ),\n]\n\nSOURCES_3FGL = [\n dict(\n idx=0,\n name=\"3FGL J0000.1+6545\",\n str_ref_file=\"data/3fgl_J0000.1+6545.txt\",\n spec_type=PowerLawSpectralModel,\n dnde=u.Quantity(1.4351261e-9, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(2.1356270e-10, \"cm-2 s-1 GeV-1\"),\n ),\n dict(\n idx=4,\n name=\"3FGL J0001.4+2120\",\n str_ref_file=\"data/3fgl_J0001.4+2120.txt\",\n spec_type=LogParabolaSpectralModel,\n dnde=u.Quantity(8.3828599e-10, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(2.6713238e-10, \"cm-2 s-1 GeV-1\"),\n ),\n dict(\n idx=55,\n name=\"3FGL J0023.4+0923\",\n str_ref_file=\"data/3fgl_J0023.4+0923.txt\",\n spec_type=ExpCutoffPowerLaw3FGLSpectralModel,\n dnde=u.Quantity(1.8666925e-09, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(2.2068837e-10, \"cm-2 s-1 GeV-1\"),\n ),\n dict(\n idx=960,\n name=\"3FGL J0835.3-4510\",\n str_ref_file=\"data/3fgl_J0835.3-4510.txt\",\n spec_type=SuperExpCutoffPowerLaw3FGLSpectralModel,\n dnde=u.Quantity(1.6547128794756733e-06, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(1.6621504e-11, \"cm-2 s-1 MeV-1\"),\n ),\n]\n\nSOURCES_2FHL = [\n dict(\n idx=221,\n name=\"2FHL J1445.1-0329\",\n str_ref_file=\"data/2fhl_j1445.1-0329.txt\",\n spec_type=PowerLaw2SpectralModel,\n dnde=u.Quantity(1.065463448091757e-10, \"cm-2 s-1 TeV-1\"),\n dnde_err=u.Quantity(4.9691205387540815e-11, \"cm-2 s-1 TeV-1\"),\n ),\n dict(\n idx=134,\n name=\"2FHL J0822.6-4250e\",\n str_ref_file=\"data/2fhl_j0822.6-4250e.txt\",\n spec_type=LogParabolaSpectralModel,\n dnde=u.Quantity(2.46548351696472e-10, \"cm-2 s-1 TeV-1\"),\n dnde_err=u.Quantity(9.771755529198772e-11, \"cm-2 s-1 TeV-1\"),\n ),\n]\n\nSOURCES_3FHL = [\n dict(\n idx=352,\n name=\"3FHL J0534.5+2201\",\n spec_type=PowerLawSpectralModel,\n dnde=u.Quantity(6.3848912826152664e-12, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(2.679593524691324e-13, \"cm-2 s-1 GeV-1\"),\n ),\n dict(\n idx=1442,\n name=\"3FHL J2158.8-3013\",\n spec_type=LogParabolaSpectralModel,\n dnde=u.Quantity(2.056998292908196e-12, \"cm-2 s-1 GeV-1\"),\n dnde_err=u.Quantity(4.219030630302381e-13, \"cm-2 s-1 GeV-1\"),\n ),\n]\n\n\n@requires_data()\nclass TestFermi4FGLObject:\n @classmethod\n def setup_class(cls):\n cls.cat = SourceCatalog4FGL(\"$GAMMAPY_DATA/catalogs/fermi/gll_psc_v20.fit.gz\")\n cls.source_name = \"4FGL J0534.5+2200\"\n cls.source = cls.cat[cls.source_name]\n\n def test_name(self):\n assert self.source.name == self.source_name\n\n def test_row_index(self):\n assert self.source.row_index == 995\n\n @pytest.mark.parametrize(\"ref\", SOURCES_4FGL, ids=lambda _: _[\"name\"])\n def test_str(self, ref):\n actual = str(self.cat[ref[\"idx\"]])\n expected = 
open(get_pkg_data_filename(ref[\"str_ref_file\"])).read()\n assert actual == expected\n\n @pytest.mark.parametrize(\"ref\", SOURCES_4FGL, ids=lambda _: _[\"name\"])\n def test_spectral_model(self, ref):\n model = self.cat[ref[\"idx\"]].spectral_model()\n\n e_ref = model.reference.quantity\n dnde, dnde_err = model.evaluate_error(e_ref)\n assert isinstance(model, ref[\"spec_type\"])\n assert_quantity_allclose(dnde, ref[\"dnde\"], rtol=1e-4)\n assert_quantity_allclose(dnde_err, ref[\"dnde_err\"], rtol=1e-4)\n\n def test_spatial_model(self):\n model = self.cat[\"4FGL J0000.3-7355\"].spatial_model()\n assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 0.0983)\n assert_allclose(p[\"lat_0\"].value, -73.921997)\n pos_err = model.position_error\n assert_allclose(pos_err.angle.value, -62.7)\n assert_allclose(0.5 * pos_err.height.value, 0.0525, rtol=1e-4)\n assert_allclose(0.5 * pos_err.width.value, 0.051, rtol=1e-4)\n assert_allclose(model.position.ra.value, pos_err.center.ra.value)\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n\n model = self.cat[\"4FGL J1409.1-6121e\"].spatial_model()\n assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 212.294006)\n assert_allclose(p[\"lat_0\"].value, -61.353001)\n assert_allclose(p[\"r_0\"].value, 0.7331369519233704)\n\n model = self.cat[\"4FGL J0617.2+2234e\"].spatial_model()\n assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 94.309998)\n assert_allclose(p[\"lat_0\"].value, 22.58)\n assert_allclose(p[\"sigma\"].value, 0.27)\n\n model = self.cat[\"4FGL J1443.0-6227e\"].spatial_model()\n assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n\n @pytest.mark.parametrize(\"ref\", SOURCES_4FGL, ids=lambda _: _[\"name\"])\n def test_sky_model(self, ref):\n self.cat[ref[\"idx\"]].sky_model\n\n def test_flux_points(self):\n flux_points = self.source.flux_points\n\n assert len(flux_points.table) == 7\n assert \"flux_ul\" in flux_points.table.colnames\n assert flux_points.sed_type == \"flux\"\n\n desired = [\n 2.2378458e-06,\n 1.4318283e-06,\n 5.4776939e-07,\n 1.2769708e-07,\n 2.5820052e-08,\n 2.3897000e-09,\n 7.1766204e-11,\n ]\n assert_allclose(flux_points.table[\"flux\"].data, desired, rtol=1e-5)\n\n def test_flux_points_ul(self):\n source = self.cat[\"4FGL J0000.3-7355\"]\n flux_points = source.flux_points\n\n desired = [\n 4.13504750e-08,\n 3.80519616e-09,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n 7.99699456e-12,\n ]\n assert_allclose(flux_points.table[\"flux_ul\"].data, desired, rtol=1e-5)\n\n def test_lightcurve_dr1(self):\n lc = self.source.lightcurve(interval=\"1-year\")\n table = lc.table\n\n assert len(table) == 8\n assert table.colnames == [\n \"time_min\",\n \"time_max\",\n \"flux\",\n \"flux_errp\",\n \"flux_errn\",\n ]\n\n expected = Time(54682.655277777776, format=\"mjd\", scale=\"utc\")\n assert_time_allclose(lc.time_min[0], expected)\n\n expected = Time(55047.603239293836, format=\"mjd\", scale=\"utc\")\n assert_time_allclose(lc.time_max[0], expected)\n\n assert table[\"flux\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux\"][0], 2.2122326e-06, rtol=1e-3)\n\n assert table[\"flux_errp\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux_errp\"][0], 2.3099371e-08, rtol=1e-3)\n\n assert table[\"flux_errn\"].unit == 
\"cm-2 s-1\"\n assert_allclose(table[\"flux_errn\"][0], 2.3099371e-08, rtol=1e-3)\n\n table = self.source.lightcurve(interval=\"2-month\").table\n assert len(table) == 48 # (12 month/year / 2month) * 8 years\n\n assert table[\"flux\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux\"][0], 2.238483e-6, rtol=1e-3)\n\n assert table[\"flux_errp\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux_errp\"][0], 4.437058e-8, rtol=1e-3)\n\n assert table[\"flux_errn\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux_errn\"][0], 4.437058e-8, rtol=1e-3)\n\n def test_lightcurve_dr2(self):\n dr2 = SourceCatalog4FGL(\"$GAMMAPY_DATA/catalogs/fermi/gll_psc_v27.fit.gz\")\n source_dr2 = dr2[self.source_name]\n table = source_dr2.lightcurve(interval=\"1-year\").table\n\n assert table[\"flux\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux\"][0], 2.196788e-6, rtol=1e-3)\n\n assert table[\"flux_errp\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux_errp\"][0], 2.312938e-8, rtol=1e-3)\n\n assert table[\"flux_errn\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux_errn\"][0], 2.312938e-8, rtol=1e-3)\n\n with pytest.raises(ValueError):\n source_dr2.lightcurve(interval=\"2-month\")\n\n\n@requires_data()\nclass TestFermi3FGLObject:\n @classmethod\n def setup_class(cls):\n cls.cat = SourceCatalog3FGL()\n # Use 3FGL J0534.5+2201 (Crab) as a test source\n cls.source_name = \"3FGL J0534.5+2201\"\n cls.source = cls.cat[cls.source_name]\n\n def test_name(self):\n assert self.source.name == self.source_name\n\n def test_row_index(self):\n assert self.source.row_index == 621\n\n def test_data(self):\n assert_allclose(self.source.data[\"Signif_Avg\"], 30.669872283935547)\n\n def test_position(self):\n position = self.source.position\n assert_allclose(position.ra.deg, 83.637199, atol=1e-3)\n assert_allclose(position.dec.deg, 22.024099, atol=1e-3)\n\n @pytest.mark.parametrize(\"ref\", SOURCES_3FGL, ids=lambda _: _[\"name\"])\n def test_str(self, ref):\n actual = str(self.cat[ref[\"idx\"]])\n expected = open(get_pkg_data_filename(ref[\"str_ref_file\"])).read()\n assert actual == expected\n\n @pytest.mark.parametrize(\"ref\", SOURCES_3FGL, ids=lambda _: _[\"name\"])\n def test_spectral_model(self, ref):\n model = self.cat[ref[\"idx\"]].spectral_model()\n\n dnde, dnde_err = model.evaluate_error(1 * u.GeV)\n\n assert isinstance(model, ref[\"spec_type\"])\n assert_quantity_allclose(dnde, ref[\"dnde\"])\n assert_quantity_allclose(dnde_err, ref[\"dnde_err\"], rtol=1e-3)\n\n def test_spatial_model(self):\n model = self.cat[0].spatial_model()\n assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 0.0377)\n assert_allclose(p[\"lat_0\"].value, 65.751701)\n\n model = self.cat[122].spatial_model()\n assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 14.75)\n assert_allclose(p[\"lat_0\"].value, -72.699997)\n assert_allclose(p[\"sigma\"].value, 1.35)\n\n model = self.cat[955].spatial_model()\n assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 128.287201)\n assert_allclose(p[\"lat_0\"].value, -45.190102)\n assert_allclose(p[\"r_0\"].value, 0.91)\n\n model = self.cat[602].spatial_model()\n assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n\n model = self.cat[\"3FGL J0000.2-3738\"].spatial_model()\n pos_err = 
model.position_error\n assert_allclose(pos_err.angle.value, -88.55)\n assert_allclose(0.5 * pos_err.height.value, 0.0731, rtol=1e-4)\n assert_allclose(0.5 * pos_err.width.value, 0.0676, rtol=1e-4)\n assert_allclose(model.position.ra.value, pos_err.center.ra.value)\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n\n @pytest.mark.parametrize(\"ref\", SOURCES_3FGL, ids=lambda _: _[\"name\"])\n def test_sky_model(self, ref):\n self.cat[ref[\"idx\"]].sky_model()\n\n def test_flux_points(self):\n flux_points = self.source.flux_points\n\n assert len(flux_points.table) == 5\n assert \"flux_ul\" in flux_points.table.colnames\n assert flux_points.sed_type == \"flux\"\n\n desired = [1.645888e-06, 5.445407e-07, 1.255338e-07, 2.545524e-08, 2.263189e-09]\n assert_allclose(flux_points.table[\"flux\"].data, desired, rtol=1e-5)\n\n def test_flux_points_ul(self):\n source = self.cat[\"3FGL J0000.2-3738\"]\n flux_points = source.flux_points\n\n desired = [4.096391e-09, 6.680059e-10, np.nan, np.nan, np.nan]\n assert_allclose(flux_points.table[\"flux_ul\"].data, desired, rtol=1e-5)\n\n def test_lightcurve(self):\n lc = self.source.lightcurve()\n table = lc.table\n\n assert len(table) == 48\n assert table.colnames == [\n \"time_min\",\n \"time_max\",\n \"flux\",\n \"flux_errp\",\n \"flux_errn\",\n ]\n\n expected = Time(54680.02313657408, format=\"mjd\", scale=\"utc\")\n assert_time_allclose(lc.time_min[0], expected)\n\n expected = Time(54710.43824797454, format=\"mjd\", scale=\"utc\")\n assert_time_allclose(lc.time_max[0], expected)\n\n assert table[\"flux\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux\"][0], 2.384e-06, rtol=1e-3)\n\n assert table[\"flux_errp\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux_errp\"][0], 8.071e-08, rtol=1e-3)\n\n assert table[\"flux_errn\"].unit == \"cm-2 s-1\"\n assert_allclose(table[\"flux_errn\"][0], 8.071e-08, rtol=1e-3)\n\n def test_crab_alias(self):\n for name in [\n \"Crab\",\n \"3FGL J0534.5+2201\",\n \"1FHL J0534.5+2201\",\n \"PSR J0534+2200\",\n ]:\n assert self.cat[name].row_index == 621\n\n\n@requires_data()\nclass TestFermi2FHLObject:\n @classmethod\n def setup_class(cls):\n cls.cat = SourceCatalog2FHL()\n # Use 2FHL J0534.5+2201 (Crab) as a test source\n cls.source_name = \"2FHL J0534.5+2201\"\n cls.source = cls.cat[cls.source_name]\n\n def test_name(self):\n assert self.source.name == self.source_name\n\n def test_position(self):\n position = self.source.position\n assert_allclose(position.ra.deg, 83.634102, atol=1e-3)\n assert_allclose(position.dec.deg, 22.0215, atol=1e-3)\n\n @pytest.mark.parametrize(\"ref\", SOURCES_2FHL, ids=lambda _: _[\"name\"])\n def test_str(self, ref):\n actual = str(self.cat[ref[\"idx\"]])\n expected = open(get_pkg_data_filename(ref[\"str_ref_file\"])).read()\n assert actual == expected\n\n def test_spectral_model(self):\n model = self.source.spectral_model()\n energy = u.Quantity(100, \"GeV\")\n desired = u.Quantity(6.8700477298e-12, \"cm-2 GeV-1 s-1\")\n assert_quantity_allclose(model(energy), desired)\n\n def test_flux_points(self):\n # test flux point on PKS 2155-304\n src = self.cat[\"PKS 2155-304\"]\n flux_points = src.flux_points\n actual = flux_points.table[\"flux\"]\n desired = [2.866363e-10, 6.118736e-11, 3.257970e-16] * u.Unit(\"cm-2 s-1\")\n assert_quantity_allclose(actual, desired)\n\n actual = flux_points.table[\"flux_ul\"]\n desired = [np.nan, np.nan, 1.294092e-11] * u.Unit(\"cm-2 s-1\")\n assert_quantity_allclose(actual, desired, rtol=1e-3)\n\n def test_spatial_model(self):\n model = 
self.cat[221].spatial_model()\n assert \"PointSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 221.281998, rtol=1e-5)\n assert_allclose(p[\"lat_0\"].value, -3.4943, rtol=1e-5)\n\n model = self.cat[\"2FHL J1304.5-4353\"].spatial_model()\n pos_err = model.position_error\n scale = Gauss2DPDF().containment_radius(0.95) / Gauss2DPDF().containment_radius(\n 0.68\n )\n assert_allclose(pos_err.height.value, 2 * 0.041987 * scale, rtol=1e-4)\n assert_allclose(pos_err.width.value, 2 * 0.041987 * scale, rtol=1e-4)\n assert_allclose(model.position.ra.value, pos_err.center.ra.value)\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n\n model = self.cat[97].spatial_model()\n assert \"GaussianSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 94.309998, rtol=1e-5)\n assert_allclose(p[\"lat_0\"].value, 22.58, rtol=1e-5)\n assert_allclose(p[\"sigma\"].value, 0.27)\n\n model = self.cat[134].spatial_model()\n assert \"DiskSpatialModel\" in model.tag\n assert model.frame == \"icrs\"\n p = model.parameters\n assert_allclose(p[\"lon_0\"].value, 125.660004, rtol=1e-5)\n assert_allclose(p[\"lat_0\"].value, -42.84, rtol=1e-5)\n assert_allclose(p[\"r_0\"].value, 0.37)\n\n model = self.cat[256].spatial_model()\n assert \"TemplateSpatialModel\" in model.tag\n assert model.frame == \"fk5\"\n assert model.normalize is True\n # TODO: have to check the extended template used for RX J1713,\n # for now I guess it's the same than for 3FGL\n # and added a copy with the name given by 2FHL in gammapy-extra\n\n\n@requires_data()\nclass TestFermi3FHLObject:\n @classmethod\n def setup_class(cls):\n cls.cat = SourceCatalog3FHL()\n # Use 3FHL J0534.5+2201 (Crab) as a test source\n cls.source_name = \"3FHL J0534.5+2201\"\n cls.source = cls.cat[cls.source_name]\n\n def test_name(self):\n assert self.source.name == self.source_name\n\n def test_row_index(self):\n assert self.source.row_index == 352\n\n def test_data(self):\n assert_allclose(self.source.data[\"Signif_Avg\"], 168.64082)\n\n def test_str(self):\n actual = str(self.cat[\"3FHL J2301.9+5855e\"]) # an extended source\n expected = open(get_pkg_data_filename(\"data/3fhl_j2301.9+5855e.txt\")).read()\n assert actual == expected\n\n def test_position(self):\n position = self.source.position\n assert_allclose(position.ra.deg, 83.634834, atol=1e-3)\n assert_allclose(position.dec.deg, 22.019203, atol=1e-3)\n\n @pytest.mark.parametrize(\"ref\", SOURCES_3FHL, ids=lambda _: _[\"name\"])\n def test_spectral_model(self, ref):\n model = self.cat[ref[\"idx\"]].spectral_model()\n\n dnde, dnde_err = model.evaluate_error(100 * u.GeV)\n\n assert isinstance(model, ref[\"spec_type\"])\n assert_quantity_allclose(dnde, ref[\"dnde\"])\n assert_quantity_allclose(dnde_err, ref[\"dnde_err\"], rtol=1e-3)\n\n @pytest.mark.parametrize(\"ref\", SOURCES_3FHL, ids=lambda _: _[\"name\"])\n def test_spatial_model(self, ref):\n model = self.cat[ref[\"idx\"]].spatial_model()\n assert model.frame == \"icrs\"\n\n model = self.cat[\"3FHL J0002.1-6728\"].spatial_model()\n pos_err = model.position_error\n assert_allclose(0.5 * pos_err.height.value, 0.035713, rtol=1e-4)\n assert_allclose(0.5 * pos_err.width.value, 0.035713, rtol=1e-4)\n assert_allclose(model.position.ra.value, pos_err.center.ra.value)\n assert_allclose(model.position.dec.value, pos_err.center.dec.value)\n\n @pytest.mark.parametrize(\"ref\", SOURCES_3FHL, ids=lambda _: _[\"name\"])\n def 
test_sky_model(self, ref):\n self.cat[ref[\"idx\"]].sky_model()\n\n def test_flux_points(self):\n flux_points = self.source.flux_points\n\n assert len(flux_points.table) == 5\n assert \"flux_ul\" in flux_points.table.colnames\n\n desired = [5.169889e-09, 2.245024e-09, 9.243175e-10, 2.758956e-10, 6.684021e-11]\n assert_allclose(flux_points.table[\"flux\"].data, desired, rtol=1e-3)\n\n def test_crab_alias(self):\n for name in [\"Crab Nebula\", \"3FHL J0534.5+2201\", \"3FGL J0534.5+2201i\"]:\n assert self.cat[name].row_index == 352\n\n\n@requires_data()\nclass TestSourceCatalog3FGL:\n @classmethod\n def setup_class(cls):\n cls.cat = SourceCatalog3FGL()\n\n def test_main_table(self):\n assert len(self.cat.table) == 3034\n\n def test_extended_sources(self):\n table = self.cat.extended_sources_table\n assert len(table) == 25\n\n\n@requires_data()\nclass TestSourceCatalog2FHL:\n @classmethod\n def setup_class(cls):\n cls.cat = SourceCatalog2FHL()\n\n def test_main_table(self):\n assert len(self.cat.table) == 360\n\n def test_extended_sources(self):\n table = self.cat.extended_sources_table\n assert len(table) == 25\n\n def test_crab_alias(self):\n for name in [\"Crab\", \"3FGL J0534.5+2201i\", \"1FHL J0534.5+2201\"]:\n assert self.cat[name].row_index == 85\n\n\n@requires_data()\nclass TestSourceCatalog3FHL:\n @classmethod\n def setup_class(cls):\n cls.cat = SourceCatalog3FHL()\n\n def test_main_table(self):\n assert len(self.cat.table) == 1556\n\n def test_extended_sources(self):\n table = self.cat.extended_sources_table\n assert len(table) == 55\n", "\"\"\"Tools to create profiles (i.e. 1D \"slices\" from 2D images).\"\"\"\nimport numpy as np\nfrom astropy import units as u\nfrom regions import CircleAnnulusSkyRegion, RectangleSkyRegion\nfrom gammapy.datasets import Datasets, SpectrumDatasetOnOff\nfrom gammapy.maps import MapAxis\nfrom gammapy.modeling.models import PowerLawSpectralModel, SkyModel\nfrom gammapy.stats import CashCountsStatistic, WStatCountsStatistic\nfrom gammapy.utils.table import table_from_row_data\nfrom .core import Estimator\n\n__all__ = [\"ExcessProfileEstimator\"]\n\n\nclass ExcessProfileEstimator(Estimator):\n \"\"\"Estimate profile from a DataSet.\n\n Parameters\n ----------\n regions : list of `regions`\n regions to use\n energy_edges : `~astropy.units.Quantity`\n Energy edges of the profiles to be computed.\n n_sigma : float (optional)\n Number of sigma to compute errors. By default, it is 1.\n n_sigma_ul : float (optional)\n Number of sigma to compute upper limit. By default, it is 3.\n spectrum : `~gammapy.modeling.models.SpectralModel` (optional)\n Spectral model to compute the fluxes or brightness.\n Default is power-law with spectral index of 2.\n selection_optional : list of str\n Additional quantities to be estimated. 
Possible options are:\n\n * \"errn-errp\": estimate asymmetric errors.\n * \"ul\": estimate upper limits.\n\n By default all quantities are estimated.\n\n Examples\n --------\n This example shows how to compute a counts profile for the Fermi galactic\n center region::\n\n import matplotlib.pyplot as plt\n from astropy import units as u\n from astropy.coordinates import SkyCoord\n from gammapy.data import GTI\n from gammapy.estimators import ExcessProfileEstimator, ImageProfile\n from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions\n from gammapy.datasets import Datasets\n\n # load example data\n datasets = Datasets.read(\"$GAMMAPY_DATA/fermi-3fhl-crab/\",\n \"Fermi-LAT-3FHL_datasets.yaml\", \"Fermi-LAT-3FHL_models.yaml\")\n # configuration\n datasets[0].gti = GTI.create(\"0s\", \"1e7s\", \"2010-01-01\")\n\n # creation of the boxes and axis\n start_line = SkyCoord(182.5, -5.8, unit='deg', frame='galactic')\n end_line = SkyCoord(186.5, -5.8, unit='deg', frame='galactic')\n boxes, axis = make_orthogonal_rectangle_sky_regions(start_line,\n end_line,\n datasets[0].counts.geom.wcs,\n 1.*u.deg,\n 11)\n\n # set up profile estimator and run\n prof_maker = ExcessProfileEstimator(boxes, axis)\n fermi_prof = prof_maker.run(datasets[0])\n\n # smooth and plot the data using the ImageProfile class\n fermi_prof.peek()\n plt.show()\n\n ax = plt.gca()\n ax.set_yscale('log')\n ax = fermi_prof.plot(\"flux\", ax=ax)\n\n \"\"\"\n\n tag = \"ExcessProfileEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n regions,\n energy_edges=None,\n spectrum=None,\n n_sigma=1.0,\n n_sigma_ul=3.0,\n selection_optional=\"all\",\n ):\n self.regions = regions\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n\n self.energy_edges = (\n u.Quantity(energy_edges) if energy_edges is not None else None\n )\n\n if spectrum is None:\n spectrum = PowerLawSpectralModel()\n\n self.spectrum = spectrum\n self.selection_optional = selection_optional\n\n def get_spectrum_datasets(self, dataset):\n \"\"\" Utility to make the final `~gammapy.datasts.Datasets`\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`\n the dataset to use for profile extraction\n Returns\n --------\n sp_datasets : array of `~gammapy.datasets.SpectrumDataset`\n the list of `~gammapy.datasets.SpectrumDataset` computed in each box\n \"\"\"\n datasets = Datasets()\n\n for reg in self.regions:\n spectrum_dataset = dataset.to_spectrum_dataset(reg)\n datasets.append(spectrum_dataset)\n\n return datasets\n\n def _get_projected_distance(self):\n distances = []\n center = self.regions[0].center\n\n for idx, region in enumerate(self.regions):\n if isinstance(region, CircleAnnulusSkyRegion):\n distance = (region.inner_radius + region.outer_radius) / 2.0\n else:\n distance = center.separation(region.center)\n\n distances.append(distance)\n\n return MapAxis.from_nodes(\n u.Quantity(distances, \"deg\"), name=\"projected distance\"\n )\n\n def make_prof(self, sp_datasets):\n \"\"\" Utility to make the profile in each region\n\n Parameters\n ----------\n sp_datasets : `~gammapy.datasets.MapDatasets` of `~gammapy.datasets.SpectrumDataset` or \\\n `~gammapy.datasets.SpectrumDatasetOnOff`\n the dataset to use for profile extraction\n\n Returns\n --------\n results : list of dictionary\n the list of results (list of keys: x_min, x_ref, x_max, alpha, counts, background, excess, ts, sqrt_ts, \\\n err, errn, errp, ul, exposure, solid_angle)\n \"\"\"\n 
results = []\n\n distance = self._get_projected_distance()\n\n for index, spds in enumerate(sp_datasets):\n old_model = None\n if spds.models is not None:\n old_model = spds.models\n spds.models = SkyModel(spectral_model=self.spectrum)\n e_reco = spds.counts.geom.axes[\"energy\"].edges\n\n # ToDo: When the function to_spectrum_dataset will manage the masks, use the following line\n # mask = spds.mask if spds.mask is not None else slice(None)\n mask = slice(None)\n if isinstance(spds, SpectrumDatasetOnOff):\n stats = WStatCountsStatistic(\n spds.counts.data[mask][:, 0, 0],\n spds.counts_off.data[mask][:, 0, 0],\n spds.alpha.data[mask][:, 0, 0],\n )\n\n else:\n stats = CashCountsStatistic(\n spds.counts.data[mask][:, 0, 0],\n spds.npred_background().data[mask][:, 0, 0],\n )\n\n result = {\n \"x_min\": distance.edges[index],\n \"x_max\": distance.edges[index + 1],\n \"x_ref\": distance.center[index],\n \"energy_edge\": e_reco,\n }\n if isinstance(spds, SpectrumDatasetOnOff):\n result[\"alpha\"] = stats.alpha\n result.update(\n {\n \"counts\": stats.n_on,\n \"background\": stats.n_bkg,\n \"excess\": stats.n_sig,\n }\n )\n\n result[\"ts\"] = stats.ts\n result[\"sqrt_ts\"] = stats.sqrt_ts\n\n result[\"err\"] = stats.error * self.n_sigma\n\n if \"errn-errp\" in self.selection_optional:\n result[\"errn\"] = stats.compute_errn(self.n_sigma)\n result[\"errp\"] = stats.compute_errp(self.n_sigma)\n\n if \"ul\" in self.selection_optional:\n result[\"ul\"] = stats.compute_upper_limit(self.n_sigma_ul)\n\n npred = spds.npred().data[mask][:, 0, 0]\n e_reco_lo = e_reco[:-1]\n e_reco_hi = e_reco[1:]\n flux = (\n stats.n_sig\n / npred\n * spds.models[0].spectral_model.integral(e_reco_lo, e_reco_hi).value\n )\n result[\"flux\"] = flux\n\n result[\"flux_err\"] = stats.error / stats.n_sig * flux\n\n if \"errn-errp\" in self.selection_optional:\n result[\"flux_errn\"] = np.abs(result[\"errn\"]) / stats.n_sig * flux\n result[\"flux_errp\"] = result[\"errp\"] / stats.n_sig * flux\n\n if \"ul\" in self.selection_optional:\n result[\"flux_ul\"] = result[\"ul\"] / stats.n_sig * flux\n\n solid_angle = spds.counts.geom.solid_angle()\n result[\"solid_angle\"] = (\n np.full(result[\"counts\"].shape, solid_angle.to_value(\"sr\")) * u.sr\n )\n\n results.append(result)\n if old_model is not None:\n spds.models = old_model\n\n return results\n\n def run(self, dataset):\n \"\"\"Make the profiles\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`\n the dataset to use for profile extraction\n\n Returns\n --------\n imageprofile : `~gammapy.estimators.ImageProfile`\n Return an image profile class containing the result\n \"\"\"\n if self.energy_edges is not None:\n axis = MapAxis.from_energy_edges(self.energy_edges)\n dataset = dataset.resample_energy_axis(energy_axis=axis)\n else:\n dataset = dataset.to_image()\n\n spectrum_datasets = self.get_spectrum_datasets(dataset)\n\n results = self.make_prof(spectrum_datasets)\n table = table_from_row_data(results)\n if isinstance(self.regions[0], RectangleSkyRegion):\n table.meta[\"PROFILE_TYPE\"] = \"orthogonal_rectangle\"\n table.meta[\"SPECTRAL_MODEL\"] = self.spectrum.to_dict()\n\n # return ImageProfile(table)\n return table\n", "\"\"\"Example how to compute and plot reflected regions.\"\"\"\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom regions import RectangleSkyRegion\nimport matplotlib.pyplot as plt\nfrom gammapy.data import DataStore\nfrom gammapy.datasets import SpectrumDataset\nfrom 
gammapy.makers import ReflectedRegionsBackgroundMaker, SpectrumDatasetMaker\nfrom gammapy.maps import Map, MapAxis, RegionGeom\nfrom gammapy.visualization import plot_spectrum_datasets_off_regions\n\ndata_store = DataStore.from_dir(\"$GAMMAPY_DATA/hess-dl3-dr1/\")\nmask = data_store.obs_table[\"TARGET_NAME\"] == \"Crab\"\nobs_ids = data_store.obs_table[\"OBS_ID\"][mask].data\nobservations = data_store.get_observations(obs_ids)\n\ncrab_position = SkyCoord(83.63, 22.01, unit=\"deg\", frame=\"icrs\")\n\n# The ON region center is defined in the icrs frame. The angle is defined w.r.t. to its axis.\nrectangle = RectangleSkyRegion(\n center=crab_position, width=0.5 * u.deg, height=0.4 * u.deg, angle=0 * u.deg\n)\n\nbkg_maker = ReflectedRegionsBackgroundMaker(min_distance=0.1 * u.rad)\ndataset_maker = SpectrumDatasetMaker(selection=[\"counts\"])\n\nenergy_axis = MapAxis.from_energy_bounds(0.1, 100, 30, unit=\"TeV\")\ngeom = RegionGeom.create(region=rectangle, axes=[energy_axis])\ndataset_empty = SpectrumDataset.create(geom=geom)\n\ndatasets = []\n\nfor obs in observations:\n\n dataset = dataset_maker.run(dataset_empty.copy(name=f\"obs-{obs.obs_id}\"), obs)\n dataset_on_off = bkg_maker.run(observation=obs, dataset=dataset)\n datasets.append(dataset_on_off)\n\nm = Map.create(skydir=crab_position, width=(8, 8), proj=\"TAN\")\n\n_, ax, _ = m.plot(vmin=-1, vmax=0)\n\nrectangle.to_pixel(ax.wcs).plot(ax=ax, color=\"black\")\n\nplot_spectrum_datasets_off_regions(datasets=datasets, ax=ax)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.sqrt", "numpy.abs", "numpy.linalg.eig", "numpy.cos", "numpy.sort", "numpy.sin", "numpy.all", "numpy.max", "numpy.arctan2", "numpy.errstate", "numpy.exp", "numpy.where" ], [ "numpy.testing.assert_allclose" ], [ "numpy.arctan2", "numpy.max", "numpy.min" ], [ "numpy.logspace", "numpy.random.rand", "numpy.linspace", "numpy.testing.assert_allclose" ], [ "numpy.mod", "numpy.ceil", "numpy.array", "numpy.asanyarray" ], [ "numpy.testing.assert_allclose" ], [ "numpy.abs" ], [ "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
manhhv87/densenet_bottleneck
[ "fd08eb88514dacaff1bcec8bc52a77ea56ab72c7" ]
[ "finetuning/trainer_old.py" ]
[ "import argparse\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom finetuning.utils import ecg_feature_extractor, train_test_split\nfrom transplant.evaluation import auc, f1, multi_f1, CustomCheckpoint\nfrom transplant.utils import (create_predictions_frame, load_pkl, is_multiclass)\n\n\ndef _create_dataset_from_data(data):\n \"\"\"\n input: data = {'x': x,\n 'y': labels.to_numpy(),\n 'record_ids': labels.index.to_numpy(),\n 'classes': labels.columns.to_numpy()}\n return: data and label\n \"\"\"\n return tf.data.Dataset.from_tensor_slices((data['x'], data['y']))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--job-dir', type=Path, required=True, help='Job output directory.')\n parser.add_argument('--train', type=Path, required=True, help='Path to the train file.')\n parser.add_argument('--val', type=Path, help='Path to the validation file.\\n'\n 'Overrides --val-size.')\n parser.add_argument('--test', type=Path, help='Path to the test file.')\n parser.add_argument('--weights-file', type=Path, help='Path to pretrained weights or a checkpoint of the model.')\n parser.add_argument('--val-size', type=float, default=None,\n help='Size of the validation set or proportion of the train set.')\n parser.add_argument('--subset', type=float, default=None, help='Size of a subset of the train set '\n 'or proportion of the train set.')\n parser.add_argument('--batch-size', type=int, default=32, help='Batch size.')\n parser.add_argument('--val-metric', default='loss',\n help='Validation metric used to find the best model at each epoch. Supported metrics are:'\n '`loss`, `acc`, `f1`, `auc`.')\n parser.add_argument('--channel', type=int, default=None, help='Use only the selected channel. 
'\n 'By default use all available channels.')\n parser.add_argument('--epochs', type=int, default=1, help='Number of epochs.')\n parser.add_argument('--seed', type=int, default=None, help='Random state.')\n parser.add_argument('--verbose', action='store_true', help='Show debug messages.')\n args, _ = parser.parse_known_args()\n\n if args.val_metric not in ['loss', 'acc', 'f1', 'auc']:\n raise ValueError('Unknown metric: {}'.format(args.val_metric))\n\n os.makedirs(name=str(args.job_dir), exist_ok=True)\n print('Creating working directory in {}'.format(args.job_dir))\n\n seed = args.seed or np.random.randint(2 ** 16)\n print('Setting random state {}'.format(seed))\n np.random.seed(seed)\n\n # No separate val file is used; split the val set from the train set\n if not args.val and args.val_size:\n if args.val_size >= 1: # Taken as a number of patients; if <= 1 it is a proportion of the train set\n args.val_size = int(args.val_size)\n\n # Further split the train set after it has been split into train and val sets\n # If greater than or equal to 1 it is taken as a number of patients; if <= 1 it is a proportion of the train set\n if args.subset and args.subset >= 1:\n args.subset = int(args.subset)\n\n print('Loading train data from {} ...'.format(args.train))\n train = load_pkl(file=str(args.train))\n\n if args.val: # Load the val set from the val file\n print('Loading validation data from {} ...'.format(args.val))\n val = load_pkl(file=str(args.val))\n elif args.val_size: # Split the val set from the train set as a proportion rather than loading it from a file\n original_train_size = len(train['x']) # Size of the entire dataset\n train, val = train_test_split(train, test_size=args.val_size, stratify=train['y']) # Split into train and val sets\n new_train_size = len(train['x']) # Returns the size of the new train set\n new_val_size = len(val['x']) # returns the size of the new val set\n print('Split data into train {:.2%} and validation {:.2%}'.format(\n new_train_size / original_train_size, new_val_size / original_train_size))\n else: # No val set is used\n val = None\n\n if args.test: # Use a separate test set file\n print('Loading test data from {} ...'.format(args.test))\n test = load_pkl(str(args.test))\n else: # No test set is used\n test = None\n\n if args.subset: # Further split the train set after the original train dataset has been split into the new train set and val set\n original_train_size = len(train['x']) # Returns the size of the train set\n train, _ = train_test_split(train, train_size=args.subset, stratify=train['y']) # Returns the new train set\n new_train_size = len(train['x']) # Returns the size of the new train set\n print('Using only {:.2%} of train data'.format(new_train_size / original_train_size))\n\n if args.channel is not None:\n train['x'] = train['x'][:, :, args.channel:args.channel + 1]\n if val:\n val['x'] = val['x'][:, :, args.channel:args.channel + 1]\n if test:\n test['x'] = test['x'][:, :, args.channel:args.channel + 1]\n\n print('Train data shape:', train['x'].shape)\n\n train_data = _create_dataset_from_data(train).shuffle(len(train['x'])).batch(args.batch_size)\n val_data = _create_dataset_from_data(val).batch(args.batch_size) if val else None\n test_data = _create_dataset_from_data(test).batch(args.batch_size) if test else None\n\n strategy = tf.distribute.MirroredStrategy()\n\n with strategy.scope():\n print('Building model ...')\n num_classes = len(train['classes'])\n\n if is_multiclass(train['y']):\n activation = 'sigmoid'\n loss = tf.keras.losses.BinaryCrossentropy()\n accuracy = 
tf.keras.metrics.BinaryAccuracy(name='acc')\n else:\n activation = 'softmax'\n loss = tf.keras.losses.CategoricalCrossentropy()\n accuracy = tf.keras.metrics.CategoricalAccuracy(name='acc')\n\n # not include fc layer\n model = ecg_feature_extractor(arch=args.arch)\n model.add(tf.keras.layers.Dense(units=num_classes, activation=activation))\n\n # initialize the weights of the model\n inputs = tf.keras.layers.Input(shape=train['x'].shape[1:], dtype=train['x'].dtype)\n model(inputs) # complete model\n\n print('# model parameters: {:,d}'.format(model.count_params()))\n\n if args.weights_file: # Use pre-trained weights\n # initialize weights (excluding the optimizer state) to load the pretrained resnet\n # the optimizer state is randomly initialized in the `model.compile` function\n print('Loading weights from file {} ...'.format(args.weights_file))\n model.load_weights(str(args.weights_file))\n\n model.compile(optimizer=tf.keras.optimizers.Adam(),\n loss=loss,\n metrics=[accuracy])\n\n callbacks = []\n\n logger = tf.keras.callbacks.CSVLogger(filename=str(args.job_dir / 'history.csv'))\n callbacks.append(logger)\n\n if args.val_metric in ['loss', 'acc']:\n monitor = ('val_' + args.val_metric) if val else args.val_metric\n checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=str(args.job_dir / 'best_model.weights'),\n monitor=monitor,\n save_best_only=True,\n save_weights_only=True,\n mode='auto',\n verbose=1)\n elif args.val_metric == 'f1':\n if is_multiclass(train['y']):\n score_fn = multi_f1\n else:\n score_fn = f1\n\n checkpoint = CustomCheckpoint(filepath=str(args.job_dir / 'best_model.weights'),\n data=(val_data, val['y']) if val else (train_data, train['y']),\n score_fn=score_fn,\n save_best_only=True,\n verbose=1)\n\n elif args.val_metric == 'auc':\n checkpoint = CustomCheckpoint(filepath=str(args.job_dir / 'best_model.weights'),\n data=(val_data, val['y']) if val else (train_data, train['y']),\n score_fn=auc,\n save_best_only=True,\n verbose=1)\n else:\n raise ValueError('Unknown metric: {}'.format(args.val_metric))\n\n callbacks.append(checkpoint)\n\n if val:\n # new adding\n rl_stopping = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=7,\n verbose=1, min_lr=1e-7)\n callbacks.append(rl_stopping)\n\n early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=30, verbose=1)\n callbacks.append(early_stopping)\n\n model.fit(train_data, epochs=args.epochs, verbose=2, validation_data=val_data, callbacks=callbacks)\n\n # load best model for inference\n print('Loading the best weights from file {} ...'.format(str(args.job_dir / 'best_model.weights')))\n model.load_weights(filepath=str(args.job_dir / 'best_model.weights'))\n\n print('Predicting training data ...')\n train_y_prob = model.predict(x=train['x'], batch_size=args.batch_size)\n train_predictions = create_predictions_frame(y_prob=train_y_prob,\n y_true=train['y'],\n class_names=train['classes'],\n record_ids=train['record_ids'])\n train_predictions.to_csv(path_or_buf=args.job_dir / 'train_predictions.csv', index=False)\n\n if val:\n print('Predicting validation data ...')\n val_y_prob = model.predict(x=val['x'], batch_size=args.batch_size)\n val_predictions = create_predictions_frame(y_prob=val_y_prob, y_true=val['y'],\n class_names=train['classes'],\n record_ids=val['record_ids'])\n val_predictions.to_csv(path_or_buf=args.job_dir / 'val_predictions.csv', index=False)\n\n if test:\n print('Predicting test data ...')\n test_y_prob = model.predict(x=test['x'], 
batch_size=args.batch_size)\n test_predictions = create_predictions_frame(y_prob=test_y_prob, y_true=test['y'],\n class_names=train['classes'],\n record_ids=test['record_ids'])\n test_predictions.to_csv(path_or_buf=args.job_dir / 'test_predictions.csv', index=False)\n" ]
[ [ "tensorflow.keras.metrics.BinaryAccuracy", "tensorflow.keras.losses.CategoricalCrossentropy", "numpy.random.seed", "tensorflow.distribute.MirroredStrategy", "tensorflow.keras.layers.Dense", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.keras.optimizers.Adam", "numpy.random.randint", "tensorflow.keras.metrics.CategoricalAccuracy", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
loftwah/MONAI
[ "37fb3e779121e6dc74127993df102fc91d9065f8", "37fb3e779121e6dc74127993df102fc91d9065f8", "37fb3e779121e6dc74127993df102fc91d9065f8", "37fb3e779121e6dc74127993df102fc91d9065f8", "37fb3e779121e6dc74127993df102fc91d9065f8", "37fb3e779121e6dc74127993df102fc91d9065f8" ]
[ "examples/classification_3d_ignite/densenet_training_dict.py", "tests/test_as_channel_firstd.py", "tests/test_handler_tb_image.py", "monai/data/synthetic.py", "tests/test_rand_rotated.py", "tests/test_rand_scale_intensity.py" ]
[ "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport logging\nimport numpy as np\nimport torch\nfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator, _prepare_batch\nfrom ignite.handlers import ModelCheckpoint, EarlyStopping\nfrom ignite.metrics import Accuracy\nfrom torch.utils.data import DataLoader\n\nimport monai\nfrom monai.transforms import Compose, LoadNiftid, AddChanneld, ScaleIntensityd, Resized, RandRotate90d, ToTensord\nfrom monai.handlers import StatsHandler, TensorBoardStatsHandler, stopping_fn_from_metric, ROCAUC\n\nmonai.config.print_config()\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n# IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/\nimages = [\n \"/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz\",\n \"/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz\"\n]\n# 2 binary labels for gender classification: man and woman\nlabels = np.array([\n 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0\n])\ntrain_files = [{'img': img, 'label': label} for img, label in zip(images[:10], labels[:10])]\nval_files = [{'img': img, 'label': label} for img, label in zip(images[-10:], labels[-10:])]\n\n# define transforms for image\ntrain_transforms = Compose([\n LoadNiftid(keys=['img']),\n AddChanneld(keys=['img']),\n ScaleIntensityd(keys=['img']),\n Resized(keys=['img'], spatial_size=(96, 96, 96)),\n RandRotate90d(keys=['img'], prob=0.8, spatial_axes=[0, 2]),\n ToTensord(keys=['img'])\n])\nval_transforms = Compose([\n LoadNiftid(keys=['img']),\n AddChanneld(keys=['img']),\n ScaleIntensityd(keys=['img']),\n Resized(keys=['img'], spatial_size=(96, 96, 96)),\n ToTensord(keys=['img'])\n])\n\n# define dataset, data loader\ncheck_ds 
= monai.data.Dataset(data=train_files, transform=train_transforms)\ncheck_loader = DataLoader(check_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())\ncheck_data = monai.utils.misc.first(check_loader)\nprint(check_data['img'].shape, check_data['label'])\n\n# create DenseNet121, CrossEntropyLoss and Adam optimizer\nnet = monai.networks.nets.densenet.densenet121(\n spatial_dims=3,\n in_channels=1,\n out_channels=2,\n)\nloss = torch.nn.CrossEntropyLoss()\nlr = 1e-5\nopt = torch.optim.Adam(net.parameters(), lr)\ndevice = torch.device(\"cuda:0\")\n\n\n# ignite trainer expects batch=(img, label) and returns output=loss at every iteration,\n# user can add output_transform to return other values, like: y_pred, y, etc.\ndef prepare_batch(batch, device=None, non_blocking=False):\n return _prepare_batch((batch['img'], batch['label']), device, non_blocking)\n\n\ntrainer = create_supervised_trainer(net, opt, loss, device, False, prepare_batch=prepare_batch)\n\n# adding checkpoint handler to save models (network params and optimizer stats) during training\ncheckpoint_handler = ModelCheckpoint('./runs/', 'net', n_saved=10, require_empty=False)\ntrainer.add_event_handler(event_name=Events.EPOCH_COMPLETED,\n handler=checkpoint_handler,\n to_save={'net': net, 'opt': opt})\n\n# StatsHandler prints loss at every iteration and print metrics at every epoch,\n# we don't set metrics for trainer here, so just print loss, user can also customize print functions\n# and can use output_transform to convert engine.state.output if it's not loss value\ntrain_stats_handler = StatsHandler(name='trainer')\ntrain_stats_handler.attach(trainer)\n\n# TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler\ntrain_tensorboard_stats_handler = TensorBoardStatsHandler()\ntrain_tensorboard_stats_handler.attach(trainer)\n\n# set parameters for validation\nvalidation_every_n_epochs = 1\n\nmetric_name = 'Accuracy'\n# add evaluation metric to the evaluator engine\nval_metrics = {metric_name: Accuracy(), 'AUC': ROCAUC(to_onehot_y=True, add_softmax=True)}\n# ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,\n# user can add output_transform to return other values\nevaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)\n\n# add stats event handler to print validation stats via evaluator\nval_stats_handler = StatsHandler(\n name='evaluator',\n output_transform=lambda x: None, # no need to print loss value, so disable per iteration output\n global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer\nval_stats_handler.attach(evaluator)\n\n# add handler to record metrics to TensorBoard at every epoch\nval_tensorboard_stats_handler = TensorBoardStatsHandler(\n output_transform=lambda x: None, # no need to plot loss value, so disable per iteration output\n global_epoch_transform=lambda x: trainer.state.epoch) # fetch global epoch number from trainer\nval_tensorboard_stats_handler.attach(evaluator)\n\n# add early stopping handler to evaluator\nearly_stopper = EarlyStopping(patience=4,\n score_function=stopping_fn_from_metric(metric_name),\n trainer=trainer)\nevaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper)\n\n# create a validation data loader\nval_ds = monai.data.Dataset(data=val_files, transform=val_transforms)\nval_loader = DataLoader(val_ds, batch_size=2, num_workers=4, 
pin_memory=torch.cuda.is_available())\n\n\[email protected](Events.EPOCH_COMPLETED(every=validation_every_n_epochs))\ndef run_validation(engine):\n evaluator.run(val_loader)\n\n\n# create a training data loader\ntrain_ds = monai.data.Dataset(data=train_files, transform=train_transforms)\ntrain_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, pin_memory=torch.cuda.is_available())\n\ntrain_epochs = 30\nstate = trainer.run(train_loader, train_epochs)\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom parameterized import parameterized\nfrom monai.transforms import AsChannelFirstd\n\nTEST_CASE_1 = [\n {\n 'keys': ['image', 'label', 'extra'],\n 'channel_dim': -1\n },\n (4, 1, 2, 3)\n]\n\nTEST_CASE_2 = [\n {\n 'keys': ['image', 'label', 'extra'],\n 'channel_dim': 3\n },\n (4, 1, 2, 3)\n]\n\nTEST_CASE_3 = [\n {\n 'keys': ['image', 'label', 'extra'],\n 'channel_dim': 2\n },\n (3, 1, 2, 4)\n]\n\n\nclass TestAsChannelFirstd(unittest.TestCase):\n\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])\n def test_shape(self, input_param, expected_shape):\n test_data = {\n 'image': np.random.randint(0, 2, size=[1, 2, 3, 4]),\n 'label': np.random.randint(0, 2, size=[1, 2, 3, 4]),\n 'extra': np.random.randint(0, 2, size=[1, 2, 3, 4])\n }\n result = AsChannelFirstd(**input_param)(test_data)\n self.assertTupleEqual(result['image'].shape, expected_shape)\n self.assertTupleEqual(result['label'].shape, expected_shape)\n self.assertTupleEqual(result['extra'].shape, expected_shape)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport os\nimport shutil\nimport unittest\n\nimport numpy as np\nimport torch\nfrom ignite.engine import Engine, Events\nfrom parameterized import parameterized\n\nfrom monai.handlers import TensorBoardImageHandler\n\nTEST_CASES = [\n [[20, 20]],\n [[2, 20, 20]],\n [[3, 20, 20]],\n [[20, 20, 20]],\n [[2, 20, 20, 20]],\n [[2, 2, 20, 20, 20]],\n]\n\n\nclass TestHandlerTBImage(unittest.TestCase):\n\n @parameterized.expand(TEST_CASES)\n def test_tb_image_shape(self, shape):\n default_dir = os.path.join('.', 'runs')\n shutil.rmtree(default_dir, ignore_errors=True)\n\n # set up engine\n def _train_func(engine, batch):\n return torch.zeros((1, 1, 10, 10))\n\n engine = Engine(_train_func)\n\n # set up testing handler\n stats_handler = TensorBoardImageHandler()\n 
engine.add_event_handler(Events.ITERATION_COMPLETED, stats_handler)\n\n data = zip(np.random.normal(size=(10, 4, *shape)), np.random.normal(size=(10, 4, *shape)))\n engine.run(data, epoch_length=10, max_epochs=1)\n\n self.assertTrue(os.path.exists(default_dir))\n self.assertTrue(len(glob.glob(default_dir)) > 0)\n shutil.rmtree(default_dir)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom monai.transforms.utils import rescale_array\n\n\ndef create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5, channel_dim=None):\n \"\"\"\n Return a noisy 2D image with `num_obj` circles and a 2D mask image. The maximum radius of the circles is given as\n `rad_max`. The mask will have `num_seg_classes` number of classes for segmentations labeled sequentially from 1, plus a\n background class represented as 0. If `noise_max` is greater than 0 then noise will be added to the image taken from\n the uniform distribution on range `[0,noise_max)`. If `channel_dim` is None, will create an image without channel\n dimension, otherwise create an image with channel dimension as first dim or last dim.\n \"\"\"\n image = np.zeros((width, height))\n\n for i in range(num_objs):\n x = np.random.randint(rad_max, width - rad_max)\n y = np.random.randint(rad_max, height - rad_max)\n rad = np.random.randint(5, rad_max)\n spy, spx = np.ogrid[-x:width - x, -y:height - y]\n circle = (spx * spx + spy * spy) <= rad * rad\n\n if num_seg_classes > 1:\n image[circle] = np.ceil(np.random.random() * num_seg_classes)\n else:\n image[circle] = np.random.random() * 0.5 + 0.5\n\n labels = np.ceil(image).astype(np.int32)\n\n norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)\n noisyimage = rescale_array(np.maximum(image, norm))\n\n if channel_dim is not None:\n assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), 'invalid channel dim.'\n noisyimage, labels = noisyimage[None], labels[None] \\\n if channel_dim == 0 else (noisyimage[..., None], labels[..., None])\n\n return noisyimage, labels\n\n\ndef create_test_image_3d(height, width, depth, num_objs=12, rad_max=30,\n noise_max=0.0, num_seg_classes=5, channel_dim=None):\n \"\"\"\n Return a noisy 3D image and segmentation.\n\n See also:\n :py:meth:`~create_test_image_2d`\n \"\"\"\n image = np.zeros((width, height, depth))\n\n for i in range(num_objs):\n x = np.random.randint(rad_max, width - rad_max)\n y = np.random.randint(rad_max, height - rad_max)\n z = np.random.randint(rad_max, depth - rad_max)\n rad = np.random.randint(5, rad_max)\n spy, spx, spz = np.ogrid[-x:width - x, -y:height - y, -z:depth - z]\n circle = (spx * spx + spy * spy + spz * spz) <= rad * rad\n\n if num_seg_classes > 1:\n image[circle] = np.ceil(np.random.random() * num_seg_classes)\n else:\n image[circle] = np.random.random() * 0.5 + 0.5\n\n labels = np.ceil(image).astype(np.int32)\n\n norm = np.random.uniform(0, num_seg_classes * 
noise_max, size=image.shape)\n noisyimage = rescale_array(np.maximum(image, norm))\n\n if channel_dim is not None:\n assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), 'invalid channel dim.'\n noisyimage, labels = (noisyimage[None], labels[None]) \\\n if channel_dim == 0 else (noisyimage[..., None], labels[..., None])\n\n return noisyimage, labels\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\n\nimport scipy.ndimage\nfrom parameterized import parameterized\n\nfrom monai.transforms import RandRotated\nfrom tests.utils import NumpyImageTestCase2D\n\n\nclass TestRandRotated(NumpyImageTestCase2D):\n\n @parameterized.expand([\n (90, (0, 1), True, 1, 'reflect', 0, True),\n ((-45, 45), (1, 0), True, 3, 'constant', 0, True),\n (180, (1, 0), False, 2, 'constant', 4, False),\n ])\n def test_correct_results(self, degrees, spatial_axes, reshape, \n order, mode, cval, prefilter):\n rotate_fn = RandRotated('img', degrees, prob=1.0, spatial_axes=spatial_axes, reshape=reshape, \n order=order, mode=mode, cval=cval, prefilter=prefilter)\n rotate_fn.set_random_state(243)\n rotated = rotate_fn({'img': self.imt[0]})\n\n angle = rotate_fn.angle\n expected = list()\n for channel in self.imt[0]:\n expected.append(scipy.ndimage.rotate(channel, angle, spatial_axes, reshape, order=order,\n mode=mode, cval=cval, prefilter=prefilter))\n expected = np.stack(expected).astype(np.float32)\n self.assertTrue(np.allclose(expected, rotated['img']))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom monai.transforms import RandScaleIntensity\nfrom tests.utils import NumpyImageTestCase2D\n\n\nclass TestRandScaleIntensity(NumpyImageTestCase2D):\n\n def test_value(self):\n scaler = RandScaleIntensity(factors=0.5, prob=1.0)\n scaler.set_random_state(seed=0)\n result = scaler(self.imt)\n np.random.seed(0)\n expected = (self.imt * (1 + np.random.uniform(low=-0.5, high=0.5))).astype(np.float32)\n np.testing.assert_allclose(result, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.device", "torch.nn.CrossEntropyLoss", "numpy.array", "torch.cuda.is_available" ], [ "numpy.random.randint" ], [ "numpy.random.normal", "torch.zeros" ], [ "numpy.maximum", "numpy.random.random", "numpy.ceil", "numpy.random.uniform", "numpy.zeros", "numpy.random.randint" ], [ "numpy.allclose", "numpy.stack" ], [ "numpy.random.uniform", "numpy.random.seed", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
erwinvanthiel/numpy
[ "4a14a3526abf616ab469c83d8adcc2f2f1d5de9c" ]
[ "numpy/random/tests/test_extending.py" ]
[ "import os\nimport pytest\nimport shutil\nimport subprocess\nimport sys\nimport warnings\nimport numpy as np\n\ntry:\n import cffi\nexcept ImportError:\n cffi = None\n\nif sys.flags.optimize > 1:\n # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1\n # cffi cannot succeed\n cffi = None\n\ntry:\n with warnings.catch_warnings(record=True) as w:\n # numba issue gh-4733\n warnings.filterwarnings('always', '', DeprecationWarning)\n import numba\nexcept ImportError:\n numba = None\n\ntry:\n import cython\n from Cython.Compiler.Version import version as cython_version\nexcept ImportError:\n cython = None\nelse:\n from distutils.version import LooseVersion\n # Cython 0.29.14 is required for Python 3.8 and there are\n # other fixes in the 0.29 series that are needed even for earlier\n # Python versions.\n # Note: keep in sync with the one in pyproject.toml\n required_version = LooseVersion('0.29.14')\n if LooseVersion(cython_version) < required_version:\n # too old or wrong cython, skip the test\n cython = None\n\[email protected](cython is None, reason=\"requires cython\")\[email protected]\ndef test_cython(tmp_path):\n srcdir = os.path.join(os.path.dirname(__file__), '..')\n shutil.copytree(srcdir, tmp_path / 'random')\n # build the examples and \"install\" them into a temporary directory\n env = os.environ.copy()\n subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',\n '--prefix', str(tmp_path / 'installdir'),\n '--single-version-externally-managed',\n '--record', str(tmp_path/ 'tmp_install_log.txt'),\n ],\n cwd=str(tmp_path / 'random' / '_examples' / 'cython'),\n env=env)\n # get the path to the so's\n so1 = so2 = None\n with open(tmp_path /'tmp_install_log.txt') as fid:\n for line in fid:\n if 'extending.' in line:\n so1 = line.strip()\n if 'extending_distributions' in line:\n so2 = line.strip()\n assert so1 is not None\n assert so2 is not None\n # import the so's without adding the directory to sys.path\n from importlib.machinery import ExtensionFileLoader \n extending = ExtensionFileLoader('extending', so1).load_module()\n extending_distributions = ExtensionFileLoader('extending_distributions', so2).load_module()\n\n # actually test the cython c-extension\n from numpy.random import PCG64\n values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')\n assert values.shape == (10,)\n assert values.dtype == np.float64\n\[email protected](numba is None or cffi is None,\n reason=\"requires numba and cffi\")\ndef test_numba():\n from numpy.random._examples.numba import extending # noqa: F401\n\[email protected](cffi is None, reason=\"requires cffi\")\ndef test_cffi():\n from numpy.random._examples.cffi import extending # noqa: F401\n" ]
[ [ "numpy.random.PCG64" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amal029/eha
[ "7df60a663030d1265f212c64e86eb9a07f9d5dbc", "7df60a663030d1265f212c64e86eb9a07f9d5dbc" ]
[ "new_techniques/final.py", "src/solver.py" ]
[ "#!/usr/bin/env python3\nimport sympy as S\nimport numpy as N\nimport simpy\nfrom src.solver import Solver\n\n\nstep = 0 # The number of integration steps\n\n\ndef example1(env, solver, cstate=0):\n \"\"\"Example of a ha being solved using the new technique.\n\n \"\"\"\n # TODO: Need to fix so that this works.\n # odes = {x.diff(solver.t): (x+5)*solver.t}\n\n # First initialise the continuous variables.\n\n x = S.sympify('x(t)') # The continuous variable\n\n # Initial values\n vals_at_tn = {x: 1}\n\n def build_gtn(gtn, vals_at_tn):\n for k, i in vals_at_tn.items():\n gtn = gtn.replace(k, i)\n # Replace the \"t\" if it is there in the guard\n gtn = gtn.replace(solver.t, env.now).evalf()\n return gtn\n\n def build_gth(og, vals_at_tn, xps):\n for x in xps:\n og = og.replace(x, xps[x])\n # Replace x(t) → x(Tₙ)\n # Replace t → Tₙ\n for k, v in vals_at_tn.items():\n og = og.replace(k, v)\n og = og.replace(solver.t, env.now).evalf()\n return og\n\n def get_gh(og):\n # Now get the h, where you think the guard holds.\n nsoln = N.roots(S.poly(og).all_coeffs())\n nsoln = nsoln[N.isreal(nsoln)]\n nsoln = nsoln[N.where(nsoln >= 0)]\n # If you cannot find a root then set it to infinity\n h = N.real(N.min(nsoln)) if nsoln.size != 0 else N.inf\n return h\n\n # Returning state, delta, values, loc's_FT\n def location1(x, vals_at_tn):\n # The odes for all continuous variables in location1\n odes = {x.diff(solver.t): x + (2*solver.t)}\n odes = {k: solver.taylor_expand(i)\n for k, i in odes.items()}\n\n # Get the tokens for x\n dict_tokens = {x: solver.build_tokens(x, odes)}\n\n # First get the polynomial expression from tokens\n xps = {x: solver.get_polynomial(x, tokens, vals_at_tn)\n for x, tokens in dict_tokens.items()}\n\n # Now check of the guard is satisfied, if yes jump\n # The guard expression\n g = S.sympify('x(t) - 10')\n\n # Compute the value of g(t) at Tₙ\n gtn = build_gtn(g.copy(), vals_at_tn)\n # print('guard at Tₙ:', gtn)\n\n if (abs(gtn) <= solver.epsilon): # If zero crossing happens\n # We can make a jump to the next location\n return 1, 0, vals_at_tn\n else:\n # This is the intra-location transition\n\n # Guard1 g(t) = 0\n og = build_gth(g.copy(), vals_at_tn, xps)\n # print('guard1:', og)\n h = get_gh(og)\n\n # TODO: Guard2 g(t) - 2×g(Tₙ) = 0\n og2 = og - 2*gtn\n # print('guard2:', og2)\n h2 = get_gh(og2)\n\n # Take the minimum from amongst the two\n h = min(h, h2) if h2 is not N.inf else h\n\n assert h is not N.inf, 'Cannot find h from guards'\n\n h = solver.delta((dict_tokens, vals_at_tn), h, env.now)\n\n # Now compute the new values for continuous variables\n vals_at_tn = {k: solver.get_vals_at_tn_h(x, vals_at_tn, h, env.now)\n for k, x in xps.items()}\n return 0, h, vals_at_tn\n\n def location2(x, vals_at_tn):\n global step\n print('total steps: ', step)\n # Done\n env.exit()\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location1,\n 1: location2\n }\n\n # The initial values at time 0\n print('%f, %s' % (env.now, vals_at_tn))\n\n # Now start running the system until all events are done or\n # simulation time is over.\n while(True):\n (cstate, delta, vals_at_tn) = switch_case[cstate](x, vals_at_tn)\n # The new values of the continuous variables\n if delta != 0:\n print('%f: %s' % (env.now+delta, vals_at_tn))\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)\n\n\ndef main():\n # Initiaise the solver\n solver = Solver(n=10, epsilon=1e-6)\n\n env = simpy.Environment()\n env.process(example1(env, 
solver))\n # Run the simulation until all events in the queue are processed.\n # Make it some number to halt simulation after sometime.\n env.run()\n\n\nif __name__ == '__main__':\n main()\n", "from scipy.optimize import minimize\nimport sympy as S\nimport sympy.abc as ABC\nfrom math import factorial\nimport mpmath as M\n\n\nclass Solver(object):\n \"\"\"The solver for computing the integration step size.\n n: The number of terms in ODE → Taylor exapansion\n NUM_TERMS: The number of terms in transcendental → Taylor expansion\n epsilon: The max value error allowed\n DEBUG: No effect, yet\n\n \"\"\"\n epsilon = 1e-12\n t = ABC.t\n h = ABC.h\n n = 1\n NUM_TERMS = 5\n DEBUG = 0\n TRIG_FUNCS = [S.sin, S.cos, S.tan, S.cot, S.sec, S.csc]\n INV_TRIG_FUNCS = [S.asin, S.acos, S.atan, S.acot, S.asec, S.acsc, S.atan2]\n HYPERBOLIC_FUNCS = [S.sinh, S.cosh, S.tanh, S.coth, S.sech, S.csch]\n INV_HYPERBOLIC_FUNCS = [S.asinh, S.acosh, S.atanh, S.acoth, S.asech,\n S.acsch]\n EXP_LOG = [S.exp, S.ln]\n TRANSCEDENTAL_FUNCS = (TRIG_FUNCS + INV_TRIG_FUNCS + HYPERBOLIC_FUNCS +\n INV_HYPERBOLIC_FUNCS + EXP_LOG)\n\n def __init__(self, n=1, NUM_TERMS=10, epsilon=1e-12, DEBUG=0):\n Solver.epsilon = epsilon\n assert n >= 1, \"n < 1\"\n Solver.n = n\n Solver.DEBUG = DEBUG\n Solver.NUM_TERMS = NUM_TERMS\n\n @staticmethod\n def taylor_expand(expr, around=0):\n assert around == 0, 'Taylor expansion only works around 0 for now'\n if expr.args is ():\n return expr\n args = [Solver.taylor_expand(a, around) for a in expr.args]\n if expr.func in Solver.TRANSCEDENTAL_FUNCS:\n if len(args) != 1:\n raise RuntimeError('Cannot create a taylor series '\n 'approximation of: ', expr)\n else:\n # XXX: Build the polynomial for arg\n coeffs = M.taylor(expr.func, around, Solver.NUM_TERMS)\n # print(coeffs)\n coeffs = [(S.Mul(float(a), S.Mul(*[args[0]\n for i in range(c)])))\n for c, a in enumerate(coeffs)][::-1]\n # print(coeffs)\n return S.Add(*coeffs)\n else:\n return expr.func(*args)\n\n @staticmethod\n def getLipschitz(fun, x0, bounds):\n \"\"\"args:\n fun: The function whose lipschitz constant is needed\n bounds: Sequence of (min, max) pairs for each element in x. 
None is\n used to specify no bound.\n\n return: The lipschitz constant L for the function if one exists\n\n \"\"\"\n\n # import inspect\n # print(inspect.getsource(fun))\n\n # XXX: Always call the minimize function with this, because it\n # handles the number of arguments correctly.\n def lambdify_wrapper(x):\n \"\"\"\n args:\n x: The argument list, which will be used by scipy\n func: The actual function generated by sympy\n \"\"\"\n return fun(*x)\n\n # Now get the max lipschitz constant\n resmax = minimize(lambdify_wrapper, x0, bounds=bounds)\n return abs(resmax.fun)\n\n @staticmethod\n def get_polynomial(k, tokens, vals_at_tn):\n # tokens = tokens\n # Insert initial value in tokens\n # tokens.insert(0, vals_at_tn[k])\n poly = vals_at_tn[k] + sum([c*Solver.h**p/factorial(p)\n for c, p in\n zip(tokens, range(1, Solver.n+1))])\n return poly\n\n @staticmethod\n # XXX: This is taking too long, and needs to be optimised.\n def get_vals_at_tn_h(poly, vals_at_tn, h, curr_time):\n \"\"\"tokens are the taylor derivative terms for k, excluding the constant\n term k is x(t)\n\n \"\"\"\n vals_at_tn = vals_at_tn.copy()\n vals_at_tn[Solver.t] = curr_time\n vals_at_tn[Solver.h] = h\n poly = poly.xreplace(vals_at_tn)\n return poly.evalf()\n\n @staticmethod\n def build_tokens(cont_var, odes):\n \"\"\"cont_var: name of the function, e.g., x(t), you want the tokens for.\n\n odes are all xs(t) derivative terms, of all continuous vars, e.g.,\n {x(t): x(t)+y(t)+1, y(t): 1,...}\n\n \"\"\"\n odes = {k: Solver.taylor_expand(i) for k, i in odes.items()}\n tokens = [odes[cont_var.diff(Solver.t)]]\n for _ in range(len(tokens), Solver.n):\n tokens.append(Solver.build(tokens, odes))\n return tokens\n\n @staticmethod\n def build(tokens, odes):\n # This gets the next derivative\n slope = tokens[-1].diff(Solver.t)\n # 2.) 
Replace Derivative(deps(t), t) → exprs\n for i, k in odes.items():\n slope = slope.replace(i, k)\n return slope\n\n @staticmethod\n def delta(values, h, curr_time):\n \"\"\"Gives the time step needed in the taylor polynomial to correctly\n bound the local truncation error given the step size\n\n args:\n\n values: Continuous variable tuple\n\n ({all cont_var tokens obtained from Solver.build_tokens},\n {intial values of all continuous variables})\n\n h: The step size that you want to take\n\n curr_time: The current time Tₙ\n\n\n return: h (seconds), such that f⁽ⁿ⁺¹)(η)/(n+1)!⋆(h⁽ⁿ⁺¹⁾) ≤\n Solver.epsilon, n ≡ 2, η ∈ (Tₙ, Tₙ + h),\n\n \"\"\"\n assert(len(values) == 2)\n\n vals_at_tn = values[1]\n all_ode_taylor = values[0]\n odes = {k.diff(Solver.t): i[0] for k, i in all_ode_taylor.items()}\n\n # XXX: Now compute the value of continuous vars at Tₙ + h\n vals_at_tn_h = {k:\n Solver.get_vals_at_tn_h(\n Solver.get_polynomial(k, i, vals_at_tn),\n vals_at_tn, h, curr_time\n )\n for k, i in all_ode_taylor.items()}\n\n # Now compute the bounds for each continuous variable\n bounds = {k: (min(i, vals_at_tn_h[k]), max(i, vals_at_tn_h[k]))\n for k, i in vals_at_tn.items()}\n # print('bounds:', bounds)\n x0s = [bounds[k][1] for k in bounds]\n\n # Replace x(t) → x, will be needed for S.lambdify\n func_to_var = {k: S.Symbol(str(k.func)) for k in bounds.keys()}\n\n # Now we need to get the lipschitz constant for all continuous\n # vars at the n+1 term\n taylor_n1_term = dict()\n for k, i in all_ode_taylor.items():\n slope = Solver.build(i, odes)\n # XXX: x(t) → x, etc\n for kk, ii in func_to_var.items():\n slope = slope.replace(kk, ii)\n\n # XXX: This should be negative, because we want to maximize\n # it.\n taylor_n1_term[k] = -slope\n # print('n+1 derivatives:', taylor_n1_term)\n\n # These are the lambdified python functions\n lambdified = {k: S.lambdify((list(func_to_var.values()) +\n [Solver.t]), i)\n for k, i in taylor_n1_term.items()}\n\n # Now get the lipschitz constants for each ode\n lips = {k: Solver.getLipschitz(i, (x0s + [curr_time+h]),\n (list(bounds.values()) +\n [(curr_time, curr_time+h)]))\n for k, i in lambdified.items()}\n # print('lips:', lips)\n\n # Now check if lagrange error is satisfied\n facn_1 = factorial(Solver.n+1)\n\n # TODO: Add the condition to check that the series\n # converges.\n hs = [((Solver.epsilon * facn_1)/ll)**(1/(Solver.n+1))\n for k, ll in lips.items() if ll != 0]\n h = min(h, *hs) if hs != [] else h\n return h\n" ]
[ [ "numpy.isreal", "numpy.where", "numpy.min" ], [ "scipy.optimize.minimize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
franciscojavierarceo/DQN-Event-Summarization
[ "6e651dc511affc8883d656a5b9e909f10266f41d" ]
[ "Code/Utils/make_density_gif.py" ]
[ "import os\nimport sys\nimport imageio\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport matplotlib.colors as mcolors\n\n\ndef make_colormap(seq):\n \"\"\"Return a LinearSegmentedColormap\n seq: a sequence of floats and RGB-tuples. The floats should be increasing\n and in the interval (0,1).\n \"\"\"\n seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\n cdict = {'red': [], 'green': [], 'blue': []}\n for i, item in enumerate(seq):\n if isinstance(item, float):\n r1, g1, b1 = seq[i - 1]\n r2, g2, b2 = seq[i + 1]\n cdict['red'].append([item, r1, r2])\n cdict['green'].append([item, g1, g2])\n cdict['blue'].append([item, b1, b2])\n return mcolors.LinearSegmentedColormap('CustomMap', cdict)\n\n\ndef buildCDF(df, var):\n cdf = pd.DataFrame(df[var].value_counts())\n cdf = cdf.reset_index(drop=False)\n cdf.columns = [var, 'count']\n cdf['percent'] = cdf['count'] / cdf['count'].sum()\n cdf = cdf.sort_values(by=var)\n cdf['cumpercent'] = cdf['percent'].cumsum()\n return cdf\n\ndef main(nepochs, model, metric):\n if type(nepochs) == str:\n nepochs = int(nepochs)\n \n pdf = pd.read_csv('./Code/Performance/Simulation/%s_%s_perf.txt' % (model, metric) , sep=';')\n md = {\"f1\": \"rougeF1\", \"recall\": \"rougeRecall\", \"precision\": \"rougePrecision\"}\n emetric = md[metric]\n c = mcolors.ColorConverter().to_rgb\n grn = 'limegreen'\n rvb = make_colormap([c(grn), c('white'), 0.1, c(grn), c('white'), 0.9, c('white')])\n # Pulling in the images that were exported\n ofile_names = [('./Code/plotdata/%s/1/%i_epoch.txt' % (model, x) ) for x in range(nepochs) ] \n for (ofile, epoch) in zip(ofile_names, range(nepochs)):\n # Loading data sets and concatenating them\n odf = pd.read_csv(ofile, sep=';')\n if epoch == 0:\n llow = min(odf['actual'].min(), odf['predSelect'].min(), odf['predSkip'].min())\n lhigh = max(odf['actual'].max(), odf['predSelect'].max(), odf['predSkip'].max())\n\n llow = min(llow, odf['actual'].min(), odf['predSelect'].min(), odf['predSkip'].min())\n lhigh = max(lhigh, odf['actual'].max(), odf['predSelect'].max(), odf['predSkip'].max())\n\n for (ofile, epoch) in zip(ofile_names, range(nepochs)):\n # Loading data sets and concatenating them\n # Two subplots, the axes array is 1-d\n rouge = pdf[pdf['epoch']==epoch][emetric].tolist()[0]\n odf = pd.read_csv(ofile, sep=';')\n odf['predOptimal'] = odf[['predSelect','predSkip']].max(axis=1)\n nsel = odf['Select'].sum()\n nskip = odf['Skip'].sum()\n den = float(nsel+nskip)\n cdfp = buildCDF(odf, 'predOptimal')\n cdfa = buildCDF(odf, 'actual')\n f, axarr = plt.subplots(1, 2, figsize=(16,8))\n axarr[0].imshow(odf[['Skip', 'Select']], cmap=rvb, interpolation='nearest', aspect='auto')\n axarr[0].set_title('Select = {%i, %.3f} and Skip = {%i, %.3f}' % (nsel, nsel/den, nskip, nskip/den))\n axarr[0].set_xlabel('Estimated Optimal Actions')\n axarr[0].set_xticks([])\n axarr[1].plot(cdfp['predOptimal'], cdfp['cumpercent'], label='Predicted', c='blue')\n axarr[1].plot(cdfa['actual'], cdfa['cumpercent'], label='Actual', c='red')\n axarr[1].set_ylim([0,1])\n axarr[1].set_xlim([llow, lhigh])\n axarr[1].set_xlabel('CDF of Actual and Predicted Rouge')\n axarr[1].legend(loc ='upper left')\n axarr[1].grid()\n axarr[1].set_title('%s %s model performance at epoch %i = %.3f' % (metric, model, epoch, rouge))\n f.tight_layout()\n f.savefig('./Code/plotdata/%s/plotfiles/perfplot_%i.png' % (model, epoch) )\n\n # Exporting the images to a gif\n file_names = [ ('./Code/plotdata/%s/plotfiles/perfplot_%i.png' % (model, epoch)) for 
epoch in range(nepochs)]\n images = []\n for filename in file_names:\n images.append(imageio.imread(filename))\n # Actual v Predicted gif\n imageio.mimsave('./Code/Performance/Simulation/%s_perf.gif' % model, images, duration=0.75)\n\nif __name__ == '__main__':\n main(sys.argv[1], sys.argv[2], sys.argv[3])" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.colors.LinearSegmentedColormap", "pandas.read_csv", "matplotlib.colors.ColorConverter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
zhutchens1/g3groups
[ "105b991f64975763e78ca3f990648c980baa3ecb" ]
[ "create_ecoresolve_stellarmassselgroups.py" ]
[ "\"\"\"\nZackary Hutchens - November 2020\n\nThis program creates stellar mass-selected group catalogs for ECO/RESOLVE-G3 using the new algorithm, described in the readme markdown.\n\nThe outline of this code is:\n(1) Read in observational data from RESOLVE-B and ECO (the latter includes RESOLVE-A).\n(2) Prepare arrays of input parameters and for storing results.\n(3) Perform FoF only for giants in ECO, using an adaptive linking strategy.\n (a) Get the adaptive links for every ECO galaxy.\n (b) Fit those adaptive links for use in RESOLVE-B.\n (c) Perform giant-only FoF for ECO\n (d) Perform giant-only FoF for RESOLVE-B, by interpolating the fit to obtain separations for RESOLVE-B.\n(4) From giant-only groups, fit model for individual giant projected radii and peculiar velocites, to use for association.\n(5) Associate dwarf galaxies to giant-only FoF groups for ECO and RESOLVE-B (note different selection floors for dwarfs).\n(6) Based on giant+dwarf groups, calibrate boundaries (as function of giant+dwarf integrated stellar mass) for iterative combination\n(7) Iterative combination on remaining ungrouped dwarf galaxies\n(8) halo mass assignment\n(9) Finalize arrays + output\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import curve_fit\nfrom center_binned_stats import center_binned_stats\nimport foftools as fof\nimport iterativecombination as ic\nimport virtools as vz\nfrom smoothedbootstrap import smoothedbootstrap as sbs\nimport sys\nfrom scipy.interpolate import UnivariateSpline\nimport virtools as vz\nfrom lss_dens import lss_dens_by_galaxy\n\n#def giantmodel(x, a, b, c, d):\n# return a*np.log(np.abs(b)*x+c)+d\n\ndef giantmodel(x, a, b):\n return np.abs(a)*np.log(np.abs(b)*x+1)\n\ndef exp(x, a, b, c):\n return np.abs(a)*np.exp(1*np.abs(b)*(x) + c)\n #return a*np.exp(b*(x**2) + c*(x) + d)+np.abs(e)\n\ndef sepmodel(x, a, b, c, d, e):\n #return np.abs(a)*np.exp(-1*np.abs(b)*x + c)+d\n #return a*(x**3)+b*(x**2)+c*x\n return a*(x**4)+b*(x**3)+c*(x**2)+(d*x)+e\n\ndef sigmarange(x):\n q84, q16 = np.percentile(x, [84 ,16])\n return (q84-q16)/2.\n\nif __name__=='__main__':\n ####################################\n # Step 1: Read in obs data\n ####################################\n ecodata = pd.read_csv(\"ECOdata_022521.csv\")\n resolvedata = pd.read_csv(\"RESOLVEdata_022521.csv\")\n resolvebdata = resolvedata[resolvedata.f_b==1]\n\n ####################################\n # Step 2: Prepare arrays\n ####################################\n ecosz = len(ecodata)\n econame = np.array(ecodata.name)\n ecoresname = np.array(ecodata.resname)\n ecoradeg = np.array(ecodata.radeg)\n ecodedeg = np.array(ecodata.dedeg)\n ecocz = np.array(ecodata.cz)\n ecologmstar = np.array(ecodata.logmstar)\n ecologmgas = np.array(ecodata.logmgas)\n ecourcolor = np.array(ecodata.modelu_rcorr)\n ecog3grp = np.full(ecosz, -99.) # id number of g3 group\n ecog3grpn = np.full(ecosz, -99.) # multiplicity of g3 group\n ecog3grpradeg = np.full(ecosz,-99.) # ra of group center\n ecog3grpdedeg = np.full(ecosz,-99.) # dec of group center\n ecog3grpcz = np.full(ecosz,-99.) # cz of group center\n ecog3logmh = np.full(ecosz,-99.) # abundance-matched halo mass\n ecog3intmstar = np.full(ecosz,-99.) # group-integrated stellar mass\n\n resbana_g3grp = np.full(ecosz,-99.) 
# for RESOLVE-B analogue dataset\n\n resbsz = int(len(resolvebdata))\n resbname = np.array(resolvebdata.name)\n resbradeg = np.array(resolvebdata.radeg)\n resbdedeg = np.array(resolvebdata.dedeg)\n resbcz = np.array(resolvebdata.cz)\n resblogmstar = np.array(resolvebdata.logmstar)\n resblogmgas = np.array(resolvebdata.logmgas)\n resburcolor = np.array(resolvebdata.modelu_rcorr)\n resbg3grp = np.full(resbsz, -99.)\n resbg3grpn = np.full(resbsz, -99.)\n resbg3grpradeg = np.full(resbsz, -99.)\n resbg3grpdedeg = np.full(resbsz, -99.)\n resbg3grpcz = np.full(resbsz, -99.)\n resbg3logmh = np.full(resbsz, -99.)\n resbg3intmstar = np.full(resbsz, -99.)\n\n ####################################\n # Step 3: Giant-Only FOF\n ####################################\n ecogiantsel = (ecologmstar>=9.5) & (ecocz>2530.) & (ecocz<8000.)\n # (a) compute sep values for eco giants\n ecovolume = 192351.36 # Mpc^3 with h=1 **\n meansep0 = (ecovolume/len(ecologmstar[ecogiantsel]))**(1/3.)\n\n # (b) make an interpolation function use this for RESOLVE-B\n\n # (c) perform giant-only FoF on ECO\n blos = 1.1\n bperp = 0.07 # from Duarte & Mamon 2014\n ecogiantfofid = fof.fast_fof(ecoradeg[ecogiantsel], ecodedeg[ecogiantsel], ecocz[ecogiantsel], bperp, blos, meansep0) # meansep0 if fixed LL\n ecog3grp[ecogiantsel] = ecogiantfofid\n resbana_g3grp[ecogiantsel] = ecogiantfofid # RESOLVE-B analogue dataset\n # (d) perform giant-only FoF on RESOLVE-B\n resbgiantsel = (resblogmstar>=9.5) & (resbcz>4250) & (resbcz<7300)\n resbgiantfofid = fof.fast_fof(resbradeg[resbgiantsel], resbdedeg[resbgiantsel], resbcz[resbgiantsel], bperp, blos, meansep0)\n resbg3grp[resbgiantsel] = resbgiantfofid\n\n # (e) check the FOF results\n plt.figure()\n binv = np.arange(0.5,3000.5,3)\n plt.hist(fof.multiplicity_function(ecog3grp[ecog3grp!=-99.], return_by_galaxy=False), bins=binv, histtype='step', linewidth=3, label='ECO Giant-Only FoF Groups')\n plt.hist(fof.multiplicity_function(resbg3grp[resbg3grp!=-99.], return_by_galaxy=False), bins=binv, histtype='step', linewidth=1.5, hatch='\\\\', label='RESOLVE-B Giant-Only FoF Groups')\n plt.xlabel(\"Number of Giant Galaxies per Group\")\n plt.ylabel(\"Number of Giant-Only FoF Groups\")\n plt.yscale('log')\n plt.legend(loc='best')\n plt.xlim(0,80)\n plt.show()\n\n ##########################################\n # Step 4: Compute Association Boundaries\n ##########################################\n ecogiantgrpra, ecogiantgrpdec, ecogiantgrpcz = fof.group_skycoords(ecoradeg[ecogiantsel], ecodedeg[ecogiantsel], ecocz[ecogiantsel], ecogiantfofid)\n relvel = np.abs(ecogiantgrpcz - ecocz[ecogiantsel])\n relprojdist = (ecogiantgrpcz + ecocz[ecogiantsel])/100. 
* ic.angular_separation(ecogiantgrpra, ecogiantgrpdec, ecoradeg[ecogiantsel], ecodedeg[ecogiantsel])/2.0\n ecogiantgrpn = fof.multiplicity_function(ecogiantfofid, return_by_galaxy=True)\n uniqecogiantgrpn, uniqindex = np.unique(ecogiantgrpn, return_index=True)\n keepcalsel = np.where(uniqecogiantgrpn>1)\n\n median_relprojdist = np.array([np.median(relprojdist[np.where(ecogiantgrpn==sz)]) for sz in uniqecogiantgrpn[keepcalsel]])\n median_relvel = np.array([np.median(relvel[np.where(ecogiantgrpn==sz)]) for sz in uniqecogiantgrpn[keepcalsel]])\n\n rproj_median_error = np.std(np.array([sbs(relprojdist[np.where(ecogiantgrpn==sz)], 10000, np.median, kwargs=dict({'axis':1 })) for sz in uniqecogiantgrpn[keepcalsel]]), axis=1)\n dvproj_median_error = np.std(np.array([sbs(relvel[np.where(ecogiantgrpn==sz)], 10000, np.median, kwargs=dict({'axis':1})) for sz in uniqecogiantgrpn[keepcalsel]]), axis=1)\n\n #rprojslope, rprojint = np.polyfit(uniqecogiantgrpn[keepcalsel], median_relprojdist, deg=1, w=1/rproj_median_error)\n #dvprojslope, dvprojint = np.polyfit(uniqecogiantgrpn[keepcalsel], median_relvel, deg=1, w=1/dvproj_median_error)\n poptrproj, jk = curve_fit(giantmodel, uniqecogiantgrpn[keepcalsel], median_relprojdist, sigma=rproj_median_error)#, p0=[0.1, -2, 3, -0.1])\n poptdvproj,jk = curve_fit(giantmodel, uniqecogiantgrpn[keepcalsel], median_relvel, sigma=dvproj_median_error)#, p0=[160,6.5,45,-600])\n rproj_boundary = lambda N: 3*giantmodel(N, *poptrproj) #3*(rprojslope*N+rprojint)\n vproj_boundary = lambda N: 4.5*giantmodel(N, *poptdvproj) #4.5*(dvprojslope*N+dvprojint)\n assert ((rproj_boundary(1)>0) and (vproj_boundary(1)>0)), \"Cannot extrapolate Rproj_fit or dv_proj_fit to N=1\"\n\n # get virial radii from abundance matching to giant-only groups\n gihaloid, gilogmh, gir280, gihalovdisp = ic.HAMwrapper(ecoradeg[ecogiantsel], ecodedeg[ecogiantsel], ecocz[ecogiantsel], ecologmstar[ecogiantsel], ecog3grp[ecogiantsel],\\\n ecovolume, inputfilename=None, outputfilename=None)\n gilogmh = np.log10(10**gilogmh)# no longer need 7/29: /fof.getmhoffset(280,337,1,1,6))\n gihalorvir = (3*(10**gilogmh) / (4*np.pi*337*0.3*2.77e11) )**(1/3.)\n gihalon = fof.multiplicity_function(np.sort(ecog3grp[ecogiantsel]), return_by_galaxy=False)\n plt.figure()\n plt.plot(gihalon, gihalorvir, 'k.')\n plt.show()\n\n plt.figure()\n sel = (ecogiantgrpn>1)\n plt.scatter(gihalon, gihalovdisp, marker='D', color='purple', label=r'ECO HAM Velocity Dispersion')\n plt.plot(ecogiantgrpn[sel], relvel[sel], 'r.', alpha=0.2, label='ECO Giant Galaxies')\n plt.errorbar(uniqecogiantgrpn[keepcalsel], median_relvel, fmt='k^', label=r'$\\Delta v_{\\rm proj}$ (Median of $\\Delta v_{\\rm proj,\\, gal}$)',yerr=dvproj_median_error)\n tx = np.linspace(1,max(ecogiantgrpn),1000)\n plt.plot(tx, giantmodel(tx, *poptdvproj), label=r'$1\\Delta v_{\\rm proj}^{\\rm fit}$')\n plt.plot(tx, 4.5*giantmodel(tx, *poptdvproj), 'g', label=r'$4.5\\Delta v_{\\rm proj}^{\\rm fit}$', linestyle='-.')\n plt.xlabel(\"Number of Giant Members\")\n plt.ylabel(\"Relative Velocity to Group Center [km/s]\")\n plt.legend(loc='best')\n plt.show()\n\n plt.clf()\n plt.scatter(gihalon, gihalorvir, marker='D', color='purple', label=r'ECO Group Virial Radii')\n plt.plot(ecogiantgrpn[sel], relprojdist[sel], 'r.', alpha=0.2, label='ECO Giant Galaxies')\n plt.errorbar(uniqecogiantgrpn[keepcalsel], median_relprojdist, fmt='k^', label=r'$R_{\\rm proj}$ (Median of $R_{\\rm proj,\\, gal}$)',yerr=rproj_median_error)\n plt.plot(tx, giantmodel(tx, *poptrproj), label=r'$1R_{\\rm proj}^{\\rm 
fit}$')\n plt.plot(tx, 3*giantmodel(tx, *poptrproj), 'g', label=r'$3R_{\\rm proj}^{\\rm fit}$', linestyle='-.')\n plt.xlabel(\"Number of Giant Members in Galaxy's Group\")\n plt.ylabel(\"Projected Distance from Giant to Group Center [Mpc/h]\")\n plt.legend(loc='best')\n #plt.xlim(0,20)\n #plt.ylim(0,2.5)\n #plt.xticks(np.arange(0,22,2))\n plt.show()\n\n ####################################\n # Step 5: Association of Dwarfs\n ####################################\n ecodwarfsel = (ecologmstar<9.5) & (ecologmstar>=8.9) & (ecocz>2530) & (ecocz<8000)\n resbdwarfsel = (resblogmstar<9.5) & (resblogmstar>=8.7) & (resbcz>4250) & (resbcz<7300)\n resbana_dwarfsel = (ecologmstar<9.5) & (ecologmstar>=8.7) & (ecocz>2530) & (ecocz<8000)\n\n resbgiantgrpra, resbgiantgrpdec, resbgiantgrpcz = fof.group_skycoords(resbradeg[resbgiantsel], resbdedeg[resbgiantsel], resbcz[resbgiantsel], resbgiantfofid)\n resbgiantgrpn = fof.multiplicity_function(resbgiantfofid, return_by_galaxy=True)\n ecodwarfassocid, junk = fof.fast_faint_assoc(ecoradeg[ecodwarfsel],ecodedeg[ecodwarfsel],ecocz[ecodwarfsel],ecogiantgrpra,ecogiantgrpdec,ecogiantgrpcz,ecogiantfofid,\\\n rproj_boundary(ecogiantgrpn),vproj_boundary(ecogiantgrpn))\n resbdwarfassocid, junk = fof.fast_faint_assoc(resbradeg[resbdwarfsel],resbdedeg[resbdwarfsel],resbcz[resbdwarfsel],resbgiantgrpra,resbgiantgrpdec,resbgiantgrpcz,resbgiantfofid,\\\n rproj_boundary(resbgiantgrpn),vproj_boundary(resbgiantgrpn))\n\n resbana_dwarfassocid, jk = fof.fast_faint_assoc(ecoradeg[resbana_dwarfsel], ecodedeg[resbana_dwarfsel], ecocz[resbana_dwarfsel], ecogiantgrpra, ecogiantgrpdec, ecogiantgrpcz, ecogiantfofid,\\\n rproj_boundary(ecogiantgrpn), vproj_boundary(ecogiantgrpn))\n\n\n ecog3grp[ecodwarfsel] = ecodwarfassocid\n resbg3grp[resbdwarfsel] = resbdwarfassocid\n resbana_g3grp[resbana_dwarfsel] = resbana_dwarfassocid\n\n ###############################################\n # Step 6: Calibration for Iter. Combination\n ###############################################\n ecogdgrpn = fof.multiplicity_function(ecog3grp, return_by_galaxy=True)\n #ecogdsel = np.logical_not((ecogdgrpn==1) & (ecologmstar<9.5) & (ecog3grp>0)) # select galaxies that AREN'T ungrouped dwarfs\n ecogdsel = np.logical_and(np.logical_not(np.logical_or(ecog3grp==-99., ((ecogdgrpn==1) & (ecologmstar<9.5) & (ecologmstar>=8.9)))), (ecogdgrpn>1))\n ecogdgrpra, ecogdgrpdec, ecogdgrpcz = fof.group_skycoords(ecoradeg[ecogdsel], ecodedeg[ecogdsel], ecocz[ecogdsel], ecog3grp[ecogdsel])\n ecogdrelvel = np.abs(ecogdgrpcz - ecocz[ecogdsel])\n ecogdrelprojdist = (ecogdgrpcz + ecocz[ecogdsel])/100. 
* ic.angular_separation(ecogdgrpra, ecogdgrpdec, ecoradeg[ecogdsel], ecodedeg[ecogdsel])/2.0\n ecogdn = ecogdgrpn[ecogdsel]\n ecogdtotalmass = ic.get_int_mass(ecologmstar[ecogdsel], ecog3grp[ecogdsel])\n\n massbins=np.arange(9.75,14,0.15)\n binsel = np.where(np.logical_and(ecogdn>1, ecogdtotalmass<14))\n gdmedianrproj, massbincenters, massbinedges, jk = center_binned_stats(ecogdtotalmass[binsel], ecogdrelprojdist[binsel], np.median, bins=massbins)\n #gdmedianrproj_err = np.std(np.array([sbs(ecogdrelprojdist[binsel][np.where(np.logical_and(ecogdtotalmass[binsel]>massbinedges[i-1], ecogdtotalmass[binsel]<=massbinedges[i]))],\\\n # 10000, np.median) for i in range(1,len(massbinedges))]), axis=1)\n gdmedianrelvel, jk, jk, jk = center_binned_stats(ecogdtotalmass[binsel], ecogdrelvel[binsel], np.median, bins=massbins)\n #gdmedianrelvel_err = np.std(np.array([sbs(ecogdrelvel[binsel][np.where(np.logical_and(ecogdtotalmass[binsel]>massbinedges[i-1], ecogdtotalmass[binsel]<=massbinedges[i]))],\\\n # 10000, np.median) for i in range(1,len(massbinedges))]), axis=1)\n gdmedianrproj_err, jk, jk, jk = center_binned_stats(ecogdtotalmass[binsel], ecogdrelprojdist[binsel], sigmarange, bins=massbins)\n gdmedianrelvel, jk, jk, jk = center_binned_stats(ecogdtotalmass[binsel], ecogdrelvel[binsel], np.median, bins=massbins)\n gdmedianrelvel_err, jk, jk, jk = center_binned_stats(ecogdtotalmass[binsel], ecogdrelvel[binsel], sigmarange, bins=massbins)\n nansel = np.isnan(gdmedianrproj)\n if 0:\n #guess=None\n #guess=[-1,0.01,0.05,-6,0.01]\n guess=[-1,0.01,0.05]\n else:\n guess= [-1,0.01,0.05]#None#[1e-5, 0.4, 0.2, 1]\n poptr, pcovr = curve_fit(exp, massbincenters[~nansel], gdmedianrproj[~nansel], p0=guess, maxfev=5000, sigma=gdmedianrproj_err[~nansel])#30**massbincenters[~nansel])\n poptv, pcovv = curve_fit(exp, massbincenters[~nansel], gdmedianrelvel[~nansel], p0=[3e-5,4e-1,5e-03], maxfev=5000)#, sigma=gdmedianrelvel_err[~nansel])\n print(poptr, poptv)\n\n tx = np.linspace(7,15,100)\n plt.figure()\n plt.axhline(0)\n plt.plot(ecogdtotalmass[binsel], ecogdrelprojdist[binsel], 'k.', alpha=0.2, label='ECO Galaxies in N>1 Giant+Dwarf Groups')\n #plt.plot(massbincenters, gdmedianrproj, 'r^', label='Median')\n plt.errorbar(massbincenters, gdmedianrproj, yerr=gdmedianrproj_err, fmt='r^', label='Median')\n plt.plot(tx, exp(tx,*poptr), label='Fit to Medians')\n plt.plot(tx, 3*exp(tx,*poptr), label='3 times Fit to Medians')\n plt.xlabel(r\"Integrated Stellar Mass of Giant + Dwarf Members\")\n plt.ylabel(\"Projected Distance from Galaxy to Group Center [Mpc/h]\")\n plt.legend(loc='best')\n plt.xlim(9.5,13.2)\n plt.ylim(0,3)\n #plt.yscale('log')\n plt.show()\n\n plt.figure()\n plt.plot(ecogdtotalmass[binsel], ecogdrelvel[binsel], 'k.', alpha=0.2, label='Mock Galaxies in N=2 Giant+Dwarf Groups')\n #plt.plot(massbincenters, gdmedianrelvel, 'r^',label='Medians')\n plt.errorbar(massbincenters, gdmedianrelvel, yerr=gdmedianrelvel_err, fmt='r^', label='Median')\n plt.plot(tx, exp(tx, *poptv), label='Fit to Medians')\n plt.plot(tx, 4.5*exp(tx, *poptv), label='4.5 times Fit to Medians')\n plt.ylabel(\"Relative Velocity between Galaxy and Group Center\")\n plt.xlabel(r\"Integrated Stellar Mass of Giant + Dwarf Members\")\n plt.xlim(9.5,13)\n plt.ylim(0,2000)\n plt.legend(loc='best')\n plt.show()\n\n rproj_for_iteration = lambda M: 3*exp(M, *poptr)\n vproj_for_iteration = lambda M: 4.5*exp(M, *poptv)\n\n # --------------- now need to do this calibration for the RESOLVE-B analogue dataset, down to 8.7 stellar mass) -------------$\n 
resbana_gdgrpn = fof.multiplicity_function(resbana_g3grp, return_by_galaxy=True)\n #resbana_gdsel = np.logical_not((resbana_gdgrpn==1) & (ecologmstar>-19.4) & (resbana_g3grp!=-99.) & (resbana_g3grp>0)) # select galaxies that AREN'T ungrouped dwarfs\n resbana_gdsel = np.logical_and(np.logical_not(np.logical_or(resbana_g3grp==-99., ((resbana_gdgrpn==1) & (ecologmstar<9.5) & (ecologmstar>=8.7)))), (resbana_gdgrpn>2))\n resbana_gdgrpra, resbana_gdgrpdec, resbana_gdgrpcz = fof.group_skycoords(ecoradeg[resbana_gdsel], ecodedeg[resbana_gdsel], ecocz[resbana_gdsel], resbana_g3grp[resbana_gdsel])\n resbana_gdrelvel = np.abs(resbana_gdgrpcz - ecocz[resbana_gdsel])\n resbana_gdrelprojdist = (resbana_gdgrpcz + ecocz[resbana_gdsel])/100. * ic.angular_separation(resbana_gdgrpra, resbana_gdgrpdec, ecoradeg[resbana_gdsel], ecodedeg[resbana_gdsel])/2.0\n\n resbana_gdn = resbana_gdgrpn[resbana_gdsel]\n resbana_gdtotalmass = ic.get_int_mass(ecologmstar[resbana_gdsel], resbana_g3grp[resbana_gdsel])\n\n massbins2=np.arange(9.75,14,0.15)\n binsel2 = np.where(np.logical_and(resbana_gdn>1, resbana_gdtotalmass>-24))\n gdmedianrproj, massbincenters, massbinedges, jk = center_binned_stats(resbana_gdtotalmass[binsel2], resbana_gdrelprojdist[binsel2], np.median, bins=massbins2)\n gdmedianrproj_err, jk, jk, jk = center_binned_stats(resbana_gdtotalmass[binsel2], resbana_gdrelprojdist[binsel2], sigmarange, bins=massbins2)\n\n gdmedianrelvel, jk, jk, jk = center_binned_stats(resbana_gdtotalmass[binsel2], resbana_gdrelvel[binsel2], np.median, bins=massbins2)\n gdmedianrelvel_err, jk, jk, jk = center_binned_stats(resbana_gdtotalmass[binsel2], resbana_gdrelvel[binsel2], sigmarange, bins=massbins2)\n \n nansel = np.isnan(gdmedianrproj)\n poptr_resbana, jk = curve_fit(exp, massbincenters[~nansel], gdmedianrproj[~nansel], p0=poptr, sigma=gdmedianrproj_err[~nansel])#10**massbincenters[~nansel])\n poptv_resbana, jk = curve_fit(exp, massbincenters[~nansel], gdmedianrelvel[~nansel], p0=poptv, sigma=gdmedianrelvel_err[~nansel])#[3e-5,4e-1,5e-03,1])\n\n tx = np.linspace(7,15)\n plt.figure()\n plt.plot(resbana_gdtotalmass[binsel2], resbana_gdrelprojdist[binsel2], 'k.', alpha=0.2, label='Mock Galaxies in N>1 Giant+Dwarf Groups')\n plt.errorbar(massbincenters, gdmedianrproj, gdmedianrproj_err, fmt='r^', label='Median')\n plt.plot(tx, exp(tx,*poptr_resbana), label='Fit to Medians')\n plt.plot(tx, 3*exp(tx,*poptr_resbana), label='3 times Fit to Medians')\n plt.xlabel(r\"Integrated Stellar Mass of Giant + Dwarf Members\")\n plt.ylabel(\"Projected Distance from Galaxy to Group Center [Mpc/h]\")\n plt.legend(loc='best')\n plt.xlim(9.5,13)\n plt.ylim(0,3)\n plt.show()\n\n plt.figure()\n plt.plot(resbana_gdtotalmass[binsel2], resbana_gdrelvel[binsel2], 'k.', alpha=0.2, label='Mock Galaxies in N=2 Giant+Dwarf Groups')\n plt.errorbar(massbincenters, gdmedianrelvel, yerr=gdmedianrelvel_err, fmt='r^',label='Medians')\n plt.plot(tx, exp(tx, *poptv_resbana), label='Fit to Medians')\n plt.plot(tx, 4.5*exp(tx, *poptv_resbana), label='4.5 times Fit to Medians')\n plt.ylabel(\"Relative Velocity between Galaxy and Group Center\")\n plt.xlabel(r\"Integrated Stellar Mass of Giant + Dwarf Members\")\n plt.xlim(9.5,13)\n plt.ylim(0,2000)\n plt.legend(loc='best')\n plt.show()\n\n rproj_for_iteration_resbana = lambda M: 3*exp(M, *poptr_resbana)\n vproj_for_iteration_resbana = lambda M: 4.5*exp(M, *poptv_resbana)\n \n ###########################################################\n # Step 7: Iterative Combination of Dwarf Galaxies\n 
###########################################################\n assert (ecog3grp[(ecologmstar>9.5) & (ecocz<8000) & (ecocz>2530)]!=-99.).all(), \"Not all giants are grouped.\"\n ecogrpnafterassoc = fof.multiplicity_function(ecog3grp, return_by_galaxy=True)\n resbgrpnafterassoc = fof.multiplicity_function(resbg3grp, return_by_galaxy=True)\n resbana_grpnafterassoc = fof.multiplicity_function(resbana_g3grp, return_by_galaxy=True)\n\n eco_ungroupeddwarf_sel = (ecologmstar<9.5) & (ecologmstar>=8.9) & (ecocz<8000) & (ecocz>2530) & (ecogrpnafterassoc==1)\n ecoitassocid = ic.iterative_combination(ecoradeg[eco_ungroupeddwarf_sel], ecodedeg[eco_ungroupeddwarf_sel], ecocz[eco_ungroupeddwarf_sel], ecologmstar[eco_ungroupeddwarf_sel],\\\n rproj_for_iteration, vproj_for_iteration, starting_id=np.max(ecog3grp)+1, centermethod='arithmetic')\n\n resb_ungroupeddwarf_sel = (resblogmstar<9.5) & (resblogmstar>=8.7) & (resbcz<7300) & (resbcz>4250) & (resbgrpnafterassoc==1)\n resbitassocid = ic.iterative_combination(resbradeg[resb_ungroupeddwarf_sel], resbdedeg[resb_ungroupeddwarf_sel], resbcz[resb_ungroupeddwarf_sel], resblogmstar[resb_ungroupeddwarf_sel],\\\n rproj_for_iteration, vproj_for_iteration, starting_id=np.max(resbg3grp)+1, centermethod='arithmetic')\n\n resbana_ungroupeddwarf_sel = (ecologmstar<9.5) & (ecologmstar>=8.7) & (ecocz<8000) & (ecocz>2530) & (resbana_grpnafterassoc==1)\n resbana_itassocid = ic.iterative_combination(ecoradeg[resbana_ungroupeddwarf_sel], ecodedeg[resbana_ungroupeddwarf_sel], ecocz[resbana_ungroupeddwarf_sel], ecologmstar[resbana_ungroupeddwarf_sel],\\\n rproj_for_iteration_resbana, vproj_for_iteration_resbana, starting_id=np.max(resbana_g3grp)+1, centermethod='arithmetic')\n\n ecog3grp[eco_ungroupeddwarf_sel] = ecoitassocid\n resbg3grp[resb_ungroupeddwarf_sel] = resbitassocid\n resbana_g3grp[resbana_ungroupeddwarf_sel] = resbana_itassocid\n #plt.figure()\n #plt.hist(fof.multiplicity_function(ecoitassocid, return_by_galaxy=False), log=True)\n #plt.hist(fof.multiplicity_function(resbitassocid, return_by_galaxy=False), log=True, histtype='step')\n #plt.show()\n\n plt.figure()\n binv = np.arange(0.5,1200.5,3)\n plt.hist(fof.multiplicity_function(ecog3grp[ecog3grp!=-99.], return_by_galaxy=False), bins=binv, log=True, label='ECO Groups', histtype='step', linewidth=3)\n plt.hist(fof.multiplicity_function(resbg3grp[resbg3grp!=-99.], return_by_galaxy=False), bins=binv, log=True, label='RESOLVE-B Groups', histtype='step', hatch='\\\\')\n plt.xlabel(\"Number of Giant + Dwarf Group Members\")\n plt.ylabel(\"Number of Groups\")\n plt.legend(loc='best')\n plt.xlim(0,100)\n plt.show()\n\n ############################################################\n # Step 8: Halo Abundance Matching\n ###########################################################\n # --- for RESOLVE-B analogue ----#\n resbana_hamsel = (resbana_g3grp!=-99.)\n resbana_haloid, resbana_halomass, jk, jk = ic.HAMwrapper(ecoradeg[resbana_hamsel], ecodedeg[resbana_hamsel], ecocz[resbana_hamsel], ecologmstar[resbana_hamsel], resbana_g3grp[resbana_hamsel],\\\n ecovolume, inputfilename=None, outputfilename=None)\n resbana_halomass = np.log10(10**resbana_halomass)# no longer needed as of 7/29: /fof.getmhoffset(280,337,1,1,6))\n junk, uniqindex = np.unique(resbana_g3grp[resbana_hamsel], return_index=True)\n resbana_intmass = ic.get_int_mass(ecologmstar[resbana_hamsel], resbana_g3grp[resbana_hamsel])[uniqindex]\n sortind = np.argsort(resbana_intmass)\n sortedmass = resbana_intmass[sortind]\n resbcubicspline = interp1d(sortedmass, 
resbana_halomass[sortind], fill_value='extrapolate')\n\n resbintmass = ic.get_int_mass(resblogmstar[resbg3grp!=-99.], resbg3grp[resbg3grp!=-99.])\n resbg3logmh[resbg3grp!=-99.] = resbcubicspline(resbintmass)-np.log10(0.7)\n\n # ---- for ECO ----- #\n ecohamsel = (ecog3grp!=-99.)\n haloid, halomass, junk, junk = ic.HAMwrapper(ecoradeg[ecohamsel], ecodedeg[ecohamsel], ecocz[ecohamsel], ecologmstar[ecohamsel], ecog3grp[ecohamsel],\\\n ecovolume, inputfilename=None, outputfilename=None)\n junk, uniqindex = np.unique(ecog3grp[ecohamsel], return_index=True)\n halomass = halomass-np.log10(0.7)\n for i,idv in enumerate(haloid):\n sel = np.where(ecog3grp==idv)\n ecog3logmh[sel] = halomass[i] # m337b\n\n # calculate Rvir in arcsec\n ecog3rvir = (3*(10**ecog3logmh) / (4*np.pi*337*0.3*1.36e11) )**(1/3.)\n resbg3rvir = (3*(10**resbg3logmh ) / (4*np.pi*337*0.3*1.36e11))**(1/3.)\n\n ecointmass = ic.get_int_mass(ecologmstar[ecohamsel], ecog3grp[ecohamsel])\n plt.figure()\n plt.plot(ecointmass, ecog3logmh[ecog3grp!=-99.], '.', color='palegreen', alpha=0.6, label='ECO', markersize=11)\n plt.plot(resbintmass, resbg3logmh[resbg3grp!=-99.], 'k.', alpha=1, label='RESOLVE-B', markersize=3)\n plt.plot\n plt.xlabel(\"group-integrated log stellar mass\")\n plt.ylabel(r\"group halo mass (log$M_\\odot$)\")\n plt.legend(loc='best')\n plt.show()\n\n ########################################\n # (9) Output arrays\n ########################################\n # ---- first get the quantities for ECO ---- #\n #eco_in_gf = np.where(ecog3grp!=-99.)\n ecog3grpn = fof.multiplicity_function(ecog3grp, return_by_galaxy=True)\n ecog3grpngi = np.zeros(len(ecog3grpn))\n ecog3grpndw = np.zeros(len(ecog3grpn))\n for uid in np.unique(ecog3grp):\n grpsel = np.where(ecog3grp==uid)\n gisel = np.where(np.logical_and((ecog3grp==uid),(ecologmstar>=9.5)))\n dwsel = np.where(np.logical_and((ecog3grp==uid), (ecologmstar<9.5)))\n if len(gisel[0])>0.:\n ecog3grpngi[grpsel] = len(gisel[0])\n if len(dwsel[0])>0.:\n ecog3grpndw[grpsel] = len(dwsel[0])\n\n ecog3grpradeg, ecog3grpdedeg, ecog3grpcz = fof.group_skycoords(ecoradeg, ecodedeg, ecocz, ecog3grp)\n ecog3rproj = fof.get_grprproj_e17(ecoradeg, ecodedeg, ecocz, ecog3grp, h=0.7) / (ecog3grpcz/70.) 
* 206265 # in arcsec\n ecog3fc = fof.get_central_flag(ecologmstar, ecog3grp)\n ecog3router = fof.get_outermost_galradius(ecoradeg, ecodedeg, ecocz, ecog3grp) # in arcsec\n ecog3router[(ecog3grpngi+ecog3grpndw)==1] = 0.\n junk, ecog3vdisp = fof.get_rproj_czdisp(ecoradeg, ecodedeg, ecocz, ecog3grp)\n ecog3rvir = ecog3rvir*206265/(ecog3grpcz/70.)\n ecog3grpgas = ic.get_int_mass(ecologmgas, ecog3grp)\n ecog3grpstars = ic.get_int_mass(ecologmstar, ecog3grp)\n ecog3ADtest = vz.AD_test(ecocz, ecog3grp)\n ecog3tcross = vz.group_crossing_time(ecoradeg, ecodedeg, ecocz, ecog3grp)\n ecog3colorgap = vz.group_color_gap(ecog3grp, ecologmstar, ecourcolor)\n ecog3dsprob = vz.fast_DS_test(ecoradeg,ecodedeg,ecocz,ecog3grp,niter=2500)\n ecog3nndens, ecog3edgeflag, ecog3nndens2d, ecog3edgeflag2d, ecog3edgescale2d = lss_dens_by_galaxy(ecog3grp,\\\n ecoradeg, ecodedeg, ecocz, ecog3logmh, Nnn=3, rarange=(130.05,237.45), decrange=(-1,50), czrange=(2530,7470))\n\n outofsample = (ecog3grp==-99.)\n ecog3grpn[outofsample]=-99.\n ecog3grpngi[outofsample]=-99.\n ecog3grpndw[outofsample]=-99.\n ecog3grpradeg[outofsample]=-99.\n ecog3grpdedeg[outofsample]=-99.\n ecog3grpcz[outofsample]=-99.\n ecog3logmh[outofsample]=-99.\n ecog3rvir[outofsample]=-99.\n ecog3rproj[outofsample]=-99.\n ecog3fc[outofsample]=-99.\n ecog3router[outofsample]=-99.\n ecog3vdisp[outofsample]=-99.\n ecog3grpgas[outofsample]=-99.\n ecog3grpstars[outofsample]=-99.\n ecog3ADtest[outofsample]=-99.\n ecog3tcross[outofsample]=-99.\n ecog3colorgap[outofsample]=-99.\n ecog3dsprob[outofsample]=-99.\n ecog3nndens[outofsample]=-99.\n ecog3edgeflag[outofsample]=-99.\n ecog3nndens2d[outofsample]=-99.\n ecog3edgeflag2d[outofsample]=-99.\n ecog3edgescale2d[outofsample]=-99.\n insample = ecog3grpn!=-99.\n\n ecodata['g3grp_s'] = ecog3grp\n ecodata['g3grpradeg_s'] = ecog3grpradeg\n ecodata['g3grpdedeg_s'] = ecog3grpdedeg\n ecodata['g3grpcz_s'] = ecog3grpcz\n ecodata['g3grpndw_s'] = ecog3grpndw\n ecodata['g3grpngi_s'] = ecog3grpngi\n ecodata['g3logmh_s'] = ecog3logmh\n ecodata['g3r337_s'] = ecog3rvir\n ecodata['g3rproj_s'] = ecog3rproj\n ecodata['g3router_s'] = ecog3router\n ecodata['g3fc_s'] = ecog3fc\n ecodata['g3vdisp_s'] = ecog3vdisp\n ecodata['g3grplogG_s'] = ecog3grpgas\n ecodata['g3grplogS_s'] = ecog3grpstars\n ecodata['g3grpadAlpha_s'] = ecog3ADtest\n ecodata['g3grptcross_s'] = ecog3tcross\n ecodata['g3grpcolorgap_s'] = ecog3colorgap\n ecodata['g3grpcolorgap_s'] = ecog3colorgap\n ecodata['g3grpdsProb_s'] = ecog3dsprob\n ecodata['g3grpnndens_s'] = ecog3nndens\n ecodata['g3grpedgeflag_s'] = ecog3edgeflag\n ecodata['g3grpnndens2d_s'] = ecog3nndens2d\n ecodata['g3grpedgeflag2d_s'] = ecog3edgeflag2d\n ecodata['g3grpedgescale2d_s'] = ecog3edgescale2d\n ecodata.to_csv(\"ECOdata_G3catalog_stellar.csv\", index=False)\n\n # ------ now do RESOLVE\n sz = len(resolvedata)\n resolvename = np.array(resolvedata.name)\n resolveg3grp = np.full(sz, -99.)\n resolveg3grpngi = np.full(sz, -99.)\n resolveg3grpndw = np.full(sz, -99.)\n resolveg3grpradeg = np.full(sz, -99.)\n resolveg3grpdedeg = np.full(sz, -99.)\n resolveg3grpcz = np.full(sz, -99.)\n resolveg3intmstar = np.full(sz, -99.)\n resolveg3logmh = np.full(sz, -99.)\n resolveg3rvir = np.full(sz, -99.)\n resolveg3rproj = np.full(sz,-99.)\n resolveg3fc = np.full(sz,-99.)\n resolveg3router = np.full(sz,-99.)\n resolveg3vdisp = np.full(sz,-99.)\n resolveg3grpgas = np.full(sz, -99.)\n resolveg3grpstars = np.full(sz, -99.)\n resolveg3ADtest = np.full(sz, -99.)\n resolveg3tcross = np.full(sz, -99.)\n resolveg3colorgap = np.full(sz, 
-99.)\n resolveg3dsprob = np.full(sz,-99.)\n resolveg3nndens = np.full(sz, -99.)\n resolveg3edgeflag = np.full(sz, -99.)\n resolveg3nndens2d = np.full(sz, -99.)\n resolveg3edgeflag2d = np.full(sz, -99.)\n resolveg3edgescale2d = np.full(sz, -99.)\n\n resbg3grpngi = np.full(len(resbg3grp), 0)\n resbg3grpndw = np.full(len(resbg3grp), 0)\n for uid in np.unique(resbg3grp):\n grpsel = np.where(resbg3grp==uid)\n gisel = np.where(np.logical_and((resbg3grp==uid),(resblogmstar>=9.5)))\n dwsel = np.where(np.logical_and((resbg3grp==uid), (resblogmstar<9.5)))\n if len(gisel[0])>0.:\n resbg3grpngi[grpsel] = len(gisel[0])\n if len(dwsel[0])>0.:\n resbg3grpndw[grpsel] = len(dwsel[0])\n\n resbg3grpradeg, resbg3grpdedeg, resbg3grpcz = fof.group_skycoords(resbradeg, resbdedeg, resbcz, resbg3grp)\n resbg3intmstar = ic.get_int_mass(resblogmstar, resbg3grp)\n resbg3rproj = fof.get_grprproj_e17(resbradeg, resbdedeg, resbcz, resbg3grp, h=0.7) / (resbg3grpcz/70.) * 206265 # in arcsec\n resbg3fc = fof.get_central_flag(resblogmstar, resbg3grp)\n resbg3router = fof.get_outermost_galradius(resbradeg, resbdedeg, resbcz, resbg3grp) # in arcsec\n resbg3router[(resbg3grpngi+resbg3grpndw)==1] = 0.\n junk, resbg3vdisp = fof.get_rproj_czdisp(resbradeg, resbdedeg, resbcz, resbg3grp)\n resbg3rvir = resbg3rvir*206265/(resbg3grpcz/70.)\n resbg3grpgas = ic.get_int_mass(resblogmgas, resbg3grp)\n resbg3grpstars = ic.get_int_mass(resblogmstar, resbg3grp)\n resbg3ADtest = vz.AD_test(resbcz, resbg3grp)\n resbg3tcross = vz.group_crossing_time(resbradeg, resbdedeg, resbcz, resbg3grp)\n resbg3colorgap = vz.group_color_gap(resbg3grp, resblogmstar, resburcolor)\n resbg3dsprob = vz.fast_DS_test(resbradeg,resbdedeg,resbcz,resbg3grp,niter=2500)\n RESB_RADEG_REMAPPED = np.copy(resbradeg)\n REMAPSEL = np.where(resbradeg>18*15.)\n RESB_RADEG_REMAPPED[REMAPSEL] = resbradeg[REMAPSEL]-360.\n resbg3nndens, resbg3edgeflag, resbg3nndens2d, resbg3edgeflag2d, resbg3edgescale2d = lss_dens_by_galaxy(resbg3grp,\\\n RESB_RADEG_REMAPPED, resbdedeg, resbcz, resbg3logmh, Nnn=3, rarange=(-2*15.,3*15.), decrange=(-1.25,1.25),\\\n czrange=(4250,7250)) # must use remapped RESOLVE-B RA because of 0/360 wraparound\n\n outofsample = (resbg3grp==-99.)\n resbg3grpngi[outofsample]=-99.\n resbg3grpndw[outofsample]=-99.\n resbg3grpradeg[outofsample]=-99.\n resbg3grpdedeg[outofsample]=-99.\n resbg3grpcz[outofsample]=-99.\n resbg3intmstar[outofsample]=-99.\n resbg3logmh[outofsample]=-99.\n resbg3rvir[outofsample]=-99.\n resbg3rproj[outofsample]=-99.\n resbg3router[outofsample]=-99.\n resbg3fc[outofsample]=-99.\n resbg3vdisp[outofsample]=-99.\n resbg3grpgas[outofsample]=-99.\n resbg3grpstars[outofsample]=-99.\n resbg3ADtest[outofsample]=-99.\n resbg3tcross[outofsample]=-99.\n resbg3colorgap[outofsample]=-99.\n resbg3dsprob[outofsample]=-99.\n resbg3nndens[outofsample]=-99.\n resbg3edgeflag[outofsample]=-99.\n resbg3nndens2d[outofsample]=-99.\n resbg3edgeflag2d[outofsample]=-99.\n resbg3edgescale2d[outofsample]=-99.\n\n for i,nm in enumerate(resolvename):\n if nm.startswith('rs'):\n sel_in_eco = np.where(ecoresname==nm)\n resolveg3grp[i] = ecog3grp[sel_in_eco]\n resolveg3grpngi[i] = ecog3grpngi[sel_in_eco]\n resolveg3grpndw[i] = ecog3grpndw[sel_in_eco]\n resolveg3grpradeg[i] = ecog3grpradeg[sel_in_eco]\n resolveg3grpdedeg[i] = ecog3grpdedeg[sel_in_eco]\n resolveg3grpcz[i] = ecog3grpcz[sel_in_eco]\n resolveg3intmstar[i] = ecog3intmstar[sel_in_eco]\n resolveg3logmh[i] = ecog3logmh[sel_in_eco]\n resolveg3rvir[i] = ecog3rvir[sel_in_eco]\n resolveg3rproj[i] = 
ecog3rproj[sel_in_eco]\n resolveg3fc[i] = ecog3fc[sel_in_eco]\n resolveg3router[i]=ecog3router[sel_in_eco]\n resolveg3vdisp[i]=ecog3vdisp[sel_in_eco]\n resolveg3grpstars[i] = ecog3grpstars[sel_in_eco]\n resolveg3grpgas[i] = ecog3grpgas[sel_in_eco]\n resolveg3ADtest[i] = ecog3ADtest[sel_in_eco]\n resolveg3tcross[i] = ecog3tcross[sel_in_eco]\n resolveg3colorgap[i] = ecog3colorgap[sel_in_eco]\n resolveg3dsprob[i] = ecog3dsprob[sel_in_eco]\n resolveg3nndens[i] = ecog3nndens[sel_in_eco]\n resolveg3edgeflag[i] = ecog3edgeflag[sel_in_eco]\n resolveg3nndens2d[i] = ecog3nndens2d[sel_in_eco]\n resolveg3edgeflag2d[i] = ecog3edgeflag2d[sel_in_eco]\n resolveg3edgescale2d[i] = ecog3edgescale2d[sel_in_eco]\n elif nm.startswith('rf'):\n sel_in_resb = np.where(resbname==nm)\n resolveg3grp[i] = resbg3grp[sel_in_resb]\n resolveg3grpngi[i] = resbg3grpngi[sel_in_resb]\n resolveg3grpndw[i] = resbg3grpndw[sel_in_resb]\n resolveg3grpradeg[i] = resbg3grpradeg[sel_in_resb]\n resolveg3grpdedeg[i] = resbg3grpdedeg[sel_in_resb]\n resolveg3grpcz[i] = resbg3grpcz[sel_in_resb]\n resolveg3intmstar[i] = resbg3intmstar[sel_in_resb]\n resolveg3logmh[i] = resbg3logmh[sel_in_resb]\n resolveg3rvir[i] = resbg3rvir[sel_in_resb]\n resolveg3rproj[i] = resbg3rproj[sel_in_resb]\n resolveg3fc[i] = resbg3fc[sel_in_resb]\n resolveg3router[i] = resbg3router[sel_in_resb]\n resolveg3vdisp[i] = resbg3vdisp[sel_in_resb]\n resolveg3grpgas[i] = resbg3grpgas[sel_in_resb]\n resolveg3grpstars[i] = resbg3grpstars[sel_in_resb]\n resolveg3ADtest[i] = resbg3ADtest[sel_in_resb]\n resolveg3tcross[i] = resbg3tcross[sel_in_resb]\n resolveg3colorgap[i] = resbg3colorgap[sel_in_resb]\n resolveg3dsprob[i] = resbg3dsprob[sel_in_resb]\n resolveg3nndens[i] = resbg3nndens[sel_in_resb]\n resolveg3edgeflag[i] = resbg3edgeflag[sel_in_resb]\n resolveg3nndens2d[i] = resbg3nndens2d[sel_in_resb]\n resolveg3edgeflag2d[i] = resbg3edgeflag2d[sel_in_resb]\n resolveg3edgescale2d[i] = resbg3edgescale2d[sel_in_resb]\n else:\n assert False, nm+\" not in RESOLVE\"\n\n resolvedata['g3grp_s'] = resolveg3grp\n resolvedata['g3grpngi_s'] = resolveg3grpngi\n resolvedata['g3grpndw_s'] = resolveg3grpndw\n resolvedata['g3grpradeg_s'] = resolveg3grpradeg\n resolvedata['g3grpdedeg_s'] = resolveg3grpdedeg\n resolvedata['g3grpcz_s'] = resolveg3grpcz\n resolvedata['g3logmh_s'] = resolveg3logmh\n resolvedata['g3r337_s'] = resolveg3rvir\n resolvedata['g3rproj_s'] = resolveg3rproj\n resolvedata['g3router_s'] = resolveg3router\n resolvedata['g3fc_s'] = resolveg3fc\n resolvedata['g3vdisp_s'] = resolveg3vdisp\n resolvedata['g3grplogG_s'] = resolveg3grpgas\n resolvedata['g3grplogS_s'] = resolveg3grpstars\n resolvedata['g3grpadAlpha_s'] = resolveg3ADtest\n resolvedata['g3grptcross_s'] = resolveg3tcross\n resolvedata['g3grpcolorgap_s'] = resolveg3colorgap\n resolvedata['g3grpdsProb_s'] = resolveg3dsprob\n resolvedata['g3grpnndens_s'] = resolveg3nndens\n resolvedata['g3grpedgeflag_s'] = resolveg3edgeflag\n resolvedata['g3grpnndens2d_s'] = resolveg3nndens2d\n resolvedata['g3grpedgeflag2d_s'] = resolveg3edgeflag2d\n resolvedata['g3grpedgescale2d_s'] = resolveg3edgescale2d\n resolvedata.to_csv(\"RESOLVEdata_G3catalog_stellar.csv\", index=False)\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.plot", "numpy.max", "scipy.optimize.curve_fit", "numpy.where", "pandas.read_csv", "numpy.unique", "numpy.arange", "numpy.full", "numpy.copy", "scipy.interpolate.interp1d", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.figure", "numpy.isnan", "matplotlib.pyplot.ylim", "numpy.logical_or", "numpy.log10", "numpy.argsort", "numpy.array", "matplotlib.pyplot.show", "numpy.logical_and", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axhline", "numpy.abs", "matplotlib.pyplot.scatter", "matplotlib.pyplot.yscale", "numpy.percentile", "numpy.sort", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
wmvanvliet/psychic
[ "4ab75fb655795df0272c1bb0eb0dfeb232ffe143", "4ab75fb655795df0272c1bb0eb0dfeb232ffe143" ]
[ "psychic/markers.py", "psychic/layouts.py" ]
[ "import numpy as np\n\ndef markers_to_events(marker_array):\n '''\n Extract events from the array with markers.\n Events are encoded as TTL pulses, no event is indicated with the value 0.\n Returns (events, indices).\n '''\n markers = np.asarray(marker_array, int)\n assert markers.ndim == 1, 'Expected flat array of markers'\n\n #change_ids = np.flatnonzero(np.diff(np.c_[0, markers, 0]))\n change_ids = np.flatnonzero(np.concatenate([[1], np.diff(markers)]))\n events = markers[change_ids]\n durations = np.diff( np.concatenate([change_ids, [len(markers)]]) )\n\n return (events[np.nonzero(events)], change_ids[np.nonzero(events)],\n durations[np.nonzero(events)])\n\ndef biosemi_find_ghost_markers(ys):\n '''\n BioSemi seems to decimate the status channel by taking the max of each\n window. When two makers fall in the same frame after decimation, a ghost\n marker appears, with the value of the bitwise or of the other markers.\n This function finds ghost markers using a heuristic. \n THIS FUNCTION IS DANGEROUS!\n '''\n ys = np.asarray(ys)\n e, ei, ed = markers_to_events(ys)\n if len(ei) < 3:\n return np.zeros(0)\n\n # First, find markers that are the binary OR of their neighbours\n pre_ghost_post = np.array([e[:-2], e[1:-1], e[2:]]).T\n or_matches = np.hstack([False, pre_ghost_post[:, 0] | pre_ghost_post[:,-1] \\\n == pre_ghost_post[:, 1], False])\n\n # Now check which markers are not separated with a 0\n non_sep_matches = np.hstack(\n [False, (ys[ei[1:-1] - 1] != 0) & (ys[ei[2:] - 1] != 0), False])\n\n # Finally find markers that are one frame long\n one_frame = np.hstack([np.diff(ei) == 1, False])\n\n ghosts = or_matches & non_sep_matches & one_frame\n return ei[ghosts]\n\ndef resample_markers(markers, newlen, max_delay=0):\n '''\n Resample a marker stream without losing markers. max_delay specifies how\n many frames the markers can be delayed in *target frames*. \n '''\n factor = float(newlen)/len(markers)\n e, ei, ed = markers_to_events(markers)\n ei = (ei * factor).astype(int)\n ed = np.ceil(ed * factor).astype(int)\n old_ei = ei.copy()\n\n for i in range(1, len(ei)):\n if e[i] == e[i-1]:\n # Two markers with the same value are not allowed to touch\n ei[i] = max(ei[i], ei[i-1]+ed[i-1]+1)\n else:\n ei[i] = max(ei[i], ei[i-1]+ed[i-1])\n\n if len(ei) > 0:\n assert np.max(np.abs(ei - old_ei)) <= max_delay, \\\n 'Markers are delayed too much'\n assert max(ei)+ed[np.argmax(ei)]-1 < newlen, 'Delayed markers out of bounds'\n ys = np.zeros(newlen, dtype=np.int)\n\n for i in range( len(ei) ):\n ys[ei[i]:ei[i]+ed[i]] = e[i]\n\n return ys\n\n", "import numpy as np\nfrom itertools import combinations\nfrom matplotlib import pyplot as plt\n\nfrom .scalpplot import positions\n\n\nclass Layout:\n def __init__(self, channel_names, points):\n self.channel_names = channel_names\n self.points, self.box_width, self.box_height = _box_size(points)\n self.scale_point = (1 - self.box_width - 0.01, self.box_height)\n\n def get_box(self, channel):\n '''\n Get axes box coordinates for a channel in the layout.\n\n Parameters\n ----------\n channel : string\n String name of the channel to get the box for.\n\n Returns\n -------\n box : list of floats [center_x, center_y, width, height]\n The coordinates for the box to plot the channel data in. 
Suitable\n as argument to matplotlib's `figure.add_axes`.\n '''\n x, y = self.points[self.channel_names.index(channel)]\n return x, y, self.box_width, self.box_height\n\n def get_scale(self):\n '''\n Get axes box coordinates for plotting the scale.\n\n Returns\n -------\n box : list of floats [center_x, center_y, width, height]\n The coordinates for the box to plot the scale. Suitable\n as argument to matplotlib's `figure.add_axes`.\n '''\n x, y = self.scale_point\n return x, y, self.box_width, self.box_height\n\n def axes(self, fig=None):\n '''\n Generator that creates matplotlib axes for the boxes.\n\n Parameters\n ----------\n fig : matplotlib figure handle (default None)\n If specified, a figure to create the axes for. Otherwise, a new\n figure will be created.\n '''\n if fig is None:\n fig = plt.figure()\n\n for x, y in self.points:\n yield fig.add_axes((x, y, self.box_width, self.box_height))\n\n def plot(self, fig=None):\n '''\n Plots the layout.\n\n Parameters\n ----------\n fig : matplotlib figure handle (default None)\n If specified, a figure to plot the layout in. Otherwise, a new\n figure will be created.\n '''\n if fig is None:\n fig = plt.figure()\n\n plt.scatter(self.points[:, 0], self.points[:, 1], color='k')\n plt.scatter(self.scale_point[0], self.scale_point[1], color='b')\n plt.xlim(0, 1)\n plt.ylim(0, 1)\n\n\ndef _box_size(points, width=None, height=None, padding=0.9):\n \"\"\" Given a series of points, calculate an appropriate box size.\n\n Parameters\n ----------\n points : array, shape = (n_points, [x-coordinate, y-coordinate])\n The centers of the axes. Normally these are points in the range [0, 1]\n centered at 0.5.\n width : float | None\n An optional box width to enforce. When set, only the box height will be\n calculated by the function.\n height : float | None\n An optional box height to enforce. When set, only the box width will be\n calculated by the function.\n padding : float\n Scale boxes by this amount to achieve padding between boxes.\n\n Returns\n -------\n width : float\n Width of the box\n height : float\n Height of the box\n \"\"\"\n # Scale points so they are centered at (0, 0) and extend [-0,5, 0.5]\n points = np.asarray(points)\n x_min, x_max = np.min(points[:, 0]), np.max(points[:, 0])\n y_min, y_max = np.min(points[:, 1]), np.max(points[:, 1])\n x_range = x_max - x_min\n y_range = y_max - y_min\n points[:, 0] = (points[:, 0] - (x_min + x_max)/2.) * 1./x_range\n points[:, 1] = (points[:, 1] - (y_min + y_max)/2.) 
* 1./y_range\n\n xdiff = lambda a, b: np.abs(a[0] - b[0])\n ydiff = lambda a, b: np.abs(a[1] - b[1])\n dist = lambda a, b: np.sqrt(xdiff(a, b)**2 + ydiff(a, b)**2)\n\n points = np.asarray(points)\n\n if width is None and height is None:\n if len(points) <= 1:\n # Trivial case first\n width = 1.0\n height = 1.0\n else:\n # Find the closest two points A and B.\n all_combinations = list(combinations(points, 2))\n closest_points_idx = np.argmin([dist(a, b)\n for a, b in all_combinations])\n a, b = all_combinations[closest_points_idx]\n\n # The closest points define either the max width or max height.\n w, h = xdiff(a, b), ydiff(a, b)\n if w > h:\n width = w\n else:\n height = h\n\n # At this point, either width or height is known, or both are known.\n if height is None:\n # Find all axes that could potentially overlap horizontally.\n candidates = [c for c in combinations(points, 2)\n if xdiff(*c) < width]\n\n if len(candidates) == 0:\n # No axes overlap, take all the height you want.\n height = 1.0\n else:\n # Find an appropriate height so all none of the found axes will\n # overlap.\n height = ydiff(*candidates[np.argmin([ydiff(*c) for c in\n candidates])])\n\n elif width is None:\n # Find all axes that could potentially overlap vertically.\n candidates = [c for c in combinations(points, 2)\n if ydiff(*c) < height]\n\n if len(candidates) == 0:\n # No axes overlap, take all the width you want.\n width = 1.0\n else:\n # Find an appropriate width so all none of the found axes will\n # overlap.\n width = xdiff(*candidates[np.argmin([xdiff(*c) for c in\n candidates])])\n\n # Some subplot centers will be at the figure edge. Shrink everything so it\n # fits in the figure.\n scaling = 1 / (1. + width)\n points *= scaling\n width *= scaling\n height *= scaling\n points += 0.5\n\n # Add a bit of padding between boxes\n width *= padding\n height *= padding\n\n points[:, 0] -= width / 2.\n points[:, 1] -= height / 2.\n return points, width, height\n\n\nclass Layout_10_5(Layout):\n def __init__(self, channel_names):\n points = [positions.project_scalp(*positions.POS_10_5[l])\n for l in channel_names]\n Layout.__init__(self, channel_names, points)\n\n\n_biosemi_32_points = {\n 'Fp1': (2, 9),\n 'Fp2': (4, 9),\n\n 'AF3': (2, 8),\n 'AF4': (4, 8),\n\n 'F7': (1, 7),\n 'F3': (2, 7),\n 'Fz': (3, 7),\n 'F4': (4, 7),\n 'F8': (5, 7),\n\n 'FC5': (1.5, 6),\n 'FC1': (2.5, 6),\n 'FC2': (3.5, 6),\n 'FC6': (4.5, 6),\n\n 'T7': (1, 5),\n 'C3': (2, 5),\n 'Cz': (3, 5),\n 'C4': (4, 5),\n 'T8': (5, 5),\n\n 'CP5': (1.5, 4),\n 'CP1': (2.5, 4),\n 'CP2': (3.5, 4),\n 'CP6': (4.5, 4),\n\n 'P7': (1, 3),\n 'P3': (2, 3),\n 'Pz': (3, 3),\n 'P4': (4, 3),\n 'P8': (5, 3),\n\n 'PO3': (2, 2),\n 'PO4': (4, 2),\n\n 'O1': (2, 1),\n 'Oz': (3, 1),\n 'O2': (4, 1),\n}\n\n\nclass BioSemi_32(Layout):\n def __init__(self, d):\n points = []\n y = 9\n for l in d.feat_lab[0]:\n try:\n points.append(_biosemi_32_points[l])\n except:\n points.append((6, y))\n y -= 1\n\n Layout.__init__(self, d.feat_lab[0], points)\n self.scale_point = (0.8, 0.1)\n" ]
[ [ "numpy.hstack", "numpy.abs", "numpy.nonzero", "numpy.asarray", "numpy.ceil", "numpy.argmax", "numpy.diff", "numpy.array", "numpy.zeros" ], [ "numpy.abs", "matplotlib.pyplot.scatter", "numpy.min", "numpy.asarray", "matplotlib.pyplot.ylim", "numpy.max", "matplotlib.pyplot.xlim", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
analysiscenter/dataset
[ "50a0607442bd2eb675c8f6a8d7bc5e9dbabe36c0", "50a0607442bd2eb675c8f6a8d7bc5e9dbabe36c0" ]
[ "batchflow/models/torch/base.py", "batchflow/models/torch/layers/combine.py" ]
[ "\"\"\" Eager version of TorchModel. \"\"\"\nimport os\nimport re\nfrom threading import Lock\nfrom functools import partial\n\nimport dill\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch import nn\nfrom torch.optim.swa_utils import AveragedModel, SWALR\n\ntry:\n import cupy as cp\n CUPY_AVAILABLE = True\nexcept ImportError:\n CUPY_AVAILABLE = False\n\nfrom .network import Network\nfrom .base_mixins import OptimalBatchSizeMixin, LayerHook, ExtractionMixin, VisualizationMixin\nfrom .initialization import best_practice_resnet_init\nfrom .losses import CrossEntropyLoss, BinaryLovaszLoss, LovaszLoss, SSIM, MSSIM\nfrom .losses import binary as binary_losses, multiclass as multiclass_losses\nfrom .utils import get_shape\nfrom ..base import BaseModel\nfrom ...config import Config\n\n\n\nLOSSES = {\n 'l1': nn.L1Loss,\n 'huber': nn.SmoothL1Loss,\n 'absolutedifference': nn.L1Loss,\n 'mse': nn.MSELoss,\n 'cos': nn.CosineSimilarity,\n 'cosine': nn.CosineSimilarity,\n 'hinge': nn.HingeEmbeddingLoss,\n 'ssim': SSIM,\n 'mssim': MSSIM,\n\n 'bce': nn.BCEWithLogitsLoss,\n 'bdice': binary_losses.Dice,\n 'btversky': binary_losses.Tversky,\n 'blovasz': BinaryLovaszLoss,\n\n 'ce': CrossEntropyLoss,\n 'crossentropy': CrossEntropyLoss,\n 'logloss': CrossEntropyLoss,\n 'dice': multiclass_losses.Dice,\n 'lovasz': LovaszLoss\n}\n\nDECAYS = {\n 'exp': torch.optim.lr_scheduler.ExponentialLR,\n 'lambda': torch.optim.lr_scheduler.LambdaLR,\n 'step': torch.optim.lr_scheduler.StepLR,\n 'multistep': torch.optim.lr_scheduler.MultiStepLR,\n 'cos': torch.optim.lr_scheduler.CosineAnnealingLR,\n}\n\nDECAYS_DEFAULTS = {\n torch.optim.lr_scheduler.ExponentialLR : dict(gamma=0.96),\n torch.optim.lr_scheduler.LambdaLR : dict(lr_lambda=lambda epoch: 0.96**epoch),\n torch.optim.lr_scheduler.StepLR: dict(step_size=30),\n torch.optim.lr_scheduler.MultiStepLR: dict(milestones=[30, 80]),\n torch.optim.lr_scheduler.CosineAnnealingLR: dict(T_max=None)\n}\n\n\nclass TorchModel(BaseModel, ExtractionMixin, OptimalBatchSizeMixin, VisualizationMixin):\n \"\"\" Base class for Torch models.\n\n Implements two main logics:\n - the first is to build PyTorch model as a sequence of configurable nn.Modules\n - the second is to make infrastructure for model training, e.g. loss, optimizer and decay,\n and provide methods for the model training and inference.\n In the `examples` section you can find a drop-in template for your model.\n\n All of the parameters for both logics are defined in the config, supplied at initialization.\n The detailed description can be seen at `parameters` section; here, we describe the overall structure of keys:\n - global cuda and memory parameters:\n - `device` sets the desired accelerator to use. Default is to use the single best available (GPU over CPU).\n - `benchmark` defines the `cuda` behavior: trade some GPU memory to get minor (~15%) acceleration.\n Default is True.\n - `channels_last` sets the model weights and tensors layout to `channels_last`,\n which may result in minor acceleration. Default is False.\n\n - PyTorch model configuration.\n - `model`. If provided, then value should be a ready-to-use nn.Module.\n Otherwise, relies on :class:`.network.Network` for building the model:\n - `order` defines the sequence of blocks to build the model from. 
Default is initial_block -> body -> head.\n Separation of the NN into multiple blocks is just for convenience, so we can split\n the preprocessing, main body of the model, and postprocessing into individual parts.\n In the simplest case, each element is a string that points to other key in the config,\n which is used to create a :class:`~.torch.layers.Block`.\n Check the detailed description for more complex cases.\n - `initial_block`, `body`, `head` are parameters for this respective parts of the neural network.\n Defaults are empty layouts, meaning no operations.\n - `common` parameters are passed to each of the neural network parts. Default is empty.\n - `init_weights` allows to initialize weights.\n\n - `output` defines additional operations, applied to the output after loss computation.\n By default, we have `predictions`, `predictions_{i}` and `predictions_{i}_{j}` aliases.\n Note that these do not interfere with loss computation and are here only for convenience.\n\n - shapes info. If fully provided, used to initialize the model. If no shapes are given in the config,\n the model is created at the time of the first `train` call by looking at the actual batch data and shapes.\n Keys are `inputs_shapes`, `targets_shapes`, `classes`, and `placeholder_batch_size`.\n By default, no shapes are set in the config.\n\n - train and inference common parameters:\n - `amp` turns on/off automatic mixed precision, which allows to perform some of the operations in `float16`.\n Default is True.\n - `microbatch_size` allows to split the training/inference batches in chunks (microbatches) and process\n them sequentially. During train, we apply gradients only after all microbatches from the batch are used.\n Default is to not use microbatching.\n\n - train only parameters:\n - `sync_frequency` to apply gradients only once in a `sync_frequency` calls to `train` method.\n Default is to apply gradients after each `train` iteration.\n - `callbacks` to apply operations at the end of each iteration. Default is no callbacks.\n - `sam_rho`, `sam_individual_norm` to use sharpness-aware minimization. Default is to not use SAM at all.\n - `profile` to get detailed report of model performance. Default is False.\n\n - infrastructure for training:\n - `loss`. No default value, so this key is required.\n - `optimizer`. Default is `Adam`.\n - `decay`. Default is to not use learning rate decay.\n\n - additional parameters:\n - `sam` and `sam_rho` enable sharpness-aware minimization: a technique for improving model generatlization.\n - `weights_averaging` enables model weights averaging.\n\n\n\n We recommend looking at :class:`~.torch.layers.Block` to learn about parameters for model building blocks,\n and at :class:`~.EncoderDecoder` which allows more sophisticated logic of block chaining.\n\n\n Parameters\n ----------\n config : dict, :class:`~Config`\n Configuration of model creation. Below are the valid keys.\n\n # Global parameters\n device : str, torch.device or sequence\n Device to use for model, training and inference.\n If str, a device name (e.g. ``'cpu'`` or ``'gpu:0'``). Regular expressions are also allowed (e.g. 
``'gpu:*'``).\n If torch.device, then device to be used.\n If sequence, then each entry must be in one of previous formats, and batch data is paralleled across them.\n Default behavior is to use one (and only one) device of the best available type (priority to GPU over CPU).\n\n benchmark : bool\n Whether to optimize network's forward pass during the first batch.\n Leverages the memory-speed trade-off: the network may use more GPU memory to compute predictions faster.\n Speeds up the forward pass by ~15% if shapes of inputs are constant.\n Make sure not to use different shapes of inputs.\n\n\n # Model building configuration\n model : nn.Module, optional\n If provided, then this module is used as the model to train without any modifications.\n If provided, other model-related keys (`order`, `initial_block`, etc) are not used.\n\n order : sequence\n Defines sequence of network blocks in the architecture. Default is initial_block -> body -> head.\n Each element of the sequence must be either a string, which is used to retrieve module parameters from config.\n Module parameters should include `type` and other keyword arguments for its initialization.\n Refer to the documentation of :class:`.network.Network` for more details.\n\n initial_block : dict\n User-defined module or parameters for the preprocess layers, usually :class:`~.torch.layers.Block` parameters.\n body : dict or nn.Module\n User-defined module or parameters for the base network layers, usually :class:`~.torch.layers.Block` parameters.\n head : dict or nn.Module\n User-defined module or parameters for the postprocess layers, usually :class:`~.torch.layers.Block` parameters.\n common : dict\n Default parameters, passed for all modules.\n\n trainable : sequence, optional\n Names of model parts to train. Should be a subset of names in `order` and can be used to freeze parameters.\n\n output : str, list or dict\n Auxiliary operations to apply to the network predictions.\n If dict, then should have the same length and order as network predictions.\n Each key defines this prediction name, each value should be a str/list of operations to apply to this tensor.\n For example, ``{'my_prediction' : ['sigmoid', my_callable, 'softmax]}``.\n Generated outputs are available as `my_prediction_{j}`, `my_prediction_sigmoid`,\n and also by alias `predictions_{i}_{j}`, where `i` is the tensor ordinal and `j` is operation ordinal.\n\n If list or str, then default prefix `''` is used.\n See :meth:`.TorchModel.output` for more details.\n\n init_weights : callable, 'best_practice_resnet', or None\n Model weights initialization.\n If None, then default initialization is used.\n If 'best_practice_resnet', then common used non-default initialization is used.\n If callable, then callable applied to each layer.\n\n Examples:\n\n - ``{'init_weights': 'best_practice_resnet'}``\n - .. 
code-block:: python\n\n def callable_init(module): # example of a callable for init\n if isinstance(module, nn.Linear):\n nn.kaiming_normal_(module.weight)\n\n config = {'init_weights': callable_init}\n\n\n # Shapes: optional\n inputs_shapes : sequence\n Shapes of the input tensors without the batch size.\n Must be a tuple (one input) or sequence of tuples (multiple inputs) with shapes.\n\n targets_shapes : sequence\n Shapes of the target tensors without the batch size.\n Must be a tuple (one target) or sequence of tuples (multiple targets) with shapes.\n Available as `targets_shapes` parameter in the `head` block.\n\n classes : int or sequence of ints\n Number of desired classes in the output tensor. Available as `classes` parameter in the `head` block.\n\n placeholder_batch_size : int\n If `inputs` is specified with all the required shapes, then it serves as size of batch dimension during\n placeholder (usually np.ndarrays with zeros) creation. Default value is 2.\n\n\n # Train and inference behavior\n amp : bool\n Whether to use automated mixed precision during model training and inference. Default is True.\n The output type of predictions remains float32. Can be changed in `train` and `predict` arguments.\n\n microbatch_size : int, bool or None\n Also known as virtual batch. Allows to process given data sequentially,\n accumulating gradients from microbatches and applying them once in the end.\n If int, then size of chunks to split every batch into.\n If False or None, then this feature is not used. Default is not to use microbatching.\n Can be changed in `train` and `predict` arguments.\n\n\n # Additional train modifications\n sync_frequency : int\n How often to apply accumulated gradients to the weights. Default value is to apply them after each batch.\n Can be changed in `train` and `predict` arguments.\n\n callbacks : sequence of `:class:callbacks.BaseCallback`\n Callbacks to call at the end of each training iteration.\n\n sam_rho : float\n Foret P. et al. \"`Sharpness-Aware Minimization for Efficiently Improving Generalization\n <https://arxiv.org/abs/2010.01412>`_\".\n If evaluates to False, then SAM is not used.\n If float, then controls the size of neighborhood (check the paper for details).\n sam_individual_norm : bool\n If True, then each gradient is scaled according to its own L2 norm.\n If False, then one common gradient norm is computed and used as a scaler for all gradients.\n\n weights_averaging : dict\n If provided, we create additional copy of the model,\n which is updated with weights from the main model during train.\n Subkeys `start_iter`, `frequency` and `last_iter` define the range and frequency of updates.\n `avg_fn` can be used to change the logic of updates:\n - `swa` makes it so that weights from each update contribute equally.\n - `ema` makes it so that weights are aggregated with exponential moving average.\n - a callable, that takes `averaged_parameter, model_parameter, num_averaged` can be passed.\n\n profile : bool\n Whether to collect stats of model training timings.\n If True, then stats can be accessed via `profile_info` attribute or :meth:`.show_profile_info` method.\n\n\n # Infrastructure\n loss : str, dict\n Loss function, might be defined in multiple formats.\n\n If str, then short ``name``.\n If dict, then ``{'name': name, **kwargs}``.\n\n Name must be one of:\n - short name (e.g. 
``'mse'``, ``'ce'``, ``'l1'``, ``'cos'``, ``'hinge'``,\n ``'huber'``, ``'logloss'``, ``'dice'``)\n - a class name from `torch losses <https://pytorch.org/docs/stable/nn.html#loss-functions>`_\n (e.g. ``'PoissonNLL'`` or ``'TripletMargin'``)\n - an instance or constructor of `:class:torch.nn.Module`\n - callable\n\n Examples:\n\n - ``{'loss': 'mse'}``\n - ``{'loss': {'name': 'KLDiv', 'reduction': 'none'}}``\n - ``{'loss': {'name': MyCustomLoss, 'epsilon': 1e-6}}``\n - ``{'loss': my_custom_loss_fn}``\n - ``{'loss': my_custom_loss_class}``\n\n optimizer : str, dict\n Optimizer, might be defined in multiple formats.\n\n If str, then short ``name``.\n If dict, then ``{'name': name, **kwargs}``.\n\n Name must be one of:\n - short name (e.g. ``'Adam'``, ``'Adagrad'``, any optimizer from\n `torch.optim <https://pytorch.org/docs/stable/optim.html#algorithms>`_)\n - a class with ``Optimizer`` interface\n - a callable which takes model parameters and optional args\n\n Examples:\n\n - ``{'optimizer': 'Adam'}``\n - ``{'optimizer': {'name': 'SparseAdam', 'lr': 0.01}}``\n - ``{'optimizer': {'name': 'Adagrad', 'initial_accumulator_value': 0.01}}``\n - ``{'optimizer': {'name': MyCustomOptimizer, 'momentum': 0.95}}``\n\n decay : dict, list of dicts\n The learning rate decay algorithm might be defined in multiple formats.\n All decays require to have 'frequency' as a key in a configuration dictionary.\n Parameter 'frequency' sets how often do decay step: at every `'frequency'`\n iteration. Each decay might have optional parameters 'start_iter' and 'last_iter'\n that defines the closed range of iterations where decay is at work.\n If you want to use a learning rate warmup and decay together,\n you should use a list of decays (see examples).\n\n If dict, then ``{'name': name, **kwargs}``.\n If list, then each item is a dict of format described above.\n\n Name must be one of:\n\n - a class name from `torch.optim.lr_scheduler\n <https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate>`_\n (e.g. ``'LambdaLR'``) except ``'ReduceLROnPlateau'``.\n - short name (``'exp'`` - ExponentialLR, ``'lambda'`` - LambdaLR, ``'step'`` - StepLR,\n ``'multistep'`` - MultiStepLR, ``'cos'`` - CosineAnnealingLR)\n - a class with ``_LRScheduler`` interface\n - a callable which takes optimizer and optional args\n\n Examples:\n\n - ``{'decay': {'name: 'exp', 'frequency': 5, 'start_iter': 6, 'last_iter': 20}}``\n - ``{'decay': {'name': 'StepLR', 'steps_size': 10000, 'frequency': 5}}``\n - ``{'decay': {'name': MyCustomDecay, 'decay_rate': .5, 'frequency': 15, 'start_iter': 400}``\n - .. 
code-block:: python\n\n {'decay': [{'name': 'exp', 'gamma': 1, 'frequency': 1, 'last_iter': 900},\n {'name': 'exp', 'gamma': 0.96, 'frequency': 2, 'start_iter': 901}]\n\n\n Examples\n --------\n segmentation_config = {\n # Model layout\n 'initial_block': { # preprocessing\n 'layout': 'cna cna cnap', # string layout: c=conv, n=BN, a=act, p=pool\n 'channels': [INT, INT, INT], # individual channels for each convolution\n 'kernel_size': 3 # common kernel_size for all convolutions\n },\n\n 'body': {\n 'base_block': ResBlock, # can use any nn.Module as base block\n 'channels': INT, 'kernel_size': INT,\n 'downsample': False, 'attention': 'scse' # additional parameters of ResBlock module\n },\n\n 'head': { # postprocessing\n 'layout' : 'cna',\n 'channels': 1\n },\n 'output': 'sigmoid', # can get `sigmoid` output in the `predict`\n\n # Train configuration\n 'loss': 'bdice', # binary dice coefficient as loss function\n 'optimizer': {'name': 'Adam', 'lr': 0.01,}, # optimizer configuration\n 'decay': {'name': 'exp', 'gamma': 0.9, 'frequency': 100}, # lr decay scheduler\n 'microbatch_size': 16, # size of microbatches at training\n }\n \"\"\"\n PRESERVE = [\n 'full_config', 'config', 'model',\n 'inputs_shapes', 'targets_shapes', 'classes',\n 'loss', 'optimizer', 'scaler', 'decay', 'decay_step',\n 'sync_counter', 'microbatch_size',\n 'iteration', 'last_train_info', 'last_predict_info',\n 'lr_list', 'syncs', 'decay_iters',\n '_loss_list', 'loss_list', 'operations'\n ]\n\n def __init__(self, config=None):\n if isinstance(config, str):\n config = {'load/path': config}\n self.model_lock = Lock()\n\n # Configs\n self.external_config = Config(config)\n self.full_config = Config(config)\n\n # Shapes of inputs and targets\n self.placeholder_batch_size = 2\n self.inputs_shapes = None\n self.targets_shapes = None\n self.classes = None\n\n # Pytorch model\n self.model = None\n self._model_cpu_backup = None\n\n # Leading device and list of all devices to use\n self.device = None\n self.devices = []\n\n # Train procedure and infrastructure\n self.loss = None\n self.optimizer = None\n self.decay = None\n self.decay_step = None\n\n self.amp = True\n self.scaler = None\n\n self.operations = {}\n self.callbacks = []\n\n # Memory amortization: accumulate gradients to update weights later\n self.sync_frequency = 1\n self.sync_counter = 0\n self.microbatch_size = None\n\n # Sharpness-aware minimization\n self.sam_rho = 0.0\n self.sam_individual_norm = True\n\n # WA: model weight averaging\n self.weight_averaging = None\n self.wa_model = None\n self.wa_config = None\n self.wa_decay = None\n self.wa_iters = []\n self.wa_finalized = False\n\n # TTA: test time augmentations\n self.tta_wrapped = False\n\n # TRT: tensorRT\n self.trt_wrapped = False\n\n # Store info about passed train/predict iterations\n self.iteration = 0\n self.last_train_info = {}\n self.last_predict_info = {}\n self.lr_list = []\n self.syncs = []\n self.decay_iters = []\n self._loss_list = []\n self.loss_list = []\n\n # Profile\n self.profile = False\n self.profilers = []\n self.profile_info = None\n\n # Load model from file or initialize anew\n load = self.external_config.get('load')\n if load:\n self.load(**load)\n else:\n self.initialize()\n\n\n def initialize(self):\n \"\"\" Initialize the instance: make the config, attributes, and, if possible, PyTorch model. 
\"\"\"\n # Create config from default and external one\n self.config = self.combine_configs()\n\n # First, extract all necessary info from config into the instance attributes.\n # Then, update config with some of parsed values -- mainly for convenience.\n self.parse_attributes()\n self.update_config()\n\n # If the inputs are set in config with their shapes we can build right away\n if self.inputs_shapes:\n self.build_model()\n\n def reset(self):\n \"\"\" Delete the underlying model and all the infrastructure. Use to create model from scratch. \"\"\"\n # TODO: do we really need this?\n self.model = None\n self.last_train_info = {}\n self.last_predict_info = {}\n\n\n # Create config of model creation: combine the external and default ones\n @classmethod\n def default_config(cls):\n \"\"\" Define model defaults.\n\n Put here all constants (like the number of channels, kernel sizes, block layouts, stride, etc)\n specific to the model, but independent of anything else (like image shapes, number of classes, etc).\n\n Don't forget to use the default config from parent class.\n \"\"\"\n config = Config({\n # Devices and memory control\n 'amp': True,\n 'device': None,\n 'benchmark': True,\n 'channels_last': False,\n 'microbatch_size': False,\n 'sync_frequency': 1,\n 'profile': False,\n\n # Model building\n 'order': ['initial_block', 'body', 'head'],\n 'trainable': None,\n 'initial_block': {},\n 'body': {},\n 'head': {},\n 'common': {},\n\n # Additional operations to apply to model predictions\n 'output': None,\n\n # Shapes\n 'placeholder_batch_size': 2,\n\n # Training infrastructure\n 'loss': None,\n 'optimizer': 'Adam',\n 'decay': None,\n\n # SAM: sharpness-aware minimization\n 'sam_rho': 0.0,\n 'sam_individual_norm': True,\n })\n return config\n\n def combine_configs(self):\n \"\"\" Combine default configuration and the external one. \"\"\"\n config = self.default_config() + self.external_config\n return config\n\n def update_config(self):\n \"\"\" Update config with instance attributes. \"\"\"\n config = self.config\n\n config['head/targets_shapes'] = self.targets_shapes\n # As `update_config` can be called multiple times, and `head/classes` key can have value `None`,\n # we need to use `or` instead of `get`\n config['head/classes'] = config.get('head/classes') or self.classes\n\n if config.get('head/features') is None:\n config['head/features'] = config.get('head/classes')\n if config.get('head/channels') is None:\n config['head/channels'] = config.get('head/classes')\n\n\n # Parse config keys into instance attributes\n def parse_attributes(self):\n \"\"\" Parse instance attributes from config. 
\"\"\"\n config = self.config\n\n self.microbatch_size = config.get('microbatch', config.get('microbatch_size', False))\n self.sync_frequency = config.get('sync_frequency', 1)\n self.amp = config.get('amp', True)\n\n self.sam_rho = config.get('sam_rho', 0.0)\n self.sam_individual_norm = config.get('sam_individual_norm', False)\n self.profile = config.get('profile', False)\n\n self.callbacks = [callback.set_model(self) for callback in config.get('callbacks', [])]\n\n # Parse operations, that should be applied to model predictions, into a dictionary\n operations = self.config['output']\n if not isinstance(operations, dict):\n operations = operations or []\n operations = list(operations) if isinstance(operations, (tuple, list)) else [operations]\n operations = {'' : operations}\n self.operations = operations\n\n self._parse_devices()\n self._parse_placeholder_shapes()\n\n\n def _parse_devices(self):\n \"\"\" Extract `devices` and `benchmark` from config.\n If the config value is not set, use the best available accelerator.\n \"\"\"\n config = self.external_config\n devices = config.get('device')\n\n if devices is None:\n if torch.cuda.is_available():\n self.device = torch.device('cuda:0')\n else:\n self.device = torch.device('cpu')\n else:\n devices = devices if isinstance(devices, list) else [devices]\n available_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())] + ['cpu']\n for dev in devices:\n if isinstance(dev, torch.device):\n self.devices.append(dev)\n elif isinstance(dev, str):\n dev_ = dev.lower()\n dev_ = dev_.replace('gpu', 'cuda')\n dev_ = dev_.replace('cpu:0', 'cpu')\n\n devices = [torch.device(device) for device in available_devices\n if re.search(dev_, device.lower()) is not None]\n self.devices.extend(devices)\n else:\n raise TypeError('Wrong device type: {}'.format(type(dev)))\n self.devices = [device for i, device in enumerate(self.devices)\n if device not in self.devices[:i]]\n self.device = self.devices[0]\n\n if self.device.type == 'cpu':\n #TODO: maybe, we should add warning\n self.amp = False\n torch.backends.cudnn.benchmark = config.get('benchmark', 'cuda' in self.device.type)\n\n def _parse_placeholder_shapes(self):\n \"\"\" Extract `inputs_shapes`, `targets_shapes`, `classes` from config. \"\"\"\n config = self.config\n\n batch_size = config.get('placeholder_batch_size', 2)\n inputs_shapes = config.get('inputs_shapes') or config.get('input_shapes')\n targets_shapes = config.get('targets_shapes') or config.get('target_shapes')\n classes = config.get('classes')\n\n self.placeholder_batch_size = batch_size\n\n if inputs_shapes:\n inputs_shapes = self._to_nested_list(inputs_shapes)\n self.inputs_shapes = [(batch_size, *shape) for shape in inputs_shapes]\n\n if targets_shapes:\n targets_shapes = self._to_nested_list(targets_shapes)\n self.targets_shapes = [(batch_size, *shape) for shape in targets_shapes]\n\n if not classes:\n self.classes = [item[0] for item in targets_shapes]\n\n if classes:\n classes = list(classes) if isinstance(classes, (tuple, list)) else [classes]\n self.classes = classes\n\n @staticmethod\n def _to_nested_list(sequence):\n if not isinstance(sequence[0], (tuple, list)):\n return [list(sequence)]\n return [list(item) for item in sequence]\n\n def make_placeholder_data(self, batch_size=None, unwrap=True, to_device=True):\n \"\"\" Create a sequence of tensor, based on the parsed `inputs_shapes`. 
\"\"\"\n batch_size = batch_size or self.placeholder_batch_size\n\n data = [np.random.random((batch_size, *shape[1:])).astype(np.float32)\n for shape in self.inputs_shapes]\n\n if unwrap:\n data = data[0] if len(data) == 1 else data\n if to_device:\n data = self.transfer_to_device(data)\n return data\n\n\n # Create training infrastructure: loss, optimizer, decay\n def make_infrastructure(self):\n \"\"\" Create loss, optimizer and decay, required for training the model. \"\"\"\n self.make_loss()\n self.make_optimizer()\n self.make_decay()\n self.scaler = torch.cuda.amp.GradScaler()\n\n self.setup_weights_averaging()\n\n def unpack(self, value):\n \"\"\" Unpack argument to actual value and kwargs. \"\"\"\n if isinstance(value, dict):\n kwargs = value.copy()\n value = kwargs.pop('name', None)\n else:\n kwargs = {}\n\n return value, kwargs\n\n def make_loss(self):\n \"\"\" Set model loss. Changes the `loss` attribute. \"\"\"\n if not self.config.get('loss'):\n raise ValueError('Set \"loss\" in model configuration!')\n loss, kwargs = self.unpack(self.config['loss'])\n\n loss_fn = None\n # Parse `loss` to actual module\n if isinstance(loss, str):\n # String like 'ce', 'bdice' or 'CrossEntropy'\n if hasattr(nn, loss):\n loss = getattr(nn, loss)\n elif hasattr(nn, loss + \"Loss\"):\n loss = getattr(nn, loss + \"Loss\")\n else:\n loss = LOSSES.get(re.sub('[-_ ]', '', loss).lower(), None)\n\n elif isinstance(loss, nn.Module):\n # Already a valid module\n loss_fn = loss\n elif isinstance(loss, type):\n # Class to make module\n pass\n elif callable(loss):\n # Callable: just pass other arguments in\n loss_fn = partial(loss, **kwargs)\n else:\n raise ValueError(f'Unknown loss: {loss}')\n\n loss_fn = loss_fn if loss_fn is not None else loss(**kwargs)\n if isinstance(loss_fn, nn.Module):\n loss_fn.to(device=self.device)\n\n self.loss = loss_fn\n\n def make_optimizer(self):\n \"\"\" Set model optimizer. Changes the `optimizer` attribute. \"\"\"\n optimizer, kwargs = self.unpack(self.config['optimizer'])\n\n # Choose the optimizer\n if callable(optimizer) or isinstance(optimizer, type):\n pass\n elif isinstance(optimizer, str) and hasattr(torch.optim, optimizer):\n optimizer = getattr(torch.optim, optimizer)\n else:\n raise ValueError(f'Unknown optimizer: {optimizer}')\n\n self.optimizer = optimizer(self.model.parameters(), **kwargs)\n\n def make_decay(self):\n \"\"\" Set model decay. Changes the `decay` and `decay_step` attribute. 
\"\"\"\n decay = self.config['decay']\n\n if decay is None:\n decays = []\n else:\n decays = decay if isinstance(decay, (tuple, list)) else [decay]\n\n self.decay, self.decay_step = [], []\n for decay_ in decays:\n decay_, decay_kwargs = self.unpack(decay_)\n\n if decay_ is None:\n raise ValueError('Missing `name` key in the decay configuration')\n\n # Parse decay\n if callable(decay_) or isinstance(decay_, type):\n pass\n elif isinstance(decay_, str) and hasattr(torch.optim.lr_scheduler, decay_):\n decay = getattr(torch.optim.lr_scheduler, decay_)\n elif decay_ in DECAYS:\n decay_ = DECAYS.get(decay_)\n else:\n raise ValueError(f'Unknown learning rate scheduler: {decay_}')\n\n # Parse step parameters\n step_params = {\n 'start_iter': 0,\n 'last_iter': np.inf,\n **decay_kwargs\n }\n if 'frequency' not in step_params:\n raise ValueError('Missing `frequency` key in the decay configuration')\n\n # Set defaults for some of the decays\n if decay_ in DECAYS_DEFAULTS:\n decay_dict = DECAYS_DEFAULTS.get(decay_).copy()\n if decay == DECAYS['cos']:\n decay_dict.update(T_max=step_params['frequency'])\n decay_kwargs = {**decay_dict, **decay_kwargs}\n\n # Remove unnecessary keys from kwargs\n for key in ['start_iter', 'last_iter', 'frequency']:\n decay_kwargs.pop(key, None)\n\n # Create decay or store parameters for later usage\n decay_ = decay_(self.optimizer, **decay_kwargs)\n\n self.decay.append(decay_)\n self.decay_step.append(step_params)\n\n def setup_weights_averaging(self):\n \"\"\" Prepare WA-model: check all required keys and store copy on CPU. \"\"\"\n wa_config = self.config.get('weights_averaging') or self.config.get('wa') or self.config.get('swa')\n\n\n if wa_config is not None:\n required_keys = ['start_iter', 'last_iter', 'frequency']\n for key in required_keys:\n if key not in wa_config:\n raise ValueError(f'Key `{key}` is missing in weights averaging configuration!')\n\n avg_fn = wa_config.get('avg_fn', None)\n if avg_fn in ['stochastic', 'swa']:\n avg_fn = None\n elif avg_fn in ['exponential', 'ema']:\n avg_fn = lambda wa_parameter, model_parameter, num_averaged: 0.1 * wa_parameter + 0.9 * model_parameter\n\n self.weight_averaging = True\n self.wa_config = Config(wa_config)\n self.wa_model = AveragedModel(self.model, device='cpu', avg_fn=avg_fn)\n\n if 'swalr' in wa_config:\n self.wa_decay = SWALR(self.optimizer, **wa_config['swalr'])\n\n\n # Set pre-initialized model or chain multiple building blocks to create model\n def set_model(self, model):\n \"\"\" Set the underlying PyTorch model to a supplied one and update training infrastructure. \"\"\"\n self.model = model\n self.initialize_weights()\n self.model_to_device()\n\n self.make_infrastructure()\n\n def build_model(self, inputs=None):\n \"\"\" Create an instance of PyTorch model or use one provided.\n After it, create training infrastructure (loss, optimizer, decay).\n \"\"\"\n if inputs is not None:\n inputs = inputs[0] if len(inputs) == 1 and isinstance(inputs, list) else inputs\n inputs = self.transfer_to_device(inputs)\n else:\n inputs = self.make_placeholder_data(to_device=True)\n\n if 'model' not in self.config:\n self.model = Network(inputs=inputs, config=self.config, device=self.device)\n else:\n self.model = self.config['model']\n\n self.initialize_weights()\n if self.config['channels_last']:\n self.model = self.model.to(memory_format=torch.channels_last)\n\n self.model_to_device()\n self.make_infrastructure()\n\n def finalize_wa(self):\n \"\"\" Replace the model with weight-averaged one. 
\"\"\"\n if self.weight_averaging and not self.wa_finalized:\n self.wa_iters.append(self.iteration)\n self.model = self.wa_model.module\n self.model_to_device()\n\n self.make_optimizer()\n self.scaler = torch.cuda.amp.GradScaler()\n\n self.wa_finalized = True\n\n def wrap_tta(self, wrapper='ClassificationTTAWrapper', transforms=None, merge_mode='mean'):\n \"\"\" Wrap model with test-time augmentations. \"\"\"\n import ttach\n transforms = transforms if transforms is not None else ttach.aliases.vlip_transform()\n self.model = getattr(ttach, wrapper)(self.model, transforms=transforms, merge_mode=merge_mode)\n self.tta_wrapped = True\n\n def wrap_trt(self, batch_size, use_onnx=True, fp16_mode=True, **kwargs):\n \"\"\" !!. \"\"\"\n from torch2trt import torch2trt\n inputs = self.make_placeholder_data(batch_size=batch_size, unwrap=False)\n\n self.model = torch2trt(self.model.eval(), inputs=inputs, max_batch_size=batch_size,\n fp16_mode=fp16_mode, use_onnx=use_onnx, **kwargs)\n self.trt_wrapped = True\n\n\n def initialize_weights(self):\n \"\"\" Initialize model weights with a pre-defined or supplied callable. \"\"\"\n init_weights = self.config.get('init_weights', None)\n if self.model is not None and init_weights is not None:\n # Parse model weights initialization\n init_weights = init_weights if isinstance(init_weights, list) else [init_weights]\n\n for init_weights_function in init_weights:\n if init_weights_function in {'resnet', 'classic'}:\n init_weights_function = best_practice_resnet_init\n\n # Actual weights initialization\n self.model.apply(init_weights_function)\n\n\n # Transfer to/from device(s)\n def transfer_to_device(self, data):\n \"\"\" Transfer (possibly nested) data structure to device and return the same structure. \"\"\"\n if isinstance(data, (dict, Config)):\n return type(data)({key : self.transfer_to_device(value) for key, value in data.items()})\n\n if isinstance(data, (tuple, list)):\n return type(data)(self.transfer_to_device(item) for item in data)\n\n if isinstance(data, np.ndarray):\n if data.dtype != np.float32:\n data = data.astype(np.float32)\n data = torch.from_numpy(data)\n\n if self.config['channels_last'] and data.ndim == 4:\n data = data.to(memory_format=torch.channels_last)\n data = data.to(self.device)\n return data\n\n if isinstance(data, torch.Tensor):\n data = data.to(self.device)\n return data\n\n if CUPY_AVAILABLE and isinstance(data, cp.ndarray):\n if data.device.id == self.device.index:\n data = torch.utils.dlpack.from_dlpack(data.toDlpack())\n return data\n raise TypeError(f'cupy arrays should reside on the same GPU, as model itself: {self.device}.')\n\n if data is None:\n return None\n raise TypeError('Passed data should either be a `np.ndarray`, `torch.Tensor`, `cupy.ndarray`, '\n f'or a container of them, got{type(data)}.')\n\n def transfer_from_device(self, data):\n \"\"\" Transfer (possibly nested) data structure from device and return the same structure. 
\"\"\"\n if isinstance(data, (dict, Config)):\n return type(data)({key : self.transfer_from_device(value) for key, value in data.items()})\n\n if isinstance(data, (tuple, list)):\n return type(data)(self.transfer_from_device(item) for item in data)\n\n if isinstance(data, (torch.Tensor, torch.autograd.Variable)):\n cpu_tensor = data.detach().cpu().numpy()\n if self.amp and cpu_tensor.dtype != np.float32:\n cpu_tensor = cpu_tensor.astype(np.float32)\n return cpu_tensor\n\n if isinstance(data, (np.ndarray, int, float)):\n return data\n raise TypeError('Passed data should either be a `np.ndarray`, `torch.Tensor`'\n f' or a container of them, got {type(data)}.')\n\n def model_to_device(self, model=None):\n \"\"\" Put model on device(s). If needed, apply DataParallel wrapper. \"\"\"\n model = model if model is not None else self.model\n\n if len(self.devices) > 1:\n self.model = nn.DataParallel(self.model, self.devices)\n else:\n self.model.to(self.device)\n\n\n # Apply model to train/predict on given data\n def train(self, inputs, targets, outputs=None, lock=True, profile=False,\n sync_frequency=True, microbatch_size=None, microbatch_drop_last=True,\n sam_rho=None, sam_individual_norm=None):\n \"\"\" Train the model with the data provided\n\n Parameters\n ----------\n inputs : np.ndarray or sequence of them\n Model inputs. If there is a single input, then it is passed to model directly; otherwise, we pass a list.\n If the microbatching is used, individual elements are split along the first axis.\n targets : np.ndarray or sequence of them\n Model targets to calculate loss with.\n If there is a single target, then it is passed to loss computation directly; otherwise, we pass a list.\n If the microbatching is used, individual elements are split along the first axis.\n outputs : str or sequence of them\n Desired outputs of the method.\n Each string defines a tensor to get and should be one of pre-defined or set in `outputs` key in the config.\n Pre-defined tensors are `predictions`, `loss`, and `predictions_{i}` for multi-output models.\n lock : bool\n If True, then model, loss and gradient update operations are locked, thus allowing for multithreading.\n sync_frequency : int, bool or None\n If int, then how often to apply accumulated gradients to the weights.\n If True, then value from config is used.\n Default value is 1, which means to apply gradients after each batch of data.\n If False or None, then gradients are applied after each batch of data.\n microbatch_size : int, bool or None\n If int, then size of chunks to split every batch into. Allows to process given data sequentially,\n accumulating gradients from microbatches and applying them once in the end.\n If None, then value from config is used (default value is not to use microbatching).\n If False, then microbatching is not used.\n microbatch_drop_last : bool\n Whether to drop microbatches, that are smaller than the microbatch size. Default is True.\n sam_rho : float\n Foret P. et al. 
\"`Sharpness-Aware Minimization for Efficiently Improving Generalization\n <https://arxiv.org/abs/2010.01412>`_\".\n If evaluates to False, then SAM is not used.\n If float, then controls the size of neighborhood (check the paper for details).\n sam_individual_norm : bool\n If True, then each gradient is scaled according to its own L2 norm.\n If False, then one common gradient norm is computed and used as a scaler for all gradients.\n profile : bool\n Whether to collect stats of model training timings.\n If True, then stats can be accessed via `profile_info` attribute or :meth:`.show_profile_info` method.\n\n Returns\n -------\n Calculated values of requested tensors from `outputs` in the same order.\n\n Examples\n --------\n .. code-block:: python\n\n model.train(B('images'), B('labels'), fetches='loss')\n \"\"\"\n # Lock the entire method; release in any case\n try:\n if lock:\n self.model_lock.acquire()\n self.last_train_info = {}\n\n # Parse inputs and targets: always a list\n inputs = list(inputs) if isinstance(inputs, (tuple, list)) else [inputs]\n targets = list(targets) if isinstance(targets, (tuple, list)) else [targets]\n\n # Parse outputs: always a list\n single_output = isinstance(outputs, str)\n outputs = [outputs] if single_output else (outputs or [])\n\n # Parse train parameters\n if sync_frequency is True:\n sync_frequency = self.sync_frequency\n elif sync_frequency is False or sync_frequency is None:\n sync_frequency = 1\n\n # Prepare parameters for SAM\n if sam_rho is None:\n sam_rho = self.sam_rho\n if sam_individual_norm is None:\n sam_individual_norm = self.sam_individual_norm\n\n # Split the data into `microbatch_size` size chunks\n (chunked_inputs, chunked_targets,\n batch_size, microbatch_size) = self.split_into_microbatches(inputs, targets,\n microbatch_size, microbatch_drop_last)\n\n steps = len(chunked_inputs)\n inputs_shapes = [get_shape(item) for item in chunked_inputs[-1]]\n targets_shapes = [get_shape(item) for item in chunked_targets[-1]]\n self.last_train_info.update({'inputs_shapes': inputs_shapes,\n 'targets_shapes': targets_shapes})\n\n # Create PyTorch model if it is yet to be initialized, based on the actual inputs\n if self.model is None:\n # Update config with shapes\n self.inputs_shapes = inputs_shapes\n self.targets_shapes = targets_shapes\n if not self.classes and len(targets_shapes) > 2:\n self.classes = [shape[1] for shape in targets_shapes]\n\n self.update_config()\n\n # Can use the first two items to build model: no need for the whole tensor\n build_inputs = [item[:2] for item in chunked_inputs[0]]\n self.build_model(build_inputs)\n\n self.model.train()\n\n # Set up the profiling, if needed\n profile = profile or self.profile\n if profile:\n profiler = torch.autograd.profiler.profile(use_cuda='cpu' not in self.device.type)\n profiler.__enter__()\n\n # Train on each of the microbatches\n chunked_outputs = []\n for chunk_inputs, chunk_targets in zip(chunked_inputs, chunked_targets):\n # Compute forward and backward passes of the model, apply gradients, evaluate requested outputs\n chunk_outputs = self._train(inputs=chunk_inputs, targets=chunk_targets, outputs=outputs[:],\n sync_frequency=sync_frequency*steps,\n sam_rho=sam_rho, sam_individual_norm=sam_individual_norm)\n chunked_outputs.append(chunk_outputs)\n\n # Exit the profiling\n if profile:\n profiler.__exit__(None, None, None)\n self.profilers.append(profiler)\n\n # Call the callbacks\n for callback in self.callbacks:\n callback.on_iter_end()\n\n # Use current weights for weights 
averaging\n if self.weight_averaging:\n start_iter, frequency, last_iter = self.wa_config.get(['start_iter', 'frequency', 'last_iter'])\n\n if self.iteration >= last_iter and not self.wa_finalized:\n self.finalize_wa()\n\n elif (start_iter <= self.iteration <= last_iter and\n (self.iteration - start_iter) % frequency == 0):\n self.wa_model.update_parameters(self.model)\n self.wa_iters.append(self.iteration)\n\n if self.wa_decay:\n self.wa_decay.step()\n\n # Aggregate the outputs from microbatches\n result = self.aggregate_microbatches(outputs, chunked_outputs, single_output)\n\n # Store the average value of loss over microbatches\n self.loss_list.append(np.mean(self._loss_list[-steps:]))\n\n # Store info about current train iteration\n self.last_train_info.update({\n 'amp': self.amp,\n 'batch_size': batch_size,\n 'microbatch_size': microbatch_size,\n 'sync_frequency': sync_frequency,\n 'steps': steps,\n 'sam': bool(sam_rho), 'sam_rho': sam_rho,\n 'sam_individual_norm': sam_individual_norm,\n 'outputs': outputs,\n })\n\n finally:\n if lock:\n self.model_lock.release()\n return result\n\n def _train(self, inputs, targets, outputs, sync_frequency, sam_rho, sam_individual_norm):\n # Parse inputs\n inputs = inputs[0] if len(inputs) == 1 and isinstance(inputs, list) else inputs\n targets = targets[0] if len(targets) == 1 and isinstance(targets, list) else targets\n inputs = self.transfer_to_device(inputs)\n targets = self.transfer_to_device(targets)\n\n # Convert layer ids into LayerHooks\n outputs = self.prepare_outputs(outputs)\n\n # Compute predictions; store shapes for introspection\n with torch.cuda.amp.autocast(enabled=self.amp):\n predictions = self.model(inputs)\n\n # SAM: store grads from previous microbatches\n if self.iteration >= 1 and bool(sam_rho):\n self._train_sam_store_gradients()\n\n # Compute loss and gradients; store loss value for every microbatch\n with torch.cuda.amp.autocast(enabled=self.amp):\n loss = self.loss(predictions, targets)\n loss_ = loss / sync_frequency\n (self.scaler.scale(loss_) if self.amp else loss_).backward()\n self._loss_list.append(self.transfer_from_device(loss))\n\n # SAM: use obtained grads to move to the local maxima\n if self.iteration >= 1 and bool(sam_rho):\n self._train_sam_update_gradients(inputs=inputs, targets=targets, sync_frequency=sync_frequency,\n sam_rho=sam_rho, sam_individual_norm=sam_individual_norm)\n\n # Whether to update weights or keep accumulating\n if self.sync_counter == sync_frequency - 1:\n # Store learning rate: we do it before decay, so it is actual LR used on this iteration\n self.lr_list.append([group['lr'] for group in self.optimizer.param_groups])\n\n # Update weights and remove grads\n if self.amp:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n # Optimization over default `zero_grad`; can be removed after PyTorch >= 1.8\n for p in self.model.parameters():\n p.grad = None\n self.iteration += 1\n\n # Apply decay to learning rate, if needed\n if self.decay:\n for decay, decay_step in zip(self.decay, self.decay_step):\n step_cond = (self.iteration - decay_step['start_iter']) % decay_step['frequency'] == 0\n range_cond = decay_step['start_iter'] <= self.iteration <= decay_step['last_iter']\n if step_cond and range_cond:\n decay.step()\n self.decay_iters.append(self.iteration)\n\n # Update counters\n self.sync_counter = 0\n self.syncs.append(True)\n else:\n self.sync_counter += 1\n self.syncs.append(False)\n\n # Make all possible outputs\n additional_outputs = 
self.compute_outputs(predictions=predictions)\n output_container = {\n **additional_outputs,\n 'predictions': predictions,\n 'loss': loss,\n }\n\n # Log inner info\n predictions_ = list(predictions) if isinstance(predictions, (tuple, list)) else [predictions]\n self.last_train_info['predictions_shapes'] = [get_shape(item) for item in predictions_]\n self.last_train_info['available_outputs'] = list(output_container.keys())\n\n # Retrieve requested outputs\n requested_outputs = self.extract_outputs(outputs, output_container)\n\n # Transfer only the requested outputs to CPU\n return self.transfer_from_device(requested_outputs)\n\n def _train_sam_store_gradients(self):\n \"\"\" Store gradients from previous microbatches. \"\"\"\n for p in self.model.parameters():\n if p.grad is not None:\n self.optimizer.state[p]['previous_grad'] = p.grad.clone().detach()\n p.grad = None\n\n def _train_sam_update_gradients(self, inputs, targets, sync_frequency, sam_rho, sam_individual_norm):\n \"\"\" Update gradients to move to the local maxima. \"\"\"\n # Fetch gradients\n grads = []\n params_with_grads = []\n for p in self.model.parameters():\n if p.grad is not None:\n grads.append(p.grad.clone().detach())\n params_with_grads.append(p)\n p.grad = None\n\n # Move to the local maxima\n if sam_individual_norm:\n epsilons = [grad * sam_rho / (grad.detach().norm(2).to(self.device)) for grad in grads]\n else:\n grad_norm = torch.stack([g.detach().norm(2).to(self.device) for g in grads]).norm(2)\n epsilons = [eps * sam_rho / grad_norm for eps in grads]\n\n if self.amp:\n scale = self.scaler.get_scale()\n epsilons = [eps / scale for eps in epsilons]\n params_with_grads = [p + eps for p, eps in zip(params_with_grads, epsilons)]\n\n # Compute new gradients: direction to move to minimize the local maxima\n with torch.cuda.amp.autocast(enabled=self.amp):\n predictions_inner = self.model(inputs)\n loss_inner = self.loss(predictions_inner, targets) / sync_frequency\n (self.scaler.scale(loss_inner) if self.amp else loss_inner).backward()\n\n # Cancel the previous update to model parameters, add stored gradients from previous microbatches\n params_with_grads = [p - eps for p, eps in zip(params_with_grads, epsilons)]\n\n for p in self.model.parameters():\n previous_grad = self.optimizer.state[p].get('previous_grad')\n if previous_grad is not None:\n p.grad.add_(previous_grad)\n\n\n def predict(self, inputs, targets=None, outputs=None, lock=True, microbatch_size=False):\n \"\"\" Get predictions on the data provided.\n\n Parameters\n ----------\n inputs : np.ndarray or sequence of them\n Model inputs. Passed directly to model.\n targets : np.ndarray or sequence of them\n Optional model targets to calculate loss with. Passed directly to model.\n outputs : str or sequence of them\n Desired outputs of the method.\n Each string defines a tensor to get and should be one of:\n - pre-defined tensors, which are `predictions`, `loss`, and `predictions_{i}` for multi-output models.\n - values described in the `outputs` key in the config\n - layer id, which describes how to access the layer through a series of `getattr` and `getitem` calls.\n Allows to get intermediate activations of a neural network.\n lock : bool\n If True, then model and loss computation operations are locked, thus allowing for multithreading.\n microbatch_size : int, bool or None\n If int, then size of chunks to split every batch into. 
Allows to process given data sequentially.\n If None, then value from config is used (default value is not to use microbatching).\n If False, then microbatching is not used.\n\n Returns\n -------\n Calculated values of tensors in `outputs` in the same order.\n\n Examples\n --------\n Layer ids allow to get intermediate activations. If the model has `batchflow_model.model.head[0]` layer,\n you can access it with::\n\n >>> batchflow_model.predict(inputs=B.images, outputs='model.head[0]')\n\n String keys for `getitem` calls are also allowed::\n\n >>> batchflow_model.predict(inputs=B.images, outputs='model.body.encoder[\"block-0\"]')\n \"\"\"\n # Acquire lock; release in any case\n try:\n if lock:\n self.model_lock.acquire()\n self.last_predict_info = {}\n\n # Parse inputs and targets: always a list\n inputs = list(inputs) if isinstance(inputs, (tuple, list)) else [inputs]\n if targets is not None:\n targets = (list(targets) if isinstance(targets, (tuple, list)) else [targets])\n else:\n targets = []\n\n # Parse outputs: always a list\n single_output = isinstance(outputs, str)\n outputs = [outputs] if single_output else (outputs or [])\n\n # Raise error early\n if 'loss' in outputs and not targets:\n raise TypeError('`targets` should be provided to fetch `loss`!')\n\n # Split the data into `microbatch` size chunks\n (chunked_inputs, chunked_targets,\n batch_size, microbatch_size) = self.split_into_microbatches(inputs, targets,\n microbatch_size, drop_last=False)\n\n steps = len(chunked_inputs)\n inputs_shapes = [get_shape(item) for item in chunked_inputs[-1]]\n targets_shapes = [get_shape(item) for item in chunked_targets[-1]]\n self.last_predict_info.update({'inputs_shapes': inputs_shapes,\n 'targets_shapes': targets_shapes})\n\n # Evaluate each microbatch separately\n self.model.eval()\n\n chunked_outputs = []\n for chunk_inputs, chunk_targets in zip(chunked_inputs, chunked_targets):\n # Evaluate requested outputs\n chunk_outputs = self._predict(inputs=chunk_inputs, targets=chunk_targets, outputs=outputs[:])\n chunked_outputs.append(chunk_outputs)\n\n # Aggregate the outputs from microbatches\n result = self.aggregate_microbatches(outputs, chunked_outputs, single_output)\n\n # Store info about current predict iteration\n self.last_predict_info.update({\n 'amp': self.amp,\n 'batch_size': batch_size,\n 'microbatch_size': microbatch_size,\n 'steps': steps,\n 'outputs': outputs,\n })\n\n finally:\n if lock:\n self.model_lock.release()\n return result\n\n def _predict(self, inputs, targets, outputs):\n # Parse inputs\n inputs = inputs[0] if len(inputs) == 1 and isinstance(inputs, list) else inputs\n targets = targets[0] if len(targets) == 1 and isinstance(targets, list) else targets\n\n # Convert layer ids into LayerHooks\n outputs = self.prepare_outputs(outputs)\n\n output_container = {}\n with torch.no_grad(), torch.cuda.amp.autocast(enabled=self.amp):\n inputs = self.transfer_to_device(inputs)\n predictions = self.model(inputs)\n\n output_container['predictions'] = predictions\n\n if len(targets) > 0:\n targets = self.transfer_to_device(targets)\n loss = self.loss(predictions, targets)\n output_container['loss'] = loss\n\n # Make all possible outputs\n additional_outputs = self.compute_outputs(predictions=predictions)\n output_container.update(additional_outputs)\n\n # Log inner info\n predictions_ = list(predictions) if isinstance(predictions, (tuple, list)) else [predictions]\n self.last_predict_info['predictions_shapes'] = [get_shape(item) for item in predictions_]\n 
self.last_predict_info['available_outputs'] = list(output_container.keys())\n\n # Retrieve requested outputs\n requested_outputs = self.extract_outputs(outputs, output_container)\n\n # Transfer only the requested outputs to CPU\n return self.transfer_from_device(requested_outputs)\n\n\n # Common utilities for train and predict\n def split_into_microbatches(self, inputs, targets, microbatch_size, drop_last):\n \"\"\" Split inputs and targets into microbatch-sized chunks. \"\"\"\n # Parse microbatch size\n if microbatch_size is None:\n microbatch_size = self.microbatch_size\n\n # Compute batch_size and make sure it is the same for all inputs and targets\n batch_size = len(inputs[0])\n for i, item in enumerate(inputs):\n if len(item) != batch_size:\n raise ValueError('All of `inputs` should have the same batch_size, as the first one!'\n f'Input at position `{i}` has batch_size {len(item)}!={batch_size}')\n for i, item in enumerate(targets):\n if len(item) != batch_size:\n raise ValueError('All of `targets` should have the same batch_size, as the first of `inputs`!'\n f'Target at position `{i}` has batch_size {len(item)}!={batch_size}')\n\n # Split data into microbatches, if needed\n if microbatch_size:\n chunked_inputs = [[item[i:i + microbatch_size] for item in inputs]\n for i in range(0, batch_size, microbatch_size)]\n chunked_targets = [[item[i:i + microbatch_size] for item in targets]\n for i in range(0, batch_size, microbatch_size)]\n\n if drop_last and batch_size % microbatch_size != 0:\n chunked_inputs = chunked_inputs[:-1]\n chunked_targets = chunked_targets[:-1]\n else:\n chunked_inputs = [inputs]\n chunked_targets = [targets]\n\n return chunked_inputs, chunked_targets, batch_size, microbatch_size\n\n def aggregate_microbatches(self, outputs, chunked_outputs, single_output):\n \"\"\" Aggregate outputs from microbatches into outputs for the whole batch.\n Scalar values are aggregated by `mean`, array values are concatenated along the first (batch) axis.\n \"\"\"\n result = []\n for i, _ in enumerate(outputs):\n # All tensors for current `output_name`\n chunked_output = [chunk_outputs[i] for chunk_outputs in chunked_outputs]\n\n if chunked_output[0].size != 1:\n result.append(np.concatenate(chunked_output, axis=0))\n else:\n result.append(np.mean(chunked_output))\n if single_output:\n result = result[0]\n\n return result\n\n\n def compute_outputs(self, predictions):\n \"\"\" Produce additional outputs, defined in the config, from `predictions`.\n Also adds a number of aliases to predicted tensors.\n \"\"\"\n predictions = list(predictions) if isinstance(predictions, (tuple, list)) else [predictions]\n\n if len(predictions) < len(self.operations):\n raise ValueError(f'Not enough predictions ({len(predictions)}) to apply {len(self.operations)} operations.'\n ' Revise the `output` config key!')\n\n # Add default aliases for each predicted tensor\n outputs = {f'predictions_{i}': tensor for i, tensor in enumerate(predictions)}\n\n # Iterate over tensors in predictions and the corresponding output operations\n for i, (tensor, (output_prefix, output_operations)) in enumerate(zip(predictions, self.operations.items())):\n # Save the tensor itself under the `output_prefix` name\n if output_prefix:\n outputs[output_prefix] = tensor\n\n output_prefix = output_prefix + '_' if output_prefix else ''\n\n # For each operation, add multiple aliases\n if output_operations:\n for j, operation in enumerate(output_operations):\n output_tensor, operation_name = self.apply_output_operation(tensor, 
operation)\n if operation_name:\n outputs[output_prefix + operation_name] = output_tensor # i.e. `first_sigmoid`, `sigmoid`\n\n outputs.update({\n output_prefix + str(j) : output_tensor, # i.e. `first_0`, `0`\n f'output_{i}_{j}' : output_tensor, # i.e. `predictions_0_0`\n })\n\n return outputs\n\n @staticmethod\n def apply_output_operation(tensor, operation):\n \"\"\" Apply `operation`, possibly aliased with a string, to `tensor`. \"\"\"\n with torch.no_grad():\n if operation is None:\n result = tensor\n name = ''\n elif operation == 'softplus':\n result = torch.nn.functional.softplus(tensor)\n name = operation\n elif operation == 'sigmoid':\n result = torch.sigmoid(tensor)\n name = operation\n elif operation == 'proba':\n result = torch.nn.functional.softmax(tensor, dim=1)\n name = operation\n elif operation == 'labels':\n result = tensor.argmax(dim=1)\n name = operation\n elif callable(operation):\n result = operation(tensor)\n name = operation.__name__\n return result, name\n\n\n def prepare_outputs(self, outputs):\n \"\"\" Add the hooks to all outputs that look like a layer id. \"\"\"\n result = []\n for output_name in outputs:\n if self.is_layer_id(output_name):\n layer = self.get_layer(output_name)\n hook = LayerHook(layer)\n result.append(hook)\n else:\n result.append(output_name)\n return result\n\n def extract_outputs(self, outputs, output_container):\n \"\"\" Retrieve activation data from hooks, get other requested outputs from container. \"\"\"\n requested_outputs = []\n for item in outputs:\n if isinstance(item, LayerHook):\n item.close()\n value = item.activation\n else:\n value = output_container[item]\n\n requested_outputs.append(value)\n return requested_outputs\n\n\n # Store model\n def save(self, path, *args, **kwargs):\n \"\"\" Save torch model.\n\n Parameters\n ----------\n path : str\n Path to a file where the model data will be stored.\n\n Examples\n --------\n .. code-block:: python\n\n torch_model = ResNet34()\n\n Now save the model\n\n .. code-block:: python\n\n torch_model.save('/path/to/models/resnet34')\n\n The model will be saved to /path/to/models/resnet34.\n \"\"\"\n _ = args\n dirname = os.path.dirname(path)\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if kwargs.get('pickle_module') is None:\n kwargs['pickle_module'] = dill\n\n if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n self.model = self.model.module\n\n torch.save({item: getattr(self, item) for item in self.PRESERVE}, path, **kwargs)\n\n def load(self, path, *args, eval=False, **kwargs):\n \"\"\" Load a torch model from files.\n\n Parameters\n ----------\n path : str\n File path where a model is stored.\n\n eval : bool\n Whether to switch the model to eval mode.\n\n Examples\n --------\n .. 
code-block:: python\n\n resnet = ResNet34(load=dict(path='/path/to/models/resnet34'))\n\n torch_model.load(path='/path/to/models/resnet34')\n\n TorchModel(config={'device': 'gpu:2', 'load/path': '/path/to/models/resnet34'})\n\n **How to move the model to device**\n\n The model will be moved to device specified in the model config by key `device`.\n \"\"\"\n _ = args\n self._parse_devices()\n\n if kwargs.get('pickle_module') is None:\n kwargs['pickle_module'] = dill\n\n if self.device:\n checkpoint = torch.load(path, map_location=self.device, **kwargs)\n else:\n checkpoint = torch.load(path, **kwargs)\n\n # `load_config` is a reference to `self.external_config` used to update `config`\n # It is required since `self.external_config` is overwritten in the cycle below\n load_config = self.external_config\n\n for item in self.PRESERVE:\n setattr(self, item, checkpoint.get(item))\n self.config = self.config + load_config\n\n self.model_to_device()\n\n if eval:\n self.model.eval()\n\n\n # Debug and profile the performance\n def set_debug_mode(self, mode=True):\n \"\"\" Changes representation of model to a more or less detailed.\n By default, model representation reduces the description of the most complex modules.\n \"\"\"\n if self.model is None:\n raise ValueError('Model is not initialized yet. ')\n self.model.apply(lambda module: setattr(module, 'debug', mode))\n\n def show_profile_info(self, per_iter=False, sortby=None, limit=10, parse=False):\n \"\"\" Show stored profiling information with varying levels of details. \"\"\"\n if (self.profile_info is None) or parse:\n self._parse_profilers()\n\n if self.device.type == 'cpu':\n columns = ['ncalls', 'CPU_tottime', 'CPU_cumtime', 'CPU_tottime_avg']\n if sortby is None:\n sortby = ('CPU_tottime', 'sum') if per_iter is False else 'CPU_tottime'\n else:\n columns = ['ncalls', 'CUDA_cumtime', 'CUDA_cumtime_avg']\n if sortby is None:\n sortby = ('CUDA_cumtime', 'sum') if per_iter is False else 'CUDA_cumtime'\n\n if per_iter is False:\n aggs = {key: ['sum', 'mean', 'max'] for key in columns}\n result = (self.profile_info.reset_index().groupby(['name']).agg(aggs)\n .sort_values(sortby, ascending=False)[:limit])\n else:\n result = (self.profile_info.reset_index().set_index(['iter', 'name'])[columns]\n .sort_values(['iter', sortby], ascending=[True, False])\n .groupby(level=0).apply(lambda df: df[:limit]).droplevel(0))\n return result\n\n def _parse_profilers(self):\n us_in_s = 1000.0 * 1000.0\n\n indices, values = [], []\n for i, profiler in enumerate(self.profilers):\n for evt in profiler.function_events.key_averages():\n indices.append((i, evt.key))\n row_dict = {\n 'ncalls': evt.count,\n 'CPU_tottime': evt.self_cpu_time_total / us_in_s,\n 'CPU_cumtime': evt.cpu_time_total / us_in_s,\n 'CUDA_cumtime': evt.cuda_time_total / us_in_s,\n }\n values.append(row_dict)\n multiindex = pd.MultiIndex.from_tuples(indices, names=['iter', 'name'])\n\n self.profile_info = pd.DataFrame(values, index=multiindex,\n columns=['ncalls', 'CPU_tottime', 'CPU_cumtime', 'CUDA_cumtime'])\n self.profile_info['CPU_tottime_avg'] = self.profile_info['CPU_tottime'] / self.profile_info['ncalls']\n self.profile_info['CUDA_cumtime_avg'] = self.profile_info['CUDA_cumtime'] / self.profile_info['ncalls']\n", "\"\"\" Layer to combine multiple inputs into one tensor. 
\"\"\"\nimport torch\nfrom torch import nn\nfrom torchvision.transforms.functional import center_crop\n\nfrom ..utils import get_shape, get_num_channels, get_num_dims\n\n\n\n\nclass Combine(nn.Module):\n \"\"\" Combine list of tensor into one.\n For each operation, we call its initialization ('*_initialization' methods) at module init,\n then use its forward ('*_forward' methods) for applying the operation.\n\n Parameters\n ----------\n inputs : sequence of torch.Tensors\n Tensors to combine.\n\n op : str or callable\n If callable, then operation to be applied to the list of inputs.\n If 'concat', 'cat', '|', then inputs are concatenated along channels axis.\n If 'sum', '+', then inputs are summed.\n If 'mul', '*', then inputs are multiplied.\n If 'mean', then inputs are averaged.\n If 'drop_path', then inputs are summed with probability:\n for each batch item, there is a chance to not add anything.\n \"\"\"\n #pylint: disable=attribute-defined-outside-init\n OPS = {\n 'concat': ['concat', 'cat', '|'],\n 'sum': ['sum', 'plus', '+'],\n 'mul': ['multi', 'mul', '*'],\n 'mean': ['average', 'avg', 'mean'],\n 'drop_path': ['drop_path', 'droppath', 'dp', '!']\n }\n OPS = {alias: method for method, aliases in OPS.items() for alias in aliases}\n\n def __init__(self, inputs=None, op='concat', force_resize=None, leading_index=0, **kwargs):\n super().__init__()\n self.name = op\n self.kwargs = kwargs\n self.idx = leading_index\n\n if self.idx != 0:\n inputs = inputs[:]\n inputs[0], inputs[self.idx] = inputs[self.idx], inputs[0]\n self.input_shapes, self.resized_shapes, self.output_shapes = None, None, None\n self.input_ids, self.after_ids = None, None\n\n if op in self.OPS:\n op_name = self.OPS[op]\n self.op_name = op_name\n if hasattr(self, f'{op_name}_initialization'):\n getattr(self, f'{op_name}_initialization')(inputs, **kwargs)\n\n self.op = getattr(self, f'{op_name}_forward')\n self.force_resize = force_resize if force_resize is not None else True\n elif callable(op):\n self.op_name = op.__name__\n self.op = op\n self.force_resize = force_resize if force_resize is not None else False\n else:\n raise ValueError(f'Combine op must be a callable or one from {list(self.OPS.keys())}, got {op} instead!')\n\n def forward(self, inputs):\n # Inputs\n self.input_ids = [id(item) for item in inputs]\n if self.idx != 0:\n inputs = inputs[:]\n inputs[0], inputs[self.idx] = inputs[self.idx], inputs[0]\n self.after_ids = [id(item) for item in inputs]\n self.input_shapes = get_shape(inputs)\n\n # Resize\n if self.force_resize:\n inputs = self.spatial_resize(inputs)\n self.resized_shapes = get_shape(inputs)\n\n # Outputs\n output = self.op(inputs)\n self.output_shapes = get_shape(output)\n return output\n\n def extra_repr(self):\n \"\"\" Report shapes before and after combination to a repr. \"\"\"\n res = f'op={\"callable \" if not isinstance(self.name, str) else \"\"}{self.op_name}'\n res += f', leading_idx={self.idx}, force_resize={self.force_resize}'\n for key, value in self.kwargs.items():\n res += f', {key}={value}'\n\n if getattr(self, 'verbosity', 10) > 2:\n res += f',\\n input_shapes={self.input_shapes}'\n\n if self.force_resize:\n res += f',\\nresized_shapes={self.resized_shapes}'\n\n res += f',\\n output_shapes={self.output_shapes}'\n\n if getattr(self, 'extra', False):\n res += f',\\ninput_ids={self.input_ids}'\n res += f',\\nafter_ids={self.after_ids}'\n return res\n\n\n def spatial_resize(self, inputs):\n \"\"\" Force the same shapes of the inputs, if needed. 
\"\"\"\n shape_ = get_shape(inputs[0])\n dim_ = get_num_dims(inputs[0])\n spatial_shape_ = shape_[-dim_:]\n\n resized = [inputs[0]]\n for item in inputs[1:]:\n shape = get_shape(item)\n dim = get_num_dims(item)\n spatial_shape = shape[-dim:]\n if dim > 0 and spatial_shape != tuple([1]*dim) and spatial_shape != spatial_shape_:\n item = center_crop(item, get_shape(inputs[0])[2:])\n resized.append(item)\n return resized\n\n def concat_forward(self, inputs):\n return torch.cat(inputs, dim=1)\n\n def sum_forward(self, inputs):\n \"\"\" Addition with broadcasting. \"\"\"\n result = 0\n for item in inputs:\n result = result + item\n return result\n\n def mul_forward(self, inputs):\n \"\"\" Multiplication with broadcasting. \"\"\"\n result = 1\n for item in inputs:\n result = result * item\n return result\n\n def mean_forward(self, inputs):\n return torch.mean(inputs)\n\n\n def drop_path_initialization(self, inputs, drop_prob=0.0, scale=True, layer_scale=1e-6, **kwargs):\n \"\"\" Initializa drop path: save supplied args and create trainable parameter. \"\"\"\n _ = kwargs\n self.drop_prob = drop_prob\n self.scale = scale\n\n if layer_scale != 0.0:\n x = inputs[1]\n channels = get_num_channels(x)\n gamma_shape = (1, channels) + (1,) * (x.ndim - 2)\n self.gamma = nn.Parameter(layer_scale * torch.ones(gamma_shape, device=x.device), requires_grad=True)\n else:\n self.gamma = None\n\n def drop_path_forward(self, inputs):\n \"\"\" Drop some of the batch items in the second tensor, multiply it by trainable parameter, add. \"\"\"\n inputs, x = inputs\n\n # DropPath: drop information about some of the samples\n if self.drop_prob == 0.0 or not self.training:\n pass\n else:\n keep_prob = 1 - self.drop_prob\n shape = (x.shape[0], ) + (1,) * (x.ndim - 1)\n mask = x.new_empty(shape).bernoulli_(keep_prob)\n\n if self.scale:\n mask.div_(keep_prob)\n x = x * mask\n\n # LayerScale\n x = x if self.gamma is None else self.gamma * x\n\n # Residual\n x = inputs + x\n return x\n" ]
[ [ "torch.nn.functional.softmax", "torch.load", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "torch.cuda.amp.autocast", "numpy.concatenate", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "torch.device", "torch.from_numpy", "torch.optim.swa_utils.SWALR", "torch.nn.functional.softplus", "torch.sigmoid", "torch.cuda.amp.GradScaler", "torch.cuda.device_count", "torch.optim.swa_utils.AveragedModel", "numpy.random.random", "torch.nn.DataParallel", "torch.autograd.profiler.profile" ], [ "torch.mean", "torch.ones", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KonstantinUshenin/biofusion
[ "89c73aa8fdb76f10c7264959e0813f89f0c77765" ]
[ "test/shift_elimination.py" ]
[ "import unittest\nfrom pandas.testing import *\n\nimport pandas as pd\nimport numpy as np\nfrom biofusion.eliminators import *\n\nclass MyTestCase(unittest.TestCase):\n\n def setUp(self):\n self.data1 = pd.DataFrame({\"col1\": [1., 1., 1, 1],\n \"col2\": [1., 3., 3, 1],\n \"col3\": [-3., 1., 1, -3]})\n self.data2 = pd.DataFrame({\"col1\": [10., 10, 10, 10],\n \"col2\": [10., 30, 30, 10],\n \"col3\": [-30., 10, 10, -30]})\n\n def test_default_concatenation(self):\n result = pd.concat([self.data1, self.data2], ignore_index=True)\n expected = pd.read_json('{\"col1\":{\"0\":1.0,\"1\":1.,\"2\":1.,\"3\":1,\"4\":10,\"5\":10,\"6\":10,\"7\":10},'\n '\"col2\":{\"0\":1.0,\"1\":3.,\"2\":3.,\"3\":1,\"4\":10,\"5\":30,\"6\":30,\"7\":10},'\n '\"col3\":{\"0\":-3.0,\"1\":1.,\"2\":1.,\"3\":-3,\"4\":-30,\"5\":10,\"6\":10,\"7\":-30}}')\n\n assert_frame_equal(expected, result, check_dtype=False)\n\n\n def test_concatenation_without_data_change(self):\n pipeline = ShiftEliminator()\n pipeline.ds.add(self.data1)\n pipeline.ds.add(self.data2)\n result = pipeline.result()\n expected = pd.read_json('{\"col1\":{\"0\":1.,\"1\":1,\"2\":1,\"3\":1,\"4\":10,\"5\":10,\"6\":10,\"7\":10},'\n '\"col2\":{\"0\":1.,\"1\":3,\"2\":3,\"3\":1,\"4\":10,\"5\":30,\"6\":30,\"7\":10},'\n '\"col3\":{\"0\":-3.,\"1\":1,\"2\":1,\"3\":-3,\"4\":-30,\"5\":10,\"6\":10,\"7\":-30}}')\n\n assert_frame_equal(expected, result, check_dtype=False)\n\n def test_concatenation_fuse_with_mean_substraction_using_substraction_to_zero_mean_strategy(self):\n pipeline = ShiftEliminator()\n pipeline.ds.add(self.data1)\n pipeline.ds.add(self.data2)\n pipeline.fuse.mean_substraction(strategy='substraction_to_zero_mean')\n result = pipeline.result()\n expected = pd.read_json('{\"col1\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0,\"4\":0.0,\"5\":0.0,\"6\":0.0,\"7\":0.0},'\n '\"col2\":{\"0\":-1.0,\"1\":1.0,\"2\":1.0,\"3\":-1.0,\"4\":-10.0,\"5\":10.0,\"6\":10.0,\"7\":-10.0},'\n '\"col3\":{\"0\":-2.0,\"1\":2.0,\"2\":2.0,\"3\":-2.0,\"4\":-20.0,\"5\":20.0,\"6\":20.0,\"7\":-20.0}}')\n\n assert_frame_equal(expected, result, check_dtype=False)\n\n def test_concatenation_fuse_with_mean_substraction_using_substraction_of_average_mean_strategy(self):\n pipeline = ShiftEliminator()\n pipeline.ds.add(self.data1)\n pipeline.ds.add(self.data2)\n pipeline.fuse.mean_substraction(strategy = 'substraction_of_average_mean')\n result = pipeline.result()\n expected = pd.read_json('{\"col1\":{\"0\":-4.5,\"1\":-4.5,\"2\":-4.5,\"3\":-4.5,\"4\":4.5,\"5\":4.5,\"6\":4.5,\"7\":4.5},'\n '\"col2\":{\"0\":-10.0,\"1\":-8.0,\"2\":-8.0,\"3\":-10.0,\"4\":-1.0,\"5\":19.0,\"6\":19.0,\"7\":-1.0},'\n '\"col3\":{\"0\":2.5,\"1\":6.5,\"2\":6.5,\"3\":2.5,\"4\":-24.5,\"5\":15.5,\"6\":15.5,\"7\":-24.5}}')\n\n assert_frame_equal(expected, result, check_dtype=False)\n\n def test_concatenation_fuse_with_mean_substraction_using_mean_normalization_strategy(self):\n pipeline = ShiftEliminator()\n pipeline.ds.add(self.data1)\n pipeline.ds.add(self.data2)\n pipeline.fuse.mean_substraction(strategy = 'division_to_one_mean')\n result = pipeline.result()\n expected = pd.read_json('{\"col1\":{\"0\":0.1818181818,\"1\":0.1818181818,\"2\":0.1818181818,\"3\":0.1818181818,\"4\":1.8181818182,\"5\":1.8181818182,\"6\":1.8181818182,\"7\":1.8181818182},'\n '\"col2\":{\"0\":0.0909090909,\"1\":0.2727272727,\"2\":0.2727272727,\"3\":0.0909090909,\"4\":0.9090909091,\"5\":2.7272727273,\"6\":2.7272727273,\"7\":0.9090909091},'\n 
'\"col3\":{\"0\":0.5454545455,\"1\":-0.1818181818,\"2\":-0.1818181818,\"3\":0.5454545455,\"4\":5.4545454545,\"5\":-1.8181818182,\"6\":-1.8181818182,\"7\":5.4545454545}}')\n\n assert_frame_equal(expected, result, check_dtype=False)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.concat", "pandas.read_json", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
haesleinhuepf/dexp
[ "2ea84f3db323724588fac565fae56f0d522bc5ca", "2ea84f3db323724588fac565fae56f0d522bc5ca", "2ea84f3db323724588fac565fae56f0d522bc5ca" ]
[ "dexp/processing/fusion/demo/demo_fusion.py", "dexp/datasets/operations/deconv.py", "dexp/processing/filters/demo/demo_fft_convolve.py" ]
[ "import numpy\nfrom arbol import asection\n\nfrom dexp.datasets.synthetic_datasets import generate_fusion_test_data\nfrom dexp.processing.fusion.dct_fusion import fuse_dct_nd\nfrom dexp.processing.fusion.dft_fusion import fuse_dft_nd\nfrom dexp.processing.fusion.tg_fusion import fuse_tg_nd\nfrom dexp.utils.backends import Backend, CupyBackend, NumpyBackend\n\n\ndef demo_fusion_numpy():\n with NumpyBackend():\n demo_fusion()\n\n\ndef demo_fusion_cupy():\n try:\n with CupyBackend():\n demo_fusion(include_dct=False, length_xy=512)\n except (ModuleNotFoundError, NotImplementedError):\n print(\"Cupy module not found! ignored!\")\n\n\ndef demo_fusion(include_dct=True, length_xy=120):\n with asection(\"generate data\"):\n image_gt, image_lowq, blend_a, blend_b, image1, image2 = generate_fusion_test_data(\n add_noise=True, length_xy=length_xy, length_z_factor=4\n )\n image_gt = Backend.to_numpy(image_gt)\n\n with asection(\"dct fusion\"):\n image_fused_dct = fuse_dct_nd(image1, image2) if include_dct else numpy.zeros_like(image_gt)\n image_fused_dct = Backend.to_numpy(image_fused_dct)\n\n error_dct = numpy.median(numpy.abs(image_gt - image_fused_dct))\n print(f\"error_dct={error_dct}\")\n\n with asection(\"dft fusion\"):\n image_fused_dft = fuse_dft_nd(image1, image2)\n image_fused_dft = Backend.to_numpy(image_fused_dft)\n error_dft = numpy.median(numpy.abs(image_gt - image_fused_dft))\n print(f\"error_dft={error_dft}\")\n\n with asection(\"tg fusion\"):\n image_fused_tg = fuse_tg_nd(image1, image2)\n image_fused_tg = Backend.to_numpy(image_fused_tg)\n error_tg = numpy.median(numpy.abs(image_gt - image_fused_tg))\n print(f\"error_tg={error_tg}\")\n\n from napari import Viewer, gui_qt\n\n with gui_qt():\n\n def _c(array):\n return Backend.to_numpy(array)\n\n viewer = Viewer()\n viewer.add_image(_c(image_gt), name=\"image_gt\")\n viewer.add_image(_c(image_lowq), name=\"image_lowq\")\n viewer.add_image(_c(blend_a), name=\"blend_a\")\n viewer.add_image(_c(blend_b), name=\"blend_b\")\n viewer.add_image(_c(image1), name=\"image1\")\n viewer.add_image(_c(image2), name=\"image2\")\n viewer.add_image(_c(image_fused_dct), name=\"image_fused_dct\")\n viewer.add_image(_c(image_fused_dft), name=\"image_fused_dft\")\n viewer.add_image(_c(image_fused_tg), name=\"image_fused_tg\")\n\n\nif __name__ == \"__main__\":\n # demo_fusion_cupy()\n demo_fusion_numpy()\n", "import functools\nfrom pathlib import Path\nfrom typing import List, Optional, Sequence, Tuple\n\nimport dask\nimport numpy\nimport scipy\nfrom arbol.arbol import aprint, asection\nfrom dask.distributed import Client\nfrom dask_cuda import LocalCUDACluster\n\nfrom dexp.datasets import BaseDataset\nfrom dexp.optics.psf.standard_psfs import nikon16x08na, olympus20x10na\nfrom dexp.processing.deconvolution import (\n admm_deconvolution,\n lucy_richardson_deconvolution,\n)\nfrom dexp.processing.filters.fft_convolve import fft_convolve\nfrom dexp.processing.utils.scatter_gather_i2i import scatter_gather_i2i\nfrom dexp.utils.backends import Backend, BestBackend\nfrom dexp.utils.slicing import slice_from_shape\n\n\ndef dataset_deconv(\n dataset: BaseDataset,\n dest_path: str,\n channels: Sequence[str],\n slicing,\n store: str = \"dir\",\n compression: str = \"zstd\",\n compression_level: int = 3,\n overwrite: bool = False,\n tilesize: Optional[Tuple[int]] = None,\n method: str = \"lr\",\n num_iterations: int = 16,\n max_correction: int = 16,\n power: float = 1,\n blind_spot: int = 0,\n back_projection: Optional[str] = None,\n wb_order: int = 5,\n 
psf_objective: str = \"nikon16x08na\",\n psf_na: float = 0.8,\n psf_dxy: float = 0.485,\n psf_dz: float = 2,\n psf_xy_size: int = 17,\n psf_z_size: int = 17,\n psf_show: bool = False,\n scaling: Optional[Tuple[float]] = None,\n workers: int = 1,\n workersbackend: str = \"\",\n devices: Optional[List[int]] = None,\n check: bool = True,\n stop_at_exception: bool = True,\n):\n\n from dexp.datasets import ZDataset\n\n mode = \"w\" + (\"\" if overwrite else \"-\")\n dest_dataset = ZDataset(dest_path, mode, store, parent=dataset)\n\n # Default tile size:\n if tilesize is None:\n tilesize = 320 # very conservative\n\n # Scaling default value:\n if scaling is None:\n scaling = (1, 1, 1)\n sz, sy, sx = scaling\n aprint(f\"Input images will be scaled by: (sz,sy,sx)={scaling}\")\n\n # CUDA DASK cluster\n cluster = LocalCUDACluster(CUDA_VISIBLE_DEVICES=devices)\n client = Client(cluster)\n aprint(\"Dask Client\", client)\n\n lazy_computation = []\n\n for channel in dataset._selected_channels(channels):\n array = dataset.get_array(channel)\n\n aprint(f\"Slicing with: {slicing}\")\n out_shape, volume_slicing, time_points = slice_from_shape(array.shape, slicing)\n\n out_shape = tuple(int(round(u * v)) for u, v in zip(out_shape, (1,) + scaling))\n dtype = numpy.float16 if method == \"admm\" else array.dtype\n\n # Adds destination array channel to dataset\n dest_array = dest_dataset.add_channel(\n name=channel, shape=out_shape, dtype=dtype, codec=compression, clevel=compression_level\n )\n\n # This is not ideal but difficult to avoid right now:\n sxy = (sx + sy) / 2\n\n # PSF paraneters:\n psf_kwargs = {\n \"dxy\": psf_dxy / sxy,\n \"dz\": psf_dz / sz,\n \"xy_size\": int(round(psf_xy_size * sxy)),\n \"z_size\": int(round(psf_z_size * sz)),\n }\n\n aprint(f\"psf_kwargs: {psf_kwargs}\")\n\n # NA override:\n if psf_na is not None:\n aprint(f\"Numerical aperture overridden to a value of: {psf_na}\")\n psf_kwargs[\"NA\"] = psf_na\n\n # choose psf from detection optics:\n if psf_objective == \"nikon16x08na\":\n psf_kernel = nikon16x08na(**psf_kwargs)\n elif psf_objective == \"olympus20x10na\":\n psf_kernel = olympus20x10na(**psf_kwargs)\n elif Path(psf_objective).exists():\n psf_kernel = numpy.load(psf_objective)\n if sz != 1.0 or sy != 1.0 or sx != 1.0:\n psf_kernel = scipy.ndimage.interpolation.zoom(psf_kernel, zoom=(sz, sy, sx), order=1)\n psf_z_size = psf_kernel.shape[0] + 10\n psf_xy_size = max(psf_kernel.shape[1:]) + 10\n else:\n raise RuntimeError(f\"Object/path {psf_objective} not found.\")\n\n # usefull for debugging:\n if psf_show:\n import napari\n\n viewer = napari.Viewer(title=\"DEXP | viewing PSF with napari\", ndisplay=3)\n viewer.add_image(psf_kernel)\n napari.run()\n\n margins = max(psf_xy_size, psf_z_size)\n\n if method == \"lr\":\n normalize = False\n convolve = functools.partial(fft_convolve, in_place=False, mode=\"reflect\", internal_dtype=numpy.float32)\n\n def deconv(image):\n min_value = image.min()\n max_value = image.max()\n\n return lucy_richardson_deconvolution(\n image=image,\n psf=psf_kernel,\n num_iterations=num_iterations,\n max_correction=max_correction,\n normalise_minmax=(min_value, max_value),\n power=power,\n blind_spot=blind_spot,\n blind_spot_mode=\"median+uniform\",\n blind_spot_axis_exclusion=(0,),\n wb_order=wb_order,\n back_projection=back_projection,\n convolve_method=convolve,\n )\n\n elif method == \"admm\":\n normalize = True\n\n def deconv(image):\n out = admm_deconvolution(\n image,\n psf=psf_kernel,\n iterations=num_iterations,\n derivative=2,\n )\n return 
out\n\n else:\n raise ValueError(f\"Unknown deconvolution mode: {method}\")\n\n @dask.delayed\n def process(i):\n tp = time_points[i]\n try:\n with asection(f\"Deconvolving time point for time point {i}/{len(time_points)}\"):\n with asection(f\"Loading channel: {channel}\"):\n tp_array = numpy.asarray(array[tp][volume_slicing])\n\n with BestBackend(exclusive=True, enable_unified_memory=True):\n\n if sz != 1.0 or sy != 1.0 or sx != 1.0:\n with asection(f\"Applying scaling {(sz, sy, sx)} to image.\"):\n sp = Backend.get_sp_module()\n tp_array = Backend.to_backend(tp_array)\n tp_array = sp.ndimage.interpolation.zoom(tp_array, zoom=(sz, sy, sx), order=1)\n tp_array = Backend.to_numpy(tp_array)\n\n with asection(\n f\"Deconvolving image of shape: {tp_array.shape}, with tile size: {tilesize}, \"\n + \"margins: {margins} \"\n ):\n aprint(f\"Number of iterations: {num_iterations}, back_projection:{back_projection}, \")\n tp_array = scatter_gather_i2i(\n deconv,\n tp_array,\n tiles=tilesize,\n margins=margins,\n normalise=normalize,\n internal_dtype=dtype,\n )\n\n with asection(\"Moving array from backend to numpy.\"):\n tp_array = Backend.to_numpy(tp_array, dtype=dest_array.dtype, force_copy=False)\n\n with asection(\n f\"Saving deconvolved stack for time point {i}, shape:{tp_array.shape}, dtype:{array.dtype}\"\n ):\n dest_dataset.write_stack(channel=channel, time_point=i, stack_array=tp_array)\n\n aprint(f\"Done processing time point: {i}/{len(time_points)} .\")\n\n except Exception as error:\n aprint(error)\n aprint(f\"Error occurred while processing time point {i} !\")\n import traceback\n\n traceback.print_exc()\n\n if stop_at_exception:\n raise error\n\n for i in range(len(time_points)):\n lazy_computation.append(process(i))\n\n dask.compute(*lazy_computation)\n\n # Dataset info:\n aprint(dest_dataset.info())\n\n # Check dataset integrity:\n if check:\n dest_dataset.check_integrity()\n\n # close destination dataset:\n dest_dataset.close()\n client.close()\n", "import numpy\nfrom scipy.ndimage import convolve\nfrom skimage.data import camera\nfrom skimage.util import random_noise\n\nfrom dexp.processing.filters.fft_convolve import fft_convolve\nfrom dexp.utils.backends import Backend, CupyBackend, NumpyBackend\n\n\ndef demo_fft_convolve_numpy():\n with NumpyBackend():\n _demo_fft_convolve()\n\n\ndef demo_fft_convolve_cupy():\n try:\n with CupyBackend():\n _demo_fft_convolve()\n except ModuleNotFoundError:\n print(\"Cupy module not found! Test passes nevertheless!\")\n\n\ndef _demo_fft_convolve():\n image = camera().astype(numpy.float32) / 255\n noisy = random_noise(image, mode=\"gaussian\", var=0.005, seed=0, clip=False)\n noisy = random_noise(noisy, mode=\"s&p\", amount=0.03, seed=0, clip=False).astype(numpy.float32)\n\n psf = numpy.asarray([[1, 1, 1], [1, 0, 1], [1, 1, 1]]).astype(numpy.float32)\n\n result = fft_convolve(noisy, psf)\n reference_result = convolve(noisy, psf)\n\n from napari import Viewer, gui_qt\n\n with gui_qt():\n\n def _c(array):\n return Backend.to_numpy(array)\n\n viewer = Viewer()\n viewer.add_image(_c(image), name=\"image\")\n viewer.add_image(_c(noisy), name=\"noisy\")\n viewer.add_image(_c(psf), name=\"psf\")\n viewer.add_image(_c(reference_result), name=\"reference_result\")\n viewer.add_image(_c(result), name=\"result\")\n\n\nif __name__ == \"__main__\":\n demo_fft_convolve_cupy()\n demo_fft_convolve_numpy()\n" ]
[ [ "numpy.zeros_like", "numpy.abs" ], [ "numpy.asarray", "numpy.load", "scipy.ndimage.interpolation.zoom" ], [ "numpy.asarray", "scipy.ndimage.convolve" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
partnernetsoftware/openlab
[ "faa4e58486a7bc4140ad3d56545bfb736cb86696" ]
[ "experiments/torch/torch_eg5.py" ]
[ "# https://blog.csdn.net/rizero/article/details/104244454\n# 股票成交量预测(Pytorch基础练习)\n\n## depends\n\nimport pandas as pd\n\nimport torch\nimport torch.nn\nimport torch.optim\n\nfrom debug import ptf_tensor\n\n## raw data\n\nurl = 'C:/Users/HUAWEI/Desktop/深度学习/Blog附带代码/FB.csv'\ndf = pd.read_csv(url, index_col=0) #读取全部数据\nindex_col = ['col_1','col_2'] # 读取指定的几列\nerror_bad_lines = False # 当某行数据有问题时,不报错,直接跳过,处理脏数据时使用\nna_values = 'NULL' # 将NULL识别为空值\n\n## data clean\n\n#数据集的处理\n'''\n因为数据是日期新的占index靠前\n'''\ntrain_start, train_end=sum(df.index>='2017'),sum(df.index>='2013') \ntest_start, test_end=sum(df.index>='2018'),sum(df.index>='2017')\n\nn_total_train = train_end -train_start\nn_total_test = test_end -test_start\n\ns_mean=df[train_start:train_end].mean() #计算均值,为归一化做准备\ns_std=df[train_start:train_end].std() # 计算标准差,为归一化做准备\n\nn_features=5 # 五个特征量\n\n#选取col from 0-4 也就是Open,High,Low,Close,Volume,并进行归一化\ndf_feature=((df-s_mean)/s_std).iloc[:,:n_features] \n\ns_labels=(df['Volume']<df['Volume'].shift(1)).astype(int)\n##.shift(1)把数据下移一位\n#用法参见:https://www.zhihu.com/question/264963268\n\n#label建立的标准:假如今天次日的成交量大于当日的成交量,标签=1,反之=0\n\n\n## alter format\n\nx=torch.tensor(df_feature.values,dtype=torch.float32) # size: [m,5]\nptf_tensor(x,'x')\ny=torch.tensor(s_labels.values.reshape(-1,1),dtype=torch.float32) # size [m,1]\nptf_tensor(y,'y')\n\n## build nn\n\nfc=torch.nn.Linear(n_features,1)\nweights,bias=fc.parameters()\ncriterion=torch.nn.BCEWithLogitsLoss()\noptimizer=torch.optim.Adam(fc.parameters())\n\n## train w+ check\n\nn_steps=20001 #迭代20001次\n\nfor step in range(n_steps):\n if step:\n optimizer.zero_grad() # 梯度清零,不然会叠加的\n loss.backward() # 计算参数的梯度\n optimizer.step() # 根据参数梯度结果迭代推出新的参数\n \n pred=fc(x) # 计算预测结果\n loss=criterion(pred[train_start:train_end],y[train_start:train_end]) #计算预测的损失\n\n if step % 500==0:\n #print('#{}, 损失 = {:g}'.format(step, loss)) \n output = (pred > 0)\n correct = (output == y.bool())\n n_correct_train = correct[train_start:train_end].sum().item() #计算训练正确的数量\n n_correct_test = correct[test_start:test_end].sum().item() #计算测试正确的数量\n accuracy_train = n_correct_train / n_total_train #计算精确度\n accuracy_test = n_correct_test / n_total_test\n print('训练集准确率 = {}, 测试集准确率 = {}'.format(accuracy_train, accuracy_test))\n \n## \n\n\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "pandas.read_csv", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ark0015/GWDetectorDesignToolkit
[ "6ee2f7a633c973ea10b450257b1ad4dbd0323738" ]
[ "gwent/vendor/pygwinc_clone/gwinc/nb.py" ]
[ "import os\nimport logging\nimport itertools\nimport importlib\nimport importlib.util\nimport collections\nimport numpy as np\nimport scipy.interpolate\n\n\ndef quadsum(data):\n \"\"\"Calculate quadrature sum of list of data arrays.\n\n Provided data are assumed to be power-referred, so this is a\n simple point-by-point sum.\n\n NaNs in sum elements do not contribute to sum.\n\n \"\"\"\n return np.nansum(data, 0)\n\n\nclass BudgetItem:\n \"\"\"GWINC BudgetItem class\"\"\"\n\n def load(self):\n \"\"\"Overload method for initial loading of static data.\"\"\"\n return None\n\n def update(self, **kwargs):\n \"\"\"Overload method for updating data needed to calculate final PSD.\n\n By default any keyword arguments provided are written directly\n as attribute variables (as with __init__).\n\n \"\"\"\n for key, val in kwargs.items():\n setattr(self, key, val)\n\n def calc(self):\n \"\"\"Overload method for calculation of final PSD.\n\n Should return an array of power-referenced values evaluated at\n all evaluation frequencies (self.freq).\n\n \"\"\"\n return None\n\n ##########\n\n def __init__(self, freq, **kwargs):\n \"\"\"Initialize budget item.\n\n Primary argument is the evaluation frequency array. Any\n keyword arguments provided are simple written as attribute\n variables in the initialized object.\n\n \"\"\"\n self.__freq = freq\n for key, val in kwargs.items():\n setattr(self, key, val)\n\n @property\n def name(self):\n \"\"\"\"Name of this BudgetItem class.\"\"\"\n return self.__class__.__name__\n\n def __str__(self):\n # FIXME: provide info on internal state (load/update/calc/etc.)\n return \"<{} {}>\".format(\n \" \".join([c.__name__ for c in self.__class__.__bases__]),\n self.name,\n )\n\n @property\n def freq(self):\n \"\"\"Evaluation frequency array supplied at initialization.\"\"\"\n return self.__freq\n\n def interpolate(self, freq, data):\n \"\"\"Interpolate data to the evaluation frequencies.\"\"\"\n func = scipy.interpolate.interp1d(\n freq,\n data,\n kind=\"nearest\",\n copy=False,\n assume_sorted=True,\n bounds_error=False,\n fill_value=np.nan,\n )\n return func(self.freq)\n\n\nclass Calibration(BudgetItem):\n \"\"\"GWINC Calibration class\n\n BudgetItem that represents a calibration transfer function for a\n Noise. The calc() method should return a transfer function\n amplitude array evaluated at the evaluation frequencies supplied\n at initialization and available in the `freq` array attribute\n (self.freq).\n\n \"\"\"\n\n def __call__(self, data):\n \"\"\"Calibrate input data.\n\n Returns calibrated version of input data array,\n e.g. point-by-point product of data and calibration arrays.\n\n \"\"\"\n cal = self.calc()\n assert (\n data.shape == cal.shape\n ), \"data shape does not match calibration ({} != {})\".format(\n data.shape, cal.shape\n )\n return data * cal\n\n\nclass Noise(BudgetItem):\n \"\"\"GWINC Noise class\n\n BudgetItem that represents a PSD noise calculation. The calc()\n method should return the noise PSD spectrum array evaluated at the\n evaluation frequencies supplied at initialization and available in\n the `freq` array attribute (self.freq).\n\n \"\"\"\n\n style = {}\n \"\"\"Trace plot style dictionary\"\"\"\n\n def calc_trace(self, calibration=None, calc=True):\n \"\"\"Returns noise (PSD, style) tuple.\n\n If `calibration` is not None it is assumed to be a\n len(self.freq) array that will be multiplied to the output\n PSD.\n\n If calc=False, the noise will not be calculated and the PSD\n will be None. 
This is useful for just getting style the\n style.\n\n \"\"\"\n if calc:\n data = self.calc()\n if calibration is not None:\n data *= calibration\n else:\n data = None\n return data, self.style\n\n\nclass Budget(Noise):\n \"\"\"GWINC Budget class\n\n This is a Noise that represents the budget of multiple sub noises.\n\n The `noises` attribute of this class should be list constituent\n Noise classes. Each element can be either a single Noise class,\n or a tuple of (Noise, Calibration) classes, e.g.:\n\n noises = [\n Thermal,\n (Shot, Sensing),\n ]\n\n When this object is initialized, all sub noises and calibrations\n are initialized. Pre-defined load() and update() methods call the\n load() and update() methods of all sub noises and calibrations.\n When calc() is called, the PSD is calculated for all sub noises,\n the relevant calibration is evaluated and applied, and the\n quadrature sum of all calibrated consituent noises is returned.\n\n Additionally a `references` attribute may be definied, similar to\n the `noises` attribute described above except that the specified\n noises do not contribute to the overall budget total.\n\n NOTE: an `ifo` attribute is always passed as an initialization\n argument to sub noises.\n\n \"\"\"\n\n noises = []\n \"\"\"List of constituent noise classes\"\"\"\n\n references = []\n \"\"\"List of reference nosie classes\"\"\"\n\n def __init__(self, *args, noises=None, **kwargs):\n \"\"\"Initialize Budget object.\n\n See BudgetItem for base initialization arguments.\n\n If a `noises` keyword argument is provided it should be an\n iterable of noise names (constituent or reference) which will\n be used to filter the noises initialized in this budget.\n\n \"\"\"\n super().__init__(*args, **kwargs)\n # store args and kwargs for later use\n self.args = args\n self.kwargs = kwargs\n # FIXME: special casing the IFO here, in case it's defined as\n # a class attribute rather than passed at initialization. 
we\n # do this because we're not defining a standard way to extract\n # IFO variables that get passed around in a reasonable way.\n # how can we clarify this?\n if \"ifo\" not in kwargs and getattr(self, \"ifo\", None):\n self.kwargs[\"ifo\"] = getattr(self, \"ifo\", None)\n # all noise objects keyed by name\n self._noise_objs = collections.OrderedDict()\n # all cal objects keyed by name\n self._cal_objs = {}\n # noise to calibration mapping\n self._noise_cal = {}\n # set of all constituent budget noise names\n self._budget_noises = set()\n # initialize all noise objects\n for nc in self.noises:\n name, noise_obj, cal = self.__init_noise(nc)\n if noises and name not in noises:\n continue\n self.__add_noise(noise_obj, cal)\n self._budget_noises.add(name)\n for nc in self.references:\n name, noise_obj, cal = self.__init_noise(nc)\n if noises and name not in noises:\n continue\n self.__add_noise(noise_obj, cal)\n # error if requested noise is not present\n if noises:\n sset = set(noises)\n nset = set([name for name in self._noise_objs.keys()])\n if not sset <= nset:\n raise AttributeError(\n \"unknown noise terms: {}\".format(\" \".join(sset - nset))\n )\n\n def __init_noise(self, nc):\n cal = None\n if isinstance(nc, (list, tuple)):\n noise, cal = nc[:2]\n else:\n noise = nc\n noise_obj = noise(*self.args, **self.kwargs)\n return noise_obj.name, noise_obj, cal\n\n def __add_noise(self, noise_obj, cal):\n logging.debug(\"init {}\".format(noise_obj))\n # instantiate the noise object\n name = noise_obj.name\n self._noise_objs[name] = noise_obj\n if cal is not None:\n # if a cal object is specified, instantiate and store\n cal_obj = cal(*self.args, **self.kwargs)\n if cal_obj.name not in self._cal_objs:\n self._cal_objs[cal_obj.name] = cal_obj\n self._noise_cal[name] = cal_obj.name\n\n def __getitem__(self, name):\n try:\n return self._noise_objs[name]\n except KeyError:\n try:\n return self._cal_objs[name]\n except KeyError:\n raise KeyError(\"unknown noise or cal name '{}\".format(name))\n\n def keys(self):\n \"\"\"Iterate over budget noise names.\"\"\"\n return iter(self._noise_objs.keys())\n\n def values(self):\n \"\"\"Iterate over budget noise objects.\"\"\"\n return iter(self._noise_objs.values())\n\n def items(self):\n \"\"\"Iterate over budget noise (name, object) tuples.\"\"\"\n return iter(self._noise_objs.items())\n\n def __iter__(self):\n return iter(self.keys())\n\n def walk(self):\n \"\"\"Walk recursively through every BudgetItem in the budget.\n\n This includes Noise, Calibration and Budget objects, as well\n as any decendents of Budget objects.\n\n For each leaf item yields a tuple of all ancestor objects,\n e.g.:\n\n (self)\n (self, BudgetItem)\n (self, ChildBudget1)\n (self, ChildBudget1, BudgetItem)\n ...\n\n \"\"\"\n yield (self,)\n for item in itertools.chain(self._cal_objs.values(), self._noise_objs.values()):\n if isinstance(item, Budget):\n for i in item.walk():\n yield (self,) + i\n else:\n yield (self, item)\n\n def load(self):\n \"\"\"Load all noise and cal objects.\"\"\"\n for name, item in itertools.chain(\n self._cal_objs.items(), self._noise_objs.items()\n ):\n logging.debug(\"load {}\".format(item))\n item.load()\n\n def update(self, **kwargs):\n \"\"\"Update all noise and cal objects with supplied kwargs.\"\"\"\n for name, item in itertools.chain(\n self._cal_objs.items(), self._noise_objs.items()\n ):\n logging.debug(\"update {}\".format(item))\n item.update(**kwargs)\n\n def cal_for_noise(self, name):\n \"\"\"Return the calibration object for named 
noise.\"\"\"\n try:\n return self._cal_objs[self._noise_cal[name]]\n except KeyError:\n return None\n\n def calc_noise(self, name):\n \"\"\"Return calibrated individual noise term.\n\n The noise PSD and calibration transfer functions are\n calculated, and the calibrated noise array is returned.\n\n \"\"\"\n noise = self._noise_objs[name]\n nd = noise.calc()\n cal = self.cal_for_noise(name)\n if cal:\n cd = cal.calc()\n return nd * cd\n else:\n return nd\n\n def calc(self):\n \"\"\"Calculate sum of all noises.\"\"\"\n data = [\n self.calc_noise(name)\n for name in self._noise_objs.keys()\n if name in self._budget_noises\n ]\n return quadsum(data)\n\n def calc_trace(self, calibration=None, calc=True):\n \"\"\"Returns a dictionary of noises traces, keyed by noise names.\n\n Values are (data, style) trace tuples (see Noise.calc_trace).\n The key of the budget sum total is 'Total'. The values of sub\n budgets are themselves dictionaries returned from\n calc_trace() of the sub budget.\n\n If `calibration` is not None it is assumed to be a\n len(self.freq) array that will be multiplied to the output\n PSD of the budget and all sub noises.\n\n If calc=False, the noise will not be calculated and the PSD\n will be None. This is useful for just getting style the\n style.\n\n \"\"\"\n # start by creating an empty OrderedDict used for outputing trace data\n # or style info with the following order:\n # references\n # total\n # constituents\n d = collections.OrderedDict()\n # allocate references\n for name, noise in self._noise_objs.items():\n if name in self._budget_noises:\n continue\n d[name] = noise.calc_trace(calc=False)\n # allocate total\n if self._budget_noises:\n d[\"Total\"] = None, self.style\n # allocate constituent\n for name, noise in self._noise_objs.items():\n if name not in self._budget_noises:\n continue\n d[name] = noise.calc_trace(calc=False)\n # if we're not calc'ing, just return the dict now\n if not calc:\n return d\n # calc all noises\n for name, noise in self._noise_objs.items():\n # extract/calc the budget-level calibration for this noise\n cal_obj = self.cal_for_noise(name)\n if cal_obj:\n cal = cal_obj.calc()\n else:\n cal = np.ones_like(self.freq)\n # then multiply by the supplied calibration\n if calibration is not None:\n cal *= calibration\n d[name] = noise.calc_trace(calibration=cal, calc=True)\n # calc budget total\n constituent_data = []\n for name in self._budget_noises:\n if isinstance(d[name], dict):\n data = d[name][\"Total\"][0]\n else:\n data = d[name][0]\n constituent_data.append(data)\n d[\"Total\"] = quadsum(constituent_data), self.style\n return d\n" ]
[ [ "numpy.nansum", "numpy.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lihongqiang/Image2Text
[ "2c8d60a857a63ce516f33263e61313a3bad0695f" ]
[ "im2txt/inference_utils/vocabulary.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vocabulary class for an image-to-text model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport tensorflow as tf\n\n\nclass Vocabulary(object):\n \"\"\"Vocabulary class for an image-to-text model.\"\"\"\n\n def __init__(self,\n vocab_file,\n start_word=\"<S>\",\n end_word=\"</S>\",\n unk_word=\"<UNK>\"):\n \"\"\"Initializes the vocabulary.\n\n Args:\n vocab_file: File containing the vocabulary, where the words are the first\n whitespace-separated token on each line (other tokens are ignored) and\n the word ids are the corresponding line numbers.\n start_word: Special word denoting sentence start.\n end_word: Special word denoting sentence end.\n unk_word: Special word denoting unknown words.\n \"\"\"\n if not tf.gfile.Exists(vocab_file):\n tf.logging.fatal(\"Vocab file %s not found.\", vocab_file)\n tf.logging.info(\"Initializing vocabulary from file: %s\", vocab_file)\n\n with tf.gfile.GFile(vocab_file, mode=\"r\") as f:\n reverse_vocab = list(f.readlines())\n reverse_vocab = [eval(line.split()[0]) for line in reverse_vocab]\n assert start_word in reverse_vocab\n assert end_word in reverse_vocab\n if unk_word not in reverse_vocab:\n reverse_vocab.append(unk_word)\n vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n\n tf.logging.info(\"Created vocabulary with %d words\" % len(vocab))\n\n self.vocab = vocab # vocab[word] = id\n self.reverse_vocab = reverse_vocab # reverse_vocab[id] = word\n\n # Save special word ids.\n self.start_id = vocab[start_word]\n self.end_id = vocab[end_word]\n self.unk_id = vocab[unk_word]\n\n def word_to_id(self, word):\n \"\"\"Returns the integer word id of a word string.\"\"\"\n if word in self.vocab:\n return self.vocab[word]\n else:\n return self.unk_id\n\n def id_to_word(self, word_id):\n \"\"\"Returns the word string of an integer word id.\"\"\"\n if word_id >= len(self.reverse_vocab):\n return self.reverse_vocab[self.unk_id]\n else:\n return self.reverse_vocab[word_id]\n" ]
[ [ "tensorflow.gfile.Exists", "tensorflow.gfile.GFile", "tensorflow.logging.info", "tensorflow.logging.fatal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
yvan674/CurbNet
[ "1bcbfc830a7be9615a51749e2ac691f2e2eb827d" ]
[ "src/network/mce_loss.py" ]
[ "\"\"\"Masked Cross Entropy Loss.\n\nThis custom loss function uses cross entropy loss as well as ground truth\ndata to calculate a loss specific to this use case scenario. It calculates a\nregular cross entropy loss, but additionally heavily penalizes any curb\nclassification that is not around the perimeter of known roads.\n\nThe perimeter around known roads is calculated by using a binary dilation on B.\nB is a b x b matrix, with b being 0.03 * image width.\n\nAuthor:\n Yvan Satyawan <[email protected]>\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport constants\nfrom scipy.ndimage.morphology import binary_dilation\nimport numpy as np\n\n\nclass MCELoss(nn.CrossEntropyLoss):\n def __init__(self, weight_normal=None, weight_penalized=None,\n size_average=None, ignore_index=-100, reduce=None,\n reduction='mean'):\n \"\"\"Cross Entropy loss with a masked applied for different weights.\"\"\"\n super(MCELoss, self).__init__(weight_normal, size_average, reduce,\n reduction)\n self.ignore_index = ignore_index\n self.register_buffer('weight_penalized', weight_penalized)\n\n # Calculate the size of the b matrix.\n self.b_size = int(constants.DIM_WIDTH * 0.05) # Chosen based on\n # manually viewing the dataset\n # B is created later since it will have to depend on the batch size.\n\n def forward(self, input, target):\n \"\"\"Requires target to also have the road mask.\n\n Args:\n input (torch.Tensor): The predicted segmentation\n target (torch.Tensor): The ground truth segmentation. The road mask\n should be given as class k, where the network predicts k\n classes. E.g. given 3 classes (0, 1, 2), class (3) should be the\n road mask.\n \"\"\"\n # Extract the road mask from the target\n mask = torch.zeros(target.shape, dtype=torch.uint8,\n device=constants.DEVICE)\n mask[target == 3] = 1.\n\n # Create b\n b = np.ones((self.b_size, self.b_size))\n\n # Calculate the road perimeter mask\n # After testing, element-wise is significantly faster than a single\n # statement for some reason.\n target_mask = np.zeros(target.shape)\n mask = mask.detach().cpu().numpy()\n\n for i in range(target_mask.shape[0]):\n target_mask[i] = binary_dilation(mask[i], b)\n\n target_mask = torch.from_numpy(target_mask).to(dtype=torch.uint8,\n device=constants.DEVICE)\n # Remove the road so we get only the perimeter\n target_mask[target == 3] = 0\n\n # Turn road back into other for loss classification\n target[target == 3] = 0\n\n # Prepare the mask for the 3 channel input by copying it basically\n # Assuming that the batch is relatively small so this shouldn't take\n # much time even if it is a nested for loop\n input_mask = torch.zeros(input.shape).to(constants.DEVICE)\n for i in range(input.shape[0]):\n for j in range(input.shape[1]):\n input_mask[i, j] = target_mask[i]\n\n # Get the inverted mask as well\n inverted_target_mask = self.flip_tensor(target_mask)\n inverted_input_mask = self.flip_tensor(input_mask)\n\n # Send all the masks to the proper device\n target_mask = target_mask.to(device=constants.DEVICE, dtype=torch.uint8)\n input_mask = input_mask.to(device=constants.DEVICE, dtype=torch.uint8)\n inverted_target_mask = inverted_target_mask.to(device=constants.DEVICE,\n dtype=torch.uint8)\n inverted_input_mask = inverted_input_mask.to(device=constants.DEVICE,\n dtype=torch.uint8)\n\n # Create a single length zero tensor once\n zero = torch.zeros(1).to(device=constants.DEVICE)\n\n # The values within the mask are now selected as well as the inverse.\n # The loss is then calculated 
separately for those in the mask and not\n # in the mask.\n # We use torch.where to preserve the shape of the mask\n\n perimeter_target = torch.where(target_mask, target.long(),\n zero.long())\n perimeter_predicted = torch.where(input_mask, input, zero)\n\n other_target = torch.where(inverted_target_mask, target.long(),\n zero.long())\n other_predicted = torch.where(inverted_input_mask, input, zero)\n\n perimeter_loss = F.cross_entropy(perimeter_predicted, perimeter_target,\n weight=self.weight,\n ignore_index=self.ignore_index,\n reduction=self.reduction)\n other_loss = F.cross_entropy(other_predicted, other_target,\n weight=self.weight_penalized,\n ignore_index=self.ignore_index,\n reduction=self.reduction)\n return perimeter_loss + other_loss\n\n @staticmethod\n def flip_tensor(tensor):\n \"\"\"Flips values of 0 and 1 in a given tensor.\"\"\"\n flipped = tensor.clone()\n flipped[tensor == 0] = 1\n flipped[tensor == 1] = 0\n return flipped\n" ]
[ [ "scipy.ndimage.morphology.binary_dilation", "torch.zeros", "torch.nn.functional.cross_entropy", "torch.from_numpy", "numpy.ones", "torch.where", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
980044579/ctpn.pytorch
[ "7049241f7c94242271c63eb96cbc29a8ad42d11b" ]
[ "ctpn_predict.py" ]
[ "import os\n#os.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport cv2\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\nfrom ctpn_model import CTPN_Model\nfrom ctpn_utils import gen_anchor, bbox_transfor_inv, clip_box, filter_bbox,nms, TextProposalConnectorOriented\nfrom ctpn_utils import resize\nimport config\n\n\nprob_thresh = 0.8\nwidth = 600\n\n#device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\ndevice = torch.device('cpu')\n\n#weights = os.path.join(config.checkpoints_dir, 'trained weights file.pth.tar')\nweights = config.model_path\n\nmodel = CTPN_Model()\nmodel.load_state_dict(torch.load(weights, map_location=device)['model_state_dict'])\nmodel.to(device)\nmodel.eval()\n\n\ndef dis(image):\n cv2.imshow('image', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nfilenames = [os.path.join(config.img_path, file) for file in os.listdir(config.img_path)]\n\nprint(filenames)\n\n\nfor k in range(len(filenames)):\n\n image = cv2.imread(filenames[i])\n image = resize(image, width=width)\n image_c = image.copy()\n h, w = image.shape[:2]\n image = image.astype(np.float32) - config.IMAGE_MEAN\n image = torch.from_numpy(image.transpose(2, 0, 1)).unsqueeze(0).float()\n\n\n with torch.no_grad():\n image = image.to(device)\n cls, regr = model(image)\n\n cls_prob = F.softmax(cls, dim=-1).cpu().numpy()\n regr = regr.cpu().numpy()\n anchor = gen_anchor((int(h / 16), int(w / 16)), 16)\n\n bbox = bbox_transfor_inv(anchor, regr)\n\n bbox = clip_box(bbox, [h, w])\n\n # prob_thresh = 0.3\n # print(cls_prob)\n # print(cls_prob[0, :, 1])\n # print(cls_prob.shape)\n fg = np.where(cls_prob[0, :, 1] > prob_thresh)[0]\n #print(\"fg:\", fg)\n\n select_anchor = bbox[fg, :]\n\n print(\"select_anchor:\")\n print(select_anchor)\n print(select_anchor.shape)\n\n select_score = cls_prob[0, fg, 1]\n select_anchor = select_anchor.astype(np.int32)\n\n keep_index = filter_bbox(select_anchor, 16)\n\n # nsm\n select_anchor = select_anchor[keep_index]\n select_score = select_score[keep_index]\n select_score = np.reshape(select_score, (select_score.shape[0], 1))\n nmsbox = np.hstack((select_anchor, select_score))\n \n keep = nms(nmsbox, 0.3)\n select_anchor = select_anchor[keep]\n select_score = select_score[keep]\n\n # text line-\n textConn = TextProposalConnectorOriented()\n text = textConn.get_text_lines(select_anchor, select_score, [h, w])\n # print(text)\n # print(text.shape)\n\n for i in text:\n s = str(round(i[-1] * 100, 2)) + '%'\n i = [int(j) for j in i]\n cv2.line(image_c, (i[0], i[1]), (i[2], i[3]), (0, 0, 255), 2)\n cv2.line(image_c, (i[0], i[1]), (i[4], i[5]), (0, 0, 255), 2)\n cv2.line(image_c, (i[6], i[7]), (i[2], i[3]), (0, 0, 255), 2)\n cv2.line(image_c, (i[4], i[5]), (i[6], i[7]), (0, 0, 255), 2)\n cv2.putText(image_c, s, (i[0]+13, i[1]+13),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (255,0,0),\n 2,\n cv2.LINE_AA)\n outpath = \"./predict_img_{}.jpg\".format(k)\n cv2.imwrite(outpath,image_c)\n #dis(image_c)\n" ]
[ [ "numpy.hstack", "torch.nn.functional.softmax", "torch.load", "numpy.reshape", "torch.no_grad", "torch.device", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
annsilje/midgard
[ "ffa81f6704f36dd86aea745fb24b7f7da4233828", "ffa81f6704f36dd86aea745fb24b7f7da4233828", "ffa81f6704f36dd86aea745fb24b7f7da4233828" ]
[ "midgard/parsers/ure_control_tool_csv.py", "midgard/parsers/gipsyx_series.py", "midgard/data/collection.py" ]
[ "\"\"\"A parser for reading URE Control Tool CSV output files\n\nExample:\n--------\n\n from midgard import parsers\n p = parsers.parse_file(parser_name='ure_control_tool_csv', file_path='G_GAL258_E1E5a_URE-AllPRN_190301.csv')\n data = p.as_dict()\n\nDescription:\n------------\n\nReads data from files in URE Control Tool CSV output format. The header information of the URE Control Tool CSV file is\nnot read (TODO).\n\"\"\"\n\n# Standard library import\nimport dateutil.parser\n\n# External library import\nimport numpy as np\n\n\n# Midgard imports\nfrom midgard.dev import log\nfrom midgard.dev import plugins\nfrom midgard.parsers.csv_ import CsvParser\n\n\[email protected]\nclass UreControlToolCsvParser(CsvParser):\n \"\"\"A parser for reading URE Control Tool CSV output files\n\n The URE Control Tool CSV data header line is used to define the keys of the **data** dictionary. The values of the \n **data** dictionary are represented by the URE Control Tool CSV colum values.\n\n \"\"\"\n\n def write_to_dataset(self, dset) -> \"Dataset\":\n \"\"\"Return the parsed data as a Dataset\n\n Args:\n dset (Dataset): The Dataset. Depending on the Spring CSV following dataset fields can be available:\n\n | Field | Former name | Description |\n |-----------------------|--------------------------------------------------------------------------------------|\n | adjbgd-dcb_mean | | |\n | adjbgd-dcb_med | | |\n | clk_diff_dt_mean | dB_mean | MEAN clock offset determined in each epoch |\n | clk_diff_with_dt_mean | dH_mean | Satellite clock correction difference corrected for satellite bias and |\n | | | the MEAN constellation clock offset in each epoch | \n | dr | | Satellite coordinate difference between broadcast and precise ephemeris|\n | | | in radial direction in [m] |\n | dx | | Satellite coordinate difference between broadcast and precise ephemeris|\n | | | for x-coordinate |\n | dy | | Satellite coordinate difference between broadcast and precise ephemeris|\n | | | for y-coordinate |\n | dz | | Satellite coordinate difference between broadcast and precise ephemeris|\n | | | for z-coordinate |\n | dh_med | | Satellite clock correction difference corrected for satellite bias and |\n | | | the MEDIAN clock offset in each epoch |\n | db_med | | MEDIAN constellation clock offset determined in each epoch |\n | dbgd_mean | | |\n | dbgd_med | | |\n | orb_diff_3d | d3D | 3D orbit difference |\n | satellite | SVID | Satellite number |\n | sqrt_a2_c2 | dAC | sqrt(a^2 + c^2) |\n | system | | System identifier |\n | sisre | URE_Av_mean | Global average user range error (signal-in-space range error) with use |\n | | | of MEAN constellation clock offset |\n | ure_av_med | | Global average user range error (signal-in-space range error) with use |\n | | | of MEDIAN constellation clock offset |\n | ure_wul_mean | | Global average user range error for worst user location with use of |\n | | | MEAN constellation clock offset |\n | ure_wul_med | | Global average user range error for worst user location with use of |\n | | | MEDIAN constellation clock offset |\n \"\"\"\n\n field_ure_control_tool_to_where = {\n \"dAC(m)\": \"sqrt_a2_c2\",\n \"dB_mean(m)\": \"clk_diff_dt_mean\",\n \"dH_mean(m)\": \"clk_diff_with_dt_mean\",\n \"dR(m)\": \"dradial\",\n \"d3D(m)\": \"orb_diff_3d\",\n \"SVID\": \"satellite\",\n \"URE_Av_mean(m)\": \"sisre\",\n }\n\n # Initialize dataset\n if not self.data:\n log.warn(\"No data in {self.file_path}.\")\n return dset\n dset.num_obs = len(self.data[\"dX(m)\"])\n\n # Add time\n dset.add_time(\n 
\"time\",\n val=[\n dateutil.parser.parse(self.data[\"YYYY/MM/DD\"][i] + \" \" + self.data[\"HH:MM:SS\"][i])\n for i in range(0, dset.num_obs)\n ],\n scale=\"gps\",\n fmt=\"datetime\",\n write_level=\"operational\",\n )\n\n # Add system field\n dset.add_text(\"system\", val=[s[0:1] for s in self.data[\"SVID\"]])\n\n # Add position field\n dset.add_position(\n \"orb_diff\", itrs=np.vstack((self.data[\"dX(m)\"], self.data[\"dY(m)\"], self.data[\"dZ(m)\"])).T, time=\"time\"\n )\n\n # Define fields to save in dataset\n remove_fields = {\"YYYY/MM/DD\", \"HH:MM:SS\", \"dX(m)\", \"dY(m)\", \"dZ(m)\"}\n fields = set(self.data.keys()) - remove_fields\n\n # Add text and float fields\n for field in fields:\n\n where_fieldname = (\n field_ure_control_tool_to_where[field]\n if field in field_ure_control_tool_to_where.keys()\n else field.lower()\n )\n where_fieldname = where_fieldname.replace(\"(m)\", \"\") # Remove unit (m) from field name\n\n if self.data[field].dtype.kind in {\"U\", \"S\"}: # Check if numpy type is string\n dset.add_text(where_fieldname, val=self.data[field])\n continue\n\n dset.add_float(where_fieldname, val=self.data[field], unit=\"meter\")\n", "\"\"\"A parser for reading NASA JPL GipsyX timeseries file\n\nExample:\n--------\n\n from analyx import parsers\n p = parsers.parse_file(parser_name='gipsyx_series', file_path='NYA1.series')\n data = p.as_dict()\n\nDescription:\n------------\n\nReads data from files in GipsyX timeseries format.\n\n\"\"\"\n# Standard library imports\nfrom typing import Any, Dict, List, Union\n\n# External library imports\nimport numpy as np\n\n# Midgard imports\nfrom midgard.data import dataset\nfrom midgard.data import position\nfrom midgard.dev import log\nfrom midgard.dev import plugins\nfrom midgard.parsers import LineParser\n\n\[email protected]\nclass GipsyxSeriesParser(LineParser):\n \"\"\"A parser for reading GipsyX timeseries file\n\n Following **data** are available after reading GipsyX residual output file:\n\n\n | Key | Description |\n |----------------------|--------------------------------------------------------------------------------------|\n | corr_en | Correlation East-North. |\n | corr_ev | Correlation East-Vertical. |\n | corr_nv | Correlation North-Vertical. |\n | day | Day |\n | decimalyear | Date in unit year. |\n | east | East coordinate in [m]. |\n | east_sigma | Standard devication of east coordinate in [m]. |\n | hour | Hour |\n | minute | Minute |\n | month | Month |\n | north | North coordinate in [m]. |\n | north_sigma | Standard devication of north coordinate in [m]. |\n | second | Second |\n | time_past_j2000 | Time given in GPS seconds past J2000, whereby GipsyX uses following definition: |\n | | J2000 is continuous seconds past Jan. 1, 2000 11:59:47 UTC. |\n | vertical | Vertical coordinate in [m]. |\n | vertical_sigma | Standard devication of vertical coordinate in [m]. 
|\n | year | Year |\n\n and **meta**-data:\n\n | Key | Description |\n |----------------------|--------------------------------------------------------------------------------------|\n | \\__data_path__ | File path |\n | \\__parser_name__ | Parser name |\n \"\"\"\n\n def setup_parser(self) -> Dict[str, Any]:\n \"\"\"Set up information needed for the parser\n\n This should return a dictionary with all parameters needed by np.genfromtxt to do the actual parsing.\n\n Returns:\n Dict: Parameters needed by np.genfromtxt to parse the input file.\n \"\"\"\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----\n # 1997.41546410 -0.129049 -0.184509 -0.104704 0.000704 0.000885 0.004622 0.057219 0.479851 0.539105 -81561750.00 1997 6 1 11 57 30\n # 1997.41820195 -0.131761 -0.188031 -0.106736 0.000698 0.000846 0.004422 0.010166 0.229144 0.489866 -81475350.00 1997 6 2 11 57 30\n # 1997.42093981 -0.128925 -0.186854 -0.109757 0.000743 0.000918 0.004718 0.031938 -0.126787 0.490283 -81388950.00 1997 6 3 11 57 30\n return dict(\n skip_header=1,\n names=(\n \"decimalyear\",\n \"east\",\n \"north\",\n \"vertical\",\n \"east_sigma\",\n \"north_sigma\",\n \"vertical_sigma\",\n \"corr_en\",\n \"corr_ev\",\n \"corr_nv\",\n \"time_past_j2000\",\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n ),\n delimiter=(13, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 6, 3, 3, 3, 3, 3),\n dtype=(\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n \"f8\",\n ),\n autostrip=True,\n )\n\n #\n # WRITE DATA\n #\n def as_dataset(self, ref_pos: Union[np.ndarray, List[float]] = [0.0, 0.0, 0.0]) -> \"Dataset\":\n \"\"\"Return the parsed data as a Dataset\n\n Args:\n ref_pos: Reference position given in terrestrial reference system and meters\n\n Returns:\n Midgard Dataset where timeseries data are stored with following fields:\n\n \n | Field | Type | Description |\n |---------------------|-------------------|----------------------------------------------------------------|\n | obs.pos | PositionDelta | Position delta object referred to a reference position |\n | obs.pos_sigma_east | numpy.array | Standard deviation of east position |\n | obs.pos_sigma_north | numpy.array | Standard deviation of north position |\n | obs.pos_sigma_up | numpy.array | Standard deviation of up position |\n | time | Time | Parameter time given as TimeTable object |\n \"\"\"\n\n # Initialize dataset\n dset = dataset.Dataset()\n if not self.data:\n log.warn(\"No data in {self.file_path}.\")\n return dset\n dset.num_obs = len(self.data[\"decimalyear\"])\n dset.meta.update(self.meta)\n\n # Add position\n ref_pos = position.Position(np.repeat(np.array([ref_pos]), dset.num_obs, axis=0), system=\"trs\")\n dset.add_position_delta(\n name=\"obs.pos\",\n val=np.stack((self.data[\"east\"], self.data[\"north\"], self.data[\"vertical\"]), axis=1),\n system=\"enu\",\n ref_pos=ref_pos,\n )\n\n # TODO: sigma functionality has to be improved: pos_sigma.enu.east, pos_sigma.trs.x\n ## Add position sigma\n # sigma = np.stack((self.data[\"east_sigma\"], self.data[\"north_sigma\"], self.data[\"vertical_sigma\"]), axis=1)\n # dset.add_sigma(name=\"pos_sigma\", val=dset.pos.val, sigma=sigma, unit=\"meter\")\n dset.add_float(name=\"obs.pos_sigma_east\", val=self.data[\"east_sigma\"], 
unit=\"meter\")\n dset.add_float(name=\"obs.pos_sigma_north\", val=self.data[\"north_sigma\"], unit=\"meter\")\n dset.add_float(name=\"obs.pos_sigma_up\", val=self.data[\"vertical_sigma\"], unit=\"meter\")\n\n # Add time\n dset.add_time(\n name=\"time\", val=self.data[\"decimalyear\"], scale=\"utc\", fmt=\"decimalyear\", write_level=\"operational\"\n )\n\n return dset\n", "\"\"\" A collection of fields \n\nAlso serves as base class for dataset\n\"\"\"\n\n# Standard library imports\nimport copy\nfrom typing import List, Dict, Any\n\n# Third party imports\nimport numpy as np\n\n# Midgard imports\nfrom midgard.data import fieldtypes\nfrom midgard.dev import console\nfrom midgard.dev import exceptions\nfrom midgard.math.unit import Unit\n\n\nclass Collection:\n\n type = \"collection\"\n\n def __init__(self):\n self._fields = dict()\n self._default_field_suffix = None\n\n @property\n def fields(self):\n \"\"\"Names of fields and nested fields in the collection\"\"\"\n all_fields = list()\n for fieldname, field in self._fields.items():\n all_fields.append(fieldname)\n try:\n all_fields.extend([f\"{fieldname}.{f}\" for f in field.fields])\n except AttributeError:\n pass\n\n return sorted(all_fields)\n\n def fieldnames(self):\n \"\"\"Names of fields, nested fields and field attributes in the collection\"\"\"\n all_fields = list()\n for field in self._fields.values():\n all_fields.extend(field.subfields)\n\n return sorted(all_fields)\n\n def field(self, fieldname: str) -> \"FieldType\":\n \"\"\"Return the field matching the given fieldname\"\"\"\n mainfield, _, subfield = fieldname.partition(\".\")\n try:\n field = self._fields[mainfield]\n except KeyError:\n raise exceptions.FieldDoesNotExistError(f\"Field {mainfield!r} does not exist\") from None\n if subfield:\n field_data = field.data\n try:\n # Recursive call for collections\n field = field_data.field(subfield)\n except AttributeError:\n # field_data does not have a function field\n # Only collections should have this function\n pass\n return field\n\n @property\n def plot_fields(self):\n \"\"\"Names of fields in the collection\"\"\"\n all_fields = list()\n for fieldname, field in self._fields.items():\n if fieldname.endswith(\"_\"):\n # Fields ending with trailing underscore should not be plotted\n continue\n all_fields.extend(field.plot_fields)\n\n return sorted(all_fields)\n\n def unit(self, field):\n \"\"\"Unit for values in a given field\"\"\"\n mainfield, _, subfield = field.partition(\".\")\n try:\n return self._fields[mainfield].unit(subfield)\n except KeyError:\n raise exceptions.FieldDoesNotExistError(f\"Field {mainfield!r} does not exist\") from None\n\n def unit_short(self, field):\n units = self.unit(field)\n if units is None:\n return tuple()\n return tuple([Unit.symbol(u) for u in units])\n\n def set_unit(self, field, new_unit):\n \"\"\"Update the unit of a given field\"\"\"\n mainfield, _, subfield = field.partition(\".\")\n try:\n return self._fields[mainfield].set_unit(subfield, new_unit)\n except KeyError:\n raise exceptions.FieldDoesNotExistError(f\"Field {mainfield!r} does not exist\") from None\n\n def for_each_fieldtype(self, fieldtype):\n for field in self._fields.values():\n module = field.__module__.split(\".\")[-1]\n if fieldtype == module:\n yield field.data\n\n def for_each_suffix(self, key):\n \"\"\"Do something for each suffix\"\"\"\n *collections, key = key.split(\".\")\n container = self\n for c in collections:\n container = container._fields[c].data\n previous_field_suffix = self.default_field_suffix\n sm = 
[(f[len(key) :], container._fields[f].multiplier) for f in container._fields if f.startswith(key)]\n for suffix, multiplier in sm:\n if suffix and not suffix[1:].isdigit():\n continue\n self.default_field_suffix = suffix\n yield multiplier\n\n self.default_field_suffix = previous_field_suffix\n\n @property\n def default_field_suffix(self):\n \"\"\"Default field suffix\"\"\"\n if self._default_field_suffix is None:\n return \"\"\n return self._default_field_suffix\n\n @default_field_suffix.setter\n def default_field_suffix(self, suffix):\n \"\"\"Set the default field suffix\"\"\"\n if suffix:\n suffix = str(suffix)\n if not suffix.startswith(\"_\"):\n suffix = \"_\" + suffix\n else:\n suffix = None\n for collection in self.for_each_fieldtype(\"collection\"):\n collection.default_field_suffix = suffix\n self._default_field_suffix = suffix\n\n def _difference(self, other, num_obs, self_idx, other_idx, copy_self_on_error=False, copy_other_on_error=False):\n \"\"\"Perform the - operation for each field in self and other\"\"\"\n result = self.__class__()\n for fieldname, field in self._fields.items():\n if fieldname in other._fields:\n try:\n factors = [Unit(_from, _to) for _to, _from in zip(field._unit, other._fields[fieldname]._unit)]\n except TypeError:\n factors = None\n except exceptions.UnitError as err:\n raise ValueError(f\"Cannot compute difference for field `{fieldname}`: {err}\")\n try:\n if factors:\n difference = self[fieldname][self_idx] - other[fieldname][other_idx] * np.array(factors)\n else:\n difference = self[fieldname][self_idx] - other[fieldname][other_idx]\n fieldtype = fieldtypes.fieldtype(difference)\n func = fieldtypes.function(fieldtype)\n field = func(\n num_obs=num_obs,\n name=fieldname,\n val=difference,\n unit=field._unit,\n write_level=field._write_level.name,\n )\n result.add_field(fieldname, field)\n except IndexError as err:\n # fieldname is a collection\n collection = self[fieldname]._difference(\n other[fieldname],\n num_obs,\n self_idx,\n other_idx,\n copy_self_on_error=copy_self_on_error,\n copy_other_on_error=copy_other_on_error,\n )\n fieldtype = fieldtypes.fieldtype(collection)\n func = fieldtypes.function(fieldtype)\n field = func(\n num_obs=num_obs,\n name=fieldname,\n val=collection,\n unit=field._unit,\n write_level=field._write_level.name,\n )\n result.add_field(fieldname, field)\n except TypeError as err:\n # Fields that do not support the - operator\n if copy_self_on_error:\n index_data = self[fieldname][self_idx]\n fieldtype = fieldtypes.fieldtype(index_data)\n func = fieldtypes.function(fieldtype)\n self_fieldname = f\"{fieldname}_self\"\n field = func(\n num_obs=num_obs,\n name=self_fieldname,\n val=index_data,\n unit=field._unit,\n write_level=field._write_level.name,\n )\n result.add_field(self_fieldname, field)\n if copy_other_on_error:\n index_data = other[fieldname][other_idx]\n fieldtype = fieldtypes.fieldtype(index_data)\n func = fieldtypes.function(fieldtype)\n other_fieldname = f\"{fieldname}_other\"\n field = func(\n num_obs=num_obs,\n name=other_fieldname,\n val=index_data,\n unit=other._fields[fieldname]._unit,\n write_level=other._fields[fieldname]._write_level.name,\n )\n result.add_field(other_fieldname, field)\n\n return result\n\n def _extend(self, other, memo):\n \"\"\"Extend fields in self with data from other\"\"\"\n only_in_other = set(other._fields.keys()) - set(self._fields.keys())\n only_in_self = set(self._fields.keys()) - set(other._fields.keys())\n\n if len(self) == 0:\n only_in_other = only_in_other | 
set(self._fields.keys())\n\n if len(other) == 0:\n only_in_self = only_in_self | set(other._fields.keys())\n\n for field_name, field in other._fields.items():\n\n if field_name in only_in_other:\n new_field = field.copy()\n new_field.prepend_empty(len(self), memo)\n self._fields[field_name] = new_field\n else:\n self._fields[field_name].extend(other._fields[field_name], memo)\n\n for field_name in only_in_self:\n self._fields[field_name].append_empty(len(other), memo)\n\n def add_field(self, fieldname: str, field: \"FieldType\") -> None:\n \"\"\"Update the _fields dictionary with a field\"\"\"\n self._fields[fieldname] = field\n\n def __bool__(self) -> bool:\n \"\"\"Dataset is truthy if it has fields with observations\"\"\"\n return len(self) > 0 and len(self._fields) > 0\n\n def __getitem__(self, key):\n \"\"\"Get a field from the dataset using dict-notation\"\"\"\n try:\n mainfield, _, subfield = key.partition(\".\")\n except (TypeError, AttributeError):\n raise IndexError(f\"Class {self.__class__.__name__} does not support slicing\")\n\n if not subfield:\n return getattr(self, key)\n else:\n return getattr(getattr(self, mainfield), subfield)\n\n def __getattr__(self, key):\n \"\"\"Get a field from the dataset using dot-notation\"\"\"\n if key == \"_fields\" or key.endswith(\"default_field_suffix\"):\n return self.__getattribute__(key)\n\n container = self\n field = key\n if \".\" in key:\n collection, _, field = key.partition(\".\")\n container = self[collection]\n\n # If field exists\n if field in container._fields:\n if \".\" in field:\n return getattr(container, field)\n else:\n return container._fields[field].data\n\n # Field did not exist. Try default_field_suffix if set\n if container._default_field_suffix is not None:\n field = field + container.default_field_suffix\n if field in container._fields:\n if \".\" in field:\n return getattr(container, field)\n else:\n return container._fields[field].data\n\n else:\n # Raise error for unknown attributes\n raise AttributeError(f\"{type(self).__name__!r} has no attribute {key!r}\")\n\n def __delitem__(self, key):\n \"\"\"Delete a field from the dataset\"\"\"\n try:\n mainfield, _, subfield = key.partition(\".\")\n except (TypeError, AttributeError):\n raise IndexError(\n f\"Class {self.__class__.__name__} does not support deletion of slices. 
Use subset instead.\"\n )\n\n if not subfield:\n delattr(self, key)\n else:\n delattr(getattr(self, mainfield), subfield)\n\n def __delattr__(self, key):\n \"\"\"Delete a field from the dataset\"\"\"\n if self._default_field_suffix is not None:\n key_with_suffix = key + self.default_field_suffix\n if key_with_suffix in self._fields:\n del self._fields[key_with_suffix]\n return\n if key in self._fields:\n del self._fields[key]\n return\n super().__delattr__(key)\n\n def __dir__(self):\n \"\"\"List all fields and attributes on the collection\"\"\"\n return super().__dir__() + self.fields\n\n def _ipython_key_completions_(self) -> List[str]:\n \"\"\"Tab completion for dict-style lookups in ipython\n\n See http://ipython.readthedocs.io/en/stable/config/integrating.html\n\n Returns:\n Fieldnames in the dataset.\n \"\"\"\n return self.fields\n\n def __repr__(self) -> str:\n \"\"\"A string representing the collection\"\"\"\n return f\"{type(self).__name__}(num_fields={len(self.fields)})\"\n\n def __setattr__(self, key, value):\n \"\"\"Protect fields from being accidentally overwritten\"\"\"\n if key != \"_fields\" and key in self._fields:\n if id(value) != id(self._fields[key].data):\n raise AttributeError(\n f\"Can't set attribute '{key}' on '{type(self).__name__}' object, use slicing instead\"\n )\n super().__setattr__(key, value)\n\n def __str__(self) -> str:\n \"\"\"A string describing the dataset\"\"\"\n fields = console.fill(f\"Fields: {', '.join(self.fields)}\", hanging=8)\n return f\"{self!r}\\n{fields}\"\n\n def __len__(self) -> int:\n \"\"\"The length of a collection is the number of rows in the fields\"\"\"\n if not self._fields:\n return 0\n first_field = list(self._fields.keys())[0]\n # All fields should have same length. Use length of first field in collection as length\n return len(self._fields[first_field].data)\n\n def __deepcopy__(self, memo):\n \"\"\"Deep copy of collection\"\"\"\n new_collection = self.__class__()\n for fieldname, field in self._fields.items():\n new_collection._fields[fieldname] = copy.deepcopy(field, memo)\n return new_collection\n" ]
[ [ "numpy.vstack" ], [ "numpy.array", "numpy.stack" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
llguo95/gpytorch
[ "1fa69935104565c377ce95d2c581c9eedfb55817", "1fa69935104565c377ce95d2c581c9eedfb55817", "1fa69935104565c377ce95d2c581c9eedfb55817", "1fa69935104565c377ce95d2c581c9eedfb55817", "1fa69935104565c377ce95d2c581c9eedfb55817", "1fa69935104565c377ce95d2c581c9eedfb55817", "1fa69935104565c377ce95d2c581c9eedfb55817" ]
[ "gpytorch/utils/toeplitz.py", "gpytorch/lazy/sum_batch_lazy_tensor.py", "test/likelihoods/test_laplace_likelihood.py", "test/lazy/test_diag_lazy_tensor.py", "gpytorch/priors/torch_priors.py", "gpytorch/mlls/kl_gaussian_added_loss_term.py", "gpytorch/utils/qr.py" ]
[ "#!/usr/bin/env python3\n\nimport torch\nfrom torch.fft import fft, ifft\n\nfrom ..utils import broadcasting\n\n\ndef toeplitz(toeplitz_column, toeplitz_row):\n \"\"\"\n Constructs tensor version of toeplitz matrix from column vector\n Args:\n - toeplitz_column (vector n) - column of toeplitz matrix\n - toeplitz_row (vector n-1) - row of toeplitz matrix\n Returns:\n - Matrix (n x n) - matrix representation\n \"\"\"\n if toeplitz_column.ndimension() != 1:\n raise RuntimeError(\"toeplitz_column must be a vector.\")\n\n if toeplitz_row.ndimension() != 1:\n raise RuntimeError(\"toeplitz_row must be a vector.\")\n\n if toeplitz_column[0] != toeplitz_row[0]:\n raise RuntimeError(\n \"The first column and first row of the Toeplitz matrix should have \"\n \"the same first otherwise the value of T[0,0] is ambiguous. \"\n \"Got: c[0]={} and r[0]={}\".format(toeplitz_column[0], toeplitz_row[0])\n )\n\n if len(toeplitz_column) != len(toeplitz_row):\n raise RuntimeError(\"c and r should have the same length \" \"(Toeplitz matrices are necessarily square).\")\n\n if type(toeplitz_column) != type(toeplitz_row):\n raise RuntimeError(\"toeplitz_column and toeplitz_row should be the same type.\")\n\n if len(toeplitz_column) == 1:\n return toeplitz_column.view(1, 1)\n\n res = torch.empty(\n len(toeplitz_column), len(toeplitz_column), dtype=toeplitz_column.dtype, device=toeplitz_column.device\n )\n for i, val in enumerate(toeplitz_column):\n for j in range(len(toeplitz_column) - i):\n res[j + i, j] = val\n for i, val in list(enumerate(toeplitz_row))[1:]:\n for j in range(len(toeplitz_row) - i):\n res[j, j + i] = val\n return res\n\n\ndef sym_toeplitz(toeplitz_column):\n \"\"\"\n Constructs tensor version of symmetric toeplitz matrix from column vector\n Args:\n - toeplitz_column (vector n) - column of Toeplitz matrix\n Returns:\n - Matrix (n x n) - matrix representation\n \"\"\"\n return toeplitz(toeplitz_column, toeplitz_column)\n\n\ndef toeplitz_getitem(toeplitz_column, toeplitz_row, i, j):\n \"\"\"\n Gets the (i,j)th entry of a Toeplitz matrix T.\n Args:\n - toeplitz_column (vector n) - column of Toeplitz matrix\n - toeplitz_row (vector n) - row of Toeplitz matrix\n - i (scalar) - row of entry to get\n - j (scalar) - column of entry to get\n Returns:\n - T[i,j], where T is the Toeplitz matrix specified by c and r.\n \"\"\"\n index = i - j\n if index < 0:\n return toeplitz_row[abs(index)]\n else:\n return toeplitz_column[index]\n\n\ndef sym_toeplitz_getitem(toeplitz_column, i, j):\n \"\"\"\n Gets the (i,j)th entry of a symmetric Toeplitz matrix T.\n Args:\n - toeplitz_column (vector n) - column of symmetric Toeplitz matrix\n - i (scalar) - row of entry to get\n - j (scalar) - column of entry to get\n Returns:\n - T[i,j], where T is the Toeplitz matrix specified by c and r.\n \"\"\"\n return toeplitz_getitem(toeplitz_column, toeplitz_column, i, j)\n\n\ndef toeplitz_matmul(toeplitz_column, toeplitz_row, tensor):\n \"\"\"\n Performs multiplication T * M where the matrix T is Toeplitz.\n Args:\n - toeplitz_column (vector n or b x n) - First column of the Toeplitz matrix T.\n - toeplitz_row (vector n or b x n) - First row of the Toeplitz matrix T.\n - tensor (matrix n x p or b x n x p) - Matrix or vector to multiply the Toeplitz matrix with.\n Returns:\n - tensor (n x p or b x n x p) - The result of the matrix multiply T * M.\n \"\"\"\n if toeplitz_column.size() != toeplitz_row.size():\n raise RuntimeError(\"c and r should have the same length (Toeplitz matrices are necessarily square).\")\n\n toeplitz_shape = 
torch.Size((*toeplitz_column.shape, toeplitz_row.size(-1)))\n output_shape = broadcasting._matmul_broadcast_shape(toeplitz_shape, tensor.shape)\n broadcasted_t_shape = output_shape[:-1] if tensor.dim() > 1 else output_shape\n\n if tensor.ndimension() == 1:\n tensor = tensor.unsqueeze(-1)\n toeplitz_column = toeplitz_column.expand(*broadcasted_t_shape)\n toeplitz_row = toeplitz_row.expand(*broadcasted_t_shape)\n tensor = tensor.expand(*output_shape)\n\n if not torch.equal(toeplitz_column[..., 0], toeplitz_row[..., 0]):\n raise RuntimeError(\n \"The first column and first row of the Toeplitz matrix should have \"\n \"the same first element, otherwise the value of T[0,0] is ambiguous. \"\n \"Got: c[0]={} and r[0]={}\".format(toeplitz_column[0], toeplitz_row[0])\n )\n\n if type(toeplitz_column) != type(toeplitz_row) or type(toeplitz_column) != type(tensor):\n raise RuntimeError(\"The types of all inputs to ToeplitzMV must match.\")\n\n *batch_shape, orig_size, num_rhs = tensor.size()\n r_reverse = toeplitz_row[..., 1:].flip(dims=(-1,))\n\n c_r_rev = torch.zeros(*batch_shape, orig_size + r_reverse.size(-1), dtype=tensor.dtype, device=tensor.device)\n c_r_rev[..., :orig_size] = toeplitz_column\n c_r_rev[..., orig_size:] = r_reverse\n\n temp_tensor = torch.zeros(\n *batch_shape, 2 * orig_size - 1, num_rhs, dtype=toeplitz_column.dtype, device=toeplitz_column.device\n )\n temp_tensor[..., :orig_size, :] = tensor\n\n fft_M = fft(temp_tensor.transpose(-1, -2).contiguous())\n fft_c = fft(c_r_rev).unsqueeze(-2).expand_as(fft_M)\n fft_product = fft_M.mul_(fft_c)\n\n output = ifft(fft_product).real.transpose(-1, -2)\n output = output[..., :orig_size, :]\n return output\n\n\ndef sym_toeplitz_matmul(toeplitz_column, tensor):\n \"\"\"\n Performs a matrix-matrix multiplication TM where the matrix T is symmetric Toeplitz.\n Args:\n - toeplitz_column (vector n) - First column of the symmetric Toeplitz matrix T.\n - matrix (matrix n x p) - Matrix or vector to multiply the Toeplitz matrix with.\n Returns:\n - tensor\n \"\"\"\n return toeplitz_matmul(toeplitz_column, toeplitz_column, tensor)\n\n\ndef sym_toeplitz_derivative_quadratic_form(left_vectors, right_vectors):\n r\"\"\"\n Given a left vector v1 and a right vector v2, computes the quadratic form:\n v1'*(dT/dc_i)*v2\n for all i, where dT/dc_i is the derivative of the Toeplitz matrix with respect to\n the ith element of its first column. Note that dT/dc_i is the same for any symmetric\n Toeplitz matrix T, so we do not require it as an argument.\n\n In particular, dT/dc_i is given by:\n [0 0; I_{m-i+1} 0] + [0 I_{m-i+1}; 0 0]\n where I_{m-i+1} is the (m-i+1) dimensional identity matrix. 
In other words, dT/dc_i\n for i=1..m is the matrix with ones on the ith sub- and superdiagonal.\n\n Args:\n - left_vectors (vector m or matrix s x m) - s left vectors u[j] in the quadratic form.\n - right_vectors (vector m or matrix s x m) - s right vectors v[j] in the quadratic form.\n Returns:\n - vector m - a vector so that the ith element is the result of \\sum_j(u[j]*(dT/dc_i)*v[j])\n \"\"\"\n if left_vectors.ndimension() == 1:\n left_vectors = left_vectors.unsqueeze(1)\n right_vectors = right_vectors.unsqueeze(1)\n\n batch_shape = left_vectors.shape[:-2]\n toeplitz_size = left_vectors.size(-2)\n num_vectors = left_vectors.size(-1)\n\n left_vectors = left_vectors.transpose(-1, -2).contiguous()\n right_vectors = right_vectors.transpose(-1, -2).contiguous()\n\n columns = torch.zeros_like(left_vectors)\n columns[..., 0] = left_vectors[..., 0]\n res = toeplitz_matmul(columns, left_vectors, right_vectors.unsqueeze(-1))\n rows = left_vectors.flip(dims=(-1,))\n columns[..., 0] = rows[..., 0]\n res += toeplitz_matmul(columns, rows, torch.flip(right_vectors, dims=(-1,)).unsqueeze(-1))\n\n res = res.reshape(*batch_shape, num_vectors, toeplitz_size).sum(-2)\n res[..., 0] -= (left_vectors * right_vectors).view(*batch_shape, -1).sum(-1)\n\n return res\n", "#!/usr/bin/env python3\n\nimport torch\n\nfrom ..utils.broadcasting import _pad_with_singletons\nfrom ..utils.getitem import _noop_index\nfrom .block_lazy_tensor import BlockLazyTensor\n\n\nclass SumBatchLazyTensor(BlockLazyTensor):\n \"\"\"\n Represents a lazy tensor that is actually the sum of several lazy tensors blocks.\n The :attr:`block_dim` attribute specifies which dimension of the base LazyTensor\n specifies the blocks.\n For example, (with `block_dim=-3` a `k x n x n` tensor represents `k` `n x n` blocks (a `n x n` matrix).\n A `b x k x n x n` tensor represents `k` `b x n x n` blocks (a `b x n x n` batch matrix).\n\n Args:\n :attr:`base_lazy_tensor` (LazyTensor):\n A `k x n x n` LazyTensor, or a `b x k x n x n` LazyTensor.\n :attr:`block_dim` (int):\n The dimension that specifies the blocks.\n \"\"\"\n\n def _add_batch_dim(self, other):\n shape = list(other.shape)\n expand_shape = list(other.shape)\n shape.insert(-2, 1)\n expand_shape.insert(-2, self.base_lazy_tensor.size(-3))\n other = other.reshape(*shape).expand(*expand_shape)\n return other\n\n def _get_indices(self, row_index, col_index, *batch_indices):\n # Create an extra index for the summed dimension\n sum_index = torch.arange(0, self.base_lazy_tensor.size(-3), device=self.device)\n sum_index = _pad_with_singletons(sum_index, row_index.dim(), 0)\n row_index = row_index.unsqueeze(-1)\n col_index = col_index.unsqueeze(-1)\n batch_indices = [index.unsqueeze(-1) for index in batch_indices]\n\n res = self.base_lazy_tensor._get_indices(row_index, col_index, *batch_indices, sum_index)\n return res.sum(-1)\n\n def _getitem(self, row_index, col_index, *batch_indices):\n res = self.base_lazy_tensor._getitem(row_index, col_index, *batch_indices, _noop_index)\n return self.__class__(res, **self._kwargs)\n\n def _remove_batch_dim(self, other):\n return other.sum(-3)\n\n def _size(self):\n shape = list(self.base_lazy_tensor.shape)\n del shape[-3]\n return torch.Size(shape)\n\n def diag(self):\n diag = self.base_lazy_tensor.diag().sum(-2)\n return diag\n\n def evaluate(self):\n return self.base_lazy_tensor.evaluate().sum(dim=-3) # BlockLazyTensors always use dim3 for the block_dim\n", "#!/usr/bin/env python3\n\nimport unittest\n\nimport torch\n\nfrom gpytorch.likelihoods import 
LaplaceLikelihood, _OneDimensionalLikelihood\nfrom gpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase\n\n\nclass TestLaplaceLikelihood(BaseLikelihoodTestCase, unittest.TestCase):\n seed = 1\n\n def _create_targets(self, batch_shape=torch.Size([])):\n res = torch.sigmoid(torch.randn(*batch_shape, 5)).float()\n return res\n\n def create_likelihood(self, **kwargs):\n return LaplaceLikelihood(**kwargs)\n\n def _test_log_marginal(self, batch_shape):\n likelihood = self.create_likelihood()\n input = self._create_marginal_input(batch_shape)\n target = self._create_targets(batch_shape)\n output = likelihood.log_marginal(target, input)\n\n self.assertTrue(torch.is_tensor(output))\n self.assertEqual(output.shape, batch_shape + torch.Size([5]))\n default_log_prob = _OneDimensionalLikelihood.log_marginal(likelihood, target, input)\n self.assertAllClose(output.sum(-1), default_log_prob.sum(-1), rtol=0.25, atol=0.1)\n\n def _test_log_prob(self, batch_shape):\n likelihood = self.create_likelihood()\n input = self._create_marginal_input(batch_shape)\n target = self._create_targets(batch_shape)\n output = likelihood.expected_log_prob(target, input)\n\n self.assertTrue(torch.is_tensor(output))\n self.assertEqual(output.shape, batch_shape + torch.Size([5]))\n default_log_prob = _OneDimensionalLikelihood.expected_log_prob(likelihood, target, input)\n self.assertAllClose(output.sum(-1), default_log_prob.sum(-1), rtol=0.25, atol=0.1)\n\n def _test_marginal(self, batch_shape):\n # Likelihood uses the default marginal behavior, no point testing a set of samples against a set of samples.\n pass\n", "#!/usr/bin/env python3\n\nimport unittest\n\nimport torch\n\nfrom gpytorch.lazy import DiagLazyTensor\nfrom gpytorch.test.lazy_tensor_test_case import LazyTensorTestCase\n\n\nclass TestDiagLazyTensor(LazyTensorTestCase, unittest.TestCase):\n seed = 0\n should_test_sample = True\n should_call_cg = False\n should_call_lanczos = False\n\n def create_lazy_tensor(self):\n diag = torch.tensor([1.0, 2.0, 4.0, 5.0, 3.0], requires_grad=True)\n return DiagLazyTensor(diag)\n\n def evaluate_lazy_tensor(self, lazy_tensor):\n diag = lazy_tensor._diag\n return diag.diag()\n\n\nclass TestDiagLazyTensorBatch(TestDiagLazyTensor):\n seed = 0\n\n def create_lazy_tensor(self):\n diag = torch.tensor(\n [[1.0, 2.0, 4.0, 2.0, 3.0], [2.0, 1.0, 2.0, 1.0, 4.0], [1.0, 2.0, 2.0, 3.0, 4.0]], requires_grad=True\n )\n return DiagLazyTensor(diag)\n\n def evaluate_lazy_tensor(self, lazy_tensor):\n diag = lazy_tensor._diag\n return torch.cat([diag[i].diag().unsqueeze(0) for i in range(3)])\n\n\nclass TestDiagLazyTensorMultiBatch(TestDiagLazyTensor):\n seed = 0\n # Because these LTs are large, we'll skil the big tests\n should_test_sample = True\n skip_slq_tests = True\n\n def create_lazy_tensor(self):\n diag = torch.randn(6, 3, 5).pow_(2)\n diag.requires_grad_(True)\n return DiagLazyTensor(diag)\n\n def evaluate_lazy_tensor(self, lazy_tensor):\n diag = lazy_tensor._diag\n flattened_diag = diag.view(-1, diag.size(-1))\n res = torch.cat([flattened_diag[i].diag().unsqueeze(0) for i in range(18)])\n return res.view(6, 3, 5, 5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "#!/usr/bin/env python3\n\nimport torch\nfrom torch.distributions import Gamma, LogNormal, MultivariateNormal, Normal, Uniform\nfrom torch.nn import Module as TModule\n\nfrom .prior import Prior\nfrom .utils import _bufferize_attributes, _del_attributes\n\nMVN_LAZY_PROPERTIES = (\"covariance_matrix\", \"scale_tril\", \"precision_matrix\")\n\n\nclass 
NormalPrior(Prior, Normal):\n \"\"\"\n Normal (Gaussian) Prior\n\n pdf(x) = (2 * pi * sigma^2)^-0.5 * exp(-(x - mu)^2 / (2 * sigma^2))\n\n where mu is the mean and sigma^2 is the variance.\n \"\"\"\n\n def __init__(self, loc, scale, validate_args=False, transform=None):\n TModule.__init__(self)\n Normal.__init__(self, loc=loc, scale=scale, validate_args=validate_args)\n _bufferize_attributes(self, (\"loc\", \"scale\"))\n self._transform = transform\n\n def expand(self, batch_shape):\n batch_shape = torch.Size(batch_shape)\n return NormalPrior(self.loc.expand(batch_shape), self.scale.expand(batch_shape))\n\n\nclass LogNormalPrior(Prior, LogNormal):\n \"\"\"\n Log Normal prior.\n \"\"\"\n\n def __init__(self, loc, scale, validate_args=None, transform=None):\n TModule.__init__(self)\n LogNormal.__init__(self, loc=loc, scale=scale, validate_args=validate_args)\n self._transform = transform\n\n def expand(self, batch_shape):\n batch_shape = torch.Size(batch_shape)\n return LogNormalPrior(self.loc.expand(batch_shape), self.scale.expand(batch_shape))\n\n\nclass UniformPrior(Prior, Uniform):\n \"\"\"\n Uniform prior.\n \"\"\"\n\n def __init__(self, a, b, validate_args=None, transform=None):\n TModule.__init__(self)\n Uniform.__init__(self, a, b, validate_args=validate_args)\n self._transform = transform\n\n def expand(self, batch_shape):\n batch_shape = torch.Size(batch_shape)\n return UniformPrior(self.low.expand(batch_shape), self.high.expand(batch_shape))\n\n\nclass GammaPrior(Prior, Gamma):\n \"\"\"Gamma Prior parameterized by concentration and rate\n\n pdf(x) = beta^alpha / Gamma(alpha) * x^(alpha - 1) * exp(-beta * x)\n\n were alpha > 0 and beta > 0 are the concentration and rate parameters, respectively.\n \"\"\"\n\n def __init__(self, concentration, rate, validate_args=False, transform=None):\n TModule.__init__(self)\n Gamma.__init__(self, concentration=concentration, rate=rate, validate_args=validate_args)\n _bufferize_attributes(self, (\"concentration\", \"rate\"))\n self._transform = transform\n\n def expand(self, batch_shape):\n batch_shape = torch.Size(batch_shape)\n return GammaPrior(self.concentration.expand(batch_shape), self.rate.expand(batch_shape))\n\n def __call__(self, *args, **kwargs):\n return super(Gamma, self).__call__(*args, **kwargs)\n\n\nclass MultivariateNormalPrior(Prior, MultivariateNormal):\n \"\"\"Multivariate Normal prior\n\n pdf(x) = det(2 * pi * Sigma)^-0.5 * exp(-0.5 * (x - mu)' Sigma^-1 (x - mu))\n\n where mu is the mean and Sigma > 0 is the covariance matrix.\n \"\"\"\n\n def __init__(\n self, loc, covariance_matrix=None, precision_matrix=None, scale_tril=None, validate_args=False, transform=None\n ):\n TModule.__init__(self)\n MultivariateNormal.__init__(\n self,\n loc=loc,\n covariance_matrix=covariance_matrix,\n precision_matrix=precision_matrix,\n scale_tril=scale_tril,\n validate_args=validate_args,\n )\n _bufferize_attributes(self, (\"loc\", \"_unbroadcasted_scale_tril\"))\n self._transform = transform\n\n def cuda(self, device=None):\n \"\"\"Applies module-level cuda() call and resets all lazy properties\"\"\"\n module = self._apply(lambda t: t.cuda(device))\n _del_attributes(module, MVN_LAZY_PROPERTIES)\n return module\n\n def cpu(self):\n \"\"\"Applies module-level cpu() call and resets all lazy properties\"\"\"\n module = self._apply(lambda t: t.cpu())\n _del_attributes(module, MVN_LAZY_PROPERTIES)\n return module\n\n def expand(self, batch_shape):\n batch_shape = torch.Size(batch_shape)\n cov_shape = batch_shape + self.event_shape\n new_loc = 
self.loc.expand(batch_shape)\n new_scale_tril = self.scale_tril.expand(cov_shape)\n\n return MultivariateNormalPrior(loc=new_loc, scale_tril=new_scale_tril)\n", "#!/usr/bin/env python3\n\nfrom torch.distributions import kl_divergence\n\nfrom .added_loss_term import AddedLossTerm\n\n\nclass KLGaussianAddedLossTerm(AddedLossTerm):\n def __init__(self, q_x, p_x, n, data_dim):\n super().__init__()\n self.q_x = q_x\n self.p_x = p_x\n self.n = n\n self.data_dim = data_dim\n\n def loss(self):\n kl_per_latent_dim = kl_divergence(self.q_x, self.p_x).sum(axis=0) # vector of size latent_dim\n kl_per_point = kl_per_latent_dim.sum() / self.n # scalar\n # inside the forward method of variational ELBO,\n # the added loss terms are expanded (using add_) to take the same\n # shape as the log_lik term (has shape data_dim)\n # so they can be added together. Hence, we divide by data_dim to avoid\n # overcounting the kl term\n return kl_per_point / self.data_dim\n", "#!/usr/bin/env python3\n\nimport torch\n\n\ndef stable_qr(mat):\n \"\"\"\n performs a QR decomposition on the batched matrix mat.\n We need to use these functions because of\n\n 1. slow batched QR in pytorch (pytorch/pytorch#22573)\n 2. possible singularity in R\n \"\"\"\n if mat.shape[-1] <= 2048:\n # Dispatch to CPU so long as pytorch/pytorch#22573 is not fixed\n device = mat.device\n Q, R = torch.linalg.qr(mat.cpu())\n Q = Q.to(device)\n R = R.to(device)\n else:\n Q, R = torch.linalg.qr(mat)\n\n Rdiag = torch.diagonal(R, dim1=-2, dim2=-1)\n # if R is almost singular, add jitter\n zeroish = Rdiag.abs() < 1e-6\n if torch.any(zeroish):\n # can't use in-place operation here b/c it would mess up backward pass\n # haven't found a more elegant way to add a jitter diagonal yet...\n Rdiag_sign = torch.sign(Rdiag)\n # force zero diagonals to have jitter added to them.\n Rdiag_sign[Rdiag_sign == 0] = 1.0\n jitter_diag = 1e-6 * Rdiag_sign * zeroish.to(Rdiag)\n R = R + torch.diag_embed(jitter_diag)\n return Q, R\n" ]
[ [ "torch.fft.fft", "torch.fft.ifft", "torch.zeros", "torch.zeros_like", "torch.equal", "torch.flip" ], [ "torch.Size" ], [ "torch.randn", "torch.Size", "torch.is_tensor" ], [ "torch.randn", "torch.tensor" ], [ "torch.distributions.LogNormal.__init__", "torch.distributions.Uniform.__init__", "torch.Size", "torch.nn.Module.__init__", "torch.distributions.MultivariateNormal.__init__", "torch.distributions.Gamma.__init__", "torch.distributions.Normal.__init__" ], [ "torch.distributions.kl_divergence" ], [ "torch.diagonal", "torch.sign", "torch.diag_embed", "torch.linalg.qr", "torch.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dendisuhubdy/representation_mixing
[ "146ddc7a2cc34544bb4516149ccfcbe72eedd102" ]
[ "code/lib/examples/unaligned_ljspeech_chars/rnn_unaligned_speech_ljspeech_chars.py" ]
[ "from __future__ import print_function\nimport os\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom collections import namedtuple\n\nimport logging\nimport shutil\nfrom tfbldr.datasets import rsync_fetch, fetch_ljspeech\nfrom tfbldr.datasets import wavfile_caching_mel_tbptt_iterator\nfrom tfbldr.utils import next_experiment_path\nfrom tfbldr import get_logger\nfrom tfbldr import run_loop\nfrom tfbldr.nodes import Linear\nfrom tfbldr.nodes import Linear\nfrom tfbldr.nodes import LSTMCell\nfrom tfbldr.nodes import BiLSTMLayer\nfrom tfbldr.nodes import SequenceConv1dStack\nfrom tfbldr.nodes import Embedding\nfrom tfbldr.nodes import GaussianAttentionCell\nfrom tfbldr.nodes import DiscreteMixtureOfLogistics\nfrom tfbldr.nodes import DiscreteMixtureOfLogisticsCost\nfrom tfbldr.nodes import AdditiveGaussianNoise\nfrom tfbldr import scan\n\nseq_len = 256\nbatch_size = 64\nwindow_mixtures = 10\ncell_dropout = .925\n#noise_scale = 8.\nprenet_units = 128\nn_filts = 128\nn_stacks = 3\nenc_units = 128\ndec_units = 512\nemb_dim = 15\ntruncation_len = seq_len\ncell_dropout_scale = cell_dropout\nepsilon = 1E-8\nforward_init = \"truncated_normal\"\nrnn_init = \"truncated_normal\"\n\nbasedir = \"/Tmp/kastner/lj_speech/LJSpeech-1.0/\"\nljspeech = rsync_fetch(fetch_ljspeech, \"leto01\")\n\n# THESE ARE CANNOT BE PAIRED (SOME MISSING), ITERATOR PAIRS THEM UP BY NAME\nwavfiles = ljspeech[\"wavfiles\"]\njsonfiles = ljspeech[\"jsonfiles\"]\n\n# THESE HAVE TO BE THE SAME TO ENSURE SPLIT IS CORRECT\ntrain_random_state = np.random.RandomState(3122)\nvalid_random_state = np.random.RandomState(3122)\n\ntrain_itr = wavfile_caching_mel_tbptt_iterator(wavfiles, jsonfiles, batch_size, seq_len, stop_index=.95, shuffle=True, symbol_processing=\"chars_only\", random_state=train_random_state)\nvalid_itr = wavfile_caching_mel_tbptt_iterator(wavfiles, jsonfiles, batch_size, seq_len, start_index=.95, shuffle=True, symbol_processing=\"chars_only\", random_state=valid_random_state)\n\n\"\"\"\nfor i in range(10000):\n print(i)\n mels, mel_mask, text, text_mask, mask, mask_mask, reset = train_itr.next_masked_batch()\n\"\"\"\n\n# STRONG CHECK TO ENSURE NO OVERLAP IN TRAIN/VALID\nfor tai in train_itr.all_indices_:\n assert tai not in valid_itr.all_indices_\nfor vai in valid_itr.all_indices_:\n assert vai not in train_itr.all_indices_\n\nrandom_state = np.random.RandomState(1442)\n# use the max of the two blended types...\nvocabulary_size = max(train_itr.vocabulary_sizes)\noutput_size = train_itr.n_mel_filters\n\ndef create_graph():\n graph = tf.Graph()\n with graph.as_default():\n tf.set_random_seed(2899)\n\n text = tf.placeholder(tf.float32, shape=[None, batch_size, 1])\n text_mask = tf.placeholder(tf.float32, shape=[None, batch_size])\n\n #mask = tf.placeholder(tf.float32, shape=[None, batch_size, 1])\n #mask_mask = tf.placeholder(tf.float32, shape=[None, batch_size])\n\n mels = tf.placeholder(tf.float32, shape=[None, batch_size, output_size])\n mel_mask = tf.placeholder(tf.float32, shape=[None, batch_size])\n\n bias = tf.placeholder_with_default(tf.zeros(shape=[]), shape=[])\n cell_dropout = tf.placeholder_with_default(cell_dropout_scale * tf.ones(shape=[]), shape=[])\n prenet_dropout = tf.placeholder_with_default(0.5 * tf.ones(shape=[]), shape=[])\n bn_flag = tf.placeholder_with_default(tf.zeros(shape=[]), shape=[])\n\n att_w_init = tf.placeholder(tf.float32, shape=[batch_size, 2 * enc_units])\n att_k_init = tf.placeholder(tf.float32, shape=[batch_size, window_mixtures])\n att_h_init = 
tf.placeholder(tf.float32, shape=[batch_size, dec_units])\n att_c_init = tf.placeholder(tf.float32, shape=[batch_size, dec_units])\n h1_init = tf.placeholder(tf.float32, shape=[batch_size, dec_units])\n c1_init = tf.placeholder(tf.float32, shape=[batch_size, dec_units])\n h2_init = tf.placeholder(tf.float32, shape=[batch_size, dec_units])\n c2_init = tf.placeholder(tf.float32, shape=[batch_size, dec_units])\n\n in_mels = mels[:-1, :, :]\n in_mel_mask = mel_mask[:-1]\n out_mels = mels[1:, :, :]\n out_mel_mask = mel_mask[1:]\n\n projmel1 = Linear([in_mels], [output_size], prenet_units,\n dropout_flag_prob_keep=prenet_dropout, name=\"prenet1\",\n random_state=random_state)\n projmel2 = Linear([projmel1], [prenet_units], prenet_units,\n dropout_flag_prob_keep=prenet_dropout, name=\"prenet2\",\n random_state=random_state)\n\n text_char_e, t_c_emb = Embedding(text, vocabulary_size, emb_dim, random_state=random_state,\n name=\"text_char_emb\")\n #text_phone_e, t_p_emb = Embedding(text, vocabulary_size, emb_dim, random_state=random_state,\n # name=\"text_phone_emb\")\n\n #text_e = (1. - mask) * text_char_e + mask * text_phone_e\n text_e = text_char_e\n\n # masks are either 0 or 1... use embed + voc size of two so that text and mask embs have same size / same impact on the repr\n #mask_e, m_emb = Embedding(mask, 2, emb_dim, random_state=random_state,\n # name=\"mask_emb\")\n conv_text = SequenceConv1dStack([text_e], [emb_dim], n_filts, bn_flag,\n n_stacks=n_stacks,\n kernel_sizes=[(1, 1), (3, 3), (5, 5)],\n name=\"enc_conv1\", random_state=random_state)\n\n # text_mask and mask_mask should be the same, doesn't matter which one we use\n bitext = BiLSTMLayer([conv_text], [n_filts],\n enc_units,\n input_mask=text_mask,\n name=\"encode_bidir\",\n init=rnn_init,\n random_state=random_state)\n\n\n def step(inp_t, inp_mask_t,\n corr_inp_t,\n att_w_tm1, att_k_tm1, att_h_tm1, att_c_tm1,\n h1_tm1, c1_tm1, h2_tm1, c2_tm1):\n\n o = GaussianAttentionCell([corr_inp_t], [prenet_units],\n (att_h_tm1, att_c_tm1),\n att_k_tm1,\n bitext,\n 2 * enc_units,\n dec_units,\n att_w_tm1,\n input_mask=inp_mask_t,\n conditioning_mask=text_mask,\n #attention_scale=1. 
/ 10.,\n attention_scale=1.,\n step_op=\"softplus\",\n name=\"att\",\n random_state=random_state,\n cell_dropout=1.,#cell_dropout,\n init=rnn_init)\n att_w_t, att_k_t, att_phi_t, s = o\n att_h_t = s[0]\n att_c_t = s[1]\n\n output, s = LSTMCell([corr_inp_t, att_w_t, att_h_t],\n [prenet_units, 2 * enc_units, dec_units],\n h1_tm1, c1_tm1, dec_units,\n input_mask=inp_mask_t,\n random_state=random_state,\n cell_dropout=cell_dropout,\n name=\"rnn1\", init=rnn_init)\n h1_t = s[0]\n c1_t = s[1]\n\n output, s = LSTMCell([corr_inp_t, att_w_t, h1_t],\n [prenet_units, 2 * enc_units, dec_units],\n h2_tm1, c2_tm1, dec_units,\n input_mask=inp_mask_t,\n random_state=random_state,\n cell_dropout=cell_dropout,\n name=\"rnn2\", init=rnn_init)\n h2_t = s[0]\n c2_t = s[1]\n return output, att_w_t, att_k_t, att_phi_t, att_h_t, att_c_t, h1_t, c1_t, h2_t, c2_t\n\n r = scan(step,\n [in_mels, in_mel_mask, projmel2],\n [None, att_w_init, att_k_init, None, att_h_init, att_c_init,\n h1_init, c1_init, h2_init, c2_init])\n output = r[0]\n att_w = r[1]\n att_k = r[2]\n att_phi = r[3]\n att_h = r[4]\n att_c = r[5]\n h1 = r[6]\n c1 = r[7]\n h2 = r[8]\n c2 = r[9]\n\n pred = Linear([output], [dec_units], output_size, name=\"out_proj\", random_state=random_state)\n \"\"\"\n mix, means, lins = DiscreteMixtureOfLogistics([proj], [output_size], n_output_channels=1,\n name=\"dml\", random_state=random_state)\n cc = DiscreteMixtureOfLogisticsCost(mix, means, lins, out_mels, 256)\n \"\"\"\n\n # correct masking\n cc = (pred - out_mels) ** 2\n #cc = out_mel_mask[..., None] * cc\n #loss = tf.reduce_sum(tf.reduce_sum(cc, axis=-1)) / tf.reduce_sum(out_mel_mask)\n loss = tf.reduce_mean(tf.reduce_sum(cc, axis=-1))\n\n learning_rate = 0.0001\n #steps = tf.Variable(0.)\n #learning_rate = tf.train.exponential_decay(0.001, steps, staircase=True,\n # decay_steps=50000, decay_rate=0.5)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, use_locking=True)\n grad, var = zip(*optimizer.compute_gradients(loss))\n grad, _ = tf.clip_by_global_norm(grad, 10.)\n #train_step = optimizer.apply_gradients(zip(grad, var), global_step=steps)\n train_step = optimizer.apply_gradients(zip(grad, var))\n\n things_names = [\"mels\",\n \"mel_mask\",\n \"in_mels\",\n \"in_mel_mask\",\n \"out_mels\",\n \"out_mel_mask\",\n \"text\",\n \"text_mask\",\n #\"mask\",\n #\"mask_mask\",\n \"bias\",\n \"cell_dropout\",\n \"prenet_dropout\",\n \"bn_flag\",\n \"pred\",\n #\"mix\", \"means\", \"lins\",\n \"att_w_init\",\n \"att_k_init\",\n \"att_h_init\",\n \"att_c_init\",\n \"h1_init\",\n \"c1_init\",\n \"h2_init\",\n \"c2_init\",\n \"att_w\",\n \"att_k\",\n \"att_phi\",\n \"att_h\",\n \"att_c\",\n \"h1\",\n \"c1\",\n \"h2\",\n \"c2\",\n \"loss\",\n \"train_step\",\n \"learning_rate\"]\n things_tf = [eval(name) for name in things_names]\n for tn, tt in zip(things_names, things_tf):\n graph.add_to_collection(tn, tt)\n train_model = namedtuple('Model', things_names)(*things_tf)\n return graph, train_model\n\ng, vs = create_graph()\n\natt_w_init = np.zeros((batch_size, 2 * enc_units))\natt_k_init = np.zeros((batch_size, window_mixtures))\natt_h_init = np.zeros((batch_size, dec_units))\natt_c_init = np.zeros((batch_size, dec_units))\nh1_init = np.zeros((batch_size, dec_units))\nc1_init = np.zeros((batch_size, dec_units))\nh2_init = np.zeros((batch_size, dec_units))\nc2_init = np.zeros((batch_size, dec_units))\n\nstateful_args = [att_w_init,\n att_k_init,\n att_h_init,\n att_c_init,\n h1_init,\n c1_init,\n h2_init,\n c2_init]\nstep_count = 0\ndef loop(sess, itr, 
extras, stateful_args):\n \"\"\"\n global step_count\n global noise_scale\n step_count += 1\n if step_count > 10000:\n step_count = 0\n if noise_scale == 2:\n noise_scale = 1.\n else:\n noise_scale = noise_scale - 2.\n if noise_scale < .5:\n noise_scale = .5\n \"\"\"\n mels, mel_mask, text, text_mask, mask, mask_mask, reset = itr.next_masked_batch()\n in_m = mels[:-1]\n in_mel_mask = mel_mask[:-1]\n\n #noise_block = np.clip(random_state.randn(*in_m.shape), -6, 6)\n #in_m = in_m + noise_scale * noise_block\n\n out_m = mels[1:]\n out_mel_mask = mel_mask[1:]\n\n att_w_init = stateful_args[0]\n att_k_init = stateful_args[1]\n att_h_init = stateful_args[2]\n att_c_init = stateful_args[3]\n h1_init = stateful_args[4]\n c1_init = stateful_args[5]\n h2_init = stateful_args[6]\n c2_init = stateful_args[7]\n\n att_w_init *= reset\n att_k_init *= reset\n att_h_init *= reset\n att_c_init *= reset\n h1_init *= reset\n c1_init *= reset\n h2_init *= reset\n c2_init *= reset\n\n feed = {\n vs.in_mels: in_m,\n vs.in_mel_mask: in_mel_mask,\n vs.out_mels: out_m,\n vs.out_mel_mask: out_mel_mask,\n vs.bn_flag: 0.,\n vs.text: text,\n vs.text_mask: text_mask,\n #vs.mask: mask,\n #vs.mask_mask: mask_mask,\n vs.att_w_init: att_w_init,\n vs.att_k_init: att_k_init,\n vs.att_h_init: att_h_init,\n vs.att_c_init: att_c_init,\n vs.h1_init: h1_init,\n vs.c1_init: c1_init,\n vs.h2_init: h2_init,\n vs.c2_init: c2_init}\n outs = [vs.att_w, vs.att_k,\n vs.att_h, vs.att_c,\n vs.h1, vs.c1, vs.h2, vs.c2,\n vs.att_phi,\n vs.loss, vs.train_step]\n\n r = sess.run(outs, feed_dict=feed)\n\n att_w_np = r[0]\n att_k_np = r[1]\n att_h_np = r[2]\n att_c_np = r[3]\n h1_np = r[4]\n c1_np = r[5]\n h2_np = r[6]\n c2_np = r[7]\n att_phi_np = r[8]\n l = r[-2]\n _ = r[-1]\n\n # set next inits\n att_w_init = att_w_np[-1]\n att_k_init = att_k_np[-1]\n att_h_init = att_h_np[-1]\n att_c_init = att_c_np[-1]\n h1_init = h1_np[-1]\n c1_init = c1_np[-1]\n h2_init = h2_np[-1]\n c2_init = c2_np[-1]\n\n stateful_args = [att_w_init,\n att_k_init,\n att_h_init,\n att_c_init,\n h1_init,\n c1_init,\n h2_init,\n c2_init]\n return l, None, stateful_args\n\nwith tf.Session(graph=g) as sess:\n run_loop(sess,\n loop, train_itr,\n loop, train_itr,\n n_steps=1000000,\n n_train_steps_per=1000,\n train_stateful_args=stateful_args,\n n_valid_steps_per=0,\n valid_stateful_args=stateful_args)\n" ]
[ [ "tensorflow.Graph", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.placeholder", "tensorflow.ones", "tensorflow.clip_by_global_norm", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "tensorflow.set_random_seed", "numpy.random.RandomState", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
lgarrison/asdf
[ "8d1dd589d1fb1d9c286ade7ba787bd95c7e5e88f", "a28746cd077d5a4f12553fe13d8e1ecc96354cba" ]
[ "asdf/tests/test_compression.py", "asdf/tests/test_schema.py" ]
[ "import io\nimport os\n\nimport numpy as np\n\nimport pytest\n\nimport asdf\nfrom asdf import compression\nfrom asdf import generic_io\n\nfrom ..tests import helpers\n\n\ndef _get_large_tree():\n np.random.seed(0)\n x = np.random.rand(128, 128)\n tree = {\n 'science_data': x,\n }\n return tree\n\n\ndef _get_sparse_tree():\n np.random.seed(0)\n arr = np.zeros((128, 128))\n for x, y, z in np.random.rand(64, 3):\n arr[int(x*127), int(y*127)] = z\n arr[0, 0] = 5.0\n tree = {'science_data': arr}\n return tree\n\n\ndef _roundtrip(tmpdir, tree, compression=None,\n write_options={}, read_options={}):\n tmpfile = os.path.join(str(tmpdir), 'test.asdf')\n\n ff = asdf.AsdfFile(tree)\n ff.set_array_compression(tree['science_data'], compression)\n ff.write_to(tmpfile, **write_options)\n\n with asdf.open(tmpfile, mode=\"rw\") as ff:\n ff.update(**write_options)\n\n with asdf.open(tmpfile, **read_options) as ff:\n helpers.assert_tree_match(tree, ff.tree)\n\n # Also test saving to a buffer\n buff = io.BytesIO()\n\n ff = asdf.AsdfFile(tree)\n ff.set_array_compression(tree['science_data'], compression)\n ff.write_to(buff, **write_options)\n\n buff.seek(0)\n with asdf.open(buff, **read_options) as ff:\n helpers.assert_tree_match(tree, ff.tree)\n\n # Test saving to a non-seekable buffer\n buff = io.BytesIO()\n\n ff = asdf.AsdfFile(tree)\n ff.set_array_compression(tree['science_data'], compression)\n ff.write_to(generic_io.OutputStream(buff), **write_options)\n\n buff.seek(0)\n with asdf.open(generic_io.InputStream(buff), **read_options) as ff:\n helpers.assert_tree_match(tree, ff.tree)\n\n return ff\n\n\ndef test_invalid_compression():\n tree = _get_large_tree()\n ff = asdf.AsdfFile(tree)\n with pytest.raises(ValueError):\n ff.set_array_compression(tree['science_data'], 'foo')\n with pytest.raises(ValueError):\n compression._get_decoder('foo')\n with pytest.raises(ValueError):\n compression._get_encoder('foo')\n\n\ndef test_get_compressed_size():\n assert compression.get_compressed_size(b'0' * 1024, 'zlib') < 1024\n\n\ndef test_decompress_too_long_short():\n fio = io.BytesIO()\n compression.compress(fio, b'0' * 1024, 'zlib')\n size = fio.tell()\n fio.seek(0)\n fio.read_blocks = lambda us: [fio.read(us)]\n compression.decompress(fio, size, 1024, 'zlib')\n fio.seek(0)\n with pytest.raises(ValueError):\n compression.decompress(fio, size, 1025, 'zlib')\n fio.seek(0)\n with pytest.raises(ValueError):\n compression.decompress(fio, size, 1023, 'zlib')\n\n\ndef test_zlib(tmpdir):\n tree = _get_large_tree()\n\n _roundtrip(tmpdir, tree, 'zlib')\n\n\ndef test_bzp2(tmpdir):\n tree = _get_large_tree()\n\n _roundtrip(tmpdir, tree, 'bzp2')\n\n\ndef test_lz4(tmpdir):\n pytest.importorskip('lz4')\n tree = _get_large_tree()\n\n _roundtrip(tmpdir, tree, 'lz4')\n\n\ndef test_recompression(tmpdir):\n tree = _get_large_tree()\n tmpfile = os.path.join(str(tmpdir), 'test1.asdf')\n afile = asdf.AsdfFile(tree)\n afile.write_to(tmpfile, all_array_compression='zlib')\n afile.close()\n afile = asdf.open(tmpfile)\n tmpfile = os.path.join(str(tmpdir), 'test2.asdf')\n afile.write_to(tmpfile, all_array_compression='bzp2')\n afile.close()\n afile = asdf.open(tmpfile)\n helpers.assert_tree_match(tree, afile.tree)\n afile.close()\n\n\ndef test_input(tmpdir):\n tree = _get_large_tree()\n tmpfile = os.path.join(str(tmpdir), 'test1.asdf')\n afile = asdf.AsdfFile(tree)\n afile.write_to(tmpfile, all_array_compression='zlib')\n afile.close()\n afile = asdf.open(tmpfile)\n tmpfile = os.path.join(str(tmpdir), 'test2.asdf')\n afile.write_to(tmpfile)\n 
afile.close()\n afile = asdf.open(tmpfile)\n helpers.assert_tree_match(tree, afile.tree)\n assert afile.get_array_compression(afile.tree['science_data']) == 'zlib'\n afile.close()\n\n\ndef test_none(tmpdir):\n\n tree = _get_large_tree()\n\n tmpfile1 = os.path.join(str(tmpdir), 'test1.asdf')\n with asdf.AsdfFile(tree) as afile:\n afile.write_to(tmpfile1)\n\n tmpfile2 = os.path.join(str(tmpdir), 'test2.asdf')\n with asdf.open(tmpfile1) as afile:\n assert afile.get_array_compression(afile.tree['science_data']) is None\n afile.write_to(tmpfile2, all_array_compression='zlib')\n assert afile.get_array_compression(afile.tree['science_data']) == 'zlib'\n\n with asdf.open(tmpfile2) as afile:\n afile.write_to(tmpfile1, all_array_compression=None)\n\n with asdf.open(tmpfile1) as afile:\n helpers.assert_tree_match(tree, afile.tree)\n assert afile.get_array_compression(afile.tree['science_data']) is None\n\n\ndef test_set_array_compression(tmpdir):\n\n tmpfile = os.path.join(str(tmpdir), 'compressed.asdf')\n\n zlib_data = np.array([x for x in range(1000)])\n bzp2_data = np.array([x for x in range(1000)])\n\n tree = dict(zlib_data=zlib_data, bzp2_data=bzp2_data)\n with asdf.AsdfFile(tree) as af_out:\n af_out.set_array_compression(zlib_data, 'zlib')\n af_out.set_array_compression(bzp2_data, 'bzp2')\n af_out.write_to(tmpfile)\n\n with asdf.open(tmpfile) as af_in:\n assert af_in.get_array_compression(af_in.tree['zlib_data']) == 'zlib'\n assert af_in.get_array_compression(af_in.tree['bzp2_data']) == 'bzp2'\n", "import io\nfrom datetime import datetime\n\nfrom jsonschema import ValidationError\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nimport asdf\nfrom asdf import get_config, config_context\nfrom asdf import extension\nfrom asdf import resolver\nfrom asdf import schema\nfrom asdf import types\nfrom asdf import util\nfrom asdf import yamlutil\nfrom asdf import tagged\nfrom asdf.tests import helpers, CustomExtension\nfrom asdf.exceptions import AsdfWarning, AsdfConversionWarning, AsdfDeprecationWarning\n\n\nclass TagReferenceType(types.CustomType):\n \"\"\"\n This class is used by several tests below for validating foreign type\n references in schemas and ASDF files.\n \"\"\"\n name = 'tag_reference'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n @classmethod\n def from_tree(cls, tree, ctx):\n node = {}\n node['name'] = tree['name']\n node['things'] = tree['things']\n return node\n\n\ndef test_tagging_scalars():\n pytest.importorskip('astropy', '3.0.0')\n from astropy import units as u\n\n yaml = \"\"\"\nunit: !unit/unit-1.0.0\n m\nnot_unit:\n m\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff) as ff:\n assert isinstance(ff.tree['unit'], u.UnitBase)\n assert not isinstance(ff.tree['not_unit'], u.UnitBase)\n assert isinstance(ff.tree['not_unit'], str)\n\n assert ff.tree == {\n 'unit': u.m,\n 'not_unit': 'm'\n }\n\n\ndef test_read_json_schema():\n \"\"\"Pytest to make sure reading JSON schemas succeeds.\n\n This was known to fail on Python 3.5 See issue #314 at\n https://github.com/asdf-format/asdf/issues/314 for more details.\n \"\"\"\n json_schema = helpers.get_test_data_path('example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: 
\"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"../core/ndarray-1.0.0\"\n\nrequired: [foobar]\n...\n \"\"\"\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_full_tag(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: \"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"tag:stsci.edu:asdf/core/ndarray-1.0.0\"\n\nrequired: [foobar]\n...\n \"\"\"\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_tag_address(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n%TAG !asdf! tag:stsci.edu:asdf/\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: \"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"http://stsci.edu/schemas/asdf/core/ndarray-1.0.0\"\n\nrequired: [foobar]\n...\n \"\"\"\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_file_url(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n%TAG !asdf! tag:stsci.edu:asdf/\n---\n$schema: \"http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\"\nid: \"http://stsci.edu/schemas/asdf/nugatory/nugatory-1.0.0\"\ntag: \"tag:stsci.edu:asdf/nugatory/nugatory-1.0.0\"\n\ntype: object\nproperties:\n foobar:\n $ref: \"{}\"\n\nrequired: [foobar]\n...\n \"\"\".format(extension.get_default_resolver()('tag:stsci.edu:asdf/core/ndarray-1.0.0'))\n schema_path = tmpdir.join('nugatory.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path), resolve_references=True)\n schema.check_schema(schema_tree)\n\n\ndef test_load_schema_with_asdf_uri_scheme():\n subschema_content=\"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/bar\n\nbar:\n type: string\n...\n\"\"\"\n content = \"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/foo\n\ndefinitions:\n local_bar:\n type: string\n\ntype: object\nproperties:\n bar:\n $ref: asdf://somewhere.org/schemas/bar#/bar\n local_bar:\n $ref: '#/definitions/local_bar'\n...\n\"\"\"\n with asdf.config_context() as config:\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/foo\": content})\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/bar\": subschema_content})\n\n schema_tree = schema.load_schema(\"asdf://somewhere.org/schemas/foo\")\n instance = {\"bar\": \"baz\", \"local_bar\": \"foz\"}\n schema.validate(instance, schema=schema_tree)\n with pytest.raises(ValidationError):\n schema.validate({\"bar\": 12}, schema=schema_tree)\n\n\ndef test_schema_caching():\n # Make sure that if we request the same URL, we get a different object\n # (despite the caching internal to load_schema). 
Changes to a schema\n # dict should not impact other uses of that schema.\n\n s1 = schema.load_schema(\n 'http://stsci.edu/schemas/asdf/core/asdf-1.0.0')\n s2 = schema.load_schema(\n 'http://stsci.edu/schemas/asdf/core/asdf-1.0.0')\n assert s1 is not s2\n\n\ndef test_asdf_file_resolver_hashing():\n # Confirm that resolvers from distinct AsdfFile instances\n # hash to the same value (this allows schema caching to function).\n a1 = asdf.AsdfFile()\n a2 = asdf.AsdfFile()\n\n assert hash(a1.resolver) == hash(a2.resolver)\n assert a1.resolver == a2.resolver\n\n\ndef test_load_schema_from_resource_mapping():\n content = \"\"\"\nid: http://somewhere.org/schemas/razmataz-1.0.0\ntype: object\nproperties:\n foo:\n type: string\n bar:\n type: boolean\n\"\"\".encode(\"utf-8\")\n\n get_config().add_resource_mapping({\"http://somewhere.org/schemas/razmataz-1.0.0\": content})\n\n s = schema.load_schema(\"http://somewhere.org/schemas/razmataz-1.0.0\")\n\n assert s[\"id\"] == \"http://somewhere.org/schemas/razmataz-1.0.0\"\n\n\ndef test_flow_style():\n class CustomFlowStyleType(dict, types.CustomType):\n name = 'custom_flow'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class CustomFlowStyleExtension(CustomExtension):\n @property\n def types(self):\n return [CustomFlowStyleType]\n\n tree = {\n 'custom_flow': CustomFlowStyleType({'a': 42, 'b': 43})\n }\n\n buff = io.BytesIO()\n ff = asdf.AsdfFile(tree, extensions=CustomFlowStyleExtension())\n ff.write_to(buff)\n\n assert b' a: 42\\n b: 43' in buff.getvalue()\n\n\ndef test_style():\n class CustomStyleType(str, types.CustomType):\n name = 'custom_style'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class CustomStyleExtension(CustomExtension):\n @property\n def types(self):\n return [CustomStyleType]\n\n tree = {\n 'custom_style': CustomStyleType(\"short\")\n }\n\n buff = io.BytesIO()\n ff = asdf.AsdfFile(tree, extensions=CustomStyleExtension())\n ff.write_to(buff)\n\n assert b'|-\\n short\\n' in buff.getvalue()\n\n\ndef test_property_order():\n tree = {'foo': np.ndarray([1, 2, 3])}\n\n buff = io.BytesIO()\n ff = asdf.AsdfFile(tree)\n ff.write_to(buff)\n\n ndarray_schema = schema.load_schema(\n 'http://stsci.edu/schemas/asdf/core/ndarray-1.0.0')\n property_order = ndarray_schema['anyOf'][1]['propertyOrder']\n\n last_index = 0\n for prop in property_order:\n index = buff.getvalue().find(prop.encode('utf-8') + b':')\n if index != -1:\n assert index > last_index\n last_index = index\n\n\ndef test_invalid_nested():\n class CustomType(str, types.CustomType):\n name = 'custom'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class CustomTypeExtension(CustomExtension):\n @property\n def types(self):\n return [CustomType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/custom-1.0.0>\n foo\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n # This should cause a warning but not an error because without explicitly\n # providing an extension, our custom type will not be recognized and will\n # simply be converted to a raw type.\n with pytest.warns(AsdfConversionWarning, match=\"tag:nowhere.org:custom/custom-1.0.0\"):\n with asdf.open(buff):\n pass\n\n buff.seek(0)\n with pytest.raises(ValidationError):\n with asdf.open(buff, extensions=[CustomTypeExtension()]):\n pass\n\n # Make sure tags get validated inside of other tags that know\n # nothing about them.\n yaml = \"\"\"\narray: !core/ndarray-1.0.0\n data: [0, 1, 2]\n custom: !<tag:nowhere.org:custom/custom-1.0.0>\n foo\n 
\"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.raises(ValidationError):\n with asdf.open(buff, extensions=[CustomTypeExtension()]):\n pass\n\n\ndef test_invalid_schema():\n s = {'type': 'integer'}\n schema.check_schema(s)\n\n s = {'type': 'foobar'}\n with pytest.raises(ValidationError):\n schema.check_schema(s)\n\n\ndef test_defaults():\n s = {\n 'type': 'object',\n 'properties': {\n 'a': {\n 'type': 'integer',\n 'default': 42\n }\n }\n }\n\n t = {}\n\n cls = schema._create_validator(schema.FILL_DEFAULTS)\n validator = cls(s)\n validator.validate(t, _schema=s)\n\n assert t['a'] == 42\n\n cls = schema._create_validator(schema.REMOVE_DEFAULTS)\n validator = cls(s)\n validator.validate(t, _schema=s)\n\n assert t == {}\n\n\ndef test_default_check_in_schema():\n s = {\n 'type': 'object',\n 'properties': {\n 'a': {\n 'type': 'integer',\n 'default': 'foo'\n }\n }\n }\n\n with pytest.raises(ValidationError):\n schema.check_schema(s)\n\n schema.check_schema(s, validate_default=False)\n\n\ndef test_check_complex_default():\n default_software = tagged.TaggedDict(\n {\"name\": \"asdf\", \"version\": \"2.7.0\"},\n \"tag:stsci.edu/asdf/core/software-1.0.0\"\n )\n\n s = {\n 'type': 'object',\n 'properties': {\n 'a': {\n 'type': 'object',\n 'tag': 'tag:stsci.edu/asdf/core/software-1.0.0',\n 'default': default_software\n }\n }\n }\n\n schema.check_schema(s)\n\n s['properties']['a']['tag'] = 'tag:stsci.edu/asdf/core/ndarray-1.0.0'\n with pytest.raises(ValidationError):\n schema.check_schema(s)\n\n\ndef test_fill_and_remove_defaults():\n class DefaultType(dict, types.CustomType):\n name = 'default'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class DefaultTypeExtension(CustomExtension):\n @property\n def types(self):\n return [DefaultType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/default-1.0.0>\n b: {}\n d: {}\n g: {}\n j:\n l: 362\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n assert 'a' in ff.tree['custom']\n assert ff.tree['custom']['a'] == 42\n assert ff.tree['custom']['b']['c'] == 82\n # allOf combiner should fill defaults from all subschemas:\n assert ff.tree['custom']['d']['e'] == 122\n assert ff.tree['custom']['d']['f'] == 162\n # anyOf combiners should be ignored:\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n # oneOf combiners should be ignored:\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n\n buff.seek(0)\n with pytest.warns(AsdfDeprecationWarning, match='do_not_fill_defaults'):\n with asdf.open(buff, extensions=[DefaultTypeExtension()],\n do_not_fill_defaults=True) as ff:\n assert 'a' not in ff.tree['custom']\n assert 'c' not in ff.tree['custom']['b']\n assert 'e' not in ff.tree['custom']['d']\n assert 'f' not in ff.tree['custom']['d']\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n ff.fill_defaults()\n assert 'a' in ff.tree['custom']\n assert ff.tree['custom']['a'] == 42\n assert 'c' in ff.tree['custom']['b']\n assert ff.tree['custom']['b']['c'] == 82\n assert ff.tree['custom']['b']['c'] == 82\n assert ff.tree['custom']['d']['e'] == 122\n assert ff.tree['custom']['d']['f'] == 162\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n 
ff.remove_defaults()\n assert 'a' not in ff.tree['custom']\n assert 'c' not in ff.tree['custom']['b']\n assert 'e' not in ff.tree['custom']['d']\n assert 'f' not in ff.tree['custom']['d']\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n\n buff.seek(0)\n with config_context() as config:\n config.legacy_fill_schema_defaults = False\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n assert 'a' not in ff.tree['custom']\n assert 'c' not in ff.tree['custom']['b']\n assert 'e' not in ff.tree['custom']['d']\n assert 'f' not in ff.tree['custom']['d']\n assert 'h' not in ff.tree['custom']['g']\n assert 'i' not in ff.tree['custom']['g']\n assert 'k' not in ff.tree['custom']['j']\n assert ff.tree['custom']['j']['l'] == 362\n\n\ndef test_one_of():\n \"\"\"\n Covers https://github.com/asdf-format/asdf/issues/809\n \"\"\"\n class OneOfType(dict, types.CustomType):\n name = 'one_of'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n class OneOfTypeExtension(CustomExtension):\n @property\n def types(self):\n return [OneOfType]\n\n yaml = \"\"\"\none_of: !<tag:nowhere.org:custom/one_of-1.0.0>\n value: foo\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=[OneOfTypeExtension()]) as ff:\n assert ff['one_of']['value'] == 'foo'\n\n\ndef test_tag_reference_validation():\n class DefaultTypeExtension(CustomExtension):\n @property\n def types(self):\n return [TagReferenceType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/tag_reference-1.0.0>\n name:\n \"Something\"\n things: !core/ndarray-1.0.0\n data: [1, 2, 3]\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n custom = ff.tree['custom']\n assert custom['name'] == \"Something\"\n assert_array_equal(custom['things'], [1, 2, 3])\n\n\ndef test_foreign_tag_reference_validation():\n class ForeignTagReferenceType(types.CustomType):\n name = 'foreign_tag_reference'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n @classmethod\n def from_tree(cls, tree, ctx):\n node = {}\n node['a'] = tree['a']\n node['b'] = tree['b']\n return node\n\n class ForeignTypeExtension(CustomExtension):\n @property\n def types(self):\n return [TagReferenceType, ForeignTagReferenceType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/foreign_tag_reference-1.0.0>\n a: !<tag:nowhere.org:custom/tag_reference-1.0.0>\n name:\n \"Something\"\n things: !core/ndarray-1.0.0\n data: [1, 2, 3]\n b: !<tag:nowhere.org:custom/tag_reference-1.0.0>\n name:\n \"Anything\"\n things: !core/ndarray-1.0.0\n data: [4, 5, 6]\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with asdf.open(buff, extensions=ForeignTypeExtension()) as ff:\n a = ff.tree['custom']['a']\n b = ff.tree['custom']['b']\n assert a['name'] == 'Something'\n assert_array_equal(a['things'], [1, 2, 3])\n assert b['name'] == 'Anything'\n assert_array_equal(b['things'], [4, 5, 6])\n\n\ndef test_self_reference_resolution():\n r = resolver.Resolver(CustomExtension().url_mapping, 'url')\n s = schema.load_schema(\n helpers.get_test_data_path('self_referencing-1.0.0.yaml'),\n resolver=r,\n resolve_references=True)\n assert '$ref' not in repr(s)\n assert s['anyOf'][1] == s['anyOf'][0]\n\n\ndef test_schema_resolved_via_entry_points():\n \"\"\"Test that entry points mappings to core schema works\"\"\"\n r = extension.get_default_resolver()\n tag = 
types.format_tag('stsci.edu', 'asdf', '1.0.0', 'fits/fits')\n url = extension.default_extensions.extension_list.tag_mapping(tag)\n\n s = schema.load_schema(url, resolver=r, resolve_references=True)\n assert tag in repr(s)\n\n\[email protected]('use_numpy', [False, True])\ndef test_large_literals(use_numpy):\n\n largeval = 1 << 53\n if use_numpy:\n largeval = np.uint64(largeval)\n\n tree = {\n 'large_int': largeval,\n }\n\n with pytest.raises(ValidationError):\n asdf.AsdfFile(tree)\n\n tree = {\n 'large_list': [largeval],\n }\n\n with pytest.raises(ValidationError):\n asdf.AsdfFile(tree)\n\n tree = {\n largeval: 'large_key',\n }\n\n with pytest.raises(ValidationError):\n asdf.AsdfFile(tree)\n\n tree = {\n 'large_array': np.array([largeval], np.uint64)\n }\n\n ff = asdf.AsdfFile(tree)\n buff = io.BytesIO()\n ff.write_to(buff)\n\n ff.set_array_storage(ff.tree['large_array'], 'inline')\n buff = io.BytesIO()\n with pytest.raises(ValidationError):\n ff.write_to(buff)\n print(buff.getvalue())\n\n\ndef test_read_large_literal():\n value = 1 << 64\n yaml = f\"integer: {value}\"\n\n buff = helpers.yaml_to_asdf(yaml)\n\n with pytest.warns(AsdfWarning, match=\"Invalid integer literal value\"):\n with asdf.open(buff) as af:\n assert af['integer'] == value\n\n yaml = f\"{value}: foo\"\n\n buff = helpers.yaml_to_asdf(yaml)\n\n with pytest.warns(AsdfWarning, match=\"Invalid integer literal value\"):\n with asdf.open(buff) as af:\n assert af[value] == \"foo\"\n\n\[email protected](\n \"version,keys\",\n [\n (\"1.6.0\", [\"foo\", 42, True]),\n (\"1.5.0\", [\"foo\", 42, True, 3.14159, datetime.now(), b\"foo\", None]),\n ]\n)\ndef test_mapping_supported_key_types(keys, version):\n for key in keys:\n with helpers.assert_no_warnings():\n af = asdf.AsdfFile({key: \"value\"}, version=version)\n buff = io.BytesIO()\n af.write_to(buff)\n buff.seek(0)\n with asdf.open(buff) as af:\n assert af[key] == \"value\"\n\n\[email protected](\n \"version,keys\",\n [\n (\"1.6.0\", [3.14159, datetime.now(), b\"foo\", None, (\"foo\", \"bar\")]),\n ]\n)\ndef test_mapping_unsupported_key_types(keys, version):\n for key in keys:\n with pytest.raises(ValidationError, match=\"Mapping key .* is not permitted\"):\n af = asdf.AsdfFile({key: \"value\"}, version=version)\n buff = io.BytesIO()\n af.write_to(buff)\n\n\ndef test_nested_array():\n s = {\n 'type': 'object',\n 'properties': {\n 'stuff': {\n 'type': 'array',\n 'items': {\n 'type': 'array',\n 'items': [\n { 'type': 'integer' },\n { 'type': 'string' },\n { 'type': 'number' },\n ],\n 'minItems': 3,\n 'maxItems': 3\n }\n }\n }\n }\n\n good = dict(stuff=[[1, 'hello', 2], [4, 'world', 9.7]])\n schema.validate(good, schema=s)\n\n bads = [\n dict(stuff=[[1, 2, 3]]),\n dict(stuff=[12,'dldl']),\n dict(stuff=[[12, 'dldl']]),\n dict(stuff=[[1, 'hello', 2], [4, 5]]),\n dict(stuff=[[1, 'hello', 2], [4, 5, 6]])\n ]\n\n for b in bads:\n with pytest.raises(ValidationError):\n schema.validate(b, schema=s)\n\n\ndef test_nested_array_yaml(tmpdir):\n schema_def = \"\"\"\n%YAML 1.1\n---\ntype: object\nproperties:\n stuff:\n type: array\n items:\n type: array\n items:\n - type: integer\n - type: string\n - type: number\n minItems: 3\n maxItems: 3\n...\n \"\"\"\n schema_path = tmpdir.join('nested.yaml')\n schema_path.write(schema_def.encode())\n\n schema_tree = schema.load_schema(str(schema_path))\n schema.check_schema(schema_tree)\n\n good = dict(stuff=[[1, 'hello', 2], [4, 'world', 9.7]])\n schema.validate(good, schema=schema_tree)\n\n bads = [\n dict(stuff=[[1, 2, 3]]),\n dict(stuff=[12,'dldl']),\n 
dict(stuff=[[12, 'dldl']]),\n dict(stuff=[[1, 'hello', 2], [4, 5]]),\n dict(stuff=[[1, 'hello', 2], [4, 5, 6]])\n ]\n\n for b in bads:\n with pytest.raises(ValidationError):\n schema.validate(b, schema=schema_tree)\n\n\ndef test_type_missing_dependencies():\n pytest.importorskip('astropy', '3.0.0')\n\n class MissingType(types.CustomType):\n name = 'missing'\n organization = 'nowhere.org'\n version = (1, 1, 0)\n standard = 'custom'\n types = ['asdfghjkl12345.foo']\n requires = [\"ASDFGHJKL12345\"]\n\n class DefaultTypeExtension(CustomExtension):\n @property\n def types(self):\n return [MissingType]\n\n yaml = \"\"\"\ncustom: !<tag:nowhere.org:custom/missing-1.1.0>\n b: {foo: 42}\n \"\"\"\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.warns(AsdfConversionWarning, match=\"Failed to convert tag:nowhere.org:custom/missing-1.1.0\"):\n with asdf.open(buff, extensions=[DefaultTypeExtension()]) as ff:\n assert ff.tree['custom']['b']['foo'] == 42\n\n\ndef test_assert_roundtrip_with_extension(tmpdir):\n called_custom_assert_equal = [False]\n\n class CustomType(dict, types.CustomType):\n name = 'custom_flow'\n organization = 'nowhere.org'\n version = (1, 0, 0)\n standard = 'custom'\n\n @classmethod\n def assert_equal(cls, old, new):\n called_custom_assert_equal[0] = True\n\n class CustomTypeExtension(CustomExtension):\n @property\n def types(self):\n return [CustomType]\n\n tree = {\n 'custom': CustomType({'a': 42, 'b': 43})\n }\n\n def check(ff):\n assert isinstance(ff.tree['custom'], CustomType)\n\n with helpers.assert_no_warnings():\n helpers.assert_roundtrip_tree(\n tree, tmpdir, extensions=[CustomTypeExtension()])\n\n assert called_custom_assert_equal[0] is True\n\n\ndef test_custom_validation_bad(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree does not conform to the custom schema\n tree = {'stuff': 42, 'other_stuff': 'hello'}\n\n # Creating file without custom schema should pass\n with asdf.AsdfFile(tree) as ff:\n ff.write_to(asdf_file)\n\n # Creating file using custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path):\n pass\n\n # Opening file without custom schema should pass\n with asdf.open(asdf_file):\n pass\n\n # Opening file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_good(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'foo': {'x': 42, 'y': 10},\n 'bar': {'a': 'hello', 'b': 'banjo'}\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_pathlib(tmpdir):\n \"\"\"\n Make sure custom schema paths can be pathlib.Path objects\n\n See https://github.com/asdf-format/asdf/issues/653 for discussion.\n \"\"\"\n from pathlib import Path\n\n custom_schema_path = Path(helpers.get_test_data_path('custom_schema.yaml'))\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'foo': {'x': 42, 'y': 10},\n 'bar': {'a': 'hello', 'b': 'banjo'}\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, 
custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_definitions_good(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_definitions.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'thing': { 'biz': 'hello', 'baz': 'world' }\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_definitions_bad(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_definitions.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree does NOT conform to the custom schema\n tree = {\n 'forb': { 'biz': 'hello', 'baz': 'world' }\n }\n\n # Creating file without custom schema should pass\n with asdf.AsdfFile(tree) as ff:\n ff.write_to(asdf_file)\n\n # Creating file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path):\n pass\n\n # Opening file without custom schema should pass\n with asdf.open(asdf_file):\n pass\n\n # Opening file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_external_ref_good(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_external_ref.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree conforms to the custom schema\n tree = {\n 'foo': asdf.tags.core.Software(name=\"Microsoft Windows\", version=\"95\")\n }\n\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:\n ff.write_to(asdf_file)\n\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_custom_validation_with_external_ref_bad(tmpdir):\n custom_schema_path = helpers.get_test_data_path('custom_schema_external_ref.yaml')\n asdf_file = str(tmpdir.join('out.asdf'))\n\n # This tree does not conform to the custom schema\n tree = {\n 'foo': False\n }\n\n # Creating file without custom schema should pass\n with asdf.AsdfFile(tree) as ff:\n ff.write_to(asdf_file)\n\n # Creating file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.AsdfFile(tree, custom_schema=custom_schema_path):\n pass\n\n # Opening file without custom schema should pass\n with asdf.open(asdf_file):\n pass\n\n # Opening file with custom schema should fail\n with pytest.raises(ValidationError):\n with asdf.open(asdf_file, custom_schema=custom_schema_path):\n pass\n\n\ndef test_load_custom_schema_deprecated():\n custom_schema_path = helpers.get_test_data_path('custom_schema.yaml')\n\n with pytest.deprecated_call():\n schema.load_custom_schema(custom_schema_path)\n\n\ndef test_load_schema_resolve_local_refs_deprecated():\n custom_schema_path = helpers.get_test_data_path('custom_schema_definitions.yaml')\n\n with pytest.deprecated_call():\n schema.load_schema(custom_schema_path, resolve_local_refs=True)\n\n\ndef test_nonexistent_tag(tmpdir):\n \"\"\"\n This tests the case where a node is tagged with a type that apparently\n comes from an extension that is known, but the type itself can't be found.\n\n This could occur when a more recent version of an installed package\n provides the new type, but an older version of the package is installed.\n ASDF should still be able to open the file in this case, but it won't be\n able to restore the type.\n\n The bug that prompted this test 
results from attempting to load a schema\n file that doesn't exist, which is why this test belongs in this file.\n \"\"\"\n\n # This shouldn't ever happen, but it's a useful test case\n yaml = \"\"\"\na: !core/doesnt_exist-1.0.0\n hello\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.warns(AsdfWarning, match=\"Unable to locate schema file\"):\n with asdf.open(buff) as af:\n assert str(af['a']) == 'hello'\n\n # This is a more realistic case since we're using an external extension\n yaml = \"\"\"\na: !<tag:nowhere.org:custom/doesnt_exist-1.0.0>\n hello\n \"\"\"\n\n buff = helpers.yaml_to_asdf(yaml)\n with pytest.warns(AsdfWarning, match=\"Unable to locate schema file\"):\n with asdf.open(buff, extensions=CustomExtension()) as af:\n assert str(af['a']) == 'hello'\n\n\[email protected](\"numpy_value,valid_types\", [\n (np.str_(\"foo\"), {\"string\"}),\n (np.bytes_(\"foo\"), set()),\n (np.float16(3.14), {\"number\"}),\n (np.float32(3.14159), {\"number\"}),\n (np.float64(3.14159), {\"number\"}),\n # Evidently float128 is not available on Windows:\n (getattr(np, \"float128\", np.float64)(3.14159), {\"number\"}),\n (np.int8(42), {\"number\", \"integer\"}),\n (np.int16(42), {\"number\", \"integer\"}),\n (np.int32(42), {\"number\", \"integer\"}),\n (np.longlong(42), {\"number\", \"integer\"}),\n (np.uint8(42), {\"number\", \"integer\"}),\n (np.uint16(42), {\"number\", \"integer\"}),\n (np.uint32(42), {\"number\", \"integer\"}),\n (np.uint64(42), {\"number\", \"integer\"}),\n (np.ulonglong(42), {\"number\", \"integer\"}),\n])\ndef test_numpy_scalar_type_validation(numpy_value, valid_types):\n def _assert_validation(jsonschema_type, expected_valid):\n validator = schema.get_validator()\n try:\n validator.validate(numpy_value, _schema={\"type\": jsonschema_type})\n except ValidationError:\n valid = False\n else:\n valid = True\n\n if valid is not expected_valid:\n if expected_valid:\n description = \"valid\"\n else:\n description = \"invalid\"\n assert False, \"Expected numpy.{} to be {} against jsonschema type '{}'\".format(\n type(numpy_value).__name__, description, jsonschema_type\n )\n\n for jsonschema_type in valid_types:\n _assert_validation(jsonschema_type, True)\n\n invalid_types = {\"string\", \"number\", \"integer\", \"boolean\", \"null\", \"object\"} - valid_types\n for jsonschema_type in invalid_types:\n _assert_validation(jsonschema_type, False)\n\n\ndef test_validator_visit_repeat_nodes():\n ctx = asdf.AsdfFile()\n node = asdf.tags.core.Software(name=\"Minesweeper\")\n tree = yamlutil.custom_tree_to_tagged_tree(\n {\"node\": node, \"other_node\": node, \"nested\": {\"node\": node}},\n ctx\n )\n\n visited_nodes = []\n def _test_validator(validator, value, instance, schema):\n visited_nodes.append(instance)\n\n validator = schema.get_validator(ctx=ctx, validators=util.HashableDict(type=_test_validator))\n validator.validate(tree)\n assert len(visited_nodes) == 1\n\n visited_nodes.clear()\n validator = schema.get_validator(\n validators=util.HashableDict(type=_test_validator),\n _visit_repeat_nodes=True\n )\n validator.validate(tree)\n assert len(visited_nodes) == 3\n\n\ndef test_tag_validator():\n content=\"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/foo\ntag: asdf://somewhere.org/tags/foo\n...\n\"\"\"\n with asdf.config_context() as config:\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/foo\": content})\n\n schema_tree = schema.load_schema(\"asdf://somewhere.org/schemas/foo\")\n instance = 
tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/foo\")\n schema.validate(instance, schema=schema_tree)\n with pytest.raises(ValidationError):\n schema.validate(tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/bar\"), schema=schema_tree)\n\n content=\"\"\"%YAML 1.1\n---\n$schema: http://stsci.edu/schemas/asdf/asdf-schema-1.0.0\nid: asdf://somewhere.org/schemas/bar\ntag: asdf://somewhere.org/tags/bar-*\n...\n\"\"\"\n with asdf.config_context() as config:\n config.add_resource_mapping({\"asdf://somewhere.org/schemas/bar\": content})\n\n schema_tree = schema.load_schema(\"asdf://somewhere.org/schemas/bar\")\n instance = tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/bar-2.5\")\n schema.validate(instance, schema=schema_tree)\n with pytest.raises(ValidationError):\n schema.validate(tagged.TaggedDict(tag=\"asdf://somewhere.org/tags/foo-1.0\"), schema=schema_tree)\n" ]
[ [ "numpy.zeros", "numpy.random.rand", "numpy.random.seed" ], [ "numpy.uint32", "numpy.uint8", "numpy.float16", "numpy.int32", "numpy.int8", "numpy.ndarray", "numpy.ulonglong", "numpy.int16", "numpy.testing.assert_array_equal", "numpy.longlong", "numpy.uint64", "numpy.float64", "numpy.bytes_", "numpy.float32", "numpy.str_", "numpy.uint16", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
huy-ha/frustum-pointnets
[ "0c5b8040707e4497ee2fe7bc3445462cf31ac9e0" ]
[ "models/tf_util.py" ]
[ "\"\"\" Wrapper functions for TensorFlow layers.\n\nAuthor: Charles R. Qi\nDate: November 2017\n\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow as tf2\n\ndef _variable_on_cpu(name, shape, initializer, use_fp16=False):\n \"\"\"Helper to create a Variable stored on CPU memory.\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n Returns:\n Variable Tensor\n \"\"\"\n with tf.device(\"/cpu:0\"):\n dtype = tf.float16 if use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var\n\ndef _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):\n \"\"\"Helper to create an initialized Variable with weight decay.\n\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n use_xavier: bool, whether to use xavier initializer\n\n Returns:\n Variable Tensor\n \"\"\"\n if use_xavier:\n initializer = tf2.initializers.GlorotUniform()\n # initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=stddev)\n var = _variable_on_cpu(name, shape, initializer)\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n\ndef conv1d(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=1,\n padding='SAME',\n data_format='NHWC',\n use_xavier=True,\n stddev=1e-3,\n weight_decay=None,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" 1D convolution with non-linear operation.\n\n Args:\n inputs: 3-D tensor variable BxLxC\n num_output_channels: int\n kernel_size: int\n scope: string\n stride: int\n padding: 'SAME' or 'VALID'\n data_format: 'NHWC' or 'NCHW'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n assert(data_format=='NHWC' or data_format=='NCHW')\n if data_format == 'NHWC':\n num_in_channels = inputs.get_shape()[-1].value\n elif data_format=='NCHW':\n num_in_channels = inputs.get_shape()[1].value\n kernel_shape = [kernel_size,\n num_in_channels, num_output_channels]\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n outputs = tf.nn.conv1d(inputs, kernel,\n stride=stride,\n padding=padding,\n data_format=data_format)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)\n\n if bn:\n outputs = batch_norm_for_conv1d(outputs, is_training,\n bn_decay=bn_decay, scope='bn',\n data_format=data_format)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return outputs\n\n\n\n\ndef conv2d(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=[1, 1],\n padding='SAME',\n data_format='NHWC',\n use_xavier=True,\n stddev=1e-3,\n weight_decay=None,\n 
activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" 2D convolution with non-linear operation.\n\n Args:\n inputs: 4-D tensor variable BxHxWxC\n num_output_channels: int\n kernel_size: a list of 2 ints\n scope: string\n stride: a list of 2 ints\n padding: 'SAME' or 'VALID'\n data_format: 'NHWC' or 'NCHW'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n assert(data_format=='NHWC' or data_format=='NCHW')\n if data_format == 'NHWC':\n num_in_channels = inputs.get_shape()[-1]\n elif data_format=='NCHW':\n num_in_channels = inputs.get_shape()[1]\n kernel_shape = [kernel_h, kernel_w,\n num_in_channels, num_output_channels]\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n stride_h, stride_w = stride\n outputs = tf.nn.conv2d(inputs, kernel,\n [1, stride_h, stride_w, 1],\n padding=padding,\n data_format=data_format)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)\n\n if bn:\n outputs = batch_norm_for_conv2d(outputs, is_training,\n bn_decay=bn_decay, scope='bn',\n data_format=data_format)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return outputs\n\n\ndef conv2d_transpose(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=[1, 1],\n padding='SAME',\n use_xavier=True,\n stddev=1e-3,\n weight_decay=None,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" 2D convolution transpose with non-linear operation.\n\n Args:\n inputs: 4-D tensor variable BxHxWxC\n num_output_channels: int\n kernel_size: a list of 2 ints\n scope: string\n stride: a list of 2 ints\n padding: 'SAME' or 'VALID'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n\n Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n num_in_channels = inputs.get_shape()[-1].value\n kernel_shape = [kernel_h, kernel_w,\n num_output_channels, num_in_channels] # reversed to conv2d\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n stride_h, stride_w = stride\n \n # from slim.convolution2d_transpose\n def get_deconv_dim(dim_size, stride_size, kernel_size, padding):\n dim_size *= stride_size\n\n if padding == 'VALID' and dim_size is not None:\n dim_size += max(kernel_size - stride_size, 0)\n return dim_size\n\n # caculate output shape\n batch_size = inputs.get_shape()[0].value\n height = inputs.get_shape()[1].value\n width = inputs.get_shape()[2].value\n out_height = get_deconv_dim(height, stride_h, kernel_h, padding)\n out_width = get_deconv_dim(width, stride_w, kernel_w, padding)\n output_shape = [batch_size, out_height, out_width, num_output_channels]\n\n 
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,\n [1, stride_h, stride_w, 1],\n padding=padding)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n\n if bn:\n outputs = batch_norm_for_conv2d(outputs, is_training,\n bn_decay=bn_decay, scope='bn')\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return outputs\n\n \n\ndef conv3d(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=[1, 1, 1],\n padding='SAME',\n use_xavier=True,\n stddev=1e-3,\n weight_decay=None,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" 3D convolution with non-linear operation.\n\n Args:\n inputs: 5-D tensor variable BxDxHxWxC\n num_output_channels: int\n kernel_size: a list of 3 ints\n scope: string\n stride: a list of 3 ints\n padding: 'SAME' or 'VALID'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_d, kernel_h, kernel_w = kernel_size\n num_in_channels = inputs.get_shape()[-1].value\n kernel_shape = [kernel_d, kernel_h, kernel_w,\n num_in_channels, num_output_channels]\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n stride_d, stride_h, stride_w = stride\n outputs = tf.nn.conv3d(inputs, kernel,\n [1, stride_d, stride_h, stride_w, 1],\n padding=padding)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n \n if bn:\n outputs = batch_norm_for_conv3d(outputs, is_training,\n bn_decay=bn_decay, scope='bn')\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return outputs\n\ndef fully_connected(inputs,\n num_outputs,\n scope,\n use_xavier=True,\n stddev=1e-3,\n weight_decay=None,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" Fully connected layer with non-linear operation.\n \n Args:\n inputs: 2-D tensor BxN\n num_outputs: int\n \n Returns:\n Variable tensor of size B x num_outputs.\n \"\"\"\n with tf.variable_scope(scope) as sc:\n num_input_units = inputs.get_shape()[-1].value\n weights = _variable_with_weight_decay('weights',\n shape=[num_input_units, num_outputs],\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n outputs = tf.matmul(inputs, weights)\n biases = _variable_on_cpu('biases', [num_outputs],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n \n if bn:\n outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return outputs\n\n\ndef max_pool2d(inputs,\n kernel_size,\n scope,\n stride=[2, 2],\n padding='VALID'):\n \"\"\" 2D max pooling.\n\n Args:\n inputs: 4-D tensor BxHxWxC\n kernel_size: a list of 2 ints\n stride: a list of 2 ints\n \n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n stride_h, stride_w = stride\n outputs = tf.nn.max_pool(inputs,\n ksize=[1, kernel_h, kernel_w, 1],\n strides=[1, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\ndef avg_pool2d(inputs,\n kernel_size,\n 
scope,\n stride=[2, 2],\n padding='VALID'):\n \"\"\" 2D avg pooling.\n\n Args:\n inputs: 4-D tensor BxHxWxC\n kernel_size: a list of 2 ints\n stride: a list of 2 ints\n \n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n stride_h, stride_w = stride\n outputs = tf.nn.avg_pool(inputs,\n ksize=[1, kernel_h, kernel_w, 1],\n strides=[1, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\n\ndef max_pool3d(inputs,\n kernel_size,\n scope,\n stride=[2, 2, 2],\n padding='VALID'):\n \"\"\" 3D max pooling.\n\n Args:\n inputs: 5-D tensor BxDxHxWxC\n kernel_size: a list of 3 ints\n stride: a list of 3 ints\n \n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_d, kernel_h, kernel_w = kernel_size\n stride_d, stride_h, stride_w = stride\n outputs = tf.nn.max_pool3d(inputs,\n ksize=[1, kernel_d, kernel_h, kernel_w, 1],\n strides=[1, stride_d, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\ndef avg_pool3d(inputs,\n kernel_size,\n scope,\n stride=[2, 2, 2],\n padding='VALID'):\n \"\"\" 3D avg pooling.\n\n Args:\n inputs: 5-D tensor BxDxHxWxC\n kernel_size: a list of 3 ints\n stride: a list of 3 ints\n \n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_d, kernel_h, kernel_w = kernel_size\n stride_d, stride_h, stride_w = stride\n outputs = tf.nn.avg_pool3d(inputs,\n ksize=[1, kernel_d, kernel_h, kernel_w, 1],\n strides=[1, stride_d, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\n\ndef batch_norm_template_unused(inputs, is_training, scope, moments_dims, bn_decay):\n \"\"\" NOTE: this is older version of the util func. it is deprecated.\n Batch normalization on convolutional maps and beyond...\n Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow\n \n Args:\n inputs: Tensor, k-D input ... 
x C could be BC or BHWC or BDHWC\n is_training: boolean tf.Varialbe, true indicates training phase\n scope: string, variable scope\n moments_dims: a list of ints, indicating dimensions for moments calculation\n bn_decay: float or float tensor variable, controling moving average weight\n Return:\n normed: batch-normalized maps\n \"\"\"\n with tf.variable_scope(scope) as sc:\n num_channels = inputs.get_shape()[-1].value\n beta = _variable_on_cpu(name='beta',shape=[num_channels],\n initializer=tf.constant_initializer(0))\n gamma = _variable_on_cpu(name='gamma',shape=[num_channels],\n initializer=tf.constant_initializer(1.0))\n batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')\n decay = bn_decay if bn_decay is not None else 0.9\n ema = tf.train.ExponentialMovingAverage(decay=decay)\n # Operator that maintains moving averages of variables.\n # Need to set reuse=False, otherwise if reuse, will see moments_1/mean/ExponentialMovingAverage/ does not exist\n # https://github.com/shekkizh/WassersteinGAN.tensorflow/issues/3\n with tf.variable_scope(tf.get_variable_scope(), reuse=False):\n ema_apply_op = tf.cond(is_training,\n lambda: ema.apply([batch_mean, batch_var]),\n lambda: tf.no_op())\n \n # Update moving average and return current batch's avg and var.\n def mean_var_with_update():\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n \n # ema.average returns the Variable holding the average of var.\n mean, var = tf.cond(is_training,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)\n return normed\n\n\ndef batch_norm_template(inputs, is_training, scope, moments_dims_unused, bn_decay, data_format='NHWC'):\n \"\"\" Batch normalization on convolutional maps and beyond...\n Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow\n \n Args:\n inputs: Tensor, k-D input ... 
x C could be BC or BHWC or BDHWC\n is_training: boolean tf.Varialbe, true indicates training phase\n scope: string, variable scope\n moments_dims: a list of ints, indicating dimensions for moments calculation\n bn_decay: float or float tensor variable, controling moving average weight\n data_format: 'NHWC' or 'NCHW'\n Return:\n normed: batch-normalized maps\n \"\"\"\n bn_decay = bn_decay if bn_decay is not None else 0.9\n return tf.layers.batch_normalization(inputs, \n center=True, scale=True,\n training=is_training, momentum=bn_decay,\n name=scope,\n data_format=data_format)\n\n\ndef batch_norm_for_fc(inputs, is_training, bn_decay, scope):\n \"\"\" Batch normalization on FC data.\n \n Args:\n inputs: Tensor, 2D BxC input\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)\n\n\ndef batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, data_format):\n \"\"\" Batch normalization on 1D convolutional maps.\n \n Args:\n inputs: Tensor, 3D BLC input maps\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n data_format: 'NHWC' or 'NCHW'\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0,1], bn_decay, data_format)\n\n\n\n \ndef batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, data_format):\n \"\"\" Batch normalization on 2D convolutional maps.\n \n Args:\n inputs: Tensor, 4D BHWC input maps\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n data_format: 'NHWC' or 'NCHW'\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0,1,2], bn_decay, data_format)\n\n\ndef batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):\n \"\"\" Batch normalization on 3D convolutional maps.\n \n Args:\n inputs: Tensor, 5D BDHWC input maps\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0,1,2,3], bn_decay)\n\n\ndef dropout(inputs,\n is_training,\n scope,\n keep_prob=0.5,\n noise_shape=None):\n \"\"\" Dropout layer.\n\n Args:\n inputs: tensor\n is_training: boolean tf.Variable\n scope: string\n keep_prob: float in [0,1]\n noise_shape: list of ints\n\n Returns:\n tensor variable\n \"\"\"\n with tf.variable_scope(scope) as sc:\n outputs = tf.cond(is_training,\n lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),\n lambda: inputs)\n return outputs\n" ]
[ [ "tensorflow.compat.v1.nn.conv1d", "tensorflow.compat.v1.nn.dropout", "tensorflow.compat.v1.nn.conv3d", "tensorflow.compat.v1.no_op", "tensorflow.compat.v1.train.ExponentialMovingAverage", "tensorflow.compat.v1.nn.avg_pool3d", "tensorflow.compat.v1.identity", "tensorflow.compat.v1.truncated_normal_initializer", "tensorflow.compat.v1.nn.avg_pool", "tensorflow.compat.v1.constant_initializer", "tensorflow.compat.v1.get_variable_scope", "tensorflow.compat.v1.layers.batch_normalization", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.nn.batch_normalization", "tensorflow.compat.v1.nn.moments", "tensorflow.compat.v1.nn.conv2d", "tensorflow.compat.v1.nn.max_pool", "tensorflow.compat.v1.device", "tensorflow.initializers.GlorotUniform", "tensorflow.compat.v1.nn.max_pool3d", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.nn.l2_loss", "tensorflow.compat.v1.nn.bias_add", "tensorflow.compat.v1.nn.conv2d_transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
drodeur/spam
[ "d7063cee3878f695cf6efa414cfcaa3812d3956f" ]
[ "classifiers/strategies/bagOfWords.py" ]
[ "from sklearn.feature_extraction.text import CountVectorizer;\n\nclass BagOfWords(object):\n def __init__(self, data, options):\n self.data = data;\n self.options = options;\n\n try:\n self.options['vector'];\n except KeyError:\n self.options['vector'] = count_vector = CountVectorizer(token_pattern='(?u)\\\\b\\\\w\\\\w+\\\\b');\n\n def compute(self):\n if self.options['fit'] == True:\n return self.options['vector'].fit_transform(self.data);\n\n return self.options['vector'].transform(self.data);" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
oriolfornes/JASPAR-motif-discovery
[ "4c3fe91e9345df0aa06a52e94a9bb02ff2748965", "4c3fe91e9345df0aa06a52e94a9bb02ff2748965", "4c3fe91e9345df0aa06a52e94a9bb02ff2748965" ]
[ "explainn/predict-danq.py", "explainn/parsers/de-novo/remap2cam.py", "explainn/damo/helpers.py" ]
[ "#!/usr/bin/env python\n\nfrom Bio import SeqIO\nimport click\nfrom io import StringIO\nimport numpy as np\nimport pandas as pd\nimport sys\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom tqdm import tqdm\nbar_format = \"{percentage:3.0f}%|{bar:20}{r_bar}\"\n\n# Local imports\nfrom architectures import DanQ\nfrom sequence import one_hot_encode_many, rc_one_hot_encoding_many\nfrom utils import get_file_handle\n\nCONTEXT_SETTINGS = {\n \"help_option_names\": [\"-h\", \"--help\"],\n}\n\[email protected](no_args_is_help=True, context_settings=CONTEXT_SETTINGS)\[email protected](\n \"model_file\",\n type=click.Path(exists=True, resolve_path=True)\n)\[email protected](\n \"fasta_file\",\n type=click.Path(exists=True, resolve_path=True)\n)\[email protected](\n \"-b\", \"--batch-size\",\n help=\"Batch size.\",\n type=int,\n default=2**6,\n show_default=True,\n)\[email protected](\n \"-o\", \"--output-file\",\n help=\"Output file. [default: stdout]\",\n type=click.Path(resolve_path=True),\n)\[email protected](\n \"-s\", \"--apply-sigmoid\",\n help=\"Apply the logistic sigmoid function to outputs.\",\n is_flag=True,\n)\n\ndef main(**args):\n\n ##############\n # Load Data #\n ##############\n\n # Get data\n Xs, seq_ids = _get_Xs_ids(args[\"fasta_file\"])\n\n # Get DataLoader\n data_loader = DataLoader(TensorDataset(torch.Tensor(Xs),\n torch.Tensor(rc_one_hot_encoding_many(Xs))), args[\"batch_size\"])\n\n # Load model\n model = _load_model(args[\"model_file\"])\n\n ##############\n # Predict #\n ############## \n\n # Initialize\n idx = 0\n predictions = np.empty((len(Xs), model._options[\"n_features\"], 4))\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n with torch.no_grad():\n\n for i, (fwd, rev) in tqdm(enumerate(iter(data_loader)),\n total=len(data_loader), bar_format=bar_format):\n\n # Get strand-specific predictions\n fwd = np.expand_dims(model(fwd.to(device)).cpu().numpy(), axis=2)\n rev = np.expand_dims(model(rev.to(device)).cpu().numpy(), axis=2)\n\n # Combine predictions from both strands\n fwd_rev = np.concatenate((fwd, rev), axis=2)\n mean_fwd_rev = np.expand_dims(np.mean(fwd_rev, axis=2), axis=2)\n max_fwd_rev = np.expand_dims(np.max(fwd_rev, axis=2), axis=2)\n\n # Concatenate predictions for this batch\n p = np.concatenate((fwd, rev, mean_fwd_rev, max_fwd_rev), axis=2)\n predictions[idx:idx+fwd.shape[0]] = p\n\n # Index increase\n idx += fwd.shape[0]\n\n # Apply sigmoid\n if args[\"apply_sigmoid\"]:\n predictions = torch.sigmoid(torch.Tensor(predictions)).numpy()\n\n ##############\n # Output #\n ############## \n\n dfs = []\n for i in range(model._options[\"n_features\"]):\n p = predictions[:, i, :]\n df = pd.DataFrame(p, columns=[\"Fwd\", \"Rev\", \"Mean\", \"Max\"])\n df[\"SeqId\"] = seq_ids\n df[\"Class\"] = i \n dfs.append(df)\n df = pd.concat(dfs)[[\"SeqId\", \"Class\", \"Fwd\", \"Rev\", \"Mean\", \"Max\"]]\n df.reset_index(drop=True, inplace=True)\n if args[\"output_file\"] is not None:\n df.to_csv(args[\"output_file\"], sep=\"\\t\", index=False)\n else:\n o = StringIO()\n df.to_csv(o, sep=\"\\t\", index=False)\n sys.stdout.write(o.getvalue())\n\ndef _get_Xs_ids(fasta_file):\n\n # Get sequences\n fh = get_file_handle(fasta_file, \"rt\")\n records = list(SeqIO.parse(fh, \"fasta\"))\n fh.close()\n\n # Xs / ids\n Xs = one_hot_encode_many(np.array([str(r.seq) for r in records]))\n ids = np.array([r.id for r in records])\n\n return(Xs, ids)\n\ndef _load_model(model_file):\n\n # Initialize\n device = \"cuda\" if torch.cuda.is_available() 
else \"cpu\"\n\n # Get model\n selene_dict = torch.load(model_file)\n model = DanQ(\n selene_dict[\"options\"][\"sequence_length\"],\n selene_dict[\"options\"][\"n_features\"],\n selene_dict[\"options\"][\"weights_file\"],\n )\n model.load_state_dict(selene_dict[\"state_dict\"])\n model.to(device)\n model.eval()\n\n return(model)\n\nif __name__ == \"__main__\":\n main()\n", "#!/usr/bin/env python\n\nimport click\nfrom functools import partial\nfrom multiprocessing import Pool\nimport os\nimport pandas as pd\nfrom pybedtools import BedTool\nfrom pybedtools.helpers import cleanup\nimport re\nimport subprocess as sp\nfrom tqdm import tqdm\nbar_format = \"{percentage:3.0f}%|{bar:20}{r_bar}\"\n\nfrom utils import get_chrom_sizes, get_file_handle\n\n# Globals\nscripts_dir = os.path.dirname(os.path.realpath(__file__))\n\nCONTEXT_SETTINGS = {\n \"help_option_names\": [\"-h\", \"--help\"],\n}\n\[email protected](no_args_is_help=True, context_settings=CONTEXT_SETTINGS)\[email protected](\n \"remap_dir\", type=click.Path(exists=True, resolve_path=True)\n)\[email protected](\n \"genome_file\", type=click.Path(exists=True, resolve_path=True)\n)\[email protected](\n \"dhs_file\", type=click.Path(exists=True, resolve_path=True)\n)\n# @click.argument(\n# \"tfs_file\", type=click.Path(exists=True, resolve_path=True)\n# )\[email protected](\n \"-d\", \"--dummy-dir\",\n help=\"Dummy directory.\",\n type=click.Path(resolve_path=True),\n default=\"/tmp/\",\n show_default=True\n)\[email protected](\n \"-l\", \"--length\",\n help=\"Sequence length.\",\n type=click.Choice([\"201\", \"501\", \"1001\"]),\n default=\"201\",\n show_default=True\n)\[email protected](\n \"-o\", \"--output-dir\",\n help=\"Output directory.\",\n type=click.Path(resolve_path=True),\n default=\"./\",\n show_default=True\n)\[email protected](\n \"-t\", \"--threads\",\n help=\"Threads to use.\",\n type=int,\n default=1,\n show_default=True\n)\n\ndef main(**args):\n\n # Create output dir\n if not os.path.exists(args[\"output_dir\"]):\n os.makedirs(args[\"output_dir\"])\n\n # Get chrom sizes\n chrom_sizes = get_chrom_sizes(\"%s.sizes\" % args[\"genome_file\"])\n\n # Get already processed TFs\n tfs = set()\n for tsv_file in os.listdir(args[\"output_dir\"]):\n m = re.search(\"^(\\S+).(train|validation|test).tsv.gz$\", tsv_file)\n tfs.add(m.group(1))\n\n # Get BED files\n remap_files = []\n for bed_file in os.listdir(args[\"remap_dir\"]):\n m = re.search(\"^remap2022_(\\S+)_nr_macs2_\\w+_v1_0.bed.gz$\", bed_file)\n if m.group(1) not in tfs:\n remap_files.append(os.path.join(args[\"remap_dir\"], bed_file))\n # d = args[\"remap_dir\"]\n # fh = get_file_handle(args[\"tfs_file\"], \"rt\")\n # for line in fh:\n # tf = line.strip()\n # f = os.path.join(d, f\"remap2022_{tf}_nr_macs2_mm10_v1_0.bed.gz\")\n # if os.path.exists(f):\n # remap_files.append(f)\n # fh.close()\n\n # Get FASTA sequences\n kwargs = {\"total\": len(remap_files), \"bar_format\": bar_format}\n pool = Pool(args[\"threads\"])\n p = partial(__get_FASTA_sequences, genome_file=args[\"genome_file\"],\n dhs_file=args[\"dhs_file\"], chrom_sizes=chrom_sizes,\n dummy_dir=args[\"dummy_dir\"], length=int(args[\"length\"]),\n output_dir=args[\"output_dir\"])\n for _ in tqdm(pool.imap(p, remap_files), **kwargs):\n pass\n\ndef __get_FASTA_sequences(remap_file, genome_file, dhs_file, chrom_sizes,\n dummy_dir=\"/tmp/\", length=201, output_dir=\"./\"):\n\n # Initialize\n prefix = re.search(\"^remap2022_(\\S+)_nr_macs2_\\w+_v1_0.bed.gz$\",\n os.path.split(remap_file)[1]).group(1)\n\n # Intervals as 
pandas DataFrame\n df = pd.read_csv(remap_file, sep=\"\\t\", usecols=[0, 6, 7],\n names=[\"chrom\", \"start\", \"end\"])\n df = df[df[\"chrom\"].isin(chrom_sizes.keys())]\n\n # Get non-redundant BedTool object (i.e. positive sequences)\n b = BedTool(\"\\n\".join([\"\\t\".join(map(str, row.tolist())) \\\n for _, row in df.iterrows()]), from_string=True).sort()\n b.set_chromsizes(chrom_sizes)\n b = b.slop(b=int((length-1)/2)).filter(lambda x: len(x) == length).sort()\n m = b.merge()\n nr = BedTool(\"\\n\".join([\"\\t\".join(map(str, i.fields[-3:])) \\\n for i in m.closest(b, t=\"first\")]), from_string=True)\n nr.sequence(fi=genome_file)\n positive_file = os.path.join(dummy_dir, \"%s_pos.fa\" % prefix)\n nr.save_seqs(positive_file)\n\n # Get non-overlapping BedTool object (i.e. negative sequences)\n b = BedTool(dhs_file)\n nov = b.intersect(nr, v=True)\n nov.sequence(fi=genome_file)\n negative_file = os.path.join(dummy_dir, \"%s_neg.fa\" % prefix)\n nov.save_seqs(negative_file)\n\n # Subsample negative sequences by %GC\n json_file = os.path.join(dummy_dir, \"%s.json\" % prefix)\n cmd = \"./match-seqs-by-gc.py -f -o %s %s %s\" % \\\n (json_file, negative_file, positive_file)\n _ = sp.run([cmd], shell=True, cwd=scripts_dir, stdout=sp.DEVNULL,\n stderr=sp.DEVNULL)\n\n # Subsample negative sequences by %GC\n cmd = \"./json2cam.py -o %s -p %s --test %s\" % \\\n (output_dir, prefix, json_file)\n _ = sp.run([cmd], shell=True, cwd=scripts_dir, stdout=sp.DEVNULL,\n stderr=sp.DEVNULL)\n\n # Delete tmp files\n cleanup()\n os.remove(positive_file)\n os.remove(negative_file)\n os.remove(json_file)\n\nif __name__ == \"__main__\":\n main()", "# coding: utf-8\nimport operator\nimport requests\nfrom urllib.parse import urlparse\nimport pickle\nimport linecache\nimport gzip\nfrom .universal import *\nfrom scipy import random\n\n\nURL = 'https://www.encodeproject.org/'\n\n\ndef Sym(dx): # assume both seq & rev already in dx, and data on 1 single strand\n \"\"\"\n :param dx: dict\n :return: dict\n \"\"\"\n dy = {}\n for seq, rev in itt.izip(dx, RC(dx)):\n dy[seq] = 0.5 * (dx[seq] + dx[rev])\n return dy\n\n\nclass Distribution(dict):\n def __init__(self, pred, prefix, suffix):\n \"\"\"\n :param pred: dict\n :param prefix: list[dict]\n :param suffix: list[dict]\n :return:\n \"\"\"\n d = Op.Nom(pred)\n for k, v in d.items():\n it = itt.chain(self.L(k, v, prefix), self.R(k, v, suffix))\n for seq, count in it:\n assert len(seq) == len(k)\n d[seq] = d.get(seq, 0.0) + count\n super(Distribution, self).__init__(Sym(d))\n\n @classmethod\n def R(cls, seq, count, suffix):\n for i, d in enumerate(suffix):\n frac = seq[+i+1:]\n for k, v in d.iteritems():\n yield frac + k, v * count\n\n @classmethod\n def L(cls, seq, count, prefix):\n for i, d in enumerate(prefix):\n frac = seq[:-i-1]\n for k, v in d.iteritems():\n yield k + frac, v * count\n\n\ndef fasta(path, seqs):\n lines = []\n for i, seq in enumerate(seqs):\n lines.append(['> seq_%d' % (i + 1)])\n lines.append([seq])\n WriteCSV(path, lines)\n\n\ndef fmt_ppm(path, _PPM, header='', labels=BASES):\n out = [[b, '|'] + map(str, xs) for b, xs in zip(labels, np.transpose(_PPM))]\n np.savetxt(path, out, '%s', header=str(header), comments='> ')\n\n\nclass Freq(object):\n def __init__(self, seqs, counts):\n \"\"\"\n :param seqs: iterable\n :param counts: iterable\n :return:\n \"\"\"\n self.matrix = np.array(map(list, seqs))\n self.counts = np.fromiter(counts, float)\n _, self.width = self.matrix.shape\n\n @classmethod\n def FromIter(cls, iterable):\n \"\"\"\n :param iterable: 
iterable\n :return: Freq\n \"\"\"\n iters = list(iterable)\n seqs, counts = zip(*iters)\n return cls(seqs, counts)\n\n def Freq(self, ind=np.s_[:], pseudo=0.0):\n \"\"\"\n :param ind: int\n :param pseudo: float\n :return: dict\n \"\"\"\n freq = dict.fromkeys(BASES, pseudo)\n for chars, count in zip(self.matrix[:, ind], self.counts):\n for char in chars:\n freq[char] += count\n return freq # not consider rc\n\n def GetPFM(self, pseudo=0.0):\n \"\"\"\n :param pseudo: float\n :return: ndarray\n \"\"\"\n dicts = [self.Freq(i, pseudo) for i in range(self.width)]\n return np.array(DictList.ToList(dicts, BASES))\n\n\ndef uniq_subseqs(seqs, length):\n assert 0 < length <= len(seqs[0])\n out = []\n for seq in seqs:\n subseqs = Subset(seq, length)\n out.extend(subseqs + RC(subseqs))\n return list(set(out))\n\n\ndef mean_std_fmt(array, n=3):\n fmt = '%.{0}f (%.{0}f)'.format(n)\n a = np.array(array, float).ravel()\n return fmt % (np.mean(a), np.std(a))\n\n\ndef gen_pred(seqs, _PPM):\n log_PPM = np.log(_PPM)\n n1, n2 = log_PPM.shape\n assert n2 == len(BASES)\n dicts = DictList.ToDict(log_PPM, BASES)\n pred = {}\n for key in set(seqs):\n assert n1 == len(key)\n pred[key] = np.exp(sum(d[b] for d, b in zip(dicts, key)))\n return pred\n\n\ndef Gradient(f, x0, h=1e-6):\n n = len(x0)\n g = np.zeros(n)\n for i in range(n):\n x = np.array(x0)\n x[i] -= h\n g[i] -= f(x)\n x = np.array(x0)\n x[i] += h\n g[i] += f(x)\n g /= 2 * h\n return g\n\n\ndef Hessian(f, x0, h=1e-6):\n n = len(x0)\n H = np.zeros((n, n))\n for i in range(n):\n x = np.array(x0)\n x[i] -= h\n H[:, i] -= Gradient(f, x, h)\n x = np.array(x0)\n x[i] += h\n H[:, i] += Gradient(f, x, h)\n H /= 2 * h\n return (H + H.T) / 2\n\n\nclass Peaks(list):\n def sorted(self, size=None, step=1, reverse=True, rand=False):\n \"\"\"\n :param size: int | None\n :param step: int\n :param reverse: bool\n :param rand: bool\n :return: list[Peak]\n \"\"\"\n if rand:\n out = random.choice(self, size=size, replace=False)\n else:\n sort_pks = sorted(self, reverse=reverse)\n if size is None:\n stop = None\n else:\n stop = step * size\n assert stop <= len(self)\n out = sort_pks[0:stop:step]\n return Peaks(out)\n\n def get_attr(self, ind=None, name='signal'):\n if ind is None:\n ind = range(len(self))\n return [getattr(self[i], name) for i in ind]\n\n\nclass BedReader(Peaks):\n hd = 'chrom', 'start', 'stop', 'name', 'score', 'strand', 'signal', 'p', 'q'\n selector = {'bed broadPeak': hd, 'bed narrowPeak': hd + ('peak',)}\n\n def __init__(self, path, file_type, sep='\\t'):\n super(BedReader, self).__init__()\n keys = self.selector[file_type]\n\n with gzip.open(path) as f: # assume gzip file\n for line in f:\n values = line.rstrip().split(sep)\n assert len(keys) == len(values)\n kwargs = dict(zip(keys, values))\n self.append(Peak(**kwargs))\n\n\nclass Peak(object):\n def __init__(self, chrom, start, stop, signal, peak=None, **kwargs):\n self.chrom = chrom\n self.start = int(start)\n self.stop = int(stop)\n self.signal = float(signal) # enrichment\n if peak is None or int(peak) == -1: # Use -1 if no point-source called.\n self.peak = (self.start + self.stop) / 2 # int\n else:\n self.peak = self.start + int(peak)\n assert self.peak <= self.stop\n del kwargs\n\n def __cmp__(self, other):\n return cmp(self.signal, other.signal)\n\n def chr_path(self, path_fmt):\n return path_fmt % self.chrom\n\n def coord(self, shift, length):\n if length:\n half = length / 2\n mid = self.peak + shift\n return mid - half, mid + half\n else:\n return self.start + shift, self.stop + shift\n\n def 
seek(self, shift, length, path_fmt, skip=1):\n start, stop = self.coord(shift, length)\n return seek_seq(self.chr_path(path_fmt), start, stop - start, skip)\n\n\nclass GetScore(list):\n def __init__(self, seqs, pred_dict, use_best_site=True):\n \"\"\"\n :param seqs: list[str]\n :param pred_dict: dict\n :return:\n \"\"\"\n super(GetScore, self).__init__()\n width = len(next(pred_dict.iterkeys()))\n\n self.ind = []\n self.dists = []\n for seq in seqs:\n contigs = Subset(seq, width)\n contigs.extend(RC(contigs))\n\n scores = [pred_dict[key] for key in contigs] # len(key) == width\n max_score = max(scores)\n if use_best_site:\n self.append(max_score)\n else:\n self.append(np.mean(scores))\n\n ind = [i for i, x in enumerate(scores) if x == max_score]\n idx = random.choice(ind)\n self.ind.append(idx)\n\n n = len(scores) / 2 # binding sites positions #\n self.dists.append(idx - n/2 if idx < n else idx - n - n/2)\n\n\nclass DataFile(dict):\n def __init__(self, *args, **kwargs):\n super(DataFile, self).__init__(*args, **kwargs)\n\n self.accession = self['accession']\n self.href = self['href'] # relative download path\n self.file_type = self['file_type']\n\n self.basename = os.path.basename(self.href)\n self.url = urlparse.urljoin(URL, self.href)\n self.response = None\n\n def get_response(self):\n if self.response is None:\n self.response = requests.get(self.url)\n\n def download(self, local_dir, mode='wb'):\n self.local_path = os.path.join(local_dir, self.basename)\n if not os.path.isfile(self.local_path):\n self.get_response()\n with open(self.local_path, mode) as f:\n f.write(self.response.content) # binary\n\n\nclass Assay(dict):\n def __init__(self, *args, **kwargs):\n super(Assay, self).__init__(*args, **kwargs)\n self.accession = self['accession']\n self.files = [DataFile(d) for d in self['files']]\n self.target = Target(self['target'])\n\n def get_types(self, file_types, assembly):\n out = []\n for f in self.files:\n if f.file_type in file_types and f['assembly'] == assembly:\n out.append(f)\n return out\n\n\nclass Target(dict):\n def __init__(self, *args, **kwargs):\n super(Target, self).__init__(*args, **kwargs)\n self.gene_name = self['gene_name']\n\n def is_gene(self, gene_name):\n \"\"\"\n :param gene_name: str\n :return: bool\n \"\"\"\n return self.gene_name.upper() == gene_name.upper()\n\n\nclass Cmp(list):\n msg = 'r2 = %.3f, norm = %#.4g, kl = %.3f, kl_sym = %.3f (n = %d)'\n\n def __init__(self, x, y, normed=True):\n \"\"\"\n :param x: array_like\n :param y: array_like\n :return:\n \"\"\"\n a = np.array(x, float).flatten()\n b = np.array(y, float).flatten()\n\n assert len(a) == len(b)\n assert min(a) > 0 and min(b) > 0\n\n if normed:\n a /= sum(a)\n b /= sum(b)\n\n super(Cmp, self).__init__(self.Stats(a, b))\n\n @classmethod\n def FromDict(cls, dx, dy, normed=True):\n \"\"\"\n :param dx: dict\n :param dy: dict\n :param normed: bool\n :return: Cmp\n \"\"\"\n keys = set(dx) & set(dy)\n x = [dx[k] for k in keys]\n y = [dy[k] for k in keys]\n return cls(x, y, normed)\n\n @classmethod\n def Stats(cls, x, y):\n n = len(x)\n\n r2 = stats.pearsonr(x, y)[0] ** 2\n norm = np.fabs(x - y).sum() / n\n kl = KL(x, y)\n kl_sym = (KL(x, y) + KL(y, x)) / 2\n\n tup = r2, norm, kl, kl_sym, n\n print(cls.msg % tup)\n return tup\n\n def Write(self, path, sep='\\t'):\n Write(path, sep.join(map(str, self)))\n\n\ndef get_ecdf(array, reverse=False):\n \"\"\"\n Generate the empirical distribution function.\n :param array: array_like\n :param reverse: bool\n :return: float -> float\n \"\"\"\n n = len(array)\n 
op = operator.ge if reverse else operator.le\n\n def ecdf(t):\n m = sum(op(x, t) for x in array) # x <= t or x >= t if reverse\n return float(m) / float(n)\n\n return ecdf # return func\n\n\ndef seek_seq(path, start, length, skip=1):\n width = len(getline(path, skip + 1))\n\n idx = start - 1\n row_idx = idx / width\n col_idx = idx % width\n\n lineno = row_idx + skip + 1\n line = getline(path, lineno)\n chars = list(line[col_idx:])\n\n while len(chars) < length:\n lineno += 1\n line = getline(path, lineno)\n if line:\n chars.extend(line)\n else:\n break\n\n return ''.join(chars[:length]).upper()\n\n\ndef getline(path, lineno):\n return linecache.getline(path, lineno).rstrip()\n\n\ndef load_PPM(path, skipcols=1, skiprows=0):\n return np.loadtxt(path, str, skiprows=skiprows).T[skipcols:].astype(float)\n\n\ndef Dump(path, obj):\n with open(path, 'w') as f:\n pickle.dump(obj, f)\n\n\ndef pad_PPM(_PPM, width):\n if width > len(_PPM):\n diff = width - len(_PPM)\n prefix = diff / 2\n suffix = diff - prefix\n return np.pad(_PPM, ((prefix, suffix), (0, 0)), 'constant',\n constant_values=(0.25,))\n else:\n return _PPM\n" ]
[ [ "pandas.concat", "torch.Tensor", "torch.load", "pandas.DataFrame", "numpy.concatenate", "numpy.max", "torch.no_grad", "numpy.mean", "torch.cuda.is_available", "numpy.array" ], [ "pandas.read_csv" ], [ "scipy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hanbingyan/collusion
[ "c6cb9e39b24f9d77c25d0d2a9df3f90721f830a8" ]
[ "classic_random.py" ]
[ "import random\nfrom collections import namedtuple\nimport numpy as np\nimport pickle\n\nrandom.seed(12345)\nnp.random.seed(12345)\n\nTransition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))\nMEM_SIZE = 2250\nBATCH_SIZE = 1\nDELTA = 0.95\n\neps = 1e-5\nalpha = 0.15\n\nactions_space = np.arange(1.43, 2.0, 0.04)\nquality = np.ones(2) * 2\nmargin_cost = np.ones(2)\nhorizon = 1 / 4\na0 = 0\nn_actions = actions_space.size\nn_agents = 2\n\nreward_sum = np.array([5.58232796, 5.78802889, 5.92606135, 5.99644584, 6.00067233,\n 5.94172477, 5.82402394, 5.65328833, 5.43631956, 5.18072579,\n 4.89460298, 4.58619785, 4.26357789, 3.93433261, 3.60532586])\n\n\n\nclass ReplayMemory(object):\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def recent(self, batch_size):\n ind = np.arange(self.position - batch_size, self.position)%self.capacity\n return [self.memory[i] for i in ind]\n\n def __len__(self):\n return len(self.memory)\n\ndef replay_classic_reward(action):\n # Compute profits for all agents\n\n price = actions_space[action]\n demand = np.exp((quality - price) / horizon)\n demand = demand / (np.sum(demand) + np.exp(a0 / horizon))\n reward = np.multiply(price - margin_cost, demand)\n return reward\n\ndef replay_classic_select(agent, state, steps_done):\n\n sample = random.random()\n eps_threshold = np.exp(-eps * steps_done)\n\n if sample > eps_threshold:\n return Q[agent][state[0]][state[1]].argmax()\n else:\n return np.random.randint(0, n_actions, 1, dtype=int)\n\n\ndef replay_classic_opt(agent, memory, BS):\n if len(memory) < BS:\n return\n transitions = memory.sample(BS)\n batch = Transition(*zip(*transitions))\n\n for i in range(BS):\n stat = batch.state[i]\n next_stat = batch.next_state[i]\n act = batch.action[i]\n rew = batch.reward[i]\n Q[agent][stat[0]][stat[1]][act[agent]] = (1 - alpha) * Q[agent][stat[0]][stat[1]][act[agent]] + \\\n alpha * (rew[agent] + DELTA * Q[agent][next_stat[0]][next_stat[1]].max())\n return\n\nQ_hist = []\nend_price = []\nfor sess in range(500):\n steps_done = 0\n\n state_hist = []\n # Initialize the environment and state\n state = np.random.randint(0, n_actions, size=n_agents)\n state_hist.append(state)\n # Counter for variations in heat\n count = 0\n\n Q = np.zeros((n_agents, n_actions, n_actions, n_actions))\n for agent in range(n_agents):\n for i in range(n_actions):\n for j in range(n_actions):\n Q[agent, i, j, :] = reward_sum\n memory = ReplayMemory(MEM_SIZE)\n\n for i_episode in range(10000000):\n # For each agent, select and perform an action\n action = np.zeros(n_agents, dtype=int)\n\n # if i_episode == num_episodes - 100:\n # action[0, 0] = 4\n # action[0, 1] = select_action_classic(Q[1], state, steps_done)\n # else:\n for i in range(n_agents):\n action[i] = replay_classic_select(i, state, steps_done)\n\n steps_done += 1\n\n reward = replay_classic_reward(action)\n\n # Observe new state\n next_state = action\n\n # Store the transition in memory\n memory.push(state, action, next_state, reward)\n\n old_heat0 = Q[0].argmax(2)\n old_heat1 = Q[1].argmax(2)\n\n replay_classic_opt(0, memory, BATCH_SIZE)\n replay_classic_opt(1, memory, BATCH_SIZE)\n\n new_heat0 = 
Q[0].argmax(2)\n\n new_heat1 = Q[1].argmax(2)\n\n if np.sum(np.abs(old_heat0 - new_heat0)) == 0 and np.sum(np.abs(old_heat1 - new_heat1)) == 0:\n count += 1\n else:\n count = 0\n\n\n if i_episode%100000 == 0:\n print('Session price', sess, actions_space[action])\n print('count', count)\n print('steps done:', steps_done)\n\n state = next_state\n state_hist.append(state)\n\n if count == 100000:\n print('Terminate condition satisfied with price', np.array(state_hist[-20:]))\n break\n end_price.append(state_hist[-20:])\n Q_hist.append(Q)\n\nwith open('classic2250_rand_endprice.pickle', 'wb') as fp:\n pickle.dump(end_price, fp)\n\nwith open('classic2250_rand_Q.pickle', 'wb') as fp:\n pickle.dump(Q_hist, fp)\n" ]
[ [ "numpy.abs", "numpy.random.seed", "numpy.multiply", "numpy.arange", "numpy.ones", "numpy.array", "numpy.exp", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jasperol/MLinPractice
[ "b4a8eaee88c8da1550c8c66931951c82f5cb9250", "b4a8eaee88c8da1550c8c66931951c82f5cb9250" ]
[ "scripts/preprocessing/create_labels.py", "scripts/feature_extraction/words_most_common.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nReads in the original csv files and creates labels for the data points.\nStores the result as a single pandas DataFrame in a pickle file.\n\nCreated on Tue Sep 28 15:55:44 2021\n\n@author: lbechberger\n\"\"\"\n\nimport os, argparse, csv\nimport pandas as pd\nfrom scripts.util import COLUMN_LIKES, COLUMN_RETWEETS, COLUMN_LABEL, COLUMN_VIRAL_COUNT\n\n# setting up CLI\nparser = argparse.ArgumentParser(description = \"Creation of Labels\")\nparser.add_argument(\"data_directory\", help = \"directory where the original csv files reside\")\nparser.add_argument(\"output_file\", help = \"path to the output csv file\")\nparser.add_argument(\"-l\", '--likes_weight', type = int, help = \"weight of likes\", default = 1)\nparser.add_argument(\"-r\", '--retweet_weight', type = int, help = \"weight of retweets\", default = 1)\nparser.add_argument(\"-t\", '--threshold', type = int, help = \"threshold to surpass for positive class\", default = 50)\nparser.add_argument(\"-c\", '--viral_count', type = int, help = \"a score of how viral the tweet was\")\nargs = parser.parse_args()\n\n# get all csv files in data_directory\nfile_paths = [args.data_directory + f for f in os.listdir(args.data_directory) if f.endswith(\".csv\")]\n\n# load all csv files\ndfs = []\nfor file_path in file_paths:\n dfs.append(pd.read_csv(file_path, quoting = csv.QUOTE_NONNUMERIC, lineterminator = \"\\n\"))\n\n# join all data into a single DataFrame\ndf = pd.concat(dfs)\n\n# compute new column \"label\" based on likes and retweets\ndf[COLUMN_LABEL] = (args.likes_weight * df[COLUMN_LIKES] + args.retweet_weight * df[COLUMN_RETWEETS]) > args.threshold\n# compute new column \"viral count\" based on continuos score of likes and retweets\ndf[COLUMN_VIRAL_COUNT] = (args.likes_weight * df[COLUMN_LIKES] + args.retweet_weight * df[COLUMN_RETWEETS])\n\n# print statistics\nprint(\"Number of tweets: {0}\".format(len(df)))\nprint(\"Label distribution:\")\nprint(df[COLUMN_LABEL].value_counts(normalize = True))\n\n# store the DataFrame into a csv file\ndf.to_csv(args.output_file, index = False, quoting = csv.QUOTE_NONNUMERIC, line_terminator = \"\\n\")\n", "# -*- coding: utf-8 -*-\n\"\"\"\nFeature that extracts the most common words in all tweets, to be stored in a bag of words.\nIt will then go through each tweet and check how many of the top used words have been used.\n\nCreated on Thu Oct 21 11:23:21 2021\n\n@author: jch\n\"\"\"\n\nimport numpy as np\nimport nltk\nfrom sklearn.feature_extraction.text import ENGLISH_STOP_WORDS\nimport sys\nsys.path.append('./scripts/')\nfrom scripts.feature_extraction.feature_extractor import FeatureExtractor\n\n# class for extracting the most common words\nclass WordsMostCommon(FeatureExtractor):\n \n # constructor\n def __init__(self, input_column):\n # access superclass of all features\n super().__init__([input_column], \"{0}_words_most_common\".format(input_column))\n \n def _get_values(self, inputs):\n \n # function to drop special characters\n def join_char(word):\n new = ''.join(char for char in word if char.isalnum())\n return new\n \n # pre-processing\n tweets = inputs[0]\n \n # concatenate all tweets into one text\n text = ' '.join(tweet for tweet in tweets)\n \n # tokenize text\n token_list = list(text.split(\",\"))\n \n # drop special characters\n tokens = [join_char(token) for token in token_list]\n \n # filter for empty strings and remove stopwords\n tokens_final = list(filter(lambda x: x != \"\", tokens))\n tokens_noStopWords = [word for word 
in tokens_final if word.lower() not in ENGLISH_STOP_WORDS]\n \n # filter for 's' & 'll'\n tokens_fixed = list(filter(lambda x: x != \"s\", tokens_noStopWords))\n tokens_clear = list(filter(lambda x: x != \"ll\", tokens_fixed))\n \n # extract most common words\n freq = nltk.FreqDist(tokens_clear)\n most_common_words = freq.most_common(50)\n \n # check for each tweet how many of the most common words are included\n counts = []\n\n for t in tweets:\n counter = 0\n \n words = t.split(\"'\")\n tokens = [w for w in words if w.isalnum()]\n \n for token in tokens:\n if any(token in mcw for mcw in most_common_words):\n counter += 1\n \n counts.append(counter)\n \n result = np.array(counts)\n result = result.reshape(-1,1)\n \n return result\n \n \n " ]
[ [ "pandas.concat", "pandas.read_csv" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
grahamrow/Auspex
[ "f12d4d8d5e8abc4d5fa54415090d62000db49e8d", "f12d4d8d5e8abc4d5fa54415090d62000db49e8d" ]
[ "src/auspex/instruments/interface.py", "test/test_pipeline.py" ]
[ "import os\nimport visa\nimport numpy as np\nfrom auspex.log import logger\nfrom .prologix import PrologixSocketResource\n\nclass Interface(object):\n \"\"\"Currently just a dummy interface for testing.\"\"\"\n def __init__(self):\n super(Interface, self).__init__()\n def write(self, value):\n logger.debug(\"Writing '%s'\" % value)\n def query(self, value):\n logger.debug(\"Querying '%s'\" % value)\n if value == \":output?;\":\n return \"on\"\n return np.random.random()\n def values(self, query):\n logger.debug(\"Returning values %s\" % query)\n return np.random.random()\n def close(self):\n pass\n\nclass VisaInterface(Interface):\n \"\"\"PyVISA interface for communicating with instruments.\"\"\"\n def __init__(self, resource_name):\n super(VisaInterface, self).__init__()\n try:\n if os.name == \"nt\":\n visa_loc = 'C:\\\\windows\\\\system32\\\\visa64.dll'\n rm = visa.ResourceManager(visa_loc)\n else:\n rm = visa.ResourceManager(\"@py\")\n self._resource = rm.open_resource(resource_name)\n except:\n raise Exception(\"Unable to create the resource '%s'\" % resource_name)\n def values(self, query_string):\n return self._resource.query_ascii_values(query_string, container=np.array)\n def value(self, query_string):\n return self._resource.query_ascii_values(query_string)\n def write(self, write_string):\n self._resource.write(write_string)\n def write_raw(self, raw_string):\n self._resource.write_raw(raw_string)\n def read(self):\n return self._resource.read()\n def read_raw(self, size=None):\n return self._resource.read_raw(size=size)\n def read_bytes(self, count, chunk_size=None, break_on_termchar=False):\n return self._resource.read_bytes(count, chunk_size=chunk_size, break_on_termchar=break_on_termchar)\n def query(self, query_string):\n return self._resource.query(query_string)\n def write_binary_values(self, query_string, values, **kwargs):\n return self._resource.write_binary_values(query_string, values, **kwargs)\n def query_ascii_values(self, query_string, **kwargs):\n return self._resource.query_ascii_values(query_string, **kwargs)\n def query_binary_values(self, query_string, container=np.array, datatype=u'h',\n is_big_endian=False):\n return self._resource.query_binary_values(query_string, container=container, datatype=datatype,\n is_big_endian=is_big_endian)\n def close(self):\n self._resource.close()\n\n # IEEE Mandated SCPI commands\n def CLS(self):\n self._resource.write(\"*CLS\") # Clear Status Command\n def ESE(self):\n return self._resource.query(\"*ESE?\") # Standard Event Status Enable Query\n def ESR(self):\n return self._resource.query(\"*ESR?\") # Standard Event Status Register Query\n def IDN(self):\n return self._resource.query(\"*IDN?\") # Identification Query\n def OPC(self):\n return self._resource.query(\"*OPC?\") # Operation Complete Command\n def RST(self):\n self._resource.write(\"*RST\") # Reset Command\n def SRE(self):\n return self._resource.query(\"*SRE?\") # Service Request Enable Query\n def STB(self):\n return self._resource.query(\"*STB?\") # Read Status Byte Query\n def TST(self):\n return self._resource.query(\"*TST?\") # Self-Test Query\n def WAI(self):\n self._resource.write(\"*WAI\") # Wait-to-Continue Command\n\nclass PrologixInterface(VisaInterface):\n \"\"\"Prologix-Ethernet interface for communicating with remote GPIB instruments.\"\"\"\n def __init__(self, resource_name):\n Interface.__init__(self)\n try:\n if len(resource_name.split(\"::\")) != 2:\n raise Exception(\"Resource name for Prologix-Ethernet adapter must be of form 
IPv4_ADDR::GPIB_ADDR\")\n self._resource = PrologixSocketResource(ipaddr=resource_name.split(\"::\")[0], gpib=int(resource_name.split(\"::\")[1]))\n self._resource.connect()\n except:\n raise Exception(\"Unable to create the resource '%s'\" % resource_name)\n", "import unittest\nimport os\nimport glob\nimport shutil\nimport time\nimport tempfile\nimport numpy as np\n\npl = None\ncl = None\n\n\nimport QGL.config\nimport auspex.config\nauspex.config.auspex_dummy_mode = True\n\n# Set temporary output directories\nawg_dir = tempfile.TemporaryDirectory()\nkern_dir = tempfile.TemporaryDirectory()\nauspex.config.AWGDir = QGL.config.AWGDir = awg_dir.name\nauspex.config.KernelDir = kern_dir.name\n\nfrom QGL import *\nfrom auspex.qubit import *\nimport bbndb\n\ndef clear_test_data():\n for file in glob.glob(\"test_*.h5\"):\n os.remove(file)\n for direc in glob.glob(\"test_writehdf5*\"):\n shutil.rmtree(direc)\n\nclass PipelineTestCase(unittest.TestCase):\n\n qubits = [\"q1\"]\n instrs = ['BBNAPS1', 'BBNAPS2', 'X6-1', 'Holz1', 'Holz2']\n filts = ['avg-q1-int', 'q1-WriteToHDF5'] #'partial-avg-buff'\n nbr_round_robins = 50\n\n @classmethod\n def setUpClass(cls):\n global cl, pl\n\n cl = ChannelLibrary(\":memory:\")\n pl = PipelineManager()\n\n def test_create(self):\n cl.clear()\n q1 = cl.new_qubit(\"q1\")\n q2 = cl.new_qubit(\"q2\")\n aps1 = cl.new_APS2(\"BBNAPS1\", address=\"192.168.5.102\")\n aps2 = cl.new_APS2(\"BBNAPS2\", address=\"192.168.5.103\")\n aps3 = cl.new_APS2(\"BBNAPS3\", address=\"192.168.5.104\")\n aps4 = cl.new_APS2(\"BBNAPS4\", address=\"192.168.5.105\")\n x6_1 = cl.new_X6(\"X6_1\", address=\"1\", record_length=512)\n x6_2 = cl.new_X6(\"X6_2\", address=\"1\", record_length=512)\n holz1 = cl.new_source(\"Holz_1\", \"HolzworthHS9000\", \"HS9004A-009-1\", power=-30)\n holz2 = cl.new_source(\"Holz_2\", \"HolzworthHS9000\", \"HS9004A-009-2\", power=-30)\n holz3 = cl.new_source(\"Holz_3\", \"HolzworthHS9000\", \"HS9004A-009-3\", power=-30)\n holz4 = cl.new_source(\"Holz_4\", \"HolzworthHS9000\", \"HS9004A-009-4\", power=-30)\n\n cl.set_control(q1, aps1, generator=holz1)\n cl.set_measure(q1, aps2, x6_1[1], generator=holz2)\n cl.set_control(q2, aps3, generator=holz3)\n cl.set_measure(q2, aps4, x6_2[1], generator=holz4)\n cl.set_master(aps1, aps1.ch(\"m2\"))\n cl.commit()\n\n pl.create_default_pipeline()\n pl[\"q1\"].clear_pipeline()\n pl[\"q1\"].stream_type = \"raw\"\n pl.reset_pipelines()\n\n exp = QubitExperiment(PulsedSpec(q1), averages=5)\n\n # These should only be related to q1\n self.assertTrue([q1] == exp.measured_qubits)\n self.assertTrue([q1] == exp.controlled_qubits)\n self.assertTrue(set(exp.transmitters) == set([aps1, aps2]))\n self.assertTrue(set(exp.instrument_proxies) == set([aps1, aps2, x6_1, holz1, holz2]))\n self.assertTrue(set(exp.generators) == set([holz1, holz2]))\n self.assertTrue(set(exp.receivers) == set([x6_1]))\n self.assertTrue(len(exp.output_connectors[\"q1-raw\"].descriptor.axes) == 2)\n self.assertTrue(len(exp.output_connectors[\"q1-raw\"].descriptor.axes[0].points) == 5)\n\n def test_create_correlator(self):\n \"\"\"Create a mildly non-trivial pipeline\"\"\"\n cl.clear()\n q1 = cl.new_qubit(\"q1\")\n q2 = cl.new_qubit(\"q2\")\n aps1 = cl.new_APS2(\"BBNAPS1\", address=\"192.168.5.102\")\n aps2 = cl.new_APS2(\"BBNAPS2\", address=\"192.168.5.103\")\n aps3 = cl.new_APS2(\"BBNAPS3\", address=\"192.168.5.104\")\n aps4 = cl.new_APS2(\"BBNAPS4\", address=\"192.168.5.105\")\n x6_1 = cl.new_X6(\"X6_1\", address=\"1\", record_length=512)\n x6_2 = cl.new_X6(\"X6_2\", 
address=\"1\", record_length=512)\n holz1 = cl.new_source(\"Holz_1\", \"HolzworthHS9000\", \"HS9004A-009-1\", power=-30)\n holz2 = cl.new_source(\"Holz_2\", \"HolzworthHS9000\", \"HS9004A-009-2\", power=-30)\n holz3 = cl.new_source(\"Holz_3\", \"HolzworthHS9000\", \"HS9004A-009-3\", power=-30)\n holz4 = cl.new_source(\"Holz_4\", \"HolzworthHS9000\", \"HS9004A-009-4\", power=-30)\n\n cl.set_control(q1, aps1, generator=holz1)\n cl.set_measure(q1, aps2, x6_1[1], generator=holz2)\n cl.set_control(q2, aps3, generator=holz3)\n cl.set_measure(q2, aps4, x6_2[1], generator=holz4)\n cl.set_master(aps1, aps1.ch(\"m2\"))\n cl.commit()\n\n pl.create_default_pipeline()\n\n for ql in ['q1', 'q2']:\n qb = cl[ql]\n pl[ql].clear_pipeline()\n pl[ql].create_default_pipeline(buffers=False)\n\n pl[ql][\"Demodulate\"][\"Integrate\"][\"Average\"].add(Write(label='var'), connector_out='final_variance')\n pl[ql][\"Demodulate\"][\"Integrate\"][\"Average\"][\"var\"].groupname = ql + '-main'\n pl[ql][\"Demodulate\"][\"Integrate\"][\"Average\"][\"var\"].datasetname = 'variance'\n\n pl.add_correlator(pl[\"q1\"][\"Demodulate\"][\"Integrate\"], pl[\"q2\"][\"Demodulate\"][\"Integrate\"])\n pl[\"q1\"][\"Demodulate\"][\"Integrate\"][\"Correlate\"].add(Average(label='corr_avg')).add(Display(label='test_corr_avg',plot_dims=0))\n pl[\"q1\"][\"Demodulate\"][\"Integrate\"][\"Correlate\"][\"Average\"].add(Write(label='corr_write'))\n pl[\"q1\"][\"Demodulate\"][\"Integrate\"][\"Correlate\"][\"Average\"].add(Write(label='corr_var'), connector_out='final_variance')\n pl.reset_pipelines()\n\n exp = QubitExperiment(PulsedSpec(q1), averages=5)\n\n def test_create_integrated_correlator(self):\n \"\"\"Create a mildly non-trivial pipeline\"\"\"\n cl.clear()\n q1 = cl.new_qubit(\"q1\")\n q2 = cl.new_qubit(\"q2\")\n aps1 = cl.new_APS2(\"BBNAPS1\", address=\"192.168.5.102\")\n aps2 = cl.new_APS2(\"BBNAPS2\", address=\"192.168.5.103\")\n aps3 = cl.new_APS2(\"BBNAPS3\", address=\"192.168.5.104\")\n aps4 = cl.new_APS2(\"BBNAPS4\", address=\"192.168.5.105\")\n x6_1 = cl.new_X6(\"X6_1\", address=\"1\", record_length=512)\n x6_2 = cl.new_X6(\"X6_2\", address=\"1\", record_length=512)\n holz1 = cl.new_source(\"Holz_1\", \"HolzworthHS9000\", \"HS9004A-009-1\", power=-30)\n holz2 = cl.new_source(\"Holz_2\", \"HolzworthHS9000\", \"HS9004A-009-2\", power=-30)\n holz3 = cl.new_source(\"Holz_3\", \"HolzworthHS9000\", \"HS9004A-009-3\", power=-30)\n holz4 = cl.new_source(\"Holz_4\", \"HolzworthHS9000\", \"HS9004A-009-4\", power=-30)\n\n cl.set_control(q1, aps1, generator=holz1)\n cl.set_measure(q1, aps2, x6_1[1], generator=holz2)\n cl.set_control(q2, aps3, generator=holz3)\n cl.set_measure(q2, aps4, x6_2[1], generator=holz4)\n cl.set_master(aps1, aps1.ch(\"m2\"))\n cl.commit()\n\n pl.create_default_pipeline()\n\n for ql in ['q1', 'q2']:\n pl[ql].clear_pipeline()\n pl[ql].stream_type = \"integrated\"\n pl[ql].create_default_pipeline(buffers=False)\n\n pl[ql][\"Average\"].add(Write(label='var'), connector_out='final_variance')\n pl[ql][\"Average\"][\"var\"].groupname = ql + '-main'\n pl[ql][\"Average\"][\"var\"].datasetname = 'variance'\n\n pl.add_correlator(pl[\"q1\"], pl[\"q2\"])\n pl[\"q1\"][\"Correlate\"].add(Average(label='corr_avg')).add(Display(label='test_corr_avg',plot_dims=0))\n pl[\"q1\"][\"Correlate\"][\"Average\"].add(Write(label='corr_write'))\n pl[\"q1\"][\"Correlate\"][\"Average\"].add(Write(label='corr_var'), connector_out='final_variance')\n pl.reset_pipelines()\n\n exp = QubitExperiment(PulsedSpec(q1), averages=5)\n\n def 
test_create_transceiver(self):\n cl.clear()\n q1 = cl.new_qubit(\"q1\")\n q2 = cl.new_qubit(\"q2\")\n rack = cl.new_APS2_rack(\"APS2Rack\", [f\"192.168.5.10{i}\" for i in range(4)])\n x6_1 = cl.new_X6(\"X6_1\", address=\"1\", record_length=512)\n x6_2 = cl.new_X6(\"X6_2\", address=\"1\", record_length=512)\n holz1 = cl.new_source(\"Holz_1\", \"HolzworthHS9000\", \"HS9004A-009-1\", power=-30)\n holz2 = cl.new_source(\"Holz_2\", \"HolzworthHS9000\", \"HS9004A-009-2\", power=-30)\n holz3 = cl.new_source(\"Holz_3\", \"HolzworthHS9000\", \"HS9004A-009-3\", power=-30)\n holz4 = cl.new_source(\"Holz_4\", \"HolzworthHS9000\", \"HS9004A-009-4\", power=-30)\n\n self.assertTrue(rack.tx(\"1\").label == 'APS2Rack_U1')\n\n cl.set_control(q1, rack.tx(\"1\"), generator=holz1)\n cl.set_measure(q1, rack.tx(\"2\"), x6_1[1], generator=holz2)\n cl.set_control(q2, rack.tx(\"3\"), generator=holz3)\n cl.set_measure(q2, rack.tx(\"4\"), x6_2[1], generator=holz4)\n cl.set_master(rack.tx(\"1\"), rack.tx(\"1\").ch(\"m2\"))\n cl.commit()\n\n pl.create_default_pipeline()\n pl[\"q1\"].clear_pipeline()\n pl[\"q1\"].stream_type = \"raw\"\n pl.reset_pipelines()\n\n exp = QubitExperiment(PulsedSpec(q1), averages=5)\n\n # These should only be related to q1\n self.assertTrue([q1] == exp.measured_qubits)\n self.assertTrue([q1] == exp.controlled_qubits)\n self.assertTrue(set(exp.transmitters) == set([rack.tx(\"1\"), rack.tx(\"2\")]))\n self.assertTrue(set(exp.generators) == set([holz1, holz2]))\n self.assertTrue(set(exp.receivers) == set([x6_1]))\n self.assertTrue(len(exp.output_connectors[\"q1-raw\"].descriptor.axes) == 2)\n self.assertTrue(len(exp.output_connectors[\"q1-raw\"].descriptor.axes[0].points) == 5)\n\n def test_add_qubit_sweep(self):\n cl.clear()\n q1 = cl.new_qubit(\"q1\")\n aps1 = cl.new_APS2(\"BBNAPS1\", address=\"192.168.5.102\")\n aps2 = cl.new_APS2(\"BBNAPS2\", address=\"192.168.5.103\")\n x6_1 = cl.new_X6(\"X6_1\", address=\"1\", record_length=512)\n holz1 = cl.new_source(\"Holz_1\", \"HolzworthHS9000\", \"HS9004A-009-1\", power=-30)\n holz2 = cl.new_source(\"Holz_2\", \"HolzworthHS9000\", \"HS9004A-009-2\", power=-30)\n cl.set_control(q1, aps1, generator=holz1)\n cl.set_measure(q1, aps2, x6_1[1], generator=holz2)\n cl.set_master(aps1, aps1.ch(\"m2\"))\n pl.create_default_pipeline()\n cl.commit()\n\n exp = QubitExperiment(PulsedSpec(q1), averages=5)\n exp.add_qubit_sweep(q1, \"measure\", \"frequency\", np.linspace(6e9, 6.5e9, 500))\n self.assertTrue(len(exp.output_connectors[\"q1-raw\"].descriptor.axes[0].points) == 500)\n self.assertTrue(exp.output_connectors[\"q1-raw\"].descriptor.axes[0].points[-1] == 6.5e9)\n\n def test_multiple_streamselectors_per_qubit(self):\n cl.clear()\n q1 = cl.new_qubit(\"q1\")\n aps1 = cl.new_APS2(\"BBNAPS1\", address=\"192.168.5.102\")\n aps2 = cl.new_APS2(\"BBNAPS2\", address=\"192.168.5.103\")\n x6_1 = cl.new_X6(\"X6_1\", address=\"1\", record_length=512)\n holz1 = cl.new_source(\"Holz_1\", \"HolzworthHS9000\", \"HS9004A-009-1\", power=-30)\n holz2 = cl.new_source(\"Holz_2\", \"HolzworthHS9000\", \"HS9004A-009-2\", power=-30)\n cl.set_control(q1, aps1, generator=holz1)\n cl.set_measure(q1, aps2, x6_1[1], generator=holz2)\n cl.set_master(aps1, aps1.ch(\"m2\"))\n pl.create_default_pipeline(buffers=True)\n pl.add_qubit_pipeline(\"q1\", \"demodulated\", buffers=True)\n cl.commit()\n\n self.assertTrue(pl[\"q1 raw\"])\n self.assertTrue(pl[\"q1 demodulated\"])\n\n exp = QubitExperiment(RabiAmp(q1, np.linspace(-1,1,21)), averages=5)\n exp.set_fake_data(x6_1, np.random.random(21))\n 
exp.run_sweeps()\n\n self.assertTrue(len(exp.buffers)==2)\n\n def test_run_pipeline(self):\n cl.clear()\n q1 = cl.new_qubit(\"q1\")\n aps1 = cl.new_APS2(\"BBNAPS1\", address=\"192.168.5.102\")\n aps2 = cl.new_APS2(\"BBNAPS2\", address=\"192.168.5.103\")\n x6_1 = cl.new_X6(\"X6_1\", address=\"1\", record_length=512)\n holz1 = cl.new_source(\"Holz_1\", \"HolzworthHS9000\", \"HS9004A-009-1\", power=-30)\n holz2 = cl.new_source(\"Holz_2\", \"HolzworthHS9000\", \"HS9004A-009-2\", power=-30)\n cl.set_control(q1, aps1, generator=holz1)\n cl.set_measure(q1, aps2, x6_1[1], generator=holz2)\n cl.set_master(aps1, aps1.ch(\"m2\"))\n cl.commit()\n pl.create_default_pipeline()\n pl.reset_pipelines()\n pl[\"q1\"].clear_pipeline()\n pl[\"q1\"].stream_type = \"raw\"\n pl[\"q1\"].create_default_pipeline(buffers=True)\n exp = QubitExperiment(RabiAmp(q1, np.linspace(-1,1,21)), averages=5)\n exp.set_fake_data(x6_1, np.random.random(21))\n exp.run_sweeps()\n\n buf = list(exp.qubits_by_output.keys())[0]\n ax = buf.input_connectors[\"sink\"].descriptor.axes[0]\n\n # self.assertTrue(buf.done.is_set())\n data, desc = buf.get_data()\n self.assertTrue(len(data) == 21) # Record length * segments * averages (record decimated by 4x)\n self.assertTrue(np.all(np.array(ax.points) == np.linspace(-1,1,21)))\n self.assertTrue(ax.name == 'amplitude')\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.random" ], [ "numpy.array", "numpy.random.random", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adfoucart/deephisto
[ "f70fbaad9f95a9b9f2e420c9c33d46bdfab5bdf9", "f70fbaad9f95a9b9f2e420c9c33d46bdfab5bdf9" ]
[ "model/F1Metric.py", "data/GenericDataFeed.py" ]
[ "import tensorflow as tf\n\nclass F1Metric(tf.keras.metrics.Metric):\n def __init__(self, name=None, dtype=None):\n super(F1Metric, self).__init__(name=name, dtype=dtype)\n self.tp_ = tf.keras.metrics.TruePositives()\n self.fp_ = tf.keras.metrics.FalsePositives()\n self.fn_ = tf.keras.metrics.FalseNegatives()\n \n def reset_states(self):\n self.tp_.reset_states()\n self.fp_.reset_states()\n self.fn_.reset_states()\n \n def update_state(self, y_true, y_pred, sample_weight=None):\n self.tp_.update_state(y_true[:,:,:,1], y_pred[:,:,:,1], sample_weight)\n self.fp_.update_state(y_true[:,:,:,1], y_pred[:,:,:,1], sample_weight)\n self.fn_.update_state(y_true[:,:,:,1], y_pred[:,:,:,1], sample_weight)\n \n def result(self):\n tp = self.tp_.result()\n fp = self.fp_.result()\n fn = self.fn_.result()\n return (2*tp)/(2*tp+fp+fn)", "# -*- coding: utf-8 -*-\n'''\nAuthor: Adrien Foucart\n\nGeneric DataFeed class, containing the common parts of ArtefactDataFeed, EpitheliumDataFeed & WarwickDataFeed \n'''\nimport numpy as np\n\nclass GenericDataFeed:\n\n def __init__(self, params, db, generator=None):\n # General parameters\n self.directory = params['dataset_dir']\n self.tile_size = params['tile_size']\n self.db = db\n self.v = params['verbose'] if 'verbose' in params else False\n self.augmentData = params['augmentData'] if 'augmentData' in params else True\n self.randomSequence = params['randomSequence'] if 'randomSequence' in params else None\n\n # SNOW parameters\n self.noisy = params['noisy'] if 'noisy' in params else False\n self.pNoise = params['pNoise'] if 'pNoise' in params else 0.5\n self.generative = params['generative'] if 'generative' in params else False\n self.generator = generator\n self.onlyPositives = params['onlyPositives'] if 'onlyPositives' in params else False\n self.tda = params['tda'] if 'tda' in params else False\n self.weak = params['weak'] if 'weak' in params else False\n self.annotations = params['annotations'] if 'annotations' in params else 'full'\n\n # Random sampling\n self.seed = params['random_seed'] if 'random_seed' in params else 56489143\n self.pointer = 0\n\n self.files_X = []\n self.files_Y = []\n self.idxs = []\n\n '''\n Batch generator\n Yield samples from dataset\n '''\n def next_batch(self, batch_size, max_iterations, forValidation=False):\n if( self.v ): print(\"Starting %d iterations\"%(max_iterations))\n # iterations = sampling each image at least once\n np.random.seed(self.seed)\n # If sequence provided: load it\n seq_idxs = None\n if( self.randomSequence != None ):\n seq_idxs = np.load(self.randomSequence).astype('int')\n else:\n seq_idxs = self.generate_random_sequence(batch_size, max_iterations)\n\n for it in range(max_iterations):\n idxs = seq_idxs[it]\n for idr,idx in enumerate(idxs):\n # batch_x,batch_y,_ = self.get_sample(idx, batch_size, forValidation, it+idr)\n # yield batch_x,batch_y[:,:,:,0]\n yield self.get_sample(idx, batch_size, forValidation, it+idr)\n\n '''\n Draw a validation set from the training set, using no data augmentation. 
By default, use 10% of the images in the training set to create validation set.\n '''\n def validation_set(self, validation_set_size, fractionOfTrainingSet=10):\n images_used = max([len(self.files_X)//fractionOfTrainingSet, 1])\n n_per_image = max([1,validation_set_size//images_used])\n\n Xval = np.zeros((validation_set_size, self.tile_size, self.tile_size, 3))\n Yval_seg = np.zeros((validation_set_size, self.tile_size, self.tile_size, 2))\n Yval_det = np.zeros((validation_set_size, 2))\n n = 0\n i = 0\n while n < validation_set_size:\n n_in_this_image = min([n_per_image, validation_set_size-n])\n X,Y_seg,Y_det = self.get_sample(self.idxs[i], n_in_this_image, True)\n Xval[n:n+n_in_this_image,:,:,:] = X.copy()\n Yval_seg[n:n+n_in_this_image,:,:,:] = Y_seg.copy()\n Yval_det[n:n+n_in_this_image] = Y_det.copy()\n i += 1\n n += n_in_this_image\n return Xval,Yval_seg,Yval_det\n\n '''\n Generate a random sequence to make sure that we can save & replicate the full data pipeline to test reproductibility\n '''\n def generate_random_sequence(self, batch_size, max_iterations, saveAs=None):\n np.random.seed(self.seed)\n seq_idxs = np.zeros((max_iterations,len(self.idxs))).astype('int')\n for i,it in enumerate(range(max_iterations)):\n np.random.shuffle(self.idxs)\n seq_idxs[i,:] = self.idxs[:]\n\n if( saveAs != None ): \n np.save(saveAs, seq_idxs)\n\n return seq_idxs\n\n" ]
[ [ "tensorflow.keras.metrics.TruePositives", "tensorflow.keras.metrics.FalsePositives", "tensorflow.keras.metrics.FalseNegatives" ], [ "numpy.random.seed", "numpy.random.shuffle", "numpy.save", "numpy.load", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thinhnguyenuit/sombra
[ "5176d264508dd5cce780dc63f1dd948d66b189e8", "5176d264508dd5cce780dc63f1dd948d66b189e8" ]
[ "shaders.py", "examples/flat_sphere.py" ]
[ "import numpy as np\n# Local modules\nfrom ray import Ray\n\nTYPE_FLAT = \"flat\"\nTYPE_DIFFUSE_LIGHT = \"diffuse_light\"\nTYPE_DIFFUSE_COLORS = \"diffuse_colors\"\nTYPE_DIFF_SPECULAR = \"diffuse_with_specular\"\nTYPE_DIFF_SPEC_BORDER = \"diffuse_specular_border\"\nTYPE_LIGHT_MAP = \"light_map\"\nCOLOR_FOR_LIGHT = np.array([255, 255, 255], dtype=float)\nCOLOR_FOR_BORDER = np.array([185, 185, 185], dtype=float)\nSHADOW_STRENGTH = 0.87\n\n\ndef diffuse_light(n, l):\n \"\"\"\n Shader calculation for a normal and a light vector.\n Args:\n n(numpy.array): Unit normal vector\n l(numpy.array): Unit vector in the direction to the light\n Returns:\n numpy.array: The calculated color in RGB (grayscale 0-255)\n \"\"\"\n diffuse_coef = np.dot(n, l)\n color = np.maximum(0, diffuse_coef) * COLOR_FOR_LIGHT\n return color\n\n\ndef diffuse_colors(n, l, dark, light):\n \"\"\"\n Shader calculation for a normal and a light vector and light and dark\n colors.\n Args:\n n(numpy.array): Unit normal vector\n l(numpy.array): Unit vector in the direction to the light\n dark(numpy.array): RGB dark color\n light(numpy.array): RGB light color\n Returns:\n numpy.array: The calculated color (RGB)\n \"\"\"\n # This formula changes the value [-1 - 1] to [0 - 1]\n diffuse_coef = np.dot(n, l)\n t = np.maximum(0, diffuse_coef)\n color = light * t + dark * (1 - t)\n return color\n\n\ndef diffuse_with_specular(n, l, eye, dark, light, ks):\n \"\"\"\n Shader calculation for normal and light vectors, dark and light colors and\n specular size ks.\n Args:\n n(numpy.array): Unit normal vector\n l(numpy.array): Unit vector in the direction to the light\n eye(numpy.array): Unit vector in the direction of the viewer\n dark(numpy.array): RGB dark color\n light(numpy.array): RGB light color\n ks(float): size of specularity (this can be changed by the user)\n Returns:\n numpy.array: The calculated color (RGB)\n \"\"\"\n n_dot_l = np.dot(n, l)\n t = np.maximum(0, n_dot_l)\n color = light * t + dark * (1 - t)\n # --------------- Adding specular\n # Get the reflection of light vector\n r = -1 * l + 2 * n_dot_l * n\n s = np.dot(eye, r)\n s = np.maximum(0, s)\n # try smooth step\n step_min = 0.78\n step_max = 1\n s = (s - step_min) / (step_max - step_min)\n if s < 0:\n s = 0\n elif s > 1:\n s = 1\n s = -2 * (s ** 3) + 3 * (s ** 2)\n s = s ** 4\n color = color * (1 - s * ks) + s * ks * COLOR_FOR_LIGHT\n return color\n\n\ndef diffuse_specular_border(n, l, eye, dark, light, ks, thickness):\n \"\"\"\n Shader calculation for normal and light vectors, dark and light colors,\n and ks specular size and thickness of border parameters.\n Args:\n n(numpy.array): Unit normal vector\n l(numpy.array): Unit vector in the direction to the light\n eye(numpy.array): Unit vector in the direction of the viewer\n dark(numpy.array): RGB dark color\n light(numpy.array): RGB light color\n ks(float): size of specularity (this can be changed by the user)\n thickness(float): thickness parameter for the border defined by user\n Returns:\n numpy.array: The calculated color (RGB)\n \"\"\"\n b = np.maximum(0, 1 - np.dot(eye, n))\n step_min = thickness\n step_max = 1\n b = (b - step_min) / (step_max - step_min)\n if b < 0:\n b = 0\n elif b > 1:\n b = 1\n color = diffuse_with_specular(n, l, eye, dark, light, ks)\n color = color * (1 - b) + b * COLOR_FOR_BORDER\n return color\n\n\ndef hard_shadow(ph, objects, l, dist_l):\n \"\"\"\n Determines if this point should have a shadow for the light in pl.\n\n Args:\n ph: 3D Point of hit\n objects([Object]): list of objects that can 
be between the point and\n the light\n l(numpy.array): unit vector pointing to the light\n dist_l(float): distance to the light\n\n Returns:\n numpy.array: The calculated color for this hard shadow (RGB)\n \"\"\"\n # Case outside of cone in SpotLight\n if np.array_equal(l, np.zeros(3)):\n return np.zeros(3)\n shadow_coef = 0\n r = Ray(ph, l)\n for obj in objects:\n # Cast ray from ph to the object with n = l and shadow if t < dist_l\n t = r.intersect(obj)\n if 0 < t < dist_l:\n shadow_coef = 1\n break\n shadow_color = np.zeros(3)\n # Use SHADOW_STRENGTH = 0 for no shadows and 1 for hard shadows\n shadow_coef *= max(0.0, min(SHADOW_STRENGTH, 1.0))\n color = COLOR_FOR_LIGHT * (1 - shadow_coef) + shadow_color * shadow_coef\n return color\n\n\ndef light_map(n, l, dark, light, caustic):\n \"\"\"\n Shader calculation for a normal and a light vector and light and dark\n colors.\n Args:\n n(ndarray): Unit normal vector\n l(ndarray): Unit vector in the direction to the light\n dark(ndarray): RGB dark color\n light(ndarray): RGB light color\n caustic(ndarray): Caustic contribution vector\n Returns:\n ndarray: The calculated color (RGB)\n \"\"\"\n surface_color = diffuse_colors(n, l, dark, light)\n color = surface_color + caustic\n return color\n", "\"\"\"\nExample that generates an image of a sphere with a flat color (no lighting).\n\"\"\"\nimport numpy as np\nfrom PIL import Image\n\n# Local Modules\nfrom camera import Camera\nfrom constants import RGB_CHANNELS, MAX_QUALITY\nfrom material import Material, COLOR_BLUE\nfrom object import Sphere\nfrom scene import Scene\nimport shaders\n\nSCREEN_WIDTH = 400\nSCREEN_HEIGHT = 300\nOUTPUT_IMG_FILENAME = \"1_flat_sphere.jpg\"\n\n\ndef set_camera():\n camera_pos = np.array([0.0, 0.25, 0.0])\n v_view = np.array([0.0, 0.0, 1.0])\n v_up = np.array([0.0, 1.0, 0.0])\n return Camera(camera_pos, v_view, v_up, d=0.26, scale_x=0.4, scale_y=0.3)\n\n\ndef set_scene():\n pos = np.array([0.0, 0.25, 0.6])\n mat = Material(COLOR_BLUE)\n radius = 0.25\n sphere = Sphere(pos, mat, shaders.TYPE_FLAT, radius)\n cameras = [set_camera()]\n return Scene(cameras, [], [sphere])\n\n\ndef main():\n scene = set_scene()\n main_camera = scene.get_main_camera()\n screen = np.zeros([SCREEN_HEIGHT, SCREEN_WIDTH, RGB_CHANNELS])\n img_output = Image.fromarray(screen)\n img_output.save(OUTPUT_IMG_FILENAME, quality=MAX_QUALITY)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.dot", "numpy.array", "numpy.maximum", "numpy.zeros" ], [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
renato2099/ibis-bigquery
[ "58d624abaaa9db4106241128559e28b5c2a2e715" ]
[ "ibis_bigquery/client.py" ]
[ "\"\"\"BigQuery ibis client implementation.\"\"\"\n\nimport datetime\nfrom collections import OrderedDict\nfrom typing import Optional, Tuple\n\nimport google.cloud.bigquery as bq\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.lineage as lin\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nimport pandas as pd\nimport regex as re\nfrom google.api_core.client_info import ClientInfo\nfrom google.api_core.exceptions import NotFound\nfrom ibis.client import Database, Query, SQLClient\nfrom multipledispatch import Dispatcher\nfrom pkg_resources import parse_version\n\nfrom . import compiler as comp\nfrom .datatypes import ibis_type_to_bigquery_type\n\nNATIVE_PARTITION_COL = '_PARTITIONTIME'\n\n\n_DTYPE_TO_IBIS_TYPE = {\n 'INT64': dt.int64,\n 'FLOAT64': dt.double,\n 'BOOL': dt.boolean,\n 'STRING': dt.string,\n 'DATE': dt.date,\n # FIXME: enforce no tz info\n 'DATETIME': dt.timestamp,\n 'TIME': dt.time,\n 'TIMESTAMP': dt.timestamp,\n 'BYTES': dt.binary,\n 'NUMERIC': dt.Decimal(38, 9),\n}\n\n\n_LEGACY_TO_STANDARD = {\n 'INTEGER': 'INT64',\n 'FLOAT': 'FLOAT64',\n 'BOOLEAN': 'BOOL',\n}\n\n\n_USER_AGENT_DEFAULT_TEMPLATE = 'ibis/{}'\n\n\ndef _create_client_info(application_name):\n user_agent = []\n\n if application_name:\n user_agent.append(application_name)\n\n user_agent.append(_USER_AGENT_DEFAULT_TEMPLATE.format(ibis.__version__))\n return ClientInfo(user_agent=\" \".join(user_agent))\n\n\[email protected](bq.schema.SchemaField)\ndef bigquery_field_to_ibis_dtype(field):\n \"\"\"Convert BigQuery `field` to an ibis type.\"\"\"\n typ = field.field_type\n if typ == 'RECORD':\n fields = field.fields\n assert fields, 'RECORD fields are empty'\n names = [el.name for el in fields]\n ibis_types = list(map(dt.dtype, fields))\n ibis_type = dt.Struct(names, ibis_types)\n else:\n ibis_type = _LEGACY_TO_STANDARD.get(typ, typ)\n ibis_type = _DTYPE_TO_IBIS_TYPE.get(ibis_type, ibis_type)\n if field.mode == 'REPEATED':\n ibis_type = dt.Array(ibis_type)\n return ibis_type\n\n\[email protected](bq.table.Table)\ndef bigquery_schema(table):\n \"\"\"Infer the schema of a BigQuery `table` object.\"\"\"\n fields = OrderedDict((el.name, dt.dtype(el)) for el in table.schema)\n partition_info = table._properties.get('timePartitioning', None)\n\n # We have a partitioned table\n if partition_info is not None:\n partition_field = partition_info.get('field', NATIVE_PARTITION_COL)\n\n # Only add a new column if it's not already a column in the schema\n fields.setdefault(partition_field, dt.timestamp)\n return sch.schema(fields)\n\n\nclass BigQueryCursor:\n \"\"\"BigQuery cursor.\n\n This allows the BigQuery client to reuse machinery in\n :file:`ibis/client.py`.\n\n \"\"\"\n\n def __init__(self, query):\n \"\"\"Construct a BigQueryCursor with query `query`.\"\"\"\n self.query = query\n\n def fetchall(self):\n \"\"\"Fetch all rows.\"\"\"\n result = self.query.result()\n return [row.values() for row in result]\n\n @property\n def columns(self):\n \"\"\"Return the columns of the result set.\"\"\"\n result = self.query.result()\n return [field.name for field in result.schema]\n\n @property\n def description(self):\n \"\"\"Get the fields of the result set's schema.\"\"\"\n result = self.query.result()\n return list(result.schema)\n\n def __enter__(self):\n # For compatibility when constructed from Query.execute()\n \"\"\"No-op for compatibility.\n\n See Also\n --------\n ibis.client.Query.execute\n\n \"\"\"\n return self\n\n def 
__exit__(self, exc_type, exc_value, traceback):\n \"\"\"No-op for compatibility.\n\n See Also\n --------\n ibis.client.Query.execute\n\n \"\"\"\n\n\ndef _find_scalar_parameter(expr):\n \"\"\"Find all :class:`~ibis.expr.types.ScalarParameter` instances.\n\n Parameters\n ----------\n expr : ibis.expr.types.Expr\n\n Returns\n -------\n Tuple[bool, object]\n The operation and the parent expresssion's resolved name.\n\n \"\"\"\n op = expr.op()\n\n if isinstance(op, ops.ScalarParameter):\n result = op, expr.get_name()\n else:\n result = None\n return lin.proceed, result\n\n\nclass BigQueryQuery(Query):\n def __init__(self, client, ddl, query_parameters=None):\n super().__init__(client, ddl)\n\n # self.expr comes from the parent class\n query_parameter_names = dict(\n lin.traverse(_find_scalar_parameter, self.expr)\n )\n self.query_parameters = [\n bigquery_param(\n param.to_expr().name(query_parameter_names[param]), value\n )\n for param, value in (query_parameters or {}).items()\n ]\n\n def _fetch(self, cursor):\n df = cursor.query.to_dataframe()\n schema = self.schema()\n return schema.apply_to(df)\n\n def execute(self):\n # synchronous by default\n with self.client._execute(\n self.compiled_sql,\n results=True,\n query_parameters=self.query_parameters,\n ) as cur:\n result = self._fetch(cur)\n\n return self._wrap_result(result)\n\n\nclass BigQueryDatabase(Database):\n \"\"\"A BigQuery dataset.\"\"\"\n\n\nbigquery_param = Dispatcher('bigquery_param')\n\n\n@bigquery_param.register(ir.StructScalar, OrderedDict)\ndef bq_param_struct(param, value):\n field_params = [bigquery_param(param[k], v) for k, v in value.items()]\n result = bq.StructQueryParameter(param.get_name(), *field_params)\n return result\n\n\n@bigquery_param.register(ir.ArrayValue, list)\ndef bq_param_array(param, value):\n param_type = param.type()\n assert isinstance(param_type, dt.Array), str(param_type)\n\n try:\n bigquery_type = ibis_type_to_bigquery_type(param_type.value_type)\n except NotImplementedError:\n raise com.UnsupportedBackendType(param_type)\n else:\n if isinstance(param_type.value_type, dt.Struct):\n query_value = [\n bigquery_param(param[i].name('element_{:d}'.format(i)), struct)\n for i, struct in enumerate(value)\n ]\n bigquery_type = 'STRUCT'\n elif isinstance(param_type.value_type, dt.Array):\n raise TypeError('ARRAY<ARRAY<T>> is not supported in BigQuery')\n else:\n query_value = value\n result = bq.ArrayQueryParameter(\n param.get_name(), bigquery_type, query_value\n )\n return result\n\n\n@bigquery_param.register(\n ir.TimestampScalar, (str, datetime.datetime, datetime.date)\n)\ndef bq_param_timestamp(param, value):\n assert isinstance(param.type(), dt.Timestamp), str(param.type())\n\n # TODO(phillipc): Not sure if this is the correct way to do this.\n timestamp_value = pd.Timestamp(value, tz='UTC').to_pydatetime()\n return bq.ScalarQueryParameter(\n param.get_name(), 'TIMESTAMP', timestamp_value\n )\n\n\n@bigquery_param.register(ir.StringScalar, str)\ndef bq_param_string(param, value):\n return bq.ScalarQueryParameter(param.get_name(), 'STRING', value)\n\n\n@bigquery_param.register(ir.IntegerScalar, int)\ndef bq_param_integer(param, value):\n return bq.ScalarQueryParameter(param.get_name(), 'INT64', value)\n\n\n@bigquery_param.register(ir.FloatingScalar, float)\ndef bq_param_double(param, value):\n return bq.ScalarQueryParameter(param.get_name(), 'FLOAT64', value)\n\n\n@bigquery_param.register(ir.BooleanScalar, bool)\ndef bq_param_boolean(param, value):\n return bq.ScalarQueryParameter(param.get_name(), 
'BOOL', value)\n\n\n@bigquery_param.register(ir.DateScalar, str)\ndef bq_param_date_string(param, value):\n return bigquery_param(param, pd.Timestamp(value).to_pydatetime().date())\n\n\n@bigquery_param.register(ir.DateScalar, datetime.datetime)\ndef bq_param_date_datetime(param, value):\n return bigquery_param(param, value.date())\n\n\n@bigquery_param.register(ir.DateScalar, datetime.date)\ndef bq_param_date(param, value):\n return bq.ScalarQueryParameter(param.get_name(), 'DATE', value)\n\n\nclass BigQueryTable(ops.DatabaseTable):\n pass\n\n\ndef rename_partitioned_column(table_expr, bq_table):\n partition_info = bq_table._properties.get('timePartitioning', None)\n\n # If we don't have any partiton information, the table isn't partitioned\n if partition_info is None:\n return table_expr\n\n # If we have a partition, but no \"field\" field in the table properties,\n # then use NATIVE_PARTITION_COL as the default\n partition_field = partition_info.get('field', NATIVE_PARTITION_COL)\n\n # The partition field must be in table_expr columns\n assert partition_field in table_expr.columns\n\n # User configured partition column name default\n col = ibis.options.bigquery.partition_col\n\n # No renaming if the config option is set to None or the partition field\n # is not _PARTITIONTIME\n if col is None or partition_field != NATIVE_PARTITION_COL:\n return table_expr\n return table_expr.relabel({NATIVE_PARTITION_COL: col})\n\n\ndef parse_project_and_dataset(\n project: str, dataset: Optional[str] = None\n) -> Tuple[str, str, Optional[str]]:\n \"\"\"Compute the billing project, data project, and dataset if available.\n\n This function figure out the project id under which queries will run versus\n the project of where the data live as well as what dataset to use.\n\n Parameters\n ----------\n project : str\n A project name\n dataset : Optional[str]\n A ``<project>.<dataset>`` string or just a dataset name\n\n Examples\n --------\n >>> data_project, billing_project, dataset = parse_project_and_dataset(\n ... 'ibis-gbq',\n ... 'foo-bar.my_dataset'\n ... )\n >>> data_project\n 'foo-bar'\n >>> billing_project\n 'ibis-gbq'\n >>> dataset\n 'my_dataset'\n >>> data_project, billing_project, dataset = parse_project_and_dataset(\n ... 'ibis-gbq',\n ... 'my_dataset'\n ... )\n >>> data_project\n 'ibis-gbq'\n >>> billing_project\n 'ibis-gbq'\n >>> dataset\n 'my_dataset'\n >>> data_project, billing_project, dataset = parse_project_and_dataset(\n ... 'ibis-gbq'\n ... 
)\n >>> data_project\n 'ibis-gbq'\n >>> print(dataset)\n None\n\n \"\"\"\n if dataset is not None and \".\" in dataset:\n data_project, dataset = dataset.split(\".\")\n billing_project = project\n else:\n billing_project = data_project = project\n\n return data_project, billing_project, dataset\n\n\nclass BigQueryClient(SQLClient):\n \"\"\"An ibis BigQuery client implementation.\"\"\"\n\n def __init__(\n self,\n backend,\n project_id,\n dataset_id=None,\n credentials=None,\n application_name=None,\n ):\n \"\"\"Construct a BigQueryClient.\n\n Parameters\n ----------\n project_id : str\n A project name\n dataset_id : Optional[str]\n A ``<project_id>.<dataset_id>`` string or just a dataset name\n credentials : google.auth.credentials.Credentials\n application_name : str\n A string identifying your application to Google API endpoints.\n\n \"\"\"\n self.query_class = backend.query_class\n self.database_class = backend.database_class\n self.table_class = backend.table_class\n self.dialect = backend.dialect\n (\n self.data_project,\n self.billing_project,\n self.dataset,\n ) = parse_project_and_dataset(project_id, dataset_id)\n self.client = bq.Client(\n project=self.billing_project,\n credentials=credentials,\n client_info=_create_client_info(application_name),\n )\n\n def _parse_project_and_dataset(self, dataset):\n if not dataset and not self.dataset:\n raise ValueError(\"Unable to determine BigQuery dataset.\")\n project, _, dataset = parse_project_and_dataset(\n self.billing_project,\n dataset or '{}.{}'.format(self.data_project, self.dataset),\n )\n return project, dataset\n\n @property\n def project_id(self):\n return self.data_project\n\n @property\n def dataset_id(self):\n return self.dataset\n\n def table(self, name, database=None):\n t = super().table(name, database=database)\n project, dataset, name = t.op().name.split('.')\n dataset_ref = self.client.dataset(dataset, project=project)\n table_ref = dataset_ref.table(name)\n bq_table = self.client.get_table(table_ref)\n return rename_partitioned_column(t, bq_table)\n\n def _build_ast(self, expr, context):\n result = comp.build_ast(expr, context)\n return result\n\n def _get_query(self, dml, **kwargs):\n return self.query_class(self, dml, query_parameters=dml.context.params)\n\n def _fully_qualified_name(self, name, database):\n project, dataset = self._parse_project_and_dataset(database)\n return \"{}.{}.{}\".format(project, dataset, name)\n\n def _get_table_schema(self, qualified_name):\n dataset, table = qualified_name.rsplit('.', 1)\n assert dataset is not None, \"dataset is None\"\n return self.get_schema(table, database=dataset)\n\n def _get_schema_using_query(self, limited_query):\n with self._execute(limited_query, results=True) as cur:\n # resets the state of the cursor and closes operation\n names, ibis_types = self._adapt_types(cur.description)\n return sch.Schema(names, ibis_types)\n\n def _adapt_types(self, descr):\n names = []\n adapted_types = []\n for col in descr:\n names.append(col.name)\n typename = bigquery_field_to_ibis_dtype(col)\n adapted_types.append(typename)\n return names, adapted_types\n\n def _execute(self, stmt, results=True, query_parameters=None):\n job_config = bq.job.QueryJobConfig()\n job_config.query_parameters = query_parameters or []\n job_config.use_legacy_sql = False # False by default in >=0.28\n query = self.client.query(\n stmt, job_config=job_config, project=self.billing_project\n )\n query.result() # blocks until finished\n return BigQueryCursor(query)\n\n def database(self, name=None):\n if 
name is None and self.dataset is None:\n raise ValueError(\n \"Unable to determine BigQuery dataset. Call \"\n \"client.database('my_dataset') or set_database('my_dataset') \"\n \"to assign your client a dataset.\"\n )\n return self.database_class(name or self.dataset, self)\n\n @property\n def current_database(self):\n return self.database(self.dataset)\n\n def set_database(self, name):\n self.data_project, self.dataset = self._parse_project_and_dataset(name)\n\n def exists_database(self, name):\n project, dataset = self._parse_project_and_dataset(name)\n client = self.client\n dataset_ref = client.dataset(dataset, project=project)\n try:\n client.get_dataset(dataset_ref)\n except NotFound:\n return False\n else:\n return True\n\n def list_databases(self, like=None):\n results = [\n dataset.dataset_id for dataset in self.client.list_datasets(\n project=self.data_project\n )\n ]\n if like:\n results = [\n dataset_name\n for dataset_name in results\n if re.match(like, dataset_name) is not None\n ]\n return results\n\n def exists_table(self, name, database=None):\n project, dataset = self._parse_project_and_dataset(database)\n client = self.client\n dataset_ref = self.client.dataset(dataset, project=project)\n table_ref = dataset_ref.table(name)\n try:\n client.get_table(table_ref)\n except NotFound:\n return False\n else:\n return True\n\n def list_tables(self, like=None, database=None):\n project, dataset = self._parse_project_and_dataset(database)\n dataset_ref = bq.DatasetReference(project, dataset)\n result = [\n table.table_id for table in self.client.list_tables(dataset_ref)\n ]\n if like:\n result = [\n table_name\n for table_name in result\n if re.match(like, table_name) is not None\n ]\n return result\n\n def get_schema(self, name, database=None):\n project, dataset = self._parse_project_and_dataset(database)\n table_ref = self.client.dataset(dataset, project=project).table(name)\n bq_table = self.client.get_table(table_ref)\n return sch.infer(bq_table)\n\n @property\n def version(self):\n return parse_version(bq.__version__)\n" ]
[ [ "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
preethamvishy/ml
[ "2aa155cd50b6995f4641da2b864caff202a870e1" ]
[ "sourced/ml/tests/test_merge_coocc_entry.py" ]
[ "import logging\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport unittest\n\nimport numpy\n\nfrom sourced.ml.cmd.merge_coocc import merge_coocc, load_and_check, MAX_INT32\nfrom sourced.ml.models import Cooccurrences\nfrom sourced.ml.tests import models\n\nCOPIES_NUMBER = 3\n\n\ndef get_args(_input_dir, _no_spark):\n class args:\n input = [os.path.join(_input_dir, x) for x in os.listdir(_input_dir)]\n output = os.path.join(_input_dir, \"res_coocc.asdf\")\n docfreq = models.COOCC_DF\n pause = False\n filter = \"**/*.asdf\"\n log_level = \"INFO\"\n no_spark = _no_spark\n return args\n\n\nclass MergeCooccEntry(unittest.TestCase):\n def check_coocc(self, output, copies_number=COPIES_NUMBER):\n coocc = Cooccurrences().load(models.COOCC)\n res = Cooccurrences().load(output)\n self.assertEqual(len(res.tokens), len(coocc.tokens))\n permutation = [coocc.tokens.index(token) for token in res.tokens]\n self.assertTrue(numpy.all(res.matrix.todense() ==\n copies_number *\n coocc.matrix.todense()[permutation][:, permutation]))\n\n def copy_models(self, model_path, to_dir, n):\n coocc_filename = os.path.split(model_path)[1]\n for i in range(n):\n shutil.copyfile(model_path,\n os.path.join(to_dir, \"{}.\".format(i).join(coocc_filename.split(\".\"))))\n\n @unittest.skipUnless(sys.version_info < (3, 7), \"Python 3.7 is not yet supported\")\n def test_with_spark(self):\n with tempfile.TemporaryDirectory(prefix=\"merge-coocc-entry-test\") as input_dir:\n self.copy_models(models.COOCC, input_dir, COPIES_NUMBER)\n args = get_args(input_dir, False)\n merge_coocc(args)\n self.check_coocc(args.output)\n\n def test_without_spark(self):\n with tempfile.TemporaryDirectory(prefix=\"merge-coocc-entry-test\") as input_dir:\n self.copy_models(models.COOCC, input_dir, COPIES_NUMBER)\n args = get_args(input_dir, True)\n merge_coocc(args)\n self.check_coocc(args.output)\n\n def test_load_and_check(self):\n with tempfile.TemporaryDirectory(prefix=\"merge-coocc-entry-test\") as input_dir:\n self.copy_models(models.COOCC, input_dir, COPIES_NUMBER)\n args = get_args(input_dir, True)\n c_neg = Cooccurrences().load(args.input[0])\n c_neg.matrix.data[0] = -1\n c_neg.save(args.input[0])\n self.assertEqual(len(list(load_and_check(args.input, logging.getLogger(\"test\")))), 2)\n\n c_neg = Cooccurrences().load(args.input[0])\n c_neg.matrix.data = numpy.uint32(c_neg.matrix.data)\n c_neg.matrix.data[0] = MAX_INT32 + 1\n c_neg.save(args.input[0])\n for path, coocc in load_and_check(args.input, logging.getLogger(\"test\")):\n self.assertTrue(numpy.all(coocc.matrix.data <= MAX_INT32))\n break\n\n @unittest.skipUnless(sys.version_info < (3, 7), \"Python 3.7 is not yet supported\")\n def test_overflow_with_spark(self):\n with tempfile.TemporaryDirectory(prefix=\"merge-coocc-entry-test\") as input_dir:\n self.copy_models(models.COOCC, input_dir, COPIES_NUMBER)\n args = get_args(input_dir, False)\n c_neg = Cooccurrences().load(args.input[0])\n c_neg.matrix.data[0] = MAX_INT32 - c_neg.matrix.data[0]\n c_neg.save(args.input[0])\n merge_coocc(args)\n\n result = Cooccurrences().load(args.output)\n self.assertTrue(numpy.all(result.matrix.data <= MAX_INT32))\n self.assertTrue(numpy.all(result.matrix.data >= 0))\n\n def test_overflow_without_spark(self):\n with tempfile.TemporaryDirectory(prefix=\"merge-coocc-entry-test\") as input_dir:\n self.copy_models(models.COOCC, input_dir, 10)\n args = get_args(input_dir, True)\n c_neg = Cooccurrences().load(args.input[0])\n c_neg.matrix.data[0] = MAX_INT32 - 5 * c_neg.matrix.data[0]\n 
c_neg.save(args.input[0])\n merge_coocc(args)\n\n result = Cooccurrences().load(args.output)\n self.assertTrue(numpy.all(result.matrix.data <= MAX_INT32))\n self.assertTrue(numpy.all(result.matrix.data >= 0))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.all", "numpy.uint32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sechaparroc/LeapMotion
[ "c72cf5dc6cc40dd73edcd9a30b461ebbe475158d" ]
[ "recognizer/recognizer.py" ]
[ "import os\nimport numpy as np\nimport sys\n\n################################################\n######### GLOBAL VARIABLES ##########\n################################################\nlocation = \"\"\nmy_data_location = \"../visualize/\"\nfinger_names = [\"TYPE_THUMB\", \"TYPE_INDEX\", \"TYPE_MIDDLE\", \"PALM\", \"TYPE_RING\", \"TYPE_PINKY\"]\nminimum = [150,100,0]\nranges = [750,500,600]\nimg_rows = 50\nimg_cols = 50\nNO_INFO = False\n#max_v = np.array([[15.29, 20.92, 16.35], [ 8.99, 16.15, 11.92], [12.13, 19.84, 15.06], [18.23, 20.62, 21.93], [12.02, 25.24, 15.59], [13.15, 18.8 , 17.37]])\n\n#max_v = np.array([[18.09092683, 28.46656277, 28.67797005], [16.86947857, 28.18406665, 22.46399137], [24.11379573, 39.75088479, 31.89060003],\n# [21.60760976, 42.81772681, 46.89036457], [20.56626923, 29.64425473, 42.53967063], [25.01218211, 30.00609044, 43.22103779]])\n\nmax_v = np.array([[18.09092683, 28.46656277, 28.67797005],\n [16.86947857, 28.18406665, 22.46399137],\n [24.11379573, 39.75088479, 31.89060003],\n [21.60760976, 42.81772681, 46.89036457],\n [20.56626923, 29.64425473, 42.53967063],\n [25.01218211, 30.00609044, 43.22103779]])\n\nmin_v = max_v*0\n\n################################################\n######### PREPROCESSING ###########\n################################################\nfrom scipy import ndimage\nimport scipy\nimport numpy as np\nfrom skimage import transform\n\n#Preprocessing Steps:\n#1. create a 200X200 image in XY plane\ndef createPlanes(data, invertY = False, rowsX = 200, rowsY = 200, rowsZ = 200):\n idxX = rowsX * (data[::3] - minimum[0]) / ranges[0] \n if invertY:\n idxY = rowsY * (-1*(data[1::3] - minimum[1]) + ranges[1] - minimum[1]) / ranges[1]\n else:\n idxY = rowsY * (data[1::3] - minimum[1]) / ranges[1] \n idxZ = rowsZ * (data[2::3] - minimum[2]) / ranges[2] \n\n matYX = np.zeros((rowsY,rowsX))\n matYZ = np.zeros((rowsY,rowsZ))\n matZX = np.zeros((rowsZ,rowsX))\n\n timeYX = np.zeros((rowsY,rowsX))\n timeYZ = np.zeros((rowsY,rowsZ))\n timeZX = np.zeros((rowsZ,rowsX))\n\n for i in range(0,len(idxX)):\n r = min(int(round(idxY[i])), rowsY - 1) if idxY[i] > 0 else 0\n c = min(int(round(idxX[i])), rowsX - 1) if idxX[i] > 0 else 0\n \n x = min(int(round(idxX[i])), rowsX - 1) if idxX[i] > 0 else 0\n y = min(int(round(idxY[i])), rowsY - 1) if idxY[i] > 0 else 0\n z = min(int(round(idxZ[i])), rowsZ - 1) if idxZ[i] > 0 else 0\n \n #comment to not consider order\n timeYX[y][x] = i+1 - timeYX[y][x]\n timeYZ[y][z] = i+1 - timeYZ[y][z]\n timeZX[z][x] = i+1 - timeZX[z][x]\n #not use 1 but Vel or Dir\n matYX[y][x] = matYX[y][x] + 1*timeYX[y][x]\n matYZ[y][z] = matYZ[y][z] + 1*timeYZ[y][z]\n matZX[z][x] = matZX[z][x] + 1*timeZX[z][x]\n #Gray value depends on relative time (non absolute)\n return matYX/len(idxX), matYZ/len(idxX), matZX/len(idxX) \n #return matYX, matYZ, matZX\n\n#2. Apply dilation\ndef dilation(mat, dim = 5):\n kernel = np.ones((5,5))\n #ndimage.binary_dilation(mat, structure=kernel).astype(mat.dtype)\n return ndimage.grey_dilation(mat, size=(5,5))\n\n \n#3. Crop\ndef crop(mat, margin = 5):\n m = np.nonzero(mat)\n max_r = np.max(m[0])\n max_c = np.max(m[1])\n min_r = np.min(m[0])\n min_c = np.min(m[1])\n return mat[max(min_r - margin, 0): min(max_r + 1 + margin, mat.shape[0]), max(min_c - margin, 0): min(max_c + 1 + margin, mat.shape[1])]\n#4. 
resize image\ndef resize(mat, dim = (50,50)):\n #return scipy.misc.imresize(mat, dim, mode = 'F')\n return transform.resize(mat, dim, preserve_range = True)\n\n# Do all preprocess steps\ndef preprocess(data, invertY = False):\n imageYX, imageYZ, imageZX = createPlanes(data, invertY)\n #Dilation\n imageYX = dilation(imageYX)\n imageYZ = dilation(imageYZ)\n imageZX = dilation(imageZX)\n #Crop\n #imageYX = crop(imageYX)\n #imageYZ = crop(imageYZ)\n #imageZX = crop(imageZX)\n #Resize\n imageYX = resize(imageYX)\n imageYZ = resize(imageYZ)\n imageZX = resize(imageZX)\n return [imageYX, imageYZ, imageZX]\n\n#One Hot Encode\nfrom numpy import array\nfrom numpy import argmax\n\ndata_classes = [\"capE\", \"CheckMark\", \"e\", \"F\", \"Figure8\", \"Tap\", \"Tap2\", \"Grab\", \"Pinch\", \"Release\", \"Swipe\", \"Wipe\"]\nint_to_label = {}\nlabel_to_int = {}\n\nfor i,c in enumerate(data_classes):\n int_to_label[i] = c\n label_to_int[c] = i\n\ndef encode(labels):\n m = np.zeros((len(labels), len(data_classes)))\n for i in range(0, len(labels)):\n m[i][label_to_int[labels[i]]] = 1\n return m\n\ndef decode(values):\n m = [\" \"] * len(values)\n for i in range(0, len(m)):\n m[i] = int_to_label[np.argmax(values[i])]\n return np.array(m)\n\n################################################\n######### LOAD DATA ##########\n################################################\ndef getPosition(line, finger = \"TYPE_INDEX\", pos = None):\n line_ = line.split(\" \")\n if(pos != None):\n if \"NP\" in line_[pos:pos+3]:\n return []\n r = [float(i) for i in line_[pos:pos+3]]\n return r\n idx = [i for i,s in enumerate(line_) if s == finger]\n if(len(idx) == 0):\n return []\n idx = idx[0] + 5\n r = [float(i) for i in line_[idx:idx+3]]\n if(len(r) != 3):\n print(\"ERROR!\")\n return (None)\n return r\n\n\ndef load_data(location, name):\n my_fingers = {}\n file = open(location + name, \"r\") \n lines = file.readlines()\n for finger in [\"PALM\",\"TYPE_THUMB\", \"TYPE_INDEX\", \"TYPE_MIDDLE\", \"TYPE_RING\", \"TYPE_PINKY\"]:\n data = []\n features = []\n pos = None\n if(finger == \"PALM\"):\n print(lines[1])\t\n for k,s in enumerate(lines[1].split(\" \")):\n if s == \"h_PosX\":\n pos = k\n for line in lines[2:]:\n f = getPosition(line, finger, pos) \n if len(f) > 0:\n features = features + f\n if len(features) > 0:\n data.append(np.array(features))\n file.close()\n sys.stdout.write('\\r')\n sys.stdout.write(\"Finger: %s %s \" % (location + name, finger))\n sys.stdout.flush() \n my_fingers[finger] = data\n if len(data) == 0:\n NO_INFO = True\n\n return my_fingers\n\n\n\ndef get_images(my_fingers):\n my_data = {}\n print(\"Preprocessing\")\n for finger in my_fingers:\n my_data[finger] = []\n for i,gesture in enumerate(my_fingers[finger]):\n image = preprocess(gesture, True)\n my_data[finger].append(image)\n sys.stdout.write(\"Image: %d\" % (i))\n sys.stdout.write('\\r')\n sys.stdout.flush()\n return my_data\n\n\ndef merge(my_data):\n my_images = np.zeros((len(my_data[\"TYPE_INDEX\"]),6,3,50,50))\n for i in range(0, len(my_data[\"TYPE_INDEX\"])):\n sys.stdout.write('\\r')\n sys.stdout.write(\"Image: %d\" % (i))\n sys.stdout.flush()\n image = np.zeros((6,3,50,50))\n for j,finger in enumerate(['TYPE_THUMB', 'TYPE_INDEX', 'TYPE_MIDDLE', 'PALM', 'TYPE_RING', 'TYPE_PINKY']):\n image[j][0] = resize(crop(my_data[finger][i][0]))\n image[j][1] = resize(crop(my_data[finger][i][1]))\n image[j][2] = resize(crop(my_data[finger][i][2]))\n my_images[i] = image\n return my_images\n\n\ndef normalize(mat, max_v, min_v, scale = 255):\n ids = 
np.nonzero(mat)\n mat[ids] = scale * ( mat[ids] - min_v ) / (max_v - min_v)\n m = mat\n return mat\n\ndef normalize_data(my_images):\n for i,image in enumerate(my_images):\n for j,finger in enumerate(image):\n info = True\n for k,channel in enumerate(finger):\n channel_n = normalize(channel, max_v[j][k], min_v[j][k])\n if channel_n == []:\n print(\"image: \" + str(i) + \" contains no information in channel \" + str(k))\n info = False\n\ndef channel_last(data):\n data_ = np.zeros((data.shape[0], data.shape[1], data.shape[3], data.shape[4], data.shape[2]))\n for i,sample in enumerate(data):\n for j,finger in enumerate(sample):\n rgbArray = np.zeros((finger.shape[1],finger.shape[2],3))\n rgbArray[:,:, 0] = finger[0]\n rgbArray[:,:, 1] = finger[1]\n rgbArray[:,:, 2] = finger[2]\n data_[i][j] = rgbArray \n return data_\n\ndef scale_data(my_images):\n #scale\n data_im = np.zeros((my_images.shape[0], my_images.shape[1], img_rows, img_cols, 3))\n new_shape = (img_rows, img_cols, 3)\n print(\"SCALE\")\n for i,sample in enumerate(my_images):\n for j,image in enumerate(sample):\n im = transform.resize(image, new_shape)\n im = image/255.0\n data_im[i][j] = im\n sys.stdout.write('\\r')\n sys.stdout.write(\"Image: %d\" % (i))\n sys.stdout.flush() \n return data_im\n\ndef swap(data):\n return np.swapaxes(data,1,4)\n\ndef predict_data(model_alt_3, data_im):\n data_im_t = swap(data_im)\n my_d_t = []\n for i in range(0,3):\n my_d_t.append(data_im_t[:,i])\n predicted = decode(model_alt_3.predict(my_d_t, verbose=1))\n return predicted\n\n\n#Create same model for each channel\n#create a small model\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Merge\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.models import Sequential, Model\nfrom keras import optimizers\nimport keras\nimport numpy as np\n\ndef create_model():\n dirs = [\"capE\", \"CheckMark\", \"e\", \"F\", \"Figure8\", \"Tap\", \"Tap2\", \"Grab\", \"Pinch\", \"Release\", \"Swipe\", \"Wipe\"]\n\n input_shape = (img_rows, img_cols, 6)\n\n models_alt = []\n for i in range(0,3):\n model_alt_3d = Sequential()\n model_alt_3d.add(Conv2D(50, kernel_size=(5, 5),\n activation='relu', input_shape=input_shape))\n model_alt_3d.add(MaxPooling2D(pool_size=(2, 2)))\n model_alt_3d.add(Conv2D(50, (5, 5), activation='relu'))\n model_alt_3d.add(MaxPooling2D(pool_size=(2, 2)))\n model_alt_3d.add(Conv2D(64, (3, 3), activation='relu'))\n model_alt_3d.add(Dropout(0.25))\n model_alt_3d.add(Flatten())\n models_alt.append(model_alt_3d)\n\n #concatenate\n model_alt_3 = Sequential()\n model_alt_3.add(Merge(models_alt, mode = 'concat'))\n # dense layers\n model_alt_3.add(Dense(128, activation='relu'))\n model_alt_3.add(Dropout(0.5))\n model_alt_3.add(Dense(len(dirs), activation='softmax'))\n\n model_alt_3.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n print(model_alt_3.summary())\n return model_alt_3\n\n\ndef setup_model():\n model_alt_3 = create_model()\n model_alt_3.load_weights(location + 'model.h5')\n return model_alt_3\n\ndef execute(model_alt_3, name):\n NO_INFO = False\n my_fingers = load_data(my_data_location, name)\n if NO_INFO == True:\n return \"NO INFORMATION\"\n my_data = get_images(my_fingers)\n my_images = merge(my_data)\n normalize_data(my_images)\n my_images = channel_last(my_images)\n data_im = scale_data(my_images)\n return predict_data(model_alt_3, data_im)\n\n\nmodel_alt_3 = setup_model()\n\nimport socket\nprint('Start Connection')\n\nHOST = '' \nPORT 
= 50007 \ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ns.bind((HOST, PORT))\ns.listen(1)\n\nconn, addr = s.accept()\n\nprint('Connected by', addr)\nwhile True:\n data = conn.recv(1024)\n if not data: break\n\n if \"gesture\" in data.decode(\"utf-8\"): \n print(data) \n response = execute(model_alt_3, data.decode(\"utf-8\"))\n print(\"Predicted : \" + str(response))\n if len(response) > 0:\n conn.send(response[0].encode(\"utf-8\"))\n\n \nconn.close()\n\n\n\n\n" ]
[ [ "numpy.swapaxes", "numpy.nonzero", "scipy.ndimage.grey_dilation", "numpy.min", "numpy.ones", "numpy.max", "numpy.argmax", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
congchan/nnnlp
[ "9a2026a2577817d485d139bf442de7fd602418e6", "9a2026a2577817d485d139bf442de7fd602418e6" ]
[ "tf/train_glue_baseline.py", "tf/masking.py" ]
[ "# coding=utf-8\n\"\"\"GLUE tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport six\nimport json\nimport copy\nimport glue_utils as classifier_utils\nimport modeling\nimport optimization\nimport tokenization\nimport tensorflow as tf\nfrom tensorflow.contrib import cluster_resolver as contrib_cluster_resolver\nfrom tensorflow.contrib import tpu as contrib_tpu\nfrom tensorflow.contrib import metrics as contrib_metrics\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"config_file\", None,\n \"The config json file corresponding to the pre-trained model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\n \"vocab_file\", None,\n \"The vocabulary file that the model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\nflags.DEFINE_string(\"cached_dir\", None,\n \"Path to cached training and dev tfrecord file. \"\n \"The file will be generated if not exist.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\n# flags.DEFINE_string(\n# \"albert_hub_module_handle\", None,\n# \"If set, the ALBERT hub module to use.\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 512,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_integer(\"train_step\", 1000,\n \"Total number of training steps to perform.\")\n\nflags.DEFINE_integer(\n \"warmup_step\", 0,\n \"number of steps to perform linear learning rate warmup for.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"keep_checkpoint_max\", 5,\n \"How many checkpoints to keep.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\"optimizer\", \"adamw\", \"Optimizer to use\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. 
This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\n\nclass Config(object):\n \"\"\"Configuration.\"\"\"\n\n def __init__(self,\n vocab_size,\n embedding_size=128,\n hidden_size=4096,\n num_hidden_layers=12, \n num_hidden_groups=1,\n num_attention_heads=64,\n intermediate_size=16384,\n inner_group_num=1,\n down_scale_factor=1,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0,\n attention_probs_dropout_prob=0,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n num_bilstm=1,\n lstm_size=128,\n bilstm_dropout_rate=0.2):\n \"\"\"Constructs Config.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `Model`.\n embedding_size: size of voc embeddings.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_hidden_groups: Number of group for the hidden layers, parameters in\n the same group are shared.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n inner_group_num: int, number of inner repetition of attention and ffn.\n down_scale_factor: float, the scale to apply\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `Model`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n num_bilstm: The number of bilstm layer.\n lstm_size: The hidden size of bilstm state.\n bilstm_dropout_rate: The dropout rate of bilstm.\n \"\"\"\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_hidden_groups = num_hidden_groups\n self.num_attention_heads = num_attention_heads\n self.inner_group_num = inner_group_num\n self.down_scale_factor = down_scale_factor\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.num_bilstm=num_bilstm\n self.lstm_size=lstm_size\n self.bilstm_dropout_rate=bilstm_dropout_rate\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `Config` from a Python dictionary of parameters.\"\"\"\n config = Config(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `Config` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\ndef create_model(config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings, task_name,):\n \"\"\"Creates a classification model from_scratch.\"\"\"\n _true_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), dtype=tf.int32)\n\n with tf.variable_scope(\"baseline\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (word_embedding_output,\n output_embedding_table) = modeling.embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.embedding_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n embedding_output = modeling.embedding_postprocessor(\n input_tensor=word_embedding_output,\n use_token_type=True,\n token_type_ids=segment_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n with tf.variable_scope(\"bilstm\"):\n sequence_output = modeling.bilstm_fused(\n inputs=embedding_output, \n sequence_lengths=_true_length, \n lstm_size=config.lstm_size,\n bilstm_dropout_rate=config.bilstm_dropout_rate, \n is_training=is_training,\n 
num_layers=config.num_bilstm)\n # with tf.variable_scope(\"bilstm\"):\n # sequence_output, _ = modeling.cudnn_rnn(\n # inputs=embedding_output, \n # sequence_lengths=_true_length, \n # rnn_size=config.lstm_size,\n # dropout=config.bilstm_dropout_rate, \n # is_training=is_training,\n # num_layers=config.num_bilstm,\n # direction='bidirectional')\n\n # first_token_tensor = tf.squeeze(sequence_output[:, -1:, :], axis=1)\n last_token_tensor = tf.squeeze(sequence_output[:, -1:, :], axis=1)\n output_layer = tf.layers.dense(\n last_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=modeling.create_initializer(config.initializer_range))\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n if task_name != \"sts-b\":\n probabilities = tf.nn.softmax(logits, axis=-1)\n predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n else:\n probabilities = logits\n logits = tf.squeeze(logits, [-1])\n predictions = logits\n per_example_loss = tf.square(logits - labels)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, probabilities, logits, predictions)\n\n\ndef model_fn_builder(config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, task_name,\n optimizer=\"adamw\"):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, probabilities, logits, predictions) = \\\n create_model(config, is_training, input_ids, input_mask,\n segment_ids, label_ids, num_labels,\n use_one_hot_embeddings, task_name)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string 
= \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps,\n use_tpu, optimizer)\n\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n if task_name not in [\"sts-b\", \"cola\"]:\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions,\n weights=is_real_example)\n loss = tf.metrics.mean(\n values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif task_name == \"sts-b\":\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n \"\"\"Compute Pearson correlations for STS-B.\"\"\"\n # Display labels and predictions\n concat1 = contrib_metrics.streaming_concat(logits)\n concat2 = contrib_metrics.streaming_concat(label_ids)\n\n # Compute Pearson correlation\n pearson = contrib_metrics.streaming_pearson_correlation(\n logits, label_ids, weights=is_real_example)\n\n # Compute MSE\n # mse = tf.metrics.mean(per_example_loss)\n mse = tf.metrics.mean_squared_error(\n label_ids, logits, weights=is_real_example)\n\n loss = tf.metrics.mean(\n values=per_example_loss,\n weights=is_real_example)\n\n return {\"pred\": concat1, \"label_ids\": concat2, \"pearson\": pearson,\n \"MSE\": mse, \"eval_loss\": loss,}\n elif task_name == \"cola\":\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n \"\"\"Compute Matthew's correlations for STS-B.\"\"\"\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient\n tp, tp_op = tf.metrics.true_positives(\n predictions, label_ids, weights=is_real_example)\n tn, tn_op = tf.metrics.true_negatives(\n predictions, label_ids, weights=is_real_example)\n fp, fp_op = tf.metrics.false_positives(\n predictions, label_ids, weights=is_real_example)\n fn, fn_op = tf.metrics.false_negatives(\n predictions, label_ids, weights=is_real_example)\n\n # Compute Matthew's correlation\n mcc = tf.div_no_nan(\n tp * tn - fp * fn,\n tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))\n\n # Compute accuracy\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions,\n weights=is_real_example)\n\n loss = tf.metrics.mean(\n values=per_example_loss,\n weights=is_real_example)\n\n return {\"matthew_corr\": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)),\n \"eval_accuracy\": accuracy, \"eval_loss\": loss,}\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"probabilities\": probabilities,\n \"predictions\": predictions\n },\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an 
`input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": classifier_utils.ColaProcessor,\n \"mnli\": classifier_utils.MnliProcessor,\n \"mismnli\": classifier_utils.MisMnliProcessor,\n \"mrpc\": classifier_utils.MrpcProcessor,\n \"rte\": classifier_utils.RteProcessor,\n \"sst-2\": classifier_utils.Sst2Processor,\n \"sts-b\": classifier_utils.StsbProcessor,\n \"qqp\": classifier_utils.QqpProcessor,\n \"qnli\": classifier_utils.QnliProcessor,\n \"wnli\": classifier_utils.WnliProcessor,\n }\n\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n\n # if not FLAGS.config_file and not FLAGS.albert_hub_module_handle:\n # raise ValueError(\"At least one of `--config_file` and \"\n # \"`--albert_hub_module_handle` must be set\")\n\n if FLAGS.config_file:\n config = Config.from_json_file(\n FLAGS.config_file)\n if FLAGS.max_seq_length > config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, config.max_position_embeddings))\n else:\n config = None # Get the config from TF-Hub.\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name](\n do_lower_case=FLAGS.do_lower_case)\n\n label_list = processor.get_labels()\n \n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2\n if FLAGS.do_train:\n iterations_per_loop = int(min(FLAGS.iterations_per_loop,\n FLAGS.save_checkpoints_steps))\n else:\n iterations_per_loop = FLAGS.iterations_per_loop\n run_config = 
contrib_tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),\n keep_checkpoint_max=0,\n tpu_config=contrib_tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(FLAGS.data_dir)\n model_fn = model_fn_builder(\n config=config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.train_step,\n num_warmup_steps=FLAGS.warmup_step,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n task_name=task_name,\n optimizer=FLAGS.optimizer)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = contrib_tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n train_file = os.path.join(cached_dir, task_name + \"_train.tf_record\")\n if not tf.gfile.Exists(train_file):\n classifier_utils.file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer,\n train_file, task_name)\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", FLAGS.train_step)\n train_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.train_batch_size)\n estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. 
These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(classifier_utils.PaddingInputExample())\n\n cached_dir = FLAGS.cached_dir\n if not cached_dir:\n cached_dir = FLAGS.output_dir\n eval_file = os.path.join(cached_dir, task_name + \"_eval.tf_record\")\n if not tf.gfile.Exists(eval_file):\n classifier_utils.file_based_convert_examples_to_features(\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer,\n eval_file, task_name)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples), num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.eval_batch_size)\n\n best_trial_info_file = os.path.join(FLAGS.output_dir, \"best_trial.txt\")\n\n def _best_trial_info():\n \"\"\"Returns information about which checkpoints have been evaled so far.\"\"\"\n if tf.gfile.Exists(best_trial_info_file):\n with tf.gfile.GFile(best_trial_info_file, \"r\") as best_info:\n global_step, best_metric_global_step, metric_value = (\n best_info.read().split(\":\"))\n global_step = int(global_step)\n best_metric_global_step = int(best_metric_global_step)\n metric_value = float(metric_value)\n else:\n metric_value = -1\n best_metric_global_step = -1\n global_step = -1\n tf.logging.info(\n \"Best trial info: Step: %s, Best Value Step: %s, \"\n \"Best Value: %s\", global_step, best_metric_global_step, metric_value)\n return global_step, best_metric_global_step, metric_value\n\n def _remove_checkpoint(checkpoint_path):\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = checkpoint_path + \".{}\".format(ext)\n tf.logging.info(\"removing {}\".format(src_ckpt))\n tf.gfile.Remove(src_ckpt)\n\n def _find_valid_cands(curr_step):\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n candidates = []\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n idx = ckpt_name.split(\"-\")[-1]\n if int(idx) > curr_step:\n candidates.append(filename)\n return candidates\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n\n if task_name == \"sts-b\":\n key_name = \"pearson\"\n elif task_name == \"cola\":\n key_name = \"matthew_corr\"\n else:\n key_name = \"eval_accuracy\"\n\n global_step, best_perf_global_step, best_perf = _best_trial_info()\n writer = tf.gfile.GFile(output_eval_file, \"w\")\n while global_step < FLAGS.train_step:\n steps_and_files = {}\n filenames = tf.gfile.ListDirectory(FLAGS.output_dir)\n for filename in filenames:\n if filename.endswith(\".index\"):\n ckpt_name = filename[:-6]\n cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)\n gstep = int(cur_filename.split(\"-\")[-1])\n if gstep not in 
steps_and_files:\n tf.logging.info(\"Add {} to eval list.\".format(cur_filename))\n steps_and_files[gstep] = cur_filename\n tf.logging.info(\"found {} files.\".format(len(steps_and_files)))\n if not steps_and_files:\n tf.logging.info(\"found 0 file, global step: {}. Sleeping.\"\n .format(global_step))\n time.sleep(60)\n else:\n for checkpoint in sorted(steps_and_files.items()):\n step, checkpoint_path = checkpoint\n if global_step >= step:\n if (best_perf_global_step != step and\n len(_find_valid_cands(step)) > 1):\n _remove_checkpoint(checkpoint_path)\n continue\n result = estimator.evaluate(\n input_fn=eval_input_fn,\n steps=eval_steps,\n checkpoint_path=checkpoint_path)\n global_step = result[\"global_step\"]\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n writer.write(\"best = {}\\n\".format(best_perf))\n if result[key_name] > best_perf:\n best_perf = result[key_name]\n best_perf_global_step = global_step\n elif len(_find_valid_cands(global_step)) > 1:\n _remove_checkpoint(checkpoint_path)\n writer.write(\"=\" * 50 + \"\\n\")\n writer.flush()\n with tf.gfile.GFile(best_trial_info_file, \"w\") as best_info:\n best_info.write(\"{}:{}:{}\".format(\n global_step, best_perf_global_step, best_perf))\n writer.close()\n\n for ext in [\"meta\", \"data-00000-of-00001\", \"index\"]:\n src_ckpt = \"model.ckpt-{}.{}\".format(best_perf_global_step, ext)\n tgt_ckpt = \"model.ckpt-best.{}\".format(ext)\n tf.logging.info(\"saving {} to {}\".format(src_ckpt, tgt_ckpt))\n tf.io.gfile.rename(\n os.path.join(FLAGS.output_dir, src_ckpt),\n os.path.join(FLAGS.output_dir, tgt_ckpt),\n overwrite=True)\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(FLAGS.data_dir)\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n # later on.\n while len(predict_examples) % FLAGS.predict_batch_size != 0:\n predict_examples.append(classifier_utils.PaddingInputExample())\n\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n classifier_utils.file_based_convert_examples_to_features(\n predict_examples, label_list,\n FLAGS.max_seq_length, tokenizer,\n predict_file, task_name)\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(predict_examples), num_actual_predict_examples,\n len(predict_examples) - num_actual_predict_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = classifier_utils.file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder,\n task_name=task_name,\n use_tpu=FLAGS.use_tpu,\n bsz=FLAGS.predict_batch_size)\n\n checkpoint_path = os.path.join(FLAGS.output_dir, \"model.ckpt-best\")\n result = estimator.predict(\n input_fn=predict_input_fn,\n checkpoint_path=checkpoint_path)\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n output_submit_file = os.path.join(FLAGS.output_dir, \"submit_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as pred_writer,\\\n tf.gfile.GFile(output_submit_file, \"w\") as sub_writer:\n sub_writer.write(\"index\" + \"\\t\" + \"prediction\\n\")\n num_written_lines = 0\n tf.logging.info(\"***** Predict results *****\")\n for (i, (example, prediction)) in\\\n enumerate(zip(predict_examples, result)):\n probabilities = prediction[\"probabilities\"]\n if i >= num_actual_predict_examples:\n break\n output_line = \"\\t\".join(\n str(class_probability)\n for class_probability in probabilities) + \"\\n\"\n pred_writer.write(output_line)\n\n if task_name != \"sts-b\":\n actual_label = label_list[int(prediction[\"predictions\"])]\n else:\n actual_label = str(prediction[\"predictions\"])\n sub_writer.write(example.guid + \"\\t\" + actual_label + \"\\n\")\n num_written_lines += 1\n assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n", "\"\"\"Generate all sort of masks\"\"\"\n\nimport tensorflow as tf\n\n\ndef upper_left_square_mask(mask):\n \"\"\"\n Transform a sequence mask to square mask\n :param mask: A mask tensor of shape (batch, feature_size) specify each sample mask\n :return: A mask tensor of shape (batch, feature_size, feature_size)\n\n Example:\n input\n [ True True True True True False]\n output\n [[ True True True True True False]\n [ True True True True True False]\n [ True True True True True False]\n [ True True True True True False]\n [ True True True True True False]\n [False False False False False False]]]\n \"\"\"\n return tf.logical_and(\n tf.expand_dims(mask, [1]), # [batch_size, 1, feature_size]\n tf.expand_dims(mask, [2]) # [batch_size, feature_size, 1]\n )\n\n\ndef upper_triangular_mask(seq_length):\n \"\"\"\n Return a (seq_length x seq_length) mask which all upper triangular element are true,\n including the diagonal.\n :param seq_length: specify the shape of the matrix\n :return: A mask with shape (seq_length, seq_length)\n\n '''\n seq_length = 5\n [[ True True True True True]\n [False True True True 
True]\n [False False True True True]\n [False False False True True]\n [False False False False True]]\n '''\n \"\"\"\n return tf.linalg.band_part(\n tf.ones([seq_length, seq_length], tf.bool), 0, -1,\n )\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.true_negatives", "tensorflow.metrics.accuracy", "tensorflow.nn.log_softmax", "tensorflow.gfile.Exists", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.group", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.squeeze", "tensorflow.truncated_normal_initializer", "tensorflow.metrics.false_positives", "tensorflow.contrib.metrics.streaming_pearson_correlation", "tensorflow.logging.set_verbosity", "tensorflow.square", "tensorflow.gfile.Remove", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.app.run", "tensorflow.gfile.ListDirectory", "tensorflow.matmul", "tensorflow.metrics.mean", "tensorflow.shape", "tensorflow.pow", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.metrics.mean_squared_error", "tensorflow.metrics.true_positives", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.train.Scaffold", "tensorflow.metrics.false_negatives", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.contrib.metrics.streaming_concat", "tensorflow.variable_scope" ], [ "tensorflow.ones", "tensorflow.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
sangdon/intern2020_cocal
[ "2f434b76fbf3426c6685fb92c5bbc2d32fcba7ba" ]
[ "uncertainty/plots/plot_prec_cov_twoparams.py" ]
[ "import os, sys\nimport argparse\nimport types\nimport numpy as np\nimport pickle\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nimport tensorflow as tf\n\nimport data\nimport model \nfrom util import *\nfrom learning import LearnerCls, LearnerDACls, LearnerClsRT, LearnerConfPred\nfrom learning import TempScalingCls as CalibratorCls\n\n##TODO: clean-up tf options\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n#gpus = tf.config.experimental.list_physical_devices('GPU')\n#tf.config.experimental.set_memory_growth(gpus[0], True)\n\ndef plot_prec_cov(T, prec, cov, fn, fontsize=15):\n\n plt.figure(1)\n plt.clf()\n\n plt.plot(cov, prec, 'rs-')\n plt.xlabel('coverage (%)', fontsize=fontsize)\n plt.ylabel('precision (%)', fontsize=fontsize)\n plt.grid('on')\n plt.savefig(fn+'.png', bbox_inches='tight')\n\n\ndef main(args):\n\n data_fn = 'plots/prec_cov_list_twoparams.pk'\n fig_fn = 'plots/prec_cov_twoparams'\n if os.path.exists(data_fn):\n pc_data = pickle.load(open(data_fn, 'rb'))\n plot_prec_cov(pc_data['T_list'], pc_data['prec_list'], pc_data['cov_list'], fig_fn)\n return\n\n ## init a snapshot path\n os.makedirs(args.train.save_root, exist_ok=True)\n\n ## init logger\n sys.stdout = Logger(os.path.join(args.train.save_root, 'out'))\n\n ## print args\n print_args(args)\n\n ## init gpus\n if not args.cpu:\n print(\"##GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n print()\n\n ## init datasets\n print(\"## init datasets\")\n ds_src = data.MultiSourceDataset(\n args.data.src,\n args.aug_params,\n batch_size=args.data.batch_size,\n val_shuffle=True,\n val_aug=True,\n domain_id=1,\n color=False if args.data.img_size[2]==1 else True,\n size=args.data.img_size[0],\n sample_ratio=args.data.sample_ratio[0])\n assert(len(args.aug_params) == 1) ##TODO\n ds_tar = getattr(data, args.data.tar)(\n root=os.path.join('data', args.data.tar.lower()),\n batch_size=args.data.batch_size,\n aug_list=args.aug_params[0],\n val_shuffle=True,\n val_aug=True,\n domain_id=0,\n color=False if args.data.img_size[2]==1 else True,\n size=args.data.img_size[0],\n sample_ratio=args.data.sample_ratio[1])\n ds_dom = data.DomainDataset(\n data.MultiSourceDataset(\n args.data.src,\n args.aug_params,\n batch_size=args.data.batch_size,\n val_shuffle=True,\n val_aug=True,\n test_aug=True, #diff\n domain_id=1,\n color=False if args.data.img_size[2]==1 else True,\n size=args.data.img_size[0],\n sample_ratio=args.data.sample_ratio[0]),\n getattr(data, args.data.tar)(\n root=os.path.join('data', args.data.tar.lower()),\n batch_size=args.data.batch_size,\n aug_list=args.aug_params[0],\n val_shuffle=True,\n val_aug=True,\n test_aug=True, #diff\n domain_id=0,\n color=False if args.data.img_size[2]==1 else True,\n size=args.data.img_size[0],\n sample_ratio=args.data.sample_ratio[1]))\n print() \n\n ####\n ## reliable teacher learning\n #### \n mdl_st_base = getattr(model, args.model.base)(num_class=args.model.n_labels, input_shape=args.model.img_size)\n #mdl_st_base = model.TempCls(mdl_st_base)\n mdl_st = model.Student(args.model, mdl_st_base, ds_src, ds_tar, ideal=args.ideal)\n\n mdl_tc_base = getattr(model, args.model.base)(num_class=args.model.n_labels, input_shape=args.model.img_size)\n #mdl_tc_base = model.TempCls(mdl_tc_base)\n mdl_tc = model.Teacher(args.model, mdl_tc_base, ds_src, ds_tar, ideal=args.ideal)\n\n\n ## rename\n model_t = mdl_tc\n model_s = mdl_st\n model_c = model_s.model_base\n params = args.train\n 
params_base = args.train_base\n params_advtr = args.train_advtr\n params_iw = args.train_iw\n params_iw_cal = args.cal_iw\n params_conf = args.est_conf\n i_epoch = 1\n \n ## init a model\n if params.init == 'sourceonly':\n\n ##TODO: assume classification\n print(\"## init the student model with sourceonly training\")\n model.set_trainable(model_c, True)\n ## init a learner\n learner = LearnerCls(params_base, model_c, model_name_postfix='_sourceonlyinit')\n ## train the model\n learner.train(ds_src.train, ds_src.val, ds_src.test)\n ## test the model\n learner.test(ds_src.test, ld_name='src', verbose=True)\n print()\n\n elif params.init == 'advtr':\n ##TODO: assume classification\n print(\"## init a base model with adversarial training\")\n model.set_trainable(model_c, True)\n ## init a adv model\n mdl_adv = getattr(model, params_advtr.model_advtr)(n_in=model_c.dim_feat)\n ## init a learner\n learner = LearnerDACls(params_advtr, model.DAN(model_c, mdl_adv), model_name_postfix='_advtrinit')\n ## train the model\n learner.train([ds_src.train, ds_dom.train], None, ds_tar.test) \n ## test the model\n learner.test(ds_tar.test, ld_name='tar', verbose=True)\n print() \n\n else:\n raise NotImplementedError\n\n ## init iw\n if model_t.train.model_conf.model_iw is not None:\n print(\"## learn IW\")\n model_sd = model_t.train.model_conf.model_iw.model_sd.model\n model_sd.train()\n\n ## init a learner\n learner_sd = LearnerCls(params_iw, model_sd, model_name_postfix='_iw_epoch_%d'%(i_epoch))\n ## train the model\n learner_sd.train(ds_dom.train, ds_dom.val, ds_dom.test)\n ## test the model\n learner_sd.test(ds_dom.test, ld_name='domain', verbose=True)\n print()\n\n ## init a calibraton model\n model_sd_cal = model_t.train.model_conf.model_iw.model_sd\n model_sd_cal.train()\n\n ## init a calibrator\n calibrator_iw = CalibratorCls(params_iw_cal, model_sd_cal, model_name_postfix='_iw_cal_epoch_%d'%(i_epoch))\n ## calibrate the model\n calibrator_iw.train(ds_dom.val, ds_dom.val, ds_dom.test)\n ## test the model\n calibrator_iw.test(ds_dom.test, ld_name='domain', verbose=True)\n print()\n\n ## 2. 
learn confidence predictor\n model_base = model_t.train.model_base\n #model_conf = model_t.train.model_conf\n model_iw = model_t.train.model_conf.model_iw\n #model_iw_cond = model.CondIW(model_iw, model_conf, ds_src.train, ds_tar.train)\n model_conf = model.TwoParamsConfPred(model_base, model_iw)\n \n \n ## init a learner\n learner = LearnerConfPred(params_conf, model_conf, model_base, None, model_name_postfix='_confpred_epoch_%d'%(i_epoch))\n # ## train the model\n # learner.train(ds_src.val, ds_src.val, ds_tar.test)\n # ## test the model\n # learner.test(ds_tar.test, ld_name='tar', verbose=True)\n # learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)\n # print()\n \n else:\n model_base = model_t.train.model_base\n model_conf = model_t.train.model_conf\n\n ## init a learner\n learner = LearnerConfPred(params_conf, model_conf, model_base, None, model_name_postfix='_confpred_epoch_%d'%(i_epoch))\n ## train the model\n model_conf.T = tf.Variable(1.0 - params_conf.eps) ##TODO\n print(\"T = %f\"%(model_conf.T.numpy()))\n ## test the model\n learner.test(ds_tar.test, ld_name='tar', verbose=True)\n learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)\n\n ## compute precision and coverage\n T_list, prec_list, cov_list = [], [], []\n rng = [0.99, 0.98, 0.97, 0.96, 0.95, 0.90, 0.8, 0.7, 0.6, 0.5]\n for T_bnd in rng:\n for T_iw in rng:\n model_conf.T_bnd = T_bnd\n model_conf.T_iw = T_iw\n prec, n_conf, n = learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)\n T_list.append((T_bnd, T_iw))\n prec_list.append(prec.numpy())\n cov_list.append(float(n_conf)/float(n))\n\n print(T_list)\n print(prec_list)\n print(cov_list)\n print()\n\n T_list = np.array(T_list)\n prec_list = np.array(prec_list)\n cov_list = np.array(cov_list)\n pickle.dump({'T_list': T_list, 'prec_list': prec_list, 'cov_list': cov_list}, open(data_fn, 'wb'))\n \n\n \ndef parse_args():\n ## inint a parser\n parser = argparse.ArgumentParser(description='digit dataset training')\n\n ## meta args\n parser.add_argument('--exp_name', required=True, type=str, help='experiment name')\n parser.add_argument('--snapshot_root', default='snapshots', type=str, help='snapshot root name')\n parser.add_argument('--cpu', action='store_true', help='use CPU')\n parser.add_argument('--ideal', action='store_true', help='enable cheatkey')\n\n ## dataset args\n parser.add_argument('--data.batch_size', default=100, type=int, help='batch size')\n parser.add_argument('--data.n_labels', default=10, type=int, help='the number of labels')\n parser.add_argument('--data.src', type=str, nargs='*', default=['MNIST'], help='list of sources')\n parser.add_argument('--data.tar', type=str, default='USPS', help='target') \n parser.add_argument('--data.aug', type=str, nargs='*', default=[''], help='list of data augmentation')\n parser.add_argument('--data.img_size', type=int, nargs=3, default=(32, 32, 3), help='image size')\n parser.add_argument('--data.sample_ratio', type=float, nargs=2, default=[1.0, 1.0])\n \n ## model args\n parser.add_argument('--model.base', default='ResNet18', type=str, help='model name')\n parser.add_argument('--model.iw', default='BigFNN', type=str, help='model name')\n parser.add_argument('--model.conf', default='ConfPred', type=str, help='model name')\n\n ## RT train args\n parser.add_argument('--train.find_best', action='store_true', help='find the best model')\n parser.add_argument('--train.load_final', action='store_true', help='load the final model')\n parser.add_argument('--train.n_epochs', type=int, default=5, 
help='the number of training iterations')\n parser.add_argument('--train.init', type=str, default='advtr', help='model initialization approach')\n parser.add_argument('--train.val_period', default=1, type=int, help='validation period in epochs')\n \n ## base model train args\n parser.add_argument('--train_base.find_best', action='store_true', help='find the best model')\n parser.add_argument('--train_base.load_final', action='store_true', help='load the final model')\n parser.add_argument('--train_base.optim', default='SGD', type=str, help='optimizer')\n parser.add_argument('--train_base.lr', default=0.01, type=float, help='learning rate')\n parser.add_argument('--train_base.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')\n parser.add_argument('--train_base.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')\n parser.add_argument('--train_base.weight_decay', type=float, default=0.0, help='L2 weight decay')\n parser.add_argument('--train_base.momentum', default=0.9, type=float, help='momentum')\n parser.add_argument('--train_base.n_epochs', default=100, type=int, help='the number of epochs')\n parser.add_argument('--train_base.val_period', default=1, type=int, help='validation period in epochs')\n\n ## iw train args\n parser.add_argument('--train_iw.find_best', action='store_true', help='find the best model')\n parser.add_argument('--train_iw.load_final', action='store_true', help='load the final model') \n parser.add_argument('--train_iw.optim', default='SGD', type=str, help='optimizer')\n parser.add_argument('--train_iw.lr', default=0.01, type=float, help='learning rate')\n parser.add_argument('--train_iw.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')\n parser.add_argument('--train_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')\n parser.add_argument('--train_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')\n parser.add_argument('--train_iw.momentum', default=0.9, type=float, help='momentum')\n parser.add_argument('--train_iw.n_epochs', default=100, type=int, help='the number of epochs')\n parser.add_argument('--train_iw.val_period', default=1, type=int, help='validation period in epochs')\n\n ## cal args\n parser.add_argument('--cal_iw.find_best', action='store_true', help='find the best model')\n parser.add_argument('--cal_iw.load_final', action='store_true', help='load the final model') \n parser.add_argument('--cal_iw.optim', default='SGD', type=str, help='optimizer')\n parser.add_argument('--cal_iw.lr', default=0.01, type=float, help='learning rate')\n parser.add_argument('--cal_iw.lr_step_size', default=50, type=float, help='stepsize for step learning rate scheduler')\n parser.add_argument('--cal_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')\n parser.add_argument('--cal_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')\n parser.add_argument('--cal_iw.momentum', default=0.9, type=float, help='momentum')\n parser.add_argument('--cal_iw.n_epochs', default=500, type=int, help='the number of epochs')\n parser.add_argument('--cal_iw.val_period', default=1, type=int, help='validation period in epochs')\n\n ## train args\n parser.add_argument('--train_advtr.find_best', action='store_true', help='find the best model')\n parser.add_argument('--train_advtr.load_final', action='store_true', help='load the final model')\n 
parser.add_argument('--train_advtr.optim', default='SGD', type=str, help='optimizer')\n parser.add_argument('--train_advtr.lr', default=0.01, type=float, help='learning rate')\n parser.add_argument('--train_advtr.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')\n parser.add_argument('--train_advtr.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')\n parser.add_argument('--train_advtr.weight_decay', type=float, default=0.0, help='L2 weight decay')\n parser.add_argument('--train_advtr.momentum', default=0.9, type=float, help='momentum')\n parser.add_argument('--train_advtr.n_epochs', default=100, type=int, help='the number of epochs')\n parser.add_argument('--train_advtr.val_period', default=1, type=int, help='validation period in epochs')\n parser.add_argument('--train_advtr.advtr_type', type=str, default='DANN', help='domain-adversarial training type')\n\n parser.add_argument('--train_advtr.model_advtr', type=str, default='BigAdvFNN', help='adversarial network name')\n parser.add_argument('--train_advtr.reg_param_adv', type=float, default=1.0, help='adversarial loss regularization parameter')\n #parser.add_argument('--train_advtr.schedule_reg_param_adv', action='store_true', help='schedule the adversarial loss regularization parameter')\n parser.add_argument('--train_advtr.no_adv_reg_schedule', action='store_true', help='do not schedule the adversarial loss regularization parameter')\n\n ## conf args\n parser.add_argument('--est_conf.find_best', action='store_true', help='find the best model')\n parser.add_argument('--est_conf.load_final', action='store_true', help='load the final model')\n #parser.add_argument('--est_conf.model', type=str, default='c+w', help='model name')\n parser.add_argument('--est_conf.eps', type=float, default=0.01, help='epsilon')\n parser.add_argument('--est_conf.T_max', type=float, default=1.0, help='T max range')\n parser.add_argument('--est_conf.T_min', type=float, default=1e-6, help='T min range')\n parser.add_argument('--est_conf.T_step', type=float, default=0.01, help='T step size')\n\n\n args = parser.parse_args()\n args = to_tree_namespace(args)\n\n ## duplicate\n args.train.save_root = os.path.join(args.snapshot_root, args.exp_name)\n args.train_base.save_root = args.train.save_root\n args.train_iw.save_root = args.train.save_root\n args.cal_iw.save_root = args.train.save_root\n args.train_advtr.save_root = args.train.save_root\n args.est_conf.save_root = args.train.save_root\n \n args.model.n_labels = args.data.n_labels\n args.model.img_size = args.data.img_size\n\n args.train_advtr.schedule_reg_param_adv = not args.train_advtr.no_adv_reg_schedule\n\n ## init aug parameters\n args.aug_params = []\n for a in args.data.aug:\n if a == 'jitter':\n args.aug_params.append([('jitter', {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4})])\n \n elif a == 'shake':\n args.aug_params.append([('randaug', {'size': 32, 'mode': 'SHAKE'})])\n \n elif a == 'svhnspec':\n args.aug_params.append([ \n ('intensity_flip', {}),\n ('intensity_scaling', {'min': -1.5, 'max': 1.5}),\n ('intensity_offset', {'min': -0.5, 'max': 0.5}),\n ('affine', {'std': 0.1}),\n ('translation', {'x_max': 2.0, 'y_max': 2.0}),\n ('gaussian', {'std': 0.1}),\n ])\n else:\n ##TODO: simplify\n args.aug_params.append(None)\n \n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n\n\n\n" ]
[ [ "tensorflow.Variable", "matplotlib.use", "tensorflow.config.experimental.list_physical_devices", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cubetastic33/stanford-dogs
[ "26cd9043aa8ae894e44e57aa0b3b7cbcdae75bd9" ]
[ "test.py" ]
[ "import math\nimport os\n\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nfrom torchvision import models\n\nfrom dataloader import StanfordDogsDataset\n\n\ndef preprocess(image):\n width, height = image.size\n if width > height and width > 512:\n height = math.floor(512 * height / width)\n width = 512\n elif width < height and height > 512:\n width = math.floor(512 * width / height)\n height = 512\n pad_values = (\n (512 - width) // 2 + (0 if width % 2 == 0 else 1),\n (512 - height) // 2 + (0 if height % 2 == 0 else 1),\n (512 - width) // 2,\n (512 - height) // 2,\n )\n return T.Compose([\n T.Resize((height, width)),\n T.Pad(pad_values),\n T.ToTensor(),\n T.Lambda(lambda x: x[:3]), # Remove the alpha channel if it's there\n ])(image)\n\n\nDEVICE = torch.device(\"cuda\")\nLOAD_FILE = \"HAL9000.pt\"\n\ntest_set = StanfordDogsDataset(\n root=os.path.join(os.getcwd(), \"data\"), set_type=\"test\", transform=preprocess\n)\ntest_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=4)\n\nmodel = models.resnet50(pretrained=True)\n\n# Parameters of newly constructed modules have requires_grad=True by default\nnum_ftrs = model.fc.in_features\nmodel.fc = nn.Linear(num_ftrs, 120)\nmodel.to(DEVICE)\nmodel.load_state_dict(torch.load(LOAD_FILE))\nmodel.eval()\n\nnum_correct = 0\ntotal_loss = 0\n\nwith torch.no_grad():\n for inputs, labels in test_loader:\n predictions = model(inputs.to(DEVICE))\n loss = F.cross_entropy(predictions, labels.to(DEVICE))\n num_correct += predictions.argmax(dim=1).eq(labels.to(DEVICE)).sum().item()\n total_loss += loss.item()\n\nprint(f\"Accuracy: {num_correct / len(test_set) * 100:.2f}%, Average loss: {total_loss / len(test_set):.4f}\")\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.no_grad", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DianaTaukin/DSD-SATN
[ "5a4ab5e3cfcb00e72ca27cf5ec10a8d8e29ef312" ]
[ "src/utils/util.py" ]
[ "#encoding=utf-8\nimport h5py\nimport torch\nimport numpy as np\nimport os\nimport sys\nsys.path.append(os.path.abspath(__file__).replace(os.path.basename(__file__),'').replace('utils/',''))\nfrom config import args\nimport json\nimport torch.nn.functional as F\nimport cv2\nimport math\nfrom scipy import interpolate\nimport hashlib\nimport shutil\nimport pickle\nimport csv\n\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage import io\n\n# logger tools\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0.\n self.avg = 0.\n self.sum = 0.\n self.count = 0.\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\nclass Logger(object):\n\n def __init__(self, path, header):\n self.log_file = open(path, 'w')\n self.logger = csv.writer(self.log_file, delimiter='\\t')\n\n self.logger.writerow(header)\n self.header = header\n\n def __del(self):\n self.log_file.close()\n\n def log(self, values):\n write_values = []\n for col in self.header:\n assert col in values\n write_values.append(values[col])\n\n self.logger.writerow(write_values)\n self.log_file.flush()\n\ndef wrap(func, *args, unsqueeze=False):\n \"\"\"\n Wrap a torch function so it can be called with NumPy arrays.\n Input and return types are seamlessly converted.\n \"\"\"\n # Convert input types where applicable\n args = list(args)\n for i, arg in enumerate(args):\n if type(arg) == np.ndarray:\n args[i] = torch.from_numpy(arg)\n if unsqueeze:\n args[i] = args[i].unsqueeze(0)\n\n result = func(*args)\n\n # Convert output types where applicable\n if isinstance(result, tuple):\n result = list(result)\n for i, res in enumerate(result):\n if type(res) == torch.Tensor:\n if unsqueeze:\n res = res.squeeze(0)\n result[i] = res.numpy()\n return tuple(result)\n elif type(result) == torch.Tensor:\n if unsqueeze:\n result = result.squeeze(0)\n return result.numpy()\n else:\n return result\n\ndef deterministic_random(min_value, max_value, data):\n digest = hashlib.sha256(data.encode()).digest()\n raw_value = int.from_bytes(digest[:4], byteorder='little', signed=False)\n return int(raw_value / (2**32 - 1) * (max_value - min_value)) + min_value\n\n# Math transform\n\ndef compute_similarity_transform(S1, S2):\n '''\n Computes a similarity transform (sR, t) that takes\n a set of 3D points S1 (3 x N) closest to a set of 3D points S2,\n where R is an 3x3 rotation matrix, t 3x1 translation, s scale.\n i.e. solves the orthogonal Procrutes problem.\n '''\n transposed = False\n if S1.shape[0] != 3 and S1.shape[0] != 2:\n S1 = S1.T\n S2 = S2.T\n transposed = True\n assert(S2.shape[1] == S1.shape[1])\n\n # 1. Remove mean.\n mu1 = S1.mean(axis=1, keepdims=True)\n mu2 = S2.mean(axis=1, keepdims=True)\n X1 = S1 - mu1\n X2 = S2 - mu2\n\n # 2. Compute variance of X1 used for scale.\n var1 = np.sum(X1**2)\n\n # 3. The outer product of X1 and X2.\n K = X1.dot(X2.T)\n\n # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are\n # singular vectors of K.\n U, s, Vh = np.linalg.svd(K)\n V = Vh.T\n # Construct Z that fixes the orientation of R to get det(R)=1.\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n # Construct R.\n R = V.dot(Z.dot(U.T))\n\n # 5. Recover scale.\n scale = np.trace(R.dot(K)) / var1\n\n # 6. Recover translation.\n t = mu2 - scale*(R.dot(mu1))\n\n # 7. 
Error:\n S1_hat = scale*R.dot(S1) + t\n\n if transposed:\n S1_hat = S1_hat.T\n\n return S1_hat\n\ndef compute_average_loss(loss_list):\n loss_np = np.array(loss_list)\n loss = np.mean(loss_np,axis=0)\n return loss\n\ndef _init_weights_deconv(m):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n\ndef _init_batchnorm(m):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\ndef load_mean_param():\n mean = np.zeros(args.total_param_count, dtype = np.float)\n\n mean_values = h5py.File(args.smpl_mean_param_path)\n mean_pose = mean_values['pose']\n mean_pose[:3] = 0\n mean_shape = mean_values['shape']\n mean_pose[0]=np.pi\n\n #init scale is 0.9\n mean[0] = 0.9\n\n mean[3:75] = mean_pose[:]\n mean[75:] = mean_shape[:]\n\n return mean\n\ndef batch_rodrigues(param):\n #param N x 3\n batch_size = param.shape[0]\n\n l1norm = torch.norm(param + 1e-8, p = 2, dim = 1)\n angle = torch.unsqueeze(l1norm, -1)\n normalized = torch.div(param, angle)\n angle = angle * 0.5\n\n v_cos = torch.cos(angle)\n v_sin = torch.sin(angle)\n\n quat = torch.cat([v_cos, v_sin * normalized], dim = 1)\n\n return quat2mat(quat)\n\ndef quat2mat(quat):\n \"\"\"Convert quaternion coefficients to rotation matrix.\n Args:\n quat: size = [B, 4] 4 <===>(w, x, y, z)\n Returns:\n Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]\n \"\"\"\n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat\n\ndef batch_global_rigid_transformation(Rs, Js, parent, rotate_base = False,root_rot_mat =None):\n N = Rs.shape[0]\n if rotate_base:\n np_rot_x = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype = np.float)\n np_rot_x = np.reshape(np.tile(np_rot_x, [N, 1]), [N, 3, 3])\n rot_x = torch.from_numpy(np_rot_x).float().cuda()\n root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)\n elif root_rot_mat is not None:\n np_rot_x = np.reshape(np.tile(root_rot_mat, [N, 1]), [N, 3, 3])\n rot_x =torch.from_numpy(np_rot_x).float().cuda()\n root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)\n else:\n root_rotation = Rs[:, 0, :, :]\n Js = torch.unsqueeze(Js, -1)\n\n def make_A(R, t):\n R_homo = F.pad(R, [0, 0, 0, 1, 0, 0])\n t_homo = torch.cat([t, torch.ones(N, 1, 1).cuda()], dim = 1)\n return torch.cat([R_homo, t_homo], 2)\n\n A0 = make_A(root_rotation, Js[:, 0])\n results = [A0]\n\n for i in range(1, parent.shape[0]):\n j_here = Js[:, i] - Js[:, parent[i]]\n A_here = make_A(Rs[:, i], j_here)\n res_here = torch.matmul(results[parent[i]], A_here)\n results.append(res_here)\n\n results = torch.stack(results, dim = 1)\n\n new_J = results[:, :, :3, 3]\n Js_w0 = torch.cat([Js, torch.zeros(N, 24, 1, 1).cuda()], dim = 2)\n init_bone = torch.matmul(results, Js_w0)\n init_bone = F.pad(init_bone, [3, 0, 0, 0, 0, 0, 0, 0])\n A = results - init_bone\n\n return new_J, A\n\ndef batch_global_rigid_transformation_cpu(Rs, Js, parent, rotate_base = False,root_rot_mat =None):\n N = Rs.shape[0]\n if rotate_base:\n np_rot_x = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype = np.float)\n np_rot_x = np.reshape(np.tile(np_rot_x, [N, 1]), [N, 3, 3])\n rot_x 
=torch.from_numpy(np_rot_x).float()\n root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)\n elif root_rot_mat is not None:\n np_rot_x = np.reshape(np.tile(root_rot_mat, [N, 1]), [N, 3, 3])\n rot_x =torch.from_numpy(np_rot_x).float()\n root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)\n else:\n root_rotation = Rs[:, 0, :, :]\n Js = torch.unsqueeze(Js, -1)\n\n def make_A(R, t):\n R_homo = F.pad(R, [0, 0, 0, 1, 0, 0])\n t_homo = torch.cat([t, torch.ones(N, 1, 1)], dim = 1)\n return torch.cat([R_homo, t_homo], 2)\n\n A0 = make_A(root_rotation, Js[:, 0])\n results = [A0]\n\n for i in range(1, parent.shape[0]):\n j_here = Js[:, i] - Js[:, parent[i]]\n A_here = make_A(Rs[:, i], j_here)\n res_here = torch.matmul(results[parent[i]], A_here)\n results.append(res_here)\n\n results = torch.stack(results, dim = 1)\n\n new_J = results[:, :, :3, 3]\n Js_w0 = torch.cat([Js, torch.zeros(N, 24, 1, 1)], dim = 2)\n init_bone = torch.matmul(results, Js_w0)\n init_bone = F.pad(init_bone, [3, 0, 0, 0, 0, 0, 0, 0])\n A = results - init_bone\n\n return new_J, A\n\ndef batch_lrotmin(param):\n param = param[:,3:].contiguous()\n Rs = batch_rodrigues(param.view(-1, 3))\n print(Rs.shape)\n e = torch.eye(3).float()\n Rs = Rs.sub(1.0, e)\n\n return Rs.view(-1, 23 * 9)\n\ndef batch_orth_proj(X, camera, mode='2d'):\n camera = camera.view(-1, 1, 3)\n s = camera[:, :, 0].unsqueeze(-1)\n X_trans = X[:,:,:2].contiguous()\n if mode=='2d':\n X_trans = s * X_trans + camera[:, :, 1:]\n return X_trans\n elif mode=='v3d':\n X[:, :, :2] = s * X_trans + camera[:, :, 1:]\n return X\n elif mode=='j3d':\n X[:, :, :2] = s * X_trans/torch.abs(s) + camera[:, :, 1:]\n return X\n else:\n print('projection mode is not included')\n return X\n\ndef calc_aabb(ptSets):\n\n ptLeftTop = np.array([np.min(ptSets[:,0]),np.min(ptSets[:,1])])\n ptRightBottom = np.array([np.max(ptSets[:,0]),np.max(ptSets[:,1])])\n return [ptLeftTop, ptRightBottom]\n\n ptLeftTop = np.array([ptSets[0][0], ptSets[0][1]])\n ptRightBottom = ptLeftTop.copy()\n for pt in ptSets:\n ptLeftTop[0] = min(ptLeftTop[0], pt[0])\n ptLeftTop[1] = min(ptLeftTop[1], pt[1])\n ptRightBottom[0] = max(ptRightBottom[0], pt[0])\n ptRightBottom[1] = max(ptRightBottom[1], pt[1])\n\n return ptLeftTop, ptRightBottom#, len(ptSets) >= 5\n\ndef calc_aabb_batch(ptSets_batch):\n batch_size = ptSets_batch.shape[0]\n ptLeftTop = np.array([np.min(ptSets_batch[:,:,0],axis=1),np.min(ptSets_batch[:,:,1],axis=1)]).T\n ptRightBottom = np.array([np.max(ptSets_batch[:,:,0],axis=1),np.max(ptSets_batch[:,:,1],axis=1)]).T\n bbox = np.concatenate((ptLeftTop.reshape(batch_size,1,2),ptRightBottom.reshape(batch_size,1,2)),axis=1)\n return bbox\n\ndef calc_obb(ptSets):\n ca = np.cov(ptSets,y = None,rowvar = 0,bias = 1)\n v, vect = np.linalg.eig(ca)\n tvect = np.transpose(vect)\n ar = np.dot(ptSets,np.linalg.inv(tvect))\n mina = np.min(ar,axis=0)\n maxa = np.max(ar,axis=0)\n diff = (maxa - mina)*0.5\n center = mina + diff\n corners = np.array([center+[-diff[0],-diff[1]],center+[diff[0],-diff[1]],center+[diff[0],diff[1]],center+[-diff[0],diff[1]]])\n corners = np.dot(corners, tvect)\n return corners[0], corners[1], corners[2], corners[3]\n\ndef get_image_cut_box(leftTop, rightBottom, ExpandsRatio, Center = None):\n try:\n l = len(ExpandsRatio)\n except:\n ExpandsRatio = [ExpandsRatio, ExpandsRatio, ExpandsRatio, ExpandsRatio]\n\n def _expand_crop_box(lt, rb, scale):\n center = (lt + rb) / 2.0\n xl, xr, yt, yb = lt[0] - center[0], rb[0] - center[0], lt[1] - center[1], rb[1] - center[1]\n\n xl, xr, yt, yb = xl * scale[0], xr 
* scale[1], yt * scale[2], yb * scale[3]\n #expand it\n lt, rb = np.array([center[0] + xl, center[1] + yt]), np.array([center[0] + xr, center[1] + yb])\n lb, rt = np.array([center[0] + xl, center[1] + yb]), np.array([center[0] + xr, center[1] + yt])\n center = (lt + rb) / 2\n return center, lt, rt, rb, lb\n\n if Center == None:\n Center = (leftTop + rightBottom) // 2\n\n Center, leftTop, rightTop, rightBottom, leftBottom = _expand_crop_box(leftTop, rightBottom, ExpandsRatio)\n offset = (rightBottom - leftTop) // 2\n\n cx = offset[0]\n cy = offset[1]\n\n r = max(cx, cy)\n\n cx = r\n cy = r\n\n x = int(Center[0])\n y = int(Center[1])\n\n return [x - cx, y - cy], [x + cx, y + cy]\n\ndef shrink(leftTop, rightBottom, width, height):\n xl = -leftTop[0]\n xr = rightBottom[0] - width\n\n yt = -leftTop[1]\n yb = rightBottom[1] - height\n\n cx = (leftTop[0] + rightBottom[0]) / 2\n cy = (leftTop[1] + rightBottom[1]) / 2\n\n r = (rightBottom[0] - leftTop[0]) / 2\n\n sx = max(xl, 0) + max(xr, 0)\n sy = max(yt, 0) + max(yb, 0)\n\n if (xl <= 0 and xr <= 0) or (yt <= 0 and yb <=0):\n return leftTop, rightBottom\n elif leftTop[0] >= 0 and leftTop[1] >= 0 : # left top corner is in box\n l = min(yb, xr)\n r = r - l / 2\n cx = cx - l / 2\n cy = cy - l / 2\n elif rightBottom[0] <= width and rightBottom[1] <= height : # right bottom corner is in box\n l = min(yt, xl)\n r = r - l / 2\n cx = cx + l / 2\n cy = cy + l / 2\n elif leftTop[0] >= 0 and rightBottom[1] <= height : #left bottom corner is in box\n l = min(xr, yt)\n r = r - l / 2\n cx = cx - l / 2\n cy = cy + l / 2\n elif rightBottom[0] <= width and leftTop[1] >= 0 : #right top corner is in box\n l = min(xl, yb)\n r = r - l / 2\n cx = cx + l / 2\n cy = cy - l / 2\n elif xl < 0 or xr < 0 or yb < 0 or yt < 0:\n return leftTop, rightBottom\n elif sx >= sy:\n sx = max(xl, 0) + max(0, xr)\n sy = max(yt, 0) + max(0, yb)\n # cy = height / 2\n if yt >= 0 and yb >= 0:\n cy = height / 2\n elif yt >= 0:\n cy = cy + sy / 2\n else:\n cy = cy - sy / 2\n r = r - sy / 2\n\n if xl >= sy / 2 and xr >= sy / 2:\n pass\n elif xl < sy / 2:\n cx = cx - (sy / 2 - xl)\n else:\n cx = cx + (sy / 2 - xr)\n elif sx < sy:\n cx = width / 2\n r = r - sx / 2\n if yt >= sx / 2 and yb >= sx / 2:\n pass\n elif yt < sx / 2:\n cy = cy - (sx / 2 - yt)\n else:\n cy = cy + (sx / 2 - yb)\n\n\n return [cx - r, cy - r], [cx + r, cy + r]\n\ndef off_set_pts(keyPoints, leftTop):\n result = keyPoints.copy()\n result[:, 0] -= leftTop[0]\n result[:, 1] -= leftTop[1]\n return result\n\n'''\n cut the image, by expanding a bounding box\n'''\ndef cut_image(originImage, kps, expand_ratio, leftTop, rightBottom,cam=None,centralize=False):\n\n original_shape = originImage.shape\n height = originImage.shape[0]\n width = originImage.shape[1]\n channels = originImage.shape[2] if len(originImage.shape) >= 3 else 1\n leftTop[0] = max(0, leftTop[0])\n leftTop[1] = max(0, leftTop[1])\n\n leftTop, rightBottom = get_image_cut_box(leftTop, rightBottom, expand_ratio)\n\n lt = [int(leftTop[0]), int(leftTop[1])]\n rb = [int(rightBottom[0]), int(rightBottom[1])]\n\n lt[0] = max(0, lt[0])\n lt[1] = max(0, lt[1])\n rb[0] = min(rb[0], width)\n rb[1] = min(rb[1], height)\n\n leftTop = np.array([int(leftTop[0]), int(leftTop[1])])\n rightBottom = np.array([int(rightBottom[0] + 0.5), int(rightBottom[1] + 0.5)])\n\n length = max(rightBottom[1] - leftTop[1]+1, rightBottom[0] - leftTop[0]+1)\n if length<20:\n return False,False,False\n\n dstImage = np.zeros(shape = [length,length, channels], dtype = np.uint8)\n dstImage[:,:,:] = 0\n\n 
offset = np.array([lt[0] - leftTop[0], lt[1] - leftTop[1]])\n size = [rb[0] - lt[0], rb[1] - lt[1]]\n\n try:\n dstImage[offset[1]:size[1] + offset[1], offset[0]:size[0] + offset[0], :] = originImage[lt[1]:rb[1], lt[0]:rb[0],:]\n except Exception as error:\n return False,False,False\n\n if cam is not None:\n cam[1] = (cam[1]+1.0)*float(original_shape[1])/float(length)-2.0*float(leftTop[0])/float(length)-1.0\n cam[2] = (cam[2]+1.0)*float(original_shape[0])/float(length)-2.0*float(leftTop[1])/float(length)-1.0\n cam[0] *= original_shape[0]/length\n\n return dstImage, off_set_pts(kps, leftTop),cam,(offset,lt,rb,size,original_shape[:2])\n\n return dstImage, off_set_pts(kps, leftTop),(offset,lt,rb,size,original_shape[:2])\n\ndef getltrb(expand_ratio, leftTop, rightBottom,height,width,kp2d):\n inimage = (kp2d<0).sum()\n inimage += (kp2d[:,0]>320).sum()\n inimage += (kp2d[:,1]>240).sum()\n if inimage>0:\n return True\n originImage = np.zeros((240,320,3))\n original_shape = originImage.shape\n height = originImage.shape[0]\n width = originImage.shape[1]\n channels = originImage.shape[2] if len(originImage.shape) >= 3 else 1\n leftTop, rightBottom = get_image_cut_box(leftTop, rightBottom, expand_ratio)\n\n lt = [int(leftTop[0]), int(leftTop[1])]\n rb = [int(rightBottom[0]), int(rightBottom[1])]\n\n lt[0] = max(0, lt[0])\n lt[1] = max(0, lt[1])\n rb[0] = min(rb[0], width)\n rb[1] = min(rb[1], height)\n\n h = float(rb[1]-lt[1])\n w = float(rb[0]-lt[0])\n\n leftTop = [int(leftTop[0]), int(leftTop[1])]\n rightBottom = [int(rightBottom[0] + 0.5), int(rightBottom[1] + 0.5)]\n\n length = max(rightBottom[1] - leftTop[1]+1, rightBottom[0] - leftTop[0]+1)\n\n dstImage = np.zeros(shape = [length,length, channels], dtype = np.uint8)\n dstImage[:,:,:] = 0\n\n offset = [lt[0] - leftTop[0], lt[1] - leftTop[1]]\n size = [rb[0] - lt[0], rb[1] - lt[1]]\n\n try:\n dstImage[offset[1]:size[1] + offset[1], offset[0]:size[0] + offset[0], :] = originImage[lt[1]:rb[1], lt[0]:rb[0],:]\n except:\n print('error in image crop')\n return True\n mask = np.ones((240,320))\n\n if mask is not None:\n dstmask = np.zeros(shape = [length, length], dtype = np.uint8)\n dstmask[:,:] = 0\n try:\n dstmask[offset[1]:size[1] + offset[1], offset[0]:size[0] + offset[0]] = mask[lt[1]:rb[1], lt[0]:rb[0]]\n except:\n print('error in mask crop')\n return True\n\n if h<4 or w<4:\n return True\n else:\n return False\n\ndef reflect_lsp_kp(kps):\n kp_map = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13]\n joint_ref = kps[kp_map]\n joint_ref[:,0] = -joint_ref[:,0]\n\n return joint_ref - np.mean(joint_ref, axis = 0)\n\ndef reflect_pose(poses):\n swap_inds = np.array([\n 0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11, 15, 16, 17, 12, 13, 14, 18,\n 19, 20, 24, 25, 26, 21, 22, 23, 27, 28, 29, 33, 34, 35, 30, 31, 32,\n 36, 37, 38, 42, 43, 44, 39, 40, 41, 45, 46, 47, 51, 52, 53, 48, 49,\n 50, 57, 58, 59, 54, 55, 56, 63, 64, 65, 60, 61, 62, 69, 70, 71, 66,\n 67, 68\n ])\n\n sign_flip = np.array([\n 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1,\n -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1,\n -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,\n 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1,\n -1, 1, -1, -1\n ])\n\n return poses[swap_inds] * sign_flip\n\ndef crop_image(image_path, angle, lt, rb, scale, kp_2d, crop_size):\n '''\n given a crop box, expand it at 4 directions.(left, right, top, bottom)\n '''\n assert 'error algorithm exist.' 
and 0\n\n def _expand_crop_box(lt, rb, scale):\n center = (lt + rb) / 2.0\n xl, xr, yt, yb = lt[0] - center[0], rb[0] - center[0], lt[1] - center[1], rb[1] - center[1]\n xl, xr, yt, yb = xl * scale[0], xr * scale[1], yt * scale[2], yb * scale[3]\n #expand it\n lt, rb = np.array([center[0] + xl, center[1] + yt]), np.array([center[0] + xr, center[1] + yb])\n lb, rt = np.array([center[0] + xl, center[1] + yb]), np.array([center[0] + xr, center[1] + yt])\n center = (lt + rb) / 2\n return center, lt, rt, rb, lb\n\n def _extend_box(center, lt, rt, rb, lb, crop_size):\n lx, ly = np.linalg.norm(rt - lt), np.linalg.norm(lb - lt)\n dx, dy = (rt - lt) / lx, (lb - lt) / ly\n l = max(lx, ly) / 2.0\n return center - l * dx - l * dy, center + l * dx - l *dy, center + l * dx + l * dy, center - l * dx + l * dy, dx, dy, crop_size * 1.0 / l\n\n def _get_sample_points(lt, rt, rb, lb, crop_size):\n vec_x = rt - lt\n vec_y = lb - lt\n i_x, i_y = np.meshgrid(range(crop_size), range(crop_size))\n i_x = i_x.astype(np.float)\n i_y = i_y.astype(np.float)\n i_x /= float(crop_size)\n i_y /= float(crop_size)\n interp_points = i_x[..., np.newaxis].repeat(2, axis=2) * vec_x + i_y[..., np.newaxis].repeat(2, axis=2) * vec_y\n interp_points += lt\n return interp_points\n\n def _sample_image(src_image, interp_points):\n sample_method = 'nearest'\n interp_image = np.zeros((interp_points.shape[0] * interp_points.shape[1], src_image.shape[2]))\n i_x = range(src_image.shape[1])\n i_y = range(src_image.shape[0])\n flatten_interp_points = interp_points.reshape([interp_points.shape[0]*interp_points.shape[1], 2])\n for i_channel in range(src_image.shape[2]):\n interp_image[:, i_channel] = interpolate.interpn((i_y, i_x), src_image[:, :, i_channel],\n flatten_interp_points[:, [1, 0]], method = sample_method,\n bounds_error=False, fill_value=0)\n interp_image = interp_image.reshape((interp_points.shape[0], interp_points.shape[1], src_image.shape[2]))\n\n return interp_image\n\n def _trans_kp_2d(kps, center, dx, dy, lt, ratio):\n kp2d_offset = kps[:, :2] - center\n proj_x, proj_y = np.dot(kp2d_offset, dx), np.dot(kp2d_offset, dy)\n for idx in range(len(kps)):\n kps[idx, :2] = (dx * proj_x[idx] + dy * proj_y[idx] + lt) * ratio\n return kps\n\n\n src_image = cv2.imread(image_path)\n\n center, lt, rt, rb, lb = _expand_crop_box(lt, rb, scale)\n\n #calc rotated box\n radian = angle * np.pi / 180.0\n v_sin, v_cos = math.sin(radian), math.cos(radian)\n\n rot_matrix = np.array([[v_cos, v_sin],[-v_sin, v_cos]])\n\n n_corner = (np.dot(rot_matrix, np.array([lt - center, rt - center, rb - center, lb - center]).T).T) + center\n n_lt, n_rt, n_rb, n_lb = n_corner[0], n_corner[1], n_corner[2], n_corner[3]\n\n lt, rt, rb, lb = calc_obb(np.array([lt, rt, rb, lb, n_lt, n_rt, n_rb, n_lb]))\n lt, rt, rb, lb, dx, dy, ratio = _extend_box(center, lt, rt, rb, lb, crop_size = crop_size)\n s_pts = _get_sample_points(lt, rt, rb, lb, crop_size)\n dst_image = _sample_image(src_image, s_pts)\n kp_2d = _trans_kp_2d(kp_2d, center, dx, dy, lt, ratio)\n\n return dst_image, kp_2d\n\ndef flip_image(src_image, kps, mask=None):\n h, w = src_image.shape[0], src_image.shape[1]\n src_image = cv2.flip(src_image, 1)\n\n kps[:, 0] = w - 1 - kps[:, 0]\n kp_map = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13]\n kps[:, :] = kps[kp_map]\n if mask is None:\n return src_image, kps\n mask = cv2.flip(mask, 1)\n return src_image, kps, mask\n\n# Visualization func.\n\ndef draw_lsp_14kp__bone(src_image, pts):\n bones = [\n [0, 1, 255, 0, 0],\n [1, 2, 255, 0, 0],\n [2, 12, 255, 0, 0],\n [3, 
12, 0, 0, 255],\n [3, 4, 0, 0, 255],\n [4, 5, 0, 0, 255],\n [12, 9, 0, 0, 255],\n [9,10, 0, 0, 255],\n [10,11, 0, 0, 255],\n [12, 8, 255, 0, 0],\n [8,7, 255, 0, 0],\n [7,6, 255, 0, 0],\n [12, 13, 0, 255, 0]\n ]\n\n for pt in pts:\n src_image = cv2.circle(src_image,(int(pt[0]), int(pt[1])),2,(0,255,255),-1)\n if pts.shape[0]!=14:\n return src_image\n for line in bones:\n pa = pts[line[0]]\n pb = pts[line[1]]\n if (pa>0).all() and (pb>0).all():\n xa,ya,xb,yb = int(pa[0]),int(pa[1]),int(pb[0]),int(pb[1])\n src_image = cv2.line(src_image,(xa,ya),(xb,yb),(line[2], line[3], line[4]),2)\n return src_image\n\ndef plot_mesh(vertices, triangles, subplot = [1,1,1], title = 'mesh', el = 90, az = -90, lwdt=.1, dist = 6, color = \"blue\"):\n '''\n plot the mesh\n Args:\n vertices: [nver, 3]\n triangles: [ntri, 3]\n '''\n ax = plt.subplot(subplot[0], subplot[1], subplot[2], projection = '3d')\n ax.plot_trisurf(vertices[:, 0], vertices[:, 1], vertices[:, 2], triangles = triangles, lw = lwdt, color = color, alpha = 1)\n ax.axis(\"off\")\n ax.view_init(elev = el, azim = az)\n ax.dist = dist\n plt.title(title)\n return plt\n\ndef plot_3d_points(points, color = 'r', save_path='test.png'):\n\n x, y, z = points[:,0], points[:,1],points[:,2]\n ax = plt.subplot(111, projection='3d')\n ax.scatter(x, y, z, c=color)\n\n ax.set_zlabel('Z')\n ax.set_ylabel('Y')\n ax.set_xlabel('X')\n plt.savefig(save_path)\n\ndef plot_3d_points_set(points_set, colors = ['r'], save_path='test.png'):\n ax = plt.subplot(111, projection='3d')\n for points,color in zip(points_set,colors):\n x, y, z = points[:,0], points[:,1],points[:,2]\n ax.scatter(x, y, z, c=color)\n\n ax.set_zlabel('Z')\n ax.set_ylabel('Y')\n ax.set_xlabel('X')\n plt.savefig(save_path)\n\ndef show3Dpose(kp3ds, lcolor=[\"#3498db\"], rcolor=[\"#e74c3c\"], save_path='test.png',skeleton_type='lsp'): # blue, orange\n \"\"\"\n Visualize a 3d skeleton\n Args\n kp3d: kp_num x 3 vector.\n ax: matplotlib 3d axis to draw on\n lcolor: color for left part of the body\n rcolor: color for right part of the body\n add_labels: whether to add coordinate labels\n Returns\n Nothing. 
Draws on ax.\n \"\"\"\n\n #I = np.array([1,2,3,1,7,8,1, 13,14,15,14,18,19,14,26,27])-1 # start points\n #J = np.array([2,3,4,7,8,9,13,14,15,16,18,19,20,26,27,28])-1 # end points\n #LR = np.array([1,1,1,0,0,0,0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)#1-left 0-right\n if skeleton_type=='lsp':\n I = np.array([0,1,2, 5,4,3, 6,7,8, 11,10, 9, 12]) # start points\n J = np.array([1,2,12, 4,3,12, 7,8,12, 10, 9,12, 13]) # end points\n LR = np.array([0,0,0, 1,1,1, 0,0,0, 1, 1, 1, 0], dtype=bool)#1-left 0-right\n elif skeleton_type=='smpl':\n I = np.array([0,0,1, 2,4,5, 0, 12,12, 12,16,17, 18,19]) # start points\n J = np.array([1,2,4, 5,7,8, 12,15,16, 17,18,19, 20,21]) # end points\n LR = np.array([1,0,1, 0,1,0, 0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)#1-left 0-right\n\n for idx,kp3d in enumerate(kp3ds):\n ax = plt.subplot(1,len(kp3ds),idx+1, projection='3d')\n for i in np.arange( len(I) ):\n x, y, z = [np.array( [kp3d[I[i], j], kp3d[J[i], j]] ) for j in range(3)]\n ax.plot(z, x, -y, lw=2, c=lcolor[idx] if LR[i] else rcolor[idx])\n\n RADIUS = 1 # space around the subject\n xroot, yroot, zroot = 0,0,0#(kp3d[2,0]+kp3d[3,0], kp3d[0,1], kp3d[0,2]\n ax.set_xlim3d([-RADIUS+xroot, RADIUS+xroot])\n ax.set_zlim3d([-RADIUS+zroot, RADIUS+zroot])\n ax.set_ylim3d([-RADIUS+yroot, RADIUS+yroot])\n\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n\n # Get rid of the ticks and tick labels\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n\n ax.get_xaxis().set_ticklabels([])\n ax.get_yaxis().set_ticklabels([])\n ax.set_zticklabels([])\n ax.set_aspect('equal')\n\n # Get rid of the panes (actually, make them white)\n white = (1.0, 1.0, 1.0, 0.0)\n ax.w_xaxis.set_pane_color(white)\n ax.w_yaxis.set_pane_color(white)\n # Keep z pane\n\n # Get rid of the lines in 3d\n ax.w_xaxis.line.set_color(white)\n ax.w_yaxis.line.set_color(white)\n ax.w_zaxis.line.set_color(white)\n\n plt.savefig(save_path)\n\ndef show2Dpose(channels, ax, lcolor=\"#3498db\", rcolor=\"#e74c3c\", add_labels=False):\n \"\"\"\n Visualize a 2d skeleton\n Args\n channels: 64x1 vector. The pose to plot.\n ax: matplotlib axis to draw on\n lcolor: color for left part of the body\n rcolor: color for right part of the body\n add_labels: whether to add coordinate labels\n Returns\n Nothing. 
Draws on ax.\n \"\"\"\n\n assert channels.size == len(data_utils.H36M_NAMES)*2, \"channels should have 64 entries, it has %d instead\" % channels.size\n vals = np.reshape( channels, (len(data_utils.H36M_NAMES), -1) )\n\n I = np.array([1,2,3,1,7,8,1, 13,14,14,18,19,14,26,27])-1 # start points\n J = np.array([2,3,4,7,8,9,13,14,16,18,19,20,26,27,28])-1 # end points\n LR = np.array([1,1,1,0,0,0,0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)\n\n # Make connection matrix\n for i in np.arange( len(I) ):\n x, y = [np.array( [vals[I[i], j], vals[J[i], j]] ) for j in range(2)]\n ax.plot(x, y, lw=2, c=lcolor if LR[i] else rcolor)\n\n # Get rid of the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Get rid of tick labels\n ax.get_xaxis().set_ticklabels([])\n ax.get_yaxis().set_ticklabels([])\n\n RADIUS = 350 # space around the subject\n xroot, yroot = vals[0,0], vals[0,1]\n ax.set_xlim([-RADIUS+xroot, RADIUS+xroot])\n ax.set_ylim([-RADIUS+yroot, RADIUS+yroot])\n if add_labels:\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"z\")\n\n ax.set_aspect('equal')\n\ndef fig2data ( fig ):\n \"\"\"\n @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it\n @param fig a matplotlib figure\n @return a numpy 3D array of RGBA values\n \"\"\"\n # draw the renderer\n fig.canvas.draw ( )\n\n # Get the RGBA buffer from the figure\n w,h = fig.canvas.get_width_height()\n buf = numpy.fromstring ( fig.canvas.tostring_argb(), dtype=numpy.uint8 )\n buf.shape = ( w, h,4 )\n\n # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode\n buf = numpy.roll ( buf, 3, axis = 2 )\n return buf\n\n\ndef line_intersect(sa, sb):\n al, ar, bl, br = sa[0], sa[1], sb[0], sb[1]\n assert al <= ar and bl <= br\n if al >= br or bl >= ar:\n return False\n return True\n\n'''\n return whether two rectangle intersect\n ra, rb left_top point, right_bottom point\n'''\ndef rectangle_intersect(ra, rb):\n ax = [ra[0][0], ra[1][0]]\n ay = [ra[0][1], ra[1][1]]\n\n bx = [rb[0][0], rb[1][0]]\n by = [rb[0][1], rb[1][1]]\n\n return line_intersect(ax, bx) and line_intersect(ay, by)\n\ndef get_intersected_rectangle(lt0, rb0, lt1, rb1):\n if not rectangle_intersect([lt0, rb0], [lt1, rb1]):\n return None, None\n\n lt = lt0.copy()\n rb = rb0.copy()\n\n lt[0] = max(lt[0], lt1[0])\n lt[1] = max(lt[1], lt1[1])\n\n rb[0] = min(rb[0], rb1[0])\n rb[1] = min(rb[1], rb1[1])\n return lt, rb\n\ndef get_union_rectangle(lt0, rb0, lt1, rb1):\n lt = lt0.copy()\n rb = rb0.copy()\n\n lt[0] = min(lt[0], lt1[0])\n lt[1] = min(lt[1], lt1[1])\n\n rb[0] = max(rb[0], rb1[0])\n rb[1] = max(rb[1], rb1[1])\n return lt, rb\n\ndef get_rectangle_area(lt, rb):\n return (rb[0] - lt[0]) * (rb[1] - lt[1])\n\ndef get_rectangle_intersect_ratio(lt0, rb0, lt1, rb1):\n (lt0, rb0), (lt1, rb1) = get_intersected_rectangle(lt0, rb0, lt1, rb1), get_union_rectangle(lt0, rb0, lt1, rb1)\n\n if lt0 is None:\n return 0.0\n else:\n return 1.0 * get_rectangle_area(lt0, rb0) / get_rectangle_area(lt1, rb1)\n\ndef convert_image_by_pixformat_normalize(src_image, pix_format, normalize):\n if pix_format == 'NCHW':\n src_image = src_image.transpose((2, 0, 1))\n\n if normalize:\n src_image = (src_image.astype(np.float) / 255) * 2.0 - 1.0\n\n return src_image\n\ndef align_by_root(joints):\n root_id = 0\n pelvis = joints[:, root_id, :]\n return joints - torch.unsqueeze(pelvis, dim=1)\n'''\n align ty pelvis\n joints: n x 14 x 3, by lsp order\n'''\ndef align_by_pelvis(joints):\n left_id = 3\n right_id = 2\n pelvis = (joints[:, left_id, :] + joints[:, right_id, :]) 
/ 2.0\n return joints - torch.unsqueeze(pelvis, dim=1)\n\ndef align_by_pelvis_single(joints, get_pelvis=False):\n \"\"\"\n Assumes joints is 14 x 3 in LSP order.\n Then hips are: [3, 2]\n Takes mid point of these points, then subtracts it.\n \"\"\"\n left_id = 3\n right_id = 2\n\n pelvis = (joints[left_id, :] + joints[right_id, :]) / 2.\n if get_pelvis:\n return joints - np.expand_dims(pelvis, axis=0), pelvis\n else:\n return joints - np.expand_dims(pelvis, axis=0)\n\ndef copy_state_dict(cur_state_dict, pre_state_dict, prefix = ''):\n def _get_params(key):\n key = prefix + key\n if key in pre_state_dict:\n return pre_state_dict[key]\n return None\n\n for k in cur_state_dict.keys():\n v = _get_params(k)\n try:\n if v is None:\n print('parameter {} not found'.format(k))\n continue\n cur_state_dict[k].copy_(v)\n except:\n print('copy param {} failed'.format(k))\n continue\n\n# IO functions\n\ndef save_pkl(info,name='../data/info.pkl'):\n check_file_and_remake(name.replace(os.path.basename(name),''))\n if name[-4:] !='.pkl':\n name += '.pkl'\n with open(name,'wb') as outfile:\n pickle.dump(info, outfile, pickle.HIGHEST_PROTOCOL)\ndef read_pkl(name = '../data/info.pkl'):\n with open(name,'rb') as f:\n return pickle.load(f)\ndef read_pkl_coding(name = '../data/info.pkl'):\n with open(name, 'rb') as f:\n u = pickle._Unpickler(f)\n u.encoding = 'latin1'\n p = u.load()\n return p\ndef check_file_and_remake(path,remove=False):\n if remove:\n if os.path.isdir(path):\n shutil.rmtree(path)\n if not os.path.isdir(path):\n os.makedirs(path)\n\ndef save_h5(info,name):\n check_file_and_remake(name.replace(os.path.basename(name),''))\n if name[-3:] !='.h5':\n name += '.h5'\n f=h5py.File(name,'w')\n for item, value in info.items():\n f[item] = value\n f.close()\n\ndef read_h5(name):\n if name[-3:] !='.h5':\n name += '.h5'\n f=h5py.File(name,'r')\n info = {}\n for item, value in f.items():\n info[item] = np.array(value)\n f.close()\n return info\n\ndef h36m32_2_lsp14(h36m32):\n relation = [3,2,1,6,7,8,27,26,25,17,18,19,13,15]\n lsp14 = h36m32[:,relation,:]\n return lsp14\n" ]
[ [ "numpy.dot", "torch.abs", "numpy.expand_dims", "torch.sin", "torch.cat", "torch.zeros", "numpy.max", "numpy.mean", "numpy.linalg.svd", "torch.norm", "torch.ones", "numpy.linalg.eig", "numpy.eye", "torch.eye", "torch.from_numpy", "scipy.interpolate.interpn", "matplotlib.pyplot.subplot", "numpy.zeros", "torch.nn.functional.pad", "torch.cos", "torch.div", "matplotlib.pyplot.title", "numpy.min", "numpy.linalg.inv", "torch.unsqueeze", "matplotlib.pyplot.savefig", "numpy.cov", "numpy.transpose", "torch.stack", "numpy.array", "numpy.sum", "matplotlib.use", "numpy.tile", "numpy.linalg.norm", "numpy.ones", "torch.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
allengrr/deadlock_project
[ "933878077c45a7df04daa087407bb2620c064617", "933878077c45a7df04daa087407bb2620c064617", "933878077c45a7df04daa087407bb2620c064617", "933878077c45a7df04daa087407bb2620c064617", "933878077c45a7df04daa087407bb2620c064617", "933878077c45a7df04daa087407bb2620c064617", "933878077c45a7df04daa087407bb2620c064617" ]
[ "4algo/manual/algo_six.py", "algo/test/plot_donut.py", "3algo/python_data/compute_avg.py", "4algo/manual/gui/algo_three.py", "3algo/16_edf_wait_die_NS.py", "2algo/results and data complilation/execution_report_c.py", "4algo/manual/gui/algo_five.py" ]
[ "from functools import reduce\nimport numpy as np\nimport random as r\nimport socket\nimport struct\nimport subprocess as sp\nimport threading\nfrom threading import Thread\nimport ast\nimport time\nimport datetime as dt\nimport os\nimport psutil\nfrom netifaces import interfaces, ifaddresses, AF_INET\nimport paho.mqtt.client as mqtt\nimport smtplib\nimport config\nimport paramiko\nimport argparse\nimport pickle\n\nhosts = {} # {hostname: ip}\n\n_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},\n 't2': {'wcet': 1, 'period': 5, 'deadline': 4},\n 't3': {'wcet': 2, 'period': 10, 'deadline': 8},\n 't4': {'wcet': 1, 'period': 10, 'deadline': 9},\n 't5': {'wcet': 3, 'period': 15, 'deadline': 12}\n }\n\n# mat = {'p0': ['cpu', 'mem', 'storage']}\n_need = {\n 't1': [7, 4, 3],\n 't2': [1, 2, 2],\n 't3': [6, 0, 0],\n 't4': [0, 1, 1],\n 't5': [4, 3, 1]\n\n}\nallocation = {\n 't1': [0, 1, 0],\n 't2': [2, 0, 0],\n 't3': [3, 0, 2],\n 't4': [2, 1, 1],\n 't5': [0, 0, 2]\n}\n\n_cpu = [] # cpu plot list\nprev_t = 0 # variable for cpu util\n_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec\n_off_cloud = 0 # used to keep a count of tasks offloaded to cloud\n_loc = 0 # used to keep a count of tasks executed locally\n_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec\ndeadlock = [1] # keeps count of how many deadlock is resolved\nmemory = []\nmec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}\nmec_rtt = {} # {ip: [RTT]}\n\noffload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload\nreoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.\ndiscovering = 0 # if discovering == 0 update host\ntest = []\n_time = []\n_pos = 0\nreceived_task_queue = [] # [[(task_list,wait_time), host_ip], ....]\nthread_record = []\n_port_ = 64000\ncloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud\ncloud_port = 63000\nreceived_time = []\ntask_record = {} # keeps record of task reoffloaded\ntask_id = 0 # id for each task reoffloaded\nshared_resource_lock = threading.Lock()\nt_track = 1\n\n\ndef ping(host):\n cmd = [f'ping -c 1 {host}']\n output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\\n')\n try:\n value = float(output[-2].split('=')[-1].split('/')[0])\n except ValueError:\n value = None\n return value\n\n\ndef discovering_group():\n global sock1\n\n multicast_group = '224.3.29.71'\n server_address = ('', 10000)\n\n # Create the socket\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock1.bind(server_address)\n # Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef offloading_group():\n global sock2\n\n multicast_group = '224.5.5.55'\n server_address = ('', 20000)\n\n # Create the socket\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock2.bind(server_address)\n # Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef ip_address():\n try:\n cmd = ['ifconfig eth1 | grep inet | cut -d \":\" -f 2 | cut -d \" \" -f 1']\n 
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]\n if len(address.strip().split('.')) == 4:\n return address.strip()\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n except Exception as e:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n\n\ndef _memory():\n global memory\n\n memory.append(round(my_algo.memory_percent(), 4))\n\n\ndef m_cpu():\n global prev_t\n\n # get cpu\n next_t = psutil.cpu_percent(percpu=False)\n delta = abs(prev_t - next_t)\n prev_t = next_t\n _cpu.append(round(delta, 4))\n\n\ndef get_mec_rtts():\n for i in mec_rtt:\n mec_rtt[i].append(get_rtt(i))\n\n\ndef generate_results():\n _memory()\n m_cpu()\n get_mec_rtts()\n\n\ndef host_ip_set():\n global ip_set\n\n ip_set = set()\n for ifaceName in interfaces():\n addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]\n ip_set.add(', '.join(addresses))\n\n\ndef get_time():\n _time_ = []\n d = str(dt.datetime.utcnow()).split()\n _time_ += d[0].split('-')\n g = d[1].split('.')\n _time_ += g[0].split(':')\n try:\n _time_.append(g[1])\n except IndexError:\n print(f'indexError on Time: {g}')\n _time_.append('0')\n return _time_\n\n\ndef get_rtt(host):\n rtt = ping(host)\n if rtt:\n return round(rtt, 4)\n else:\n return get_rtt(host)\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\ndef _lcm(a, b):\n return int(a * b / gcd(a, b))\n\n\ndef lcm(_list):\n return reduce(_lcm, _list)\n\n\ndef gosh_dist(_range):\n return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range\n\n\ndef on_connect(connect_client, userdata, flags, rc):\n # print(\"Connected with Code :\" +str(rc))\n # Subscribe Topic from here\n connect_client.subscribe(node_id)\n\n\n# Callback Function on Receiving the Subscribed Topic/Message\ndef on_message(message_client, userdata, msg):\n global run\n\n data = str(msg.payload, 'utf-8')\n if data[0] == 'c': # receive from cloud\n received_task = data[2:]\n # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])\n if received_task in task_record:\n del task_record[received_task]\n received_task = '.'.join(received_task.split('.')[:-1])\n _client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )\n cooperate['cloud'] += 1\n count_task_sent(received_task)\n\n elif data[0] == 't': # receive from client\n received_task = ast.literal_eval(data[2:])\n received_task_queue.append(received_task)\n received_time.append(time.time())\n\n elif data.strip() == 'stop': # stop {hostname: ip}\n print('sending stop alert')\n run = 0\n\n\ndef connect_to_broker(stop):\n global _client\n\n username = 'mec'\n password = 'password'\n broker_port_no = 1883\n\n _client = mqtt.Client()\n _client.on_connect = on_connect\n _client.on_message = on_message\n\n _client.username_pw_set(username, password)\n _client.connect(broker_ip, broker_port_no, 60)\n _client.loop_start()\n while True:\n if stop():\n _client.loop_stop()\n _client.disconnect()\n print('broker loop terminated')\n break\n\n\ndef task_time_map(seq, process):\n exe_seq = []\n capacity_sum = 0\n for job in process:\n capacity_sum += process[job]['wcet']\n while capacity_sum > 0:\n for job in seq:\n if process[job]['wcet'] > 0:\n exe_seq.append(job)\n process[job]['wcet'] -= 1\n capacity_sum -= 1\n\n return exe_seq\n\n\ntotal_received_task = 0\n\n\ndef edf():\n global 
total_received_task\n t_lcm = lcm([tasks[i]['period'] for i in tasks])\n\n t_dead = {i: tasks[i]['deadline'] for i in tasks}\n\n sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))\n # print(sorted_dead)\n\n ready_task = []\n for i in sorted_dead:\n period = tasks[i[0]]['period']\n # print('lcm: ', t_lcm, ' period: ', period)\n t_range = int(t_lcm / period)\n last_dead = 0\n for j in range(t_range):\n ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))\n last_dead += period\n\n ready_task = sorted(ready_task, key=lambda t: t[1])\n print(ready_task)\n\n t_time_ = 0\n schedule = []\n missed = []\n register = {i: 0 for i in tasks.keys()} # {ti : amount executed}\n for i in ready_task:\n if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:\n while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:\n t_time_ += 1\n # schedule.append(('idle', t_time))\n if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:\n if t_time_ + tasks[i[0]]['wcet'] <= i[1]:\n register[i[0]] += 1\n t_time_ += tasks[i[0]]['wcet']\n schedule.append(i[0])\n else:\n print('Deadline missed: ', i)\n missed.append(i[0])\n\n # print('s : ', schedule)\n # print('r: ', register)\n if len(missed) > 0:\n # print('missed deadline: ', missed)\n cooperative_mec(missed)\n _edf_ = task_time_map(schedule, tasks)\n total_received_task += len(_edf_)\n return _edf_\n\n\n# generate execution sequence\ndef wait_die(processes, avail, n_need, allocat):\n global deadlock\n\n offload = []\n\n # To store execution sequence\n exec_seq = []\n\n # Make a copy of available resources\n work = [0] * len(processes)\n\n # While all processes are not finished\n # or system is not in safe state.\n while 'w' or 0 in work:\n if 0 in work:\n ind = work.index(0)\n i = processes[ind]\n elif 'w' in work:\n # print('wk: ', work)\n ind = work.index('w')\n i = processes[ind]\n else:\n break\n\n # print('comparing| process: ', i, _need[i], 'work: ', avail)\n if not (False in list(np.greater_equal(avail, n_need[i]))):\n exec_seq.append(i)\n avail = np.add(avail, allocat[i])\n work[ind] = 1\n # print('added: ', exec_seq)\n\n else:\n a = list(set(processes) - set(exec_seq) - set(offload))\n n = {}\n for j in a:\n n[j] = sum(allocat[j])\n _max = max(n, key=n.get)\n # print('work: ', work, 'need: ', _need[_max])\n if processes.index(_max) > processes.index(i): # if true, i is older\n # if process is already waiting then offload process\n if work[ind] == 'w':\n offload.append(i)\n avail = np.array(avail) + np.array(allocat[i])\n work[processes.index(i)] = 1\n # print('offload reentry: ', i, offload)\n else:\n # wait put process to waiting\n work[processes.index(i)] = 'w'\n # print('waiting: ', i)\n\n else:\n # abort i\n offload.append(i)\n avail = np.array(avail) + np.array(allocat[i])\n work[processes.index(i)] = 1\n # print('offload: ', i)\n\n if len(offload) > 0:\n # print('offloading tasks: ', offload)\n cooperative_mec(offload)\n deadlock[0] += 1\n\n # print('Execution seq: ', exec_seq)\n\n return exec_seq\n\n\ndef get_exec_seq(pro):\n processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]\n\n # Available instances of resources\n avail = [6, 5, 5]\n n_need = {i: _need[i[:2]] for i in processes}\n # print('need', n_need)\n # Resources allocated to processes\n allot = {i: allocation[i[:2]] for i in processes}\n\n # return execution sequence\n return wait_die(processes, avail, n_need, allot)\n\n\ndef calc_wait_time(list_seq):\n pre = 0\n time_dic = {}\n for i in list_seq:\n j = i.split('_')[0]\n time_dic[i] = 
round(t_time[j][0] + pre, 3)\n pre += t_time[j][0]\n # waiting time = total waiting time ÷ 2 average waiting time might be too tight\n w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)\n send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs\n return time_dic\n\n\ndef compare_local_mec(list_seq):\n time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}\n print('local vs MEC comparison: ', time_compare_dict)\n execute_mec = []\n execute_locally = []\n for i in time_compare_dict:\n if time_compare_dict[i]:\n execute_locally.append(i)\n else:\n execute_mec.append(i)\n\n return execute_mec, execute_locally\n\n\ndef calculate_mov_avg(ma1, a1):\n if ma1 in mec_waiting_time:\n _count = len(mec_waiting_time[ma1])\n avg1 = mec_waiting_time[ma1][-1]\n else:\n _count = 0\n avg1 = 0\n _count += 1\n avg1 = ((_count - 1) * avg1 + a1) / _count\n # ma1.append(avg1) #cumulative average formula\n # μ_n=((n-1) μ_(n-1) + x_n)/n\n return round(avg1, 4)\n\n\ndef send_message(mg):\n _multicast_group = ('224.3.29.71', 10000)\n try:\n\n # Send data to the multicast group\n if mg == 'hello':\n smg = mg + ' ' + str([get_hostname(), ip_address()])\n sock1.sendto(str.encode(smg), _multicast_group)\n print('\\nHello message sent')\n\n else:\n sock1.sendto(str.encode(mg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef get_hostname():\n cmd = ['cat /etc/hostname']\n hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]\n return hostname\n\n\ndef receive_message(stop): # used for multi-cast message exchange among MEC\n global hosts\n\n while True:\n if stop():\n print('Stopped: receive_message()')\n break\n else:\n data, address = sock1.recvfrom(1024)\n _d = data.decode()\n if _d[:5] == 'hello':\n _data = ast.literal_eval(_d[6:])\n hosts[_data[0]] = _data[1]\n\n if _data[1] != host_ip:\n mec_rtt[_data[1]] = []\n\n elif (_d[:6] == 'update') and (discovering == 0):\n hosts = ast.literal_eval(_d[7:])\n # print('received: ', hosts)\n for i in hosts:\n if i != host_ip:\n mec_rtt[i] = []\n\n elif _d[:2] == 'wt':\n\n split_data = _d.split()\n\n if split_data[1] != host_ip:\n\n w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(\n address[0])) # calcuate moving average of mec wait time => w_time = wait time + rtt\n\n if split_data[1] in mec_waiting_time:\n mec_waiting_time[split_data[1]].append(w_time)\n else:\n mec_waiting_time[split_data[1]] = [w_time]\n\n\ndef mec_comparison():\n # returns min average waiting for all mecs\n if len(mec_waiting_time) == 0:\n return 0\n min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}\n min_wt = min(min_mec, key=min_mec.get)\n return min_wt\n\n\ndef cooperative_mec(mec_list):\n global _off_cloud\n global _off_mec\n global task_id, task_record\n\n for i in mec_list:\n _host = mec_comparison()\n if _host == 0:\n # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]\n _send_task = f\"{i.split('_')[0]}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n else:\n j = i.split('_')[0]\n _max = np.array([6, 5, 5])\n send = 'false'\n if not (False in list(np.greater_equal(_max, _need[j[:2]]))):\n send = 'true'\n # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY\n if 
mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n\n else:\n _send_task = f\"{j}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]\n\n # cloud_register[j.split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n\noutward_mec = 0\noffload_check = [0, 0]\n\n\ndef execute_re_offloaded_task(offloaded_task):\n global outward_mec, offload_check\n exec_list = get_exec_seq(offloaded_task[0])\n outward_mec += len(exec_list)\n for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'\n j = i.split('_')[0]\n time.sleep(offloaded_task[1][j] / 2)\n # print('j task: ', j)\n send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))\n\n\nclients_record = {}\n\n\ndef count_task_sent(task):\n global clients_record\n c_id = task.split('.')[2]\n if c_id in clients_record:\n clients_record[c_id] += 1\n else:\n clients_record[c_id] = 1\n\n\ndef execute(local):\n print('\\nExecuting :', local)\n\n for i in local:\n j = i.split('_')[0]\n _t = t_time[j][0] / 2\n time.sleep(_t)\n print('#{}'.format(local.index(i) + 1), ' Executed: ', i)\n _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )\n count_task_sent(j)\n print('============== EXECUTION DONE ===============')\n\n\ncooperate = {'mec': 0, 'cloud': 0}\n\n\ndef receive_offloaded_task_mec(stop): # run as a thread\n global _inward_mec\n global t_track\n\n while True:\n if stop():\n print('Stopped: receive_offloaded_task_mec()')\n break\n else:\n data, address = sock2.recvfrom(1024)\n if len(data.decode()) > 0:\n da = data.decode().split(' ')\n if (address[0] not in ip_set) and (da[0] == node_id): # send back to client\n # send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client\n if da[1] in task_record:\n del task_record[da[1]]\n task_new = '.'.join(da[1].split('.')[:-1])\n _client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )\n count_task_sent(da[1])\n cooperate['mec'] += 1\n else:\n print('*' * 30 + f'\\n{da[1]} Not in Task Record\\n' + '*' * 30)\n elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):\n _received = ast.literal_eval(da[2] + da[3])\n shared_resource_lock.acquire()\n task = _received[0] + '*{}'.format(t_track)\n reoffload_list[0].append(task)\n reoffload_list[1][task] = _received[1]\n shared_resource_lock.release()\n t_track += 1\n _inward_mec += 1\n\n\ndef call_execute_re_offload(stop):\n global reoffload_list, outward_mec\n global offload_check\n\n while True:\n 
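        # Drain the shared re-offload queue: a single pending task is "executed" by
        # sleeping for half its recorded time and its result is sent back over multicast;
        # a batch is copied and handed to execute_re_offloaded_task. All mutations of
        # reoffload_list are guarded by shared_resource_lock.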
if stop():\n print('Stopped: call_execute_re_offload()')\n break\n else:\n if len(reoffload_list[0]) == 1:\n t = reoffload_list[0][-1]\n time.sleep(reoffload_list[1][t] / 2)\n shared_resource_lock.acquire()\n reoffload_list[0].remove(t)\n del reoffload_list[1][t]\n shared_resource_lock.release()\n send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))\n outward_mec += 1\n offload_check[0] += 1\n elif len(reoffload_list[0]) > 1:\n o = reoffload_list.copy()\n offload_check[1] += len(o)\n execute_re_offloaded_task(o)\n for i in o[0]:\n shared_resource_lock.acquire()\n reoffload_list[0].remove(i)\n del reoffload_list[1][i]\n shared_resource_lock.release()\n\n\ndef send_email(msg, send_path):\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com')\n server.ehlo()\n server.login(config.email_address, config.password)\n subject = 'Deadlock results edf+wait-die {} {}'.format(get_hostname(), send_path)\n # msg = 'Attendance done for {}'.format(_timer)\n _message = 'Subject: {}\\n\\n{}\\n\\n SENT BY RIHANNA \\n\\n'.format(subject, msg)\n server.sendmail(config.email_address, config.send_email, _message)\n server.quit()\n print(\"Email sent!\")\n except Exception as e:\n print(e)\n\n\ndef send_offloaded_task_mec(msg):\n _multicast_group = ('224.5.5.55', 20000)\n try:\n sock2.sendto(str.encode(msg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef mec_id(client_ip):\n _id = client_ip.split('.')[-1]\n if len(_id) == 1:\n return '00' + _id\n elif len(_id) == 2:\n return '0' + _id\n else:\n return _id\n\n\ndef send_result(host_, data):\n try:\n c = paramiko.SSHClient()\n\n un = 'mec'\n pw = 'password'\n port = 22\n\n c.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n c.connect(host_, port, un, pw)\n for i in data:\n cmd = ('echo \"{}\" >> /home/mec/result/data.py'.format(i)) # task share : host ip task\n stdin, stdout, stderr = c.exec_command(cmd)\n except Exception as e:\n print(e)\n\n\ndef save_and_send(send_path):\n _id_ = get_hostname()[-1]\n result = f\"\\nwt{_id_}_16_{mec_no} = {mec_waiting_time} \" \\\n f\"\\nrtt{_id_}_16_{mec_no} = {mec_rtt} \\ncpu{_id_}_16_{mec_no} = {_cpu} \" \\\n f\"\\noff_mec{_id_}_16_{mec_no} = {_off_mec} \" \\\n f\"\\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} \" \\\n f\"\\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}\" \\\n f\"\\nloc{_id_}_16_{mec_no} = {_loc} \" \\\n f\"\\ndeadlock{_id_}_16_{mec_no} = {deadlock} \\nmemory{_id_}_16_{mec_no} = {memory}\" \\\n f\"\\ntask_received = {total_received_task} \\nsent_t = {clients_record}\" \\\n f\"\\ncooperate{_id_}_16_{mec_no} = {cooperate} \\ntask_record{_id_}_16_{mec_no} = {task_record}\" \\\n f\"\\noutward_mec{_id_}_16_{mec_no} = {outward_mec}\" \\\n f\"\\noffload_check{_id_}_16_{mec_no} = {offload_check}\"\n list_result = [\n f\"\\nwt{_id_}_16_{mec_no} = {mec_waiting_time} \",\n f\"\\nrtt{_id_}_16_{mec_no} = {mec_rtt} \\ncpu{_id_}_16_{mec_no} = {_cpu} \",\n f\"\\noff_mec{_id_}_16_{mec_no} = {_off_mec} \\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} \",\n f\"\\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}\",\n f\"\\nloc{_id_}_16_{mec_no} = {_loc} \",\n f\"\\ndeadlock{_id_}_16_{mec_no} = {deadlock} \\nmemory{_id_}_16_{mec_no} = {memory}\",\n f\"\\ntask_received{_id_}_16_{mec_no} = {total_received_task} \\nsent_t{_id_}_16_{mec_no} = {clients_record}\",\n f\"\\ncooperate{_id_}_16_{mec_no} = {cooperate} \\ntask_record{_id_}_16_{mec_no} = {task_record} \"\n f\"\\noutward_mec{_id_}_16_{mec_no} = {outward_mec}\",\n f\"\\noffload_check{_id_}_16_{mec_no} = {offload_check}\"\n ]\n path_ = 
'data/raw/'\n if os.path.exists(path_):\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datal.py\"\n os.system(cmd)\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datap.py\"\n os.system(cmd)\n else:\n os.system(f'mkdir -p {path_}')\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datal.py\"\n os.system(cmd)\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datap.py\"\n os.system(cmd)\n\n file_ = open(f'{path_}{_id_}_16_{mec_no}datap.py', 'w')\n for i in list_result:\n cmd = f'echo \"{i}\" >> {path_}{_id_}_16_{mec_no}datal.py'\n file_.write(i)\n os.system(cmd)\n file_.close()\n sp.run(\n [\"scp\", f\"{path_}{_id_}_16_{mec_no}datap.py\", f\"mec@{hosts['osboxes-0']}:{send_path}\"])\n\n send_result(hosts['osboxes-0'], list_result)\n send_email(result, send_path)\n if len(task_record) > 0:\n for _task_ in task_record:\n task_new = '.'.join(_task_.split('.')[:-1])\n _client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )\n\n\ndef start_loop():\n global _loc\n global tasks\n global t_time\n global node_id\n\n print('\\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\\n')\n\n node_id = mec_id(ip_address())\n # print('node id: ', node_id)\n func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]\n threads_ = []\n stop = False\n for i in func_to_thread:\n threads_.append(Thread(target=i, args=(lambda: stop,)))\n threads_[-1].daemon = True\n threads_[-1].start()\n\n input('start..')\n print('========= Waiting for tasks ==========')\n _time_ = dt.datetime.now()\n while True:\n try:\n if len(received_task_queue) > 0:\n info = received_task_queue.pop(0)\n tasks, t_time = info\n\n print('EDF List of Processes: ', tasks, '\\n')\n\n print('\\n========= Running Deadlock Algorithm ===========')\n list_seq = get_exec_seq(edf())\n if len(list_seq) > 0: # do only when there is a task in safe sequence\n wait_list = calc_wait_time(list_seq)\n print('\\nWaiting Time List: ', wait_list)\n compare_result = compare_local_mec(wait_list)\n print('\\nExecute Locally: ', compare_result[1])\n _loc += len(compare_result[1]) # total number of tasks to be executed locally\n print('\\nExecute in MEC: ', compare_result[0])\n\n print('\\nSending to cooperative platform')\n if len(compare_result[0]) > 0:\n cooperative_mec(compare_result[0])\n execute(compare_result[1])\n generate_results()\n _time_ = dt.datetime.now()\n else:\n send_message(str('wt {} 0.0'.format(ip_address())))\n time.sleep(.4)\n now = dt.datetime.now()\n delta = now - _time_\n if delta > dt.timedelta(minutes=4):\n print('terminating programme 3 mins elapsed')\n stop = False\n break\n\n except KeyboardInterrupt:\n print('\\nProgramme Terminated')\n cmd = 'kill -9 {}'.format(os.getpid())\n os.system(cmd)\n break\n print('algo stopped!')\n\n\nclass BrokerSend:\n def __init__(self, user, pw, ip, sub_topic, data):\n self.user = user\n self.pw = pw\n self.ip = ip\n self.port = 1883\n self.topic = sub_topic\n self.response = None\n self.client = mqtt.Client()\n self.client.username_pw_set(self.user, self.pw)\n self.client.connect(self.ip, self.port, 60)\n self.data = data\n\n def publish(self):\n self.client.publish(self.topic, self.data)\n\n def __del__(self):\n print('BrokerSend Object Deleted!')\n\n\ndef run_me(mec_no_, send_path, broker_ip_): # call this from agent\n global discovering\n global mec_no\n global host_ip\n global my_algo\n global broker_ip\n\n print('mec ip: ', ip_address())\n my_algo = psutil.Process()\n discovering_group()\n offloading_group()\n 
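    # Record every local interface address; receive_offloaded_task_mec uses this set
    # to ignore multicast packets that originated from this host.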
host_ip_set()\n\n mec_no = mec_no_\n broker_ip = broker_ip_\n\n host_ip = ip_address()\n print('MEC Details: ', hosts)\n discovering = 1\n time.sleep(2)\n for host in hosts:\n if hosts[host] != host_ip:\n mec_rtt[hosts[host]] = []\n os.system(f'echo {mec_no}/{send_path} >> started.txt')\n start_loop()\n print('saving data')\n save_and_send(send_path)\n print('send alert to control')\n time.sleep(r.uniform(1, 30))\n data = pickle.dumps([get_hostname(), host_ip])\n broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111', 'data': data}\n BrokerSend(**broker_dict).publish()\n print('Terminating process')\n cmd = 'kill -9 {}'.format(os.getpid())\n os.system(cmd)\n\n\ndef main():\n global hosts\n global cloud_ip\n # (--n, --mec_no_, --cloud_ip, --s_path, --b_ip) send_path = f'/home/mec/result/{kind}/{count}'\n mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',\n 'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',\n 'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',\n }\n gui = {'osboxes-0': '192.168.122.110'}\n cloud_ips = ['192.168.200.11', '192.168.200.12']\n b_ip = '192.168.122.111'\n parser = argparse.ArgumentParser()\n parser.add_argument('--n', type=int, default=1.0, help='Number of MEC nodes')\n parser.add_argument('--p', type=str, default='/home/mec/result/python', help='Path to send result: homo_1')\n args = parser.parse_args()\n\n kind, count = args.p.split('_')\n send_path = f'/home/mec/result/{kind}/{count}'\n\n ho = sorted(list(mec_nodes))[:args.n - 1]\n hosts = {**{host: mec_nodes[host] for host in ho if ho != get_hostname()}, **gui}\n\n ho += ['osboxes-0']\n cloud_ip = cloud_ips[ho.index(get_hostname()) % 2]\n os.system('clear')\n run_me(mec_no_=args.n, send_path=send_path, broker_ip_=b_ip)\n\n\nif __name__ == '__main__':\n main()\n", "import matplotlib.pyplot as plt\nimport numpy as np\nimport random as r\nimport time\nfrom drawnow import *\n\n\n\nrecipe = [\"225 g flour\",\n \"90 g sugar\",\n \"1 egg\",\n \"60 g butter\",\n \"100 ml milk\",\n \"1/2 package of yeast\",\n \"225 g flour\",\n \"90 g sugar\",\n \"1 egg\",\n \"60 g butter\",\n \"225 g flour\",\n \"90 g sugar\",\n \"1 egg\",\n \"60 g butter\",\n \"100 ml milk\"]\n\ndata = [277, 250, 267, 213, 274, 297]#[225, 90, 50, 60, 100, 5]\n\ndef gen_rand():\n global data\n data=[]\n for i in range(15):\n data.append(r.randint(50,300))\n\ndef plotme():\n print(data)\n \n #plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect=\"equal\"))\n wedges, texts = plt.pie(data, wedgeprops=dict(width=0.5), startangle=-40)\n\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"w\", ec=\"k\", lw=0.72)\n kw = dict(arrowprops=dict(arrowstyle=\"-\"),\n bbox=bbox_props, zorder=0, va=\"center\")\n\n for i, p in enumerate(wedges):\n ang = (p.theta2 - p.theta1)/2. 
+ p.theta1\n y = np.sin(np.deg2rad(ang))\n x = np.cos(np.deg2rad(ang))\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n plt.annotate(recipe[i], xy=(x, y), xytext=(np.sign(x),y),\n horizontalalignment=horizontalalignment, **kw)\n\n plt.title(\"Matplotlib bakery: A donut\")\n #plt.legend()\n #plt.show()\n\ndef plotme_easy():\n print(data)\n \n #fig, ax = plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect=\"equal\"))\n wedges, texts = plt.pie(data, wedgeprops=dict(width=0.5), startangle=-40, labels=recipe)\n\n #bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"w\", ec=\"k\", lw=0.72)\n #kw = dict(arrowprops=dict(arrowstyle=\"-\"),\n # bbox=bbox_props, zorder=0, va=\"center\")\n\n #for i, p in enumerate(wedges):\n #ang = (p.theta2 - p.theta1)/2. + p.theta1\n #y = np.sin(np.deg2rad(ang))\n #x = np.cos(np.deg2rad(ang))\n #horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n #connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n #kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n #plt.annotate(recipe[i], xy=(x, y), xytext=(np.sign(x),y)\n # horizontalalignment=horizontalalignment, **kw)\n\n plt.title(\"Matplotlib bakery: A donut\")\n #plt.legend()\n #plt.show()\n\ndef main():\n while True:\n gen_rand()\n drawnow(plotme_easy)\n time.sleep(1)\n\n\nmain()\n", "import matplotlib.pyplot as plt\nimport numpy as np\nfrom textwrap import wrap\n\ntimely = ({4: [8556, 8354, 7130, 8138, 7598, 7649], 7: [8827, 8520, 7284, 8588, 7366, 8738], 10: [8799, 9078, 8979, 8776, 8904, 8956]}, {4: [618, 820, 2044, 1036, 1576, 1525], 7: [347, 654, 1890, 586, 1808, 436], 10: [375, 96, 195, 398, 270, 218]})\ntimely1 = {4: [93.2636, 91.0617, 77.7196, 88.7072, 82.821, 83.3769], 7: [96.2176, 92.8712, 79.3983, 93.6124, 80.2921, 95.2474], 10: [95.9124, 98.9536, 97.8744, 95.6617, 97.0569, 97.6237]}\nexec_report = {4: {0: [718, 7218, 1238], 1: [716, 7218, 1240], 2: [1300, 5975, 1899], 3: [1345, 5988, 1841], 4: [698, 7218, 1258], 5: [678, 7211, 1285]}, 7: {0: [772, 7218, 1184], 1: [1307, 5848, 2019], 2: [1230, 6291, 1653], 3: [700, 7211, 1263], 4: [722, 7218, 1234], 5: [786, 7211, 1177]}, 10: {0: [700, 7211, 1263], 1: [1235, 6334, 1605], 2: [756, 7211, 1207], 3: [694, 7218, 1262], 4: [1265, 5926, 1983], 5: [699, 7211, 1264]}}\nexec = {'mec': {4: [7.8265, 7.8047, 14.1705, 14.661, 7.6085, 7.3905], 7: [8.4151, 14.2468, 13.4075, 7.6303, 7.8701, 8.5677], 10: [7.6303, 13.462, 8.2407, 7.5649, 13.789, 7.6194]}, 'local': {4: [78.6789, 78.6789, 65.1297, 65.2714, 78.6789, 78.6026], 7: [78.6789, 63.7454, 68.5742, 78.6026, 78.6789, 78.6026], 10: [78.6026, 69.0429, 78.6026, 78.6789, 64.5956, 78.6026]}, 'cloud': {4: [13.4947, 13.5165, 20.6998, 20.0676, 13.7127, 14.007], 7: [12.906, 22.0078, 18.0183, 13.7672, 13.4511, 12.8297], 10: [13.7672, 17.4951, 13.1567, 13.7563, 21.6154, 13.7781]}}\navg_wt = {4: [17.9599, 17.3425, 8.3322, 7.9103, 7.9156, 8.1576], 7: [19.1244, 18.5187, 7.1498, 7.2783, 7.4104, 7.2206], 10: [19.2627, 6.2073, 7.1808, 7.2433, 7.2481, 7.315]}\navg_rtt = {4: [21.9067, 21.1495, 8.2049, 7.7117, 7.8877, 7.9763], 7: [22.1794, 22.099, 8.1193, 7.624, 7.2834, 7.6406], 10: [20.6891, 6.8877, 7.9085, 7.9939, 7.9865, 7.9827]}\navg_cpu = {4: [1.7479, 1.8976, 1.615, 1.6462, 1.5994, 1.5162], 7: [1.099, 1.2932, 0.9423, 0.8702, 0.9621, 0.9257], 10: [1.0134, 0.9768, 0.8683, 0.9988, 0.8821, 0.9492]}\nprint('-'*100)\n# homo\navg_cpu_homo = {4: [2.1136, 
1.6612, 1.4683, 1.7405, 1.696, 1.6402], 7: [1.4591, 1.2978, 1.0349, 1.0812, 1.0565, 1.0685], 10: [1.1888, 1.2354, 0.9365, 0.9042, 0.9673, 0.9642]}\navg_rtt_homo = {4: [7.7384, 7.9969, 7.735, 7.9702, 8.1349, 7.8773], 7: [7.7978, 7.6699, 7.606, 7.8255, 7.8163, 7.8323], 10: [8.0141, 8.1465, 7.9518, 7.8366, 8.1125, 8.1812]}\navg_wt_homo = {4: [7.7331, 7.793, 7.6889, 8.0924, 7.9554, 7.9389], 7: [7.2592, 7.1101, 7.0876, 7.343, 7.3246, 7.2702], 10: [7.2452, 7.3179, 7.2795, 7.1666, 7.2656, 7.2886]}\nexec_report_homo = {4: {0: [742, 7218, 1214], 1: [726, 7218, 1230], 2: [2151, 4366, 2657], 3: [2201, 4355, 2618], 4: [743, 7218, 1213], 5: [681, 7211, 1282]}, 7: {0: [723, 7218, 1233], 1: [1851, 4841, 2482], 2: [2604, 3937, 2633], 3: [711, 7211, 1252], 4: [718, 7218, 1238], 5: [794, 7211, 1169]}, 10: {0: [719, 7211, 1244], 1: [2552, 3899, 2723], 2: [808, 7211, 1155], 3: [767, 7218, 1189], 4: [1812, 4839, 2523], 5: [727, 7211, 1236]}}\ntimely_homo = ({4: [7275, 8743, 8530, 8447, 8276, 8425], 7: [8917, 8953, 8851, 8715, 8787, 8808], 10: [8991, 9022, 8945, 8718, 9010, 9013]}, {4: [1899, 431, 644, 727, 898, 749], 7: [257, 221, 323, 459, 387, 366], 10: [183, 152, 229, 456, 164, 161]})\ntimely2 = {4: [79.3002, 95.3019, 92.9802, 92.0754, 90.2115, 91.8356], 7: [97.1986, 97.591, 96.4792, 94.9967, 95.7816, 96.0105], 10: [98.0052, 98.3431, 97.5038, 95.0294, 98.2123, 98.245]}\nexe_homo = {'mec': {4: [8.0881, 7.9137, 23.4467, 23.9917, 8.099, 7.4232], 7: [7.881, 20.1766, 28.3846, 7.7502, 7.8265, 8.6549], 10: [7.8374, 27.8177, 8.8075, 8.3606, 19.7515, 7.9246]}, 'local': {4: [78.6789, 78.6789, 47.591, 47.4711, 78.6789, 78.6026], 7: [78.6789, 52.7687, 42.9148, 78.6026, 78.6789, 78.6026], 10: [78.6026, 42.5005, 78.6026, 78.6789, 52.7469, 78.6026]}, 'cloud': {4: [13.233, 13.4075, 28.9623, 28.5372, 13.2221, 13.9743], 7: [13.4402, 27.0547, 28.7007, 13.6473, 13.4947, 12.7425], 10: [13.5601, 29.6817, 12.5899, 12.9605, 27.5016, 13.4729]}}\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(361)\nax2 = fig.add_subplot(362)\nax3 = fig.add_subplot(363)\nax4 = fig.add_subplot(364)\nax5 = fig.add_subplot(365)\nax6 = fig.add_subplot(366)\nax7 = fig.add_subplot(367)\nax8 = fig.add_subplot(368)\nax9 = fig.add_subplot(369)\nax10 = fig.add_subplot(3, 6, 10)\nax11 = fig.add_subplot(3, 6, 11)\nax12 = fig.add_subplot(3, 6, 12)\nax13 = fig.add_subplot(3, 6, 13)\nax14 = fig.add_subplot(3, 6, 14)\nax15 = fig.add_subplot(3, 6, 15)\nax16 = fig.add_subplot(3, 6, 16)\nax17 = fig.add_subplot(3, 6, 17)\nax18 = fig.add_subplot(3, 6, 18)\n\nwidth = 0.35\nalgo_dict = {r'$ALG_1$',\n r'$ALG_2$',\n r'$ALG_3$',\n r'$ALG_4$',\n r'$ALG_5$',\n r'$ALG_6$'}\ndef plot_me(data, names, ax, unit, title, no=None,):\n ind = np.arange(len(data[0]))\n p1 = ax.bar(ind, data[0], width, color='r', alpha=0.4)\n p2 = ax.bar(ind, data[1], width, color='g', bottom=data[0], alpha=0.4)\n ax.set_xticks(ind)\n ax.set_xticklabels(names)\n for i in ind:\n d = 0\n a = 15.0\n if data[1][i] < a: d = a-data[1][i]\n ax.text(i, data[0][i] + data[1][i]+d, f'{round(data[1][i], 2)}{unit[i]}', rotation=0, fontsize=8,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", facecolor='#00cc66', ec='black'))\n ax.text(i, data[0][i], f'{round(data[0][i], 2)}{unit[i]}', rotation=0, fontsize=8,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))\n ax.legend((p1[0], p2[0]), (r'$Exp1$', r'$Exp2$'), loc='upper left', prop={\"size\": 11})\n ax.set_title(rf'$ALG_{title}$')\n # ax.set_ylabel('\\n'.join(wrap(f'Plot for {no} MECs', 
8))).set_rotation(0)\n #ax.set_ylabel(\"No of Processes\", fontsize=15)\n for label in ax.get_xticklabels():\n label.set_fontsize(9)\n if no:\n ax.xaxis.set_tick_params(labelsize=9)\n # ax.set_ylabel('\\n'.join(wrap(f'{no} MECs', 8)), rotation=0, fontsize=15, labelpad=30)\n axx = ax.twinx()\n axx.set_yticklabels([])\n axx.set_yticks([])\n axx.set_ylabel('\\n'.join(wrap(f'{no} MECs', 8)), rotation=0, fontsize=15, labelpad=30)\n\n\nhomo = {'time': timely2, **exe_homo, 'cpu': avg_cpu_homo, 'rtt': avg_rtt_homo, 'wt': avg_wt_homo}\nhet = {'time': timely1, **exec, 'cpu': avg_cpu, 'rtt': avg_rtt, 'wt': avg_wt}\n# timely2 = {4: [79.3002, 95.3019, 92.9802, 92.0754, 90.2115, 91.8356], 7: [97.1986, 97.591, 96.4792, 94.9967, 95.7816, 96.0105], 10: [98.0052, 98.3431, 97.5038, 95.0294, 98.2123, 98.245]}\n\ndef form_plot():\n d = homo, het\n data = {i: {j:(list(range(7)),list(range(7))) for j in range(6)} for i in [4,7,10]}\n x = ['rtt', 'wt', 'cpu', 'time', 'mec', 'cloud', 'local']\n unit = ['ms', 'ms', '%', '%', '%', '%', '%']\n y = list(range(7))\n names = dict(zip(x,y))\n for i in range(2):\n for k, v in d[i].items():\n for l, t in v.items():\n for j in range(len(t)):\n ind = names[k]\n data[l][j][i][ind] = t[j]\n\n axes = [ax1, ax2, ax3, ax4, ax5, ax6,\n ax7, ax8, ax9, ax10, ax11, ax12,\n ax13, ax14, ax15, ax16, ax17, ax18]\n # plot_me(data, names, ax, unit, no=None)\n a = 0\n n = [5, 11, 17]\n nam = ['RTT', 'WT', 'CPU', 'Time', 'MEC', 'Cloud', 'Local']\n for no, va in data.items():\n for k, v in va.items():\n if a in n:\n plot_me(data=v, names=nam, ax=axes[a], unit=unit, title=k+1, no=no)\n else:\n plot_me(data=v, names=nam, ax=axes[a], unit=unit, title=k+1, no=None)\n a += 1\n plt.show()\n\n\nform_plot()\n# file = open('spead.csv', 'w', encoding='utf-8')\n# file.write('homo\\n')\n# for k,v in homo.items():\n# file.write(f'{k}\\n')\n# for d,j in v.items():\n# file.write(f'{d}\\n')\n# t = [str(i) for i in j]\n# file.write(f'{\",\".join(t)}\\n')\n# file.write('het\\n')\n# for k,v in het.items():\n# file.write(f'{k}\\n')\n# for d,j in v.items():\n# file.write(f'{d}\\n')\n# t = [str(i) for i in j]\n# file.write(f'{\",\".join(t)}\\n')\n# file.close()\n\ndef do_me():\n names = ['rtt', 'wt', 'cpu', 'time']\n ks = [4,7,10]\n file = open('sp1.csv', 'w', encoding='utf-8')\n does = [homo, het]\n for name in names:\n ho = does[0][name]\n he = does[1][name]\n for k in ks:\n t = does[0][name][k]\n t = [str(i) for i in t]\n file.write(f'{\",\".join(t)}\\n')\n t = does[1][name][k]\n t = [str(i) for i in t]\n file.write(f'{\",\".join(t)}\\n')\n file.close()\n\n#do_me()\n\n\n\n\ndef percentage(x, total):\n return round((x/total)*100, 4)\n\ndef format_(data):\n avg = {4:[], 7:[], 10:[]}\n for k, v in data[0].items():\n #t = list(v)\n for j in range(len(v)):\n to = v[j] + data[1][k][j]\n avg[k].append(percentage(v[j], to))\n print(avg)\n\ndef format_exe(data):\n r = {}\n didi = {0:{4: [], 7: [], 10: []}, 1:{4: [], 7: [], 10: []}, 2:{4: [], 7: [], 10: []}}\n for k, v in data.items():\n #print(v)\n for t,d in v.items():\n su = sum(d)\n for j in range(len(d)):\n g = percentage(d[j], su)\n didi[j][k].append(g)\n pa = {0: 'mec', 1:'local', 2:'cloud'}\n for k,v in didi.items():\n r[pa[k]] = v\n print(r)\n \n# \n# format_(timely)\n# format_(timely_homo)\n#format_exe(exec_report)", "from functools import reduce\nfrom sys import *\nimport numpy as np\nimport random as r\nimport socket\nimport struct\nimport subprocess as sp\nimport threading\nfrom threading import Thread\nimport ast\nimport time\nimport datetime as dt\nimport 
os\nimport argparse\nimport psutil\nfrom drawnow import *\nfrom matplotlib import pyplot as plt\nfrom netifaces import interfaces, ifaddresses, AF_INET\nimport paho.mqtt.client as mqtt\nimport smtplib\nimport config\nimport matplotlib\nimport pickle\n\nmatplotlib.use('TkAgg')\n\nhosts = {} # {hostname: ip}\n\n_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},\n 't2': {'wcet': 1, 'period': 5, 'deadline': 4},\n 't3': {'wcet': 2, 'period': 10, 'deadline': 8},\n 't4': {'wcet': 1, 'period': 10, 'deadline': 9},\n 't5': {'wcet': 3, 'period': 15, 'deadline': 12}\n }\n\n# mat = {'p0': ['cpu', 'mem', 'storage']}\n_need = {\n 't1': [7, 4, 3],\n 't2': [1, 2, 2],\n 't3': [6, 0, 0],\n 't4': [0, 1, 1],\n 't5': [4, 3, 1]\n\n}\nallocation = {\n 't1': [0, 1, 0],\n 't2': [2, 0, 0],\n 't3': [3, 0, 2],\n 't4': [2, 1, 1],\n 't5': [0, 0, 2]\n}\n\ntest = []\n_time = []\ncolor_code = ['orange', 'brown', 'purple', 'pink', 'blue']\nstyle = ['g--^', 'r:o', 'b-.s', 'm--*', 'k-.>', 'c-.s']\nstyle1 = [{'color': 'g', 'marker': '^'}, {'color': 'aqua', 'marker': '*'}, {'color': 'purple', 'marker': 'X'},\n {'color': 'r', 'marker': 'v'}, {'color': 'k', 'marker': '>'}, {'color': 'brown', 'marker': 'D'},\n {'color': 'b', 'marker': 's'}, {'color': 'c', 'marker': '1'}, {'color': 'olive', 'marker': 'p'}, ]\nmec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}\n\noffload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload\nreoffload_list = [[], {}]\ndiscovering = 0\nmec_rtt = {} # {ip: [RTT]}\nthread_record = [] # keeps track of threads\nprev_t = 0 # variable for cpu util\n_cpu = [] # cpu plot list\n\n_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec\n_off_cloud = 0 # used to keep a count of tasks offloaded to cloud\n_loc = 0 # used to keep a count of tasks executed locally\n_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec\ndeadlock = [1] # keeps count of how many deadlock is resolved\n_pos = 0\n\nreceived_task_queue = [] # [[(task_list,wait_time), host_ip], ....]\nreceived_time = []\n_port_ = 64000\ncloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud\ncloud_port = 63000\nmemory = []\nt_track = 1\ntask_record = {} # keeps record of task reoffloaded\ntask_id = 0\nshared_resource_lock = threading.Lock()\n\nfig = plt.figure()\nax1 = fig.add_subplot(231)\nax2 = fig.add_subplot(232)\nax3 = fig.add_subplot(233)\nax4 = fig.add_subplot(234)\nax5 = fig.add_subplot(235)\nax6 = fig.add_subplot(236)\n\n\ndef discovering_group():\n global sock1\n\n multicast_group = '224.3.29.71'\n server_address = ('', 10000)\n\n # Create the socket\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock1.bind(server_address)\n # Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef offloading_group():\n global sock2\n\n multicast_group = '224.5.5.55'\n server_address = ('', 20000)\n\n # Create the socket\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock2.bind(server_address)\n # Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock2.setsockopt(socket.IPPROTO_IP, 
socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef _mov_avg(a1):\n ma1 = [] # moving average list\n avg1 = 0 # moving average pointwise\n count = 0\n for i in range(len(a1)):\n count += 1\n avg1 = ((count - 1) * avg1 + a1[i]) / count\n ma1.append(round(avg1, 4)) # cumulative average formula\n # μ_n=((n-1) μ_(n-1) + x_n)/n\n return ma1\n\n\ndef percent(value, total):\n if value > 0:\n return round((value / total) * 100, 2)\n else:\n return 0\n\n\ndef plot_offloaded_remote():\n keys = ['O-Out', 'Cloud', 'Local', 'O-In']\n total = _off_mec + _off_cloud + _loc + _inward_mec\n\n val = [percent(_off_mec, total),\n percent(_off_cloud, total),\n percent(_loc, total),\n percent(_inward_mec, total)]\n cols = ['r', 'g', 'b', 'm']\n ypos = ([0, 1, 2, 3])\n '''\n explode = []\n for i in val:\n if i == max(val):\n explode.append(0.1)\n else:\n explode.append(0)\n\n ax2.pie(val, labels=keys, autopct='%.3f%%', wedgeprops=dict(width=0.5), \n startangle=-40, shadow=True, explode=explode, colors=cols)\n '''\n values = [_off_mec, _off_cloud, _loc, _inward_mec]\n for i in values:\n j = values.index(i)\n ax2.text(j - 0.1, values[j], '{}%'.format(val[j]), rotation=0,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))\n ax2.set_xticks(ypos)\n ax2.set_xticklabels(keys)\n ax2.bar(ypos, values, align='center', color=cols, alpha=0.3)\n ax2.set_title('Local/Remote Execution Report')\n plt.subplot(ax2)\n\n\n# color=color_code[list(hosts.values()).index(i)]\n\n\ndef plot_deadlock():\n # cols = ['r']\n text = str(deadlock[-1] - 1) + \" Deadlock Resolved\"\n '''\n wedges, texts, autotexts = ax5.pie(deadlock, shadow=True, autopct=text,\n textprops=dict(rotation_mode='anchor', color=\"w\", ha='left'), colors=cols)\n\n plt.setp(autotexts, size=9, weight=\"bold\")\n '''\n ax5.text(0.5, 0.6, text, rotation=0, size=10,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(0., 0., 0.), fc=(0.7, 0.9, 1.)))\n ax5.text(0.5, 0.45, '{} Tasks Received'.format(_loc + _inward_mec), rotation=0, size=10,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))\n # ax5.set_title(\"Deadlock Resolved Counter\")\n ax5.set_axis_off()\n plt.subplot(ax5)\n\n\ndef plot_memory():\n global memory\n\n memory.append(round(my_algo.memory_percent(), 4))\n\n ax6.grid(True)\n ax6.plot(list(range(len(_mov_avg(memory)))), _mov_avg(memory), linewidth=2, label='Memory', color='m')\n # ax6.set_title('Moving Memory Utilization')\n ax6.set_ylabel('Moving Memory')\n ax6.set_xlabel('Time (seconds)')\n ax6.fill_between(list(range(len(_mov_avg(memory)))), _mov_avg(memory), 0, alpha=0.5, color='m')\n ax6.legend()\n plt.subplot(ax6)\n\n\ndef plot_wait_time():\n ax1.grid(True)\n\n for i in mec_waiting_time:\n mv = _mov_avg(mec_waiting_time[i])\n pt = mv[0:len(mv):int((len(mv) / 7)) + 1]\n if pt[-1] != mv[-1]:\n pt.append(mv[-1])\n d = list(range(len(mv)))\n ptx = d[0:len(d):int((len(d) / 7)) + 1]\n if ptx[-1] != d[-1]:\n ptx.append(d[-1])\n if len(ptx) > len(pt):\n ptx = ptx[:-1]\n elif len(ptx) < len(pt):\n pt = pt[:-1]\n ax1.plot(ptx,\n pt,\n **style1[list(hosts.values()).index(i)],\n linestyle=(0, (3, 1, 1, 1, 1, 1)),\n linewidth=2,\n label=i)\n ax1.set_title('Waiting Time Queue')\n ax1.set_ylabel('Moving Wait + RTT')\n # ax2.set_xlabel('Time (seconds)')\n ax1.legend()\n plt.subplot(ax1)\n\n\ndef get_mec_rtts():\n for i in mec_rtt:\n mec_rtt[i].append(get_rtt(i))\n\n\ndef plot_rtts():\n get_mec_rtts()\n ax3.grid(True)\n for i in mec_rtt:\n mv = 
_mov_avg(mec_rtt[i])\n pt = mv[0:len(mv):int((len(mv) / 7)) + 1]\n if pt[-1] != mv[-1]:\n pt.append(mv[-1])\n d = list(range(len(mv)))\n ptx = d[0:len(d):int((len(d) / 7)) + 1]\n if ptx[-1] != d[-1]:\n ptx.append(d[-1])\n if len(ptx) > len(pt):\n ptx = ptx[:-1]\n elif len(ptx) < len(pt):\n pt = pt[:-1]\n ax3.plot(ptx,\n pt,\n **style1[list(hosts.values()).index(i)],\n linestyle=(0, (3, 1, 1, 1, 1, 1)),\n linewidth=2,\n label=i)\n ax3.set_title('RTT Utilization over Time')\n # ax3.set_ylabel('Moving RTT')\n # ax3.set_xlabel('Time (seconds)')\n ax3.legend()\n plt.subplot(ax3)\n\n\ndef plot_cpu():\n global prev_t\n\n # get cpu\n next_t = psutil.cpu_percent(percpu=False)\n delta = abs(prev_t - next_t)\n prev_t = next_t\n _cpu.append(round(delta, 4))\n\n # plot graph\n ax4.grid(True)\n ax4.plot(list(range(len(_mov_avg(_cpu)))), _mov_avg(_cpu), linewidth=2, label='CPU')\n # ax4.set_title('Moving CPU Utilization')\n ax4.set_ylabel('Moving CPU')\n ax4.set_xlabel('Time (seconds)')\n ax4.fill_between(list(range(len(_mov_avg(_cpu)))), _mov_avg(_cpu), 0, alpha=0.5)\n ax4.legend()\n plt.subplot(ax4)\n\n\ndef plot_graphs():\n plot_offloaded_remote()\n plot_wait_time()\n plot_rtts()\n plot_cpu()\n plot_deadlock()\n plot_memory()\n fig.suptitle('MEC Performance During Deadlock Experiment')\n\n\ndef show_graphs():\n drawnow(plot_graphs)\n\n\ndef ip_address():\n try:\n # cmd = ['ifconfig eth1 | grep inet | cut -d \":\" -f 2 | cut -d \" \" -f 1']\n cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d \"t\" -f 2 | cut -d \" \" -f 2']\n address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]\n if len(address.strip().split('.')) == 4:\n return address.strip()\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n except Exception as e:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n\n\ndef host_ip_set():\n global ip_set\n\n ip_set = set()\n for ifaceName in interfaces():\n addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]\n ip_set.add(', '.join(addresses))\n\n\ndef ping(host):\n cmd = [f'ping -c 1 {host}']\n output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\\n')\n try:\n value = float(output[-2].split('=')[-1].split('/')[0])\n except ValueError:\n value = None\n return value\n\n\ndef get_rtt(host):\n rtt = ping(host)\n if rtt:\n return round(rtt, 4)\n else:\n return get_rtt(host)\n\n\ndef get_time():\n _time_ = []\n d = str(dt.datetime.utcnow()).split()\n _time_ += d[0].split('-')\n g = d[1].split('.')\n _time_ += g[0].split(':')\n try:\n _time_.append(g[1])\n except IndexError:\n _time_.append('0')\n return _time_\n\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\ndef _lcm(a, b):\n return int(a * b / gcd(a, b))\n\n\ndef lcm(_list):\n return reduce(_lcm, _list)\n\n\ndef gosh_dist(_range):\n return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range\n\n\ndef on_connect(connect_client, userdata, flags, rc):\n # print(\"Connected with Code :\" +str(rc))\n # Subscribe Topic from here\n connect_client.subscribe(node_id)\n\n\n# Callback Function on Receiving the Subscribed Topic/Message\ndef on_message(message_client, userdata, msg):\n data = str(msg.payload, 'utf-8')\n if data[0] == 'c': # receive from cloud\n received_task = data[2:]\n # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])\n if received_task in task_record:\n del 
task_record[received_task]\n received_task = '.'.join(received_task.split('.')[:-1])\n _client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )\n cooperate['cloud'] += 1\n count_task_sent(received_task)\n\n elif data[0] == 't': # receive from client\n received_task = ast.literal_eval(data[2:])\n received_task_queue.append(received_task)\n received_time.append(time.time())\n\n else:\n print('data: ', data)\n\n\ndef connect_to_broker(stop):\n global _client\n\n username = 'mec'\n password = 'password'\n broker_port_no = 1883\n\n _client = mqtt.Client()\n _client.on_connect = on_connect\n _client.on_message = on_message\n\n _client.username_pw_set(username, password)\n _client.connect(broker_ip, broker_port_no, 60)\n _client.loop_start()\n while True:\n if stop():\n _client.loop_stop()\n _client.disconnect()\n print('broker loop terminated')\n break\n\n\ndef task_time_map(seq, process):\n exe_seq = []\n capacity_sum = 0\n for job in process:\n capacity_sum += process[job]['wcet']\n while capacity_sum > 0:\n for job in seq:\n if process[job]['wcet'] > 0:\n exe_seq.append(job)\n process[job]['wcet'] -= 1\n capacity_sum -= 1\n\n return exe_seq\n\n\ndef load_tasks():\n period_list = [tasks[i]['period'] for i in tasks]\n\n lcm_period = lcm(period_list)\n # insert idle task\n s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}\n return lcm_period, s_task\n\n\ntotal_received_task = 0\n\n\ndef scheduler(_lcm_, s_tasks): # RMS algorithm\n global total_received_task\n queue = list(s_tasks.keys()) # initialize task queue\n schedule = []\n rms = []\n curr = '' # current task\n prev = '' # previous task\n tmp = {}\n for task in s_tasks.keys():\n tmp[task] = {} # temporary data for each task\n tmp[task]['deadline'] = s_tasks[task]['period']\n tmp[task]['executed'] = 0\n\n # start scheduling...\n # proceed by one timestamp to handle preemption\n for _time_ in range(_lcm_):\n # insert new tasks into the queue\n for t in tmp.keys():\n if _time_ == tmp[t]['deadline']:\n if s_tasks[t]['wcet'] > tmp[t]['executed']:\n # print('Scheduling Failed at %d' % time)\n exit(1)\n else:\n tmp[t]['deadline'] += s_tasks[t]['period']\n tmp[t]['executed'] = 0\n queue.append(t)\n # select next task to be scheduled\n _min_ = _lcm_ * 2\n for task in queue:\n if tmp[task]['deadline'] < _min_:\n _min_ = tmp[task]['deadline']\n curr = task\n tmp[curr]['executed'] += 1\n # print(time, queue, curr)\n\n # dequeue the execution-completed task\n if tmp[curr]['executed'] == s_tasks[curr]['wcet']:\n for i in range(len(queue)):\n if curr == queue[i]:\n del queue[i]\n break\n\n # record to the schedule trace\n if prev != curr:\n if prev in queue and prev != 'idle': # previous task is preempted..\n s = schedule.pop()\n schedule.append([s[0], s[1], '*'])\n rms.append(s[1])\n schedule.append([_time_, curr])\n if curr != 'idle':\n rms.append(curr)\n prev = curr\n process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}\n rms = task_time_map(seq=rms, process=process)\n total_received_task += len(rms)\n return rms\n\n\n# generate execution sequence\ndef wound_wait(processes, avail, n_need, allocat):\n global deadlock\n\n offload = []\n\n # To store execution sequence\n exec_seq = []\n\n # Make a copy of available resources\n work = [0] * len(processes)\n\n # While all processes are not finished\n # or system is not in safe state.\n while 0 in work:\n ind = work.index(0)\n i = processes[ind]\n # print('comparing| process: ', i, n_need[i], 'work: ', avail)\n if not 
(False in list(np.greater_equal(avail, n_need[i]))):\n exec_seq.append(i)\n avail = np.add(avail, allocat[i])\n work[ind] = 1\n\n else:\n a = list(set(processes) - set(exec_seq) - set(offload))\n n = {}\n for j in a:\n n[j] = sum(allocat[j])\n _max = max(n, key=n.get)\n # print('work: ', work, 'need: ', _need[_max])\n if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):\n offload.append(_max)\n avail = np.array(avail) + np.array(allocat[_max])\n work[processes.index(_max)] = 1\n else:\n offload.append(i)\n avail = np.array(avail) + np.array(allocat[i])\n work[processes.index(i)] = 1\n\n if len(offload) > 0:\n print('offloading tasks: ', offload)\n cooperative_mec(offload)\n deadlock[0] += 1\n\n print('Execution seq: ', exec_seq)\n\n return exec_seq\n\n\ndef get_exec_seq(pro):\n # Number of processes\n # p = len(pro)\n\n processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]\n\n # Available instances of resources\n avail = [6, 5, 5]\n n_need = {i: _need[i[:2]] for i in processes}\n # print('need', n_need)\n # Resources allocated to processes\n allot = {i: allocation[i[:2]] for i in processes}\n\n # return execution sequence\n return wound_wait(processes, avail, n_need, allot)\n\n\ndef calc_wait_time(list_seq):\n pre = 0\n time_dic = {}\n for i in list_seq:\n j = i.split('_')[0] # i = 't5_3_3', j = 't5_3'\n time_dic[i] = round(t_time[j][0] + pre, 3)\n pre += t_time[j][0]\n # waiting time = total waiting time ÷ 2 average waiting time might be too tight\n w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)\n\n send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs\n return time_dic\n\n\ndef compare_local_mec(list_seq):\n time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}\n print('local vs MEC comparison: ', time_compare_dict)\n execute_mec = []\n execute_locally = []\n for i in time_compare_dict:\n if time_compare_dict[i]:\n execute_locally.append(i)\n else:\n execute_mec.append(i)\n\n return execute_mec, execute_locally\n\n\ndef calculate_mov_avg(ma1, a1):\n if ma1 in mec_waiting_time:\n _count = len(mec_waiting_time[ma1])\n avg1 = mec_waiting_time[ma1][-1]\n else:\n _count = 0\n avg1 = 0\n _count += 1\n avg1 = ((_count - 1) * avg1 + a1) / _count\n # ma1.append(avg1) #cumulative average formula\n # μ_n=((n-1) μ_(n-1) + x_n)/n\n return round(avg1, 4)\n\n\ndef send_message(mg):\n _multicast_group = ('224.3.29.71', 10000)\n try:\n\n # Send data to the multicast group\n if mg == 'hello':\n smg = mg + ' ' + str([get_hostname(), ip_address()])\n sock1.sendto(str.encode(smg), _multicast_group)\n print('\\nHello message sent')\n else:\n sock1.sendto(str.encode(mg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef get_hostname():\n cmd = ['cat /etc/hostname']\n hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]\n return hostname\n\n\ndef receive_message(stop): # used for multi-cast message exchange among MEC\n global hosts\n\n while True:\n if stop():\n print('Stopped: receive_message()')\n break\n else:\n data, address = sock1.recvfrom(1024)\n _d = data.decode()\n if _d[:5] == 'hello':\n _data = ast.literal_eval(_d[6:])\n hosts[_data[0]] = _data[1]\n # print('received: ', hosts)\n if _data[1] != host_ip:\n mec_rtt[_data[1]] = []\n\n elif (data.decode()[:6] == 'update') and (discovering == 0):\n hosts = ast.literal_eval(data.decode()[7:])\n for i in hosts:\n if i != host_ip:\n mec_rtt[i] = []\n\n elif _d[:2] == 'wt':\n split_data 
= _d.split()\n if split_data[1] != host_ip:\n # calcuate moving average of mec wait time => w_time = wait time + rtt\n w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(address[0]))\n if split_data[1] in mec_waiting_time:\n mec_waiting_time[split_data[1]].append(w_time)\n else:\n mec_waiting_time[split_data[1]] = [w_time]\n\n\ndef mec_comparison():\n # returns min average waiting for all mecs\n if len(mec_waiting_time) == 0:\n return 0\n min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}\n min_wt = min(min_mec, key=min_mec.get)\n return min_wt\n\n\ndef cooperative_mec(mec_list):\n global _off_cloud\n global _off_mec\n global task_id, task_record\n\n for i in mec_list:\n _host = mec_comparison()\n if _host == 0:\n # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]\n _send_task = f\"{i.split('_')[0]}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n else:\n j = i.split('_')[0]\n _max = np.array([6, 5, 5])\n send = 'false'\n if not (False in list(np.greater_equal(_max, _need[j[:2]]))):\n send = 'true'\n # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY\n if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n else:\n _send_task = f\"{j}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]\n\n # cloud_register[j.split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n\noutward_mec = 0\noffload_check = [0, 0]\n\n\ndef execute_re_offloaded_task(offloaded_task):\n global outward_mec, offload_check\n exec_list = get_exec_seq(offloaded_task[0])\n # if len(exec_list) != len(offloaded_task[0]):\n # print('\\n\\n', '@ ' * 50)\n # print('exec: ', exec_list, 'off: ', offloaded_task[0])\n # print('\\n\\n', '@ ' * 50)\n # offload_check.append((exec_list, offloaded_task[0]))\n outward_mec += len(exec_list)\n for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'\n j = i.split('_')[0]\n time.sleep(offloaded_task[1][j] / 2)\n # print('j task: ', j)\n send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))\n\n\nclients_record = {}\n\n\ndef count_task_sent(task):\n global clients_record\n c_id = task.split('.')[2]\n if c_id in clients_record:\n clients_record[c_id] += 1\n else:\n 
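        # First result sent back for this client id: initialise its counter at 1.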
clients_record[c_id] = 1\n\n\ndef execute(local):\n print('\\nExecuting :', local)\n\n for i in local:\n j = i.split('_')[0]\n _t = t_time[j][0] / 2\n time.sleep(_t)\n print('#{}'.format(local.index(i) + 1), ' Executed: ', i)\n _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )\n count_task_sent(j)\n print('============== EXECUTION DONE ===============')\n # return send\n\n\ncooperate = {'mec': 0, 'cloud': 0}\n\n\ndef receive_offloaded_task_mec(stop): # run as a thread\n global _inward_mec\n global t_track\n\n while True:\n if stop():\n print('Stopped: receive_offloaded_task_mec()')\n break\n else:\n data, address = sock2.recvfrom(1024)\n if len(data.decode()) > 0:\n da = data.decode().split(' ')\n if (address[0] not in ip_set) and (da[0] == node_id): # send back to client\n # send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client\n if da[1] in task_record:\n del task_record[da[1]]\n task_new = '.'.join(da[1].split('.')[:-1])\n _client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )\n count_task_sent(da[1])\n cooperate['mec'] += 1\n else:\n print('*' * 30 + f'\\n{da[1]} Not in Task Record\\n' + '*' * 30)\n elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):\n _received = ast.literal_eval(da[2] + da[3])\n shared_resource_lock.acquire()\n task = _received[0] + '*{}'.format(t_track)\n reoffload_list[0].append(task)\n reoffload_list[1][task] = _received[1]\n shared_resource_lock.release()\n t_track += 1\n _inward_mec += 1\n\n\ndef call_execute_re_offload(stop):\n global reoffload_list, outward_mec\n global offload_check\n while True:\n if stop():\n print('Stopped: call_execute_re_offload()')\n break\n else:\n if len(reoffload_list[0]) == 1:\n t = reoffload_list[0][-1]\n time.sleep(reoffload_list[1][t] / 2)\n shared_resource_lock.acquire()\n reoffload_list[0].remove(t)\n del reoffload_list[1][t]\n shared_resource_lock.release()\n send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))\n outward_mec += 1\n offload_check[0] += 1\n elif len(reoffload_list[0]) > 1:\n o = reoffload_list.copy()\n offload_check[1] += len(o)\n execute_re_offloaded_task(o)\n for i in o[0]:\n shared_resource_lock.acquire()\n reoffload_list[0].remove(i)\n del reoffload_list[1][i]\n shared_resource_lock.release()\n\n\ndef send_offloaded_task_mec(msg):\n _multicast_group = ('224.5.5.55', 20000)\n try:\n sock2.sendto(str.encode(msg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef send_email(msg, send_path):\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com')\n server.ehlo()\n server.login(config.email_address, config.password)\n subject = 'Deadlock results rms+wound_wait {} {}'.format(get_hostname(), send_path)\n # msg = 'Attendance done for {}'.format(_timer)\n _message = 'Subject: {}\\n\\n{}\\n\\n SENT BY RIHANNA \\n\\n'.format(subject, msg)\n server.sendmail(config.email_address, config.send_email, _message)\n server.quit()\n print(\"Email sent!\")\n except Exception as e:\n print(e)\n\n\ndef save_and_send(send_path):\n _id_ = get_hostname()[-1]\n result = f\"wt{_id_}_7_{mec_no} = {mec_waiting_time} \" \\\n f\"\\nrtt{_id_}_7_{mec_no} = {mec_rtt} \\ncpu{_id_}_7_{mec_no} = {_cpu} \" \\\n f\"\\noff_mec{_id_}_7_{mec_no} = {_off_mec} \" \\\n f\"\\noff_cloud{_id_}_7_{mec_no} = {_off_cloud} \" \\\n f\"\\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}\" \\\n f\"\\nloc{_id_}_7_{mec_no} = {_loc} \" \\\n f\"\\ndeadlock{_id_}_7_{mec_no} = {deadlock} \\nmemory{_id_}_7_{mec_no} = {memory}\" \\\n f\"\\ntask_received 
= {total_received_task} \\nsent_t = {clients_record}\" \\\n f\"\\ncooperate = {cooperate} \\ntask_record = {task_record}\" \\\n f\"\\noutward_mec = {outward_mec}\"\n list_result = [\n f\"wt{_id_}_7_{mec_no} = {mec_waiting_time} \",\n f\"\\nrtt{_id_}_7_{mec_no} = {mec_rtt} \\ncpu{_id_}_7_{mec_no} = {_cpu} \",\n f\"\\noff_mec{_id_}_7_{mec_no} = {_off_mec} \\noff_cloud{_id_}_7_{mec_no} = {_off_cloud} \",\n f\"\\ninward_mec{_id_}_7_{mec_no} = {_inward_mec}\",\n f\"\\nloc{_id_}_7_{mec_no} = {_loc} \",\n f\"\\ndeadlock{_id_}_7_{mec_no} = {deadlock} \\nmemory{_id_}_7_{mec_no} = {memory}\",\n f\"\\ntask_received = {total_received_task} \\nsent_t = {clients_record}\",\n f\"\\ncooperate = {cooperate} \\ntask_record = {task_record} \\noutward_mec = {outward_mec}\"\n ]\n file_ = open(f'{_id_}_7_{mec_no}datap.py', 'w')\n for i in list_result:\n file_.write(i)\n file_.close()\n cmd = f'mv {_id_}_7_{mec_no}datap.py {send_path}'\n os.system(cmd)\n\n send_email(result, send_path)\n if len(task_record) > 0:\n for _task_ in task_record:\n task_new = '.'.join(_task_.split('.')[:-1])\n _client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )\n\n\ndef terminate_process():\n global prev_t, _loc, _off_mec, _off_cloud, _inward_mec, outward_mec, deadlock, memory, mec_waiting_time, mec_rtt\n global offload_register, reoffload_list, discovering, test, _time, _pos, received_task_queue, received_time\n global cloud_register, t_track, task_record, task_id, cooperate, clients_record, offload_check\n global timed_out_tasks, total_received_task, _cpu\n\n # reinitialize #\n _cpu = [] # cpu plot list\n prev_t = 0 # variable for cpu util\n _off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec\n _off_cloud = 0 # used to keep a count of tasks offloaded to cloud\n _loc = 0 # used to keep a count of tasks executed locally\n _inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec\n outward_mec = 0 # keeps count of tasks sent back to another mec after executing\n deadlock = [1] # keeps count of how many deadlock is resolved\n memory = []\n mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}\n mec_rtt = {} # {ip: [RTT]}\n offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload\n reoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.\n discovering = 0 # if discovering == 0 update host\n test = []\n _time = []\n _pos = 0\n received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]\n received_time = []\n cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud\n t_track = 1\n task_record = {} # keeps record of task reoffloaded\n task_id = 0 # id for each task reoffloaded\n\n cooperate = {'mec': 0, 'cloud': 0}\n clients_record = {}\n offload_check = [0, 0]\n timed_out_tasks = 0\n total_received_task = 0\n\n time.sleep(1)\n\n\ndef mec_id(client_ip):\n _id = client_ip.split('.')[-1]\n if len(_id) == 1:\n return '00' + _id\n elif len(_id) == 2:\n return '0' + _id\n else:\n return _id\n\n\ndef start_loop():\n global _loc\n global tasks\n global t_time\n global node_id\n\n print('\\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\\n')\n\n node_id = mec_id(ip_address())\n # print('node id: ', node_id)\n func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]\n threads_ = []\n stop = False\n for i in func_to_thread:\n 
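        # Launch each background service (multicast listeners, re-offload worker,
        # MQTT client) as a daemon thread; passing a lambda defers reading the local
        # `stop` flag so the main loop can request shutdown later.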
threads_.append(Thread(target=i, args=(lambda: stop,)))\n threads_[-1].daemon = True\n threads_[-1].start()\n\n input('start..')\n print('========= Waiting for tasks ==========')\n _time_ = dt.datetime.now()\n while True:\n try:\n if len(received_task_queue) > 0:\n info = received_task_queue.pop(0)\n tasks, t_time = info\n\n print('EDF List of Processes: ', tasks, '\\n')\n\n print('\\n========= Running Deadlock Algorithm ===========')\n lcm_result, task_load = load_tasks()\n list_seq = get_exec_seq(scheduler(lcm_result, task_load))\n if len(list_seq) > 0: # do only when there is a task in safe sequence\n wait_list = calc_wait_time(list_seq)\n print('\\nWaiting Time List: ', wait_list)\n compare_result = compare_local_mec(wait_list)\n print('\\nExecute Locally: ', compare_result[1])\n _loc += len(compare_result[1]) # total number of tasks to be executed locally\n print('\\nExecute in MEC: ', compare_result[0])\n\n print('\\nSending to cooperative platform')\n if len(compare_result[0]) > 0:\n cooperative_mec(compare_result[0])\n execute(compare_result[1])\n show_graphs()\n _time_ = dt.datetime.now()\n else:\n send_message(str('wt {} 0.0'.format(ip_address())))\n time.sleep(.4)\n now = dt.datetime.now()\n delta = now - _time_\n if delta > dt.timedelta(minutes=4):\n print('terminating programme 3 mins elapsed')\n stop = False\n break\n\n except KeyboardInterrupt:\n print('\\nProgramme Terminated')\n break\n print('algo stopped!')\n\n\nclass BrokerSend:\n def __init__(self, user, pw, ip, sub_topic, data):\n self.user = user\n self.pw = pw\n self.ip = ip\n self.port = 1883\n self.topic = sub_topic\n self.response = None\n self.client = mqtt.Client()\n self.client.username_pw_set(self.user, self.pw)\n self.client.connect(self.ip, self.port, 60)\n self.data = data\n\n def publish(self):\n self.client.publish(self.topic, self.data)\n\n def __del__(self):\n print('BrokerSend Object Deleted!')\n\n\ndef run_me(mec_no_, send_path, broker_ip_): # call this from agent\n global discovering\n global mec_no\n global host_ip\n global my_algo\n global broker_ip\n\n print('mec ip: ', ip_address())\n my_algo = psutil.Process()\n discovering_group()\n offloading_group()\n host_ip_set()\n\n mec_no = mec_no_\n broker_ip = broker_ip_\n\n host_ip = ip_address()\n print('MEC Details: ', hosts)\n discovering = 1\n time.sleep(2)\n for host in hosts:\n if hosts[host] != host_ip:\n mec_rtt[hosts[host]] = []\n os.system(f'echo {mec_no}/{send_path} >> started.txt')\n start_loop()\n print('saving data')\n save_and_send(send_path)\n print('send alert to control')\n time.sleep(r.uniform(1, 30))\n data = pickle.dumps([get_hostname(), host_ip])\n broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111', 'data': data}\n BrokerSend(**broker_dict).publish()\n print('Terminating process')\n cmd = 'kill -9 {}'.format(os.getpid())\n os.system(cmd)\n\n\ndef main():\n global hosts\n global cloud_ip\n # (--n, --mec_no_, --cloud_ip, --s_path, --b_ip) send_path = f'/home/mec/result/{kind}/{count}'\n mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',\n 'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',\n 'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',\n }\n gui = {'osboxes-0': '192.168.122.110'}\n cloud_ips = ['192.168.200.11', '192.168.200.12']\n b_ip = '192.168.122.111'\n parser = argparse.ArgumentParser()\n parser.add_argument('--n', type=int, default=1.0, help='Number of MEC nodes')\n 
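    # --p encodes the experiment kind and run index as '<kind>_<count>' (e.g. 'homo_1');
    # it is split below to build the result path /home/mec/result/<kind>/<count>.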
parser.add_argument('--p', type=str, default='/home/mec/result/python', help='Path to send result: homo_1')\n args = parser.parse_args()\n\n kind, count = args.p.split('_')\n send_path = f'/home/mec/result/{kind}/{count}'\n\n ho = sorted(list(mec_nodes))[:args.n - 1]\n hosts = {**{host: mec_nodes[host] for host in ho if ho != get_hostname()}, **gui}\n\n ho += ['osboxes-0']\n cloud_ip = cloud_ips[ho.index(get_hostname()) % 2]\n os.system('clear')\n run_me(mec_no_=args.n, send_path=send_path, broker_ip_=b_ip)\n\n\nif __name__ == '__main__':\n main()\n", "from functools import reduce\nfrom sys import *\nimport numpy as np\nimport random as r\nimport ping_code as pc\nimport socket\nimport struct\nimport subprocess as sp\nimport threading\nfrom threading import Thread\nimport ast\nimport time\nimport datetime as dt\nimport os\nimport psutil\nimport getpass as gp\nfrom netifaces import interfaces, ifaddresses, AF_INET\nimport paho.mqtt.client as mqtt\nimport smtplib\nimport config\nimport paramiko\n\n\nhosts = {} # {hostname: ip}\n\n_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},\n 't2': {'wcet': 1, 'period': 5, 'deadline': 4},\n 't3': {'wcet': 2, 'period': 10, 'deadline': 8},\n 't4': {'wcet': 1, 'period': 10, 'deadline': 9},\n 't5': {'wcet': 3, 'period': 15, 'deadline': 12}\n }\n\n# mat = {'p0': ['cpu', 'mem', 'storage']}\n_need = {\n 't1': [7, 4, 3],\n 't2': [1, 2, 2],\n 't3': [6, 0, 0],\n 't4': [0, 1, 1],\n 't5': [4, 3, 1]\n\n}\nallocation = {\n 't1': [0, 1, 0],\n 't2': [2, 0, 0],\n 't3': [3, 0, 2],\n 't4': [2, 1, 1],\n 't5': [0, 0, 2]\n}\n\n_cpu = [] # cpu plot list\nprev_t = 0 # variable for cpu util\n_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec\n_off_cloud = 0 # used to keep a count of tasks offloaded to cloud\n_loc = 0 # used to keep a count of tasks executed locally\n_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec\ndeadlock = [1] # keeps count of how many deadlock is resolved\nmemory = []\nmec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}\nmec_rtt = {} # {ip: [RTT]}\n\noffload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload\nreoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.\ndiscovering = 0 # if discovering == 0 update host\ntest = []\n_time = []\n_pos = 0\nreceived_task_queue = [] # [[(task_list,wait_time), host_ip], ....]\nthread_record = []\n_port_ = 64000\ncloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud\ncloud_port = 63000\nstop = 0\nreceived_time = []\ntask_record = {} # keeps record of task reoffloaded\ntask_id = 0 # id for each task reoffloaded\nshared_resource_lock = threading.Lock()\nt_track = 1\n\n\ndef discovering_group():\n global sock1\n\n multicast_group = '224.3.29.71'\n server_address = ('', 10000)\n\n # Create the socket\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock1.bind(server_address)\n # Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef offloading_group():\n global sock2\n\n multicast_group = '224.5.5.55'\n server_address = ('', 20000)\n\n # Create the socket\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock2.bind(server_address)\n # 
Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef ip_address():\n try:\n cmd = ['ifconfig eth1 | grep inet | cut -d \":\" -f 2 | cut -d \" \" -f 1']\n address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]\n if len(address.strip().split('.')) == 4:\n return address.strip()\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n except Exception as e:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n\n\ndef _memory():\n global memory\n\n memory.append(round(algo.memory_percent(), 4))\n\n\ndef m_cpu():\n global prev_t\n\n # get cpu\n next_t = psutil.cpu_percent(percpu=False)\n delta = abs(prev_t - next_t)\n prev_t = next_t\n _cpu.append(round(delta, 4))\n\n\ndef get_mec_rtts():\n for i in mec_rtt:\n mec_rtt[i].append(get_rtt(i))\n\n\ndef generate_results():\n _memory()\n m_cpu()\n get_mec_rtts()\n\n\ndef host_ip_set():\n global ip_set\n\n ip_set = set()\n for ifaceName in interfaces():\n addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]\n ip_set.add(', '.join(addresses))\n\n\ndef get_time():\n _time_ = []\n d = str(dt.datetime.utcnow()).split()\n _time_ += d[0].split('-')\n g = d[1].split('.')\n _time_ += g[0].split(':')\n _time_.append(g[1])\n return _time_\n\n\ndef get_rtt(host):\n rtt = pc.verbose_ping(host)\n if rtt:\n return round(rtt, 4)\n else:\n return get_rtt(host)\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\ndef _lcm(a, b):\n return int(a * b / gcd(a, b))\n\n\ndef lcm(_list):\n return reduce(_lcm, _list)\n\n\ndef gosh_dist(_range):\n return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range\n\n\ndef on_connect(connect_client, userdata, flags, rc):\n # print(\"Connected with Code :\" +str(rc))\n # Subscribe Topic from here\n connect_client.subscribe(node_id)\n\n\n# Callback Function on Receiving the Subscribed Topic/Message\ndef on_message(message_client, userdata, msg):\n data = str(msg.payload, 'utf-8')\n if data[0] == 'c': # receive from cloud\n received_task = data[2:]\n # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])\n if received_task in task_record:\n del task_record[received_task]\n received_task = '.'.join(received_task.split('.')[:-1])\n _client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time()+['cloud']}), )\n cooperate['cloud'] += 1\n count_task_sent(received_task)\n\n elif data[0] == 't': # receive from client\n received_task = ast.literal_eval(data[2:])\n received_task_queue.append(received_task)\n received_time.append(time.time())\n\n else:\n print('data: ', data)\n\n\ndef connect_to_broker():\n global _client\n global broker_ip\n\n username = 'mec'\n password = 'password'\n broker_ip = hosts['speaker']\n broker_port_no = 1883\n\n _client = mqtt.Client()\n _client.on_connect = on_connect\n _client.on_message = on_message\n\n _client.username_pw_set(username, password)\n _client.connect(broker_ip, broker_port_no, 60)\n _client.loop_forever()\n\n\n\ndef task_time_map(seq, process):\n exe_seq = []\n capacity_sum = 0\n for job in process:\n capacity_sum += process[job]['wcet']\n while capacity_sum > 0:\n for job in seq:\n if process[job]['wcet'] > 0:\n 
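# Illustrative sketch: lcm() above folds pairwise LCMs over the task periods,
# which is how edf() obtains the hyperperiod (the interval after which the task
# set's release pattern repeats).  A standalone version of the same computation,
# using math.gcd in place of the recursive gcd helper defined above:
from functools import reduce
from math import gcd as _gcd

def lcm_pair(a, b):
    return a * b // _gcd(a, b)

def hyperperiod(periods):
    return reduce(lcm_pair, periods)

# With the periods declared in _tasks (20, 5, 10, 10, 15) the hyperperiod is 60,
# so the EDF ready queue is generated over a 60-time-unit window.
assert hyperperiod([20, 5, 10, 10, 15]) == 60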
exe_seq.append(job)\n process[job]['wcet'] -= 1\n capacity_sum -= 1\n\n return exe_seq\n\n\ntotal_received_task = 0\n\n\ndef edf():\n global total_received_task\n t_lcm = lcm([tasks[i]['period'] for i in tasks])\n\n t_dead = {i: tasks[i]['deadline'] for i in tasks}\n\n sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))\n # print(sorted_dead)\n\n ready_task = []\n for i in sorted_dead:\n period = tasks[i[0]]['period']\n # print('lcm: ', t_lcm, ' period: ', period)\n t_range = int(t_lcm/period)\n last_dead = 0\n for j in range(t_range):\n ready_task.append((i[0], last_dead+tasks[i[0]]['deadline']))\n last_dead += period\n\n ready_task = sorted(ready_task, key=lambda t: t[1])\n print(ready_task)\n\n t_time_ = 0\n schedule = []\n missed = []\n register = {i: 0 for i in tasks.keys()} # {ti : amount executed}\n for i in ready_task:\n if (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:\n while (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:\n t_time_ += 1\n # schedule.append(('idle', t_time))\n if (t_time_//tasks[i[0]]['period'])+1 > register[i[0]]:\n if t_time_ + tasks[i[0]]['wcet'] <= i[1]:\n register[i[0]] += 1\n t_time_ += tasks[i[0]]['wcet']\n schedule.append(i[0])\n else:\n print('Deadline missed: ', i)\n missed.append(i[0])\n\n # print('s : ', schedule)\n # print('r: ', register)\n if len(missed) > 0:\n # print('missed deadline: ', missed)\n cooperative_mec(missed)\n _edf_ = task_time_map(schedule, tasks)\n total_received_task += len(_edf_)\n return _edf_\n\n\n# generate execution sequence\ndef wait_die(processes, avail, n_need, allocat):\n global deadlock\n\n offload = []\n\n # To store execution sequence\n exec_seq = []\n\n # Make a copy of available resources\n work = [0] * len(processes)\n\n # While all processes are not finished\n # or system is not in safe state.\n while 'w' or 0 in work:\n if 0 in work:\n ind = work.index(0)\n i = processes[ind]\n elif 'w' in work:\n # print('wk: ', work)\n ind = work.index('w')\n i = processes[ind]\n else:\n break\n\n # print('comparing| process: ', i, _need[i], 'work: ', avail)\n if not (False in list(np.greater_equal(avail, n_need[i]))):\n exec_seq.append(i)\n avail = np.add(avail, allocat[i])\n work[ind] = 1\n # print('added: ', exec_seq)\n\n else:\n a = list(set(processes) - set(exec_seq) - set(offload))\n n = {}\n for j in a:\n n[j] = sum(allocat[j])\n _max = max(n, key=n.get)\n # print('work: ', work, 'need: ', _need[_max])\n if processes.index(_max) > processes.index(i): # if true, i is older\n # if process is already waiting then offload process\n if work[ind] == 'w':\n offload.append(i)\n avail = np.array(avail) + np.array(allocat[i])\n work[processes.index(i)] = 1\n # print('offload reentry: ', i, offload)\n else:\n # wait put process to waiting\n work[processes.index(i)] = 'w'\n # print('waiting: ', i)\n\n else:\n # abort i\n offload.append(i)\n avail = np.array(avail) + np.array(allocat[i])\n work[processes.index(i)] = 1\n # print('offload: ', i)\n\n if len(offload) > 0:\n # print('offloading tasks: ', offload)\n cooperative_mec(offload)\n deadlock[0] += 1\n\n # print('Execution seq: ', exec_seq)\n\n return exec_seq\n\n\ndef get_exec_seq(pro):\n\n processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]\n\n # Available instances of resources\n avail = [6, 5, 5]\n n_need = {i: _need[i[:2]] for i in processes}\n # print('need', n_need)\n # Resources allocated to processes\n allot = {i: allocation[i[:2]] for i in processes}\n\n # return execution sequence\n return wait_die(processes, avail, n_need, 
allot)\n\n\ndef calc_wait_time(list_seq):\n pre = 0\n time_dic = {}\n for i in list_seq:\n j = i.split('_')[0]\n time_dic[i] = round(t_time[j][0] + pre, 3)\n pre += t_time[j][0]\n # waiting time = total waiting time ÷ 2 average waiting time might be too tight\n w_send = round(time_dic[list(time_dic.keys())[-1]]/2, 3)\n send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs\n return time_dic\n\n\ndef compare_local_mec(list_seq):\n time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}\n print('local vs MEC comparison: ', time_compare_dict)\n execute_mec = []\n execute_locally = []\n for i in time_compare_dict:\n if time_compare_dict[i]:\n execute_locally.append(i)\n else:\n execute_mec.append(i)\n\n return execute_mec, execute_locally\n\n\ndef calculate_mov_avg(ma1, a1):\n if ma1 in mec_waiting_time:\n _count = len(mec_waiting_time[ma1])\n avg1 = mec_waiting_time[ma1][-1]\n else:\n _count = 0\n avg1 = 0\n _count += 1\n avg1 = ((_count - 1) * avg1 + a1) / _count\n # ma1.append(avg1) #cumulative average formula\n # μ_n=((n-1) μ_(n-1) + x_n)/n\n return round(avg1, 4)\n\n\ndef send_message(mg):\n _multicast_group = ('224.3.29.71', 10000)\n try:\n\n # Send data to the multicast group\n if mg == 'hello':\n smg = mg + ' ' + str([get_hostname(), ip_address()])\n sock1.sendto(str.encode(smg), _multicast_group)\n print('\\nHello message sent')\n\n else:\n sock1.sendto(str.encode(mg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef get_hostname():\n cmd = ['cat /etc/hostname']\n hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]\n return hostname\n\n\ndef receive_message(): # used for multi-cast message exchange among MEC\n global hosts\n\n while True:\n if stop == 1:\n print('Stopped: receive_message()')\n break\n else:\n data, address = sock1.recvfrom(1024)\n _d = data.decode()\n if _d[:5] == 'hello':\n _data = ast.literal_eval(_d[6:])\n hosts[_data[0]] = _data[1]\n\n if _data[1] != host_ip:\n mec_rtt[_data[1]] = []\n\n elif (_d[:6] == 'update') and (discovering == 0):\n hosts = ast.literal_eval(_d[7:])\n # print('received: ', hosts)\n for i in hosts:\n if i != host_ip:\n mec_rtt[i] = []\n\n elif _d[:2] == 'wt':\n\n split_data = _d.split()\n\n if split_data[1] != host_ip:\n\n w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(\n address[0])) # calcuate moving average of mec wait time => w_time = wait time + rtt\n\n if split_data[1] in mec_waiting_time:\n mec_waiting_time[split_data[1]].append(w_time)\n else:\n mec_waiting_time[split_data[1]] = [w_time]\n\n\ndef mec_comparison():\n # returns min average waiting for all mecs\n if len(mec_waiting_time) == 0:\n return 0\n min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}\n min_wt = min(min_mec, key=min_mec.get)\n return min_wt\n\n\n\ndef cooperative_mec(mec_list):\n global _off_cloud\n global _off_mec\n global task_id, task_record\n\n for i in mec_list:\n _host = mec_comparison()\n if _host == 0:\n # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]\n _send_task = f\"{i.split('_')[0]}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n else:\n j = i.split('_')[0]\n _max = np.array([6, 5, 5])\n send = 'false'\n if not (False in 
list(np.greater_equal(_max, _need[j[:2]]))):\n send = 'true'\n # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY\n if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n\n else:\n _send_task = f\"{j}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]\n\n # cloud_register[j.split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n\noutward_mec = 0\noffload_check = [0,0]\ndef execute_re_offloaded_task(offloaded_task):\n global outward_mec, offload_check\n exec_list = get_exec_seq(offloaded_task[0])\n outward_mec += len(exec_list)\n for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'\n j = i.split('_')[0]\n time.sleep(offloaded_task[1][j] / 2)\n # print('j task: ', j)\n send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))\n\nclients_record = {}\n\n\ndef count_task_sent(task):\n global clients_record\n c_id = task.split('.')[2]\n if c_id in clients_record:\n clients_record[c_id] += 1\n else:\n clients_record[c_id] = 1\n\n\ndef execute(local):\n print('\\nExecuting :', local)\n\n for i in local:\n j = i.split('_')[0]\n _t = t_time[j][0] / 2\n time.sleep(_t)\n print('#{}'.format(local.index(i) + 1), ' Executed: ', i)\n _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )\n count_task_sent(j)\n print('============== EXECUTION DONE ===============')\n\n\ncooperate = {'mec': 0, 'cloud': 0}\n\n\ndef receive_offloaded_task_mec(): # run as a thread\n global _inward_mec\n global t_track\n\n while True:\n if stop == 1:\n print('Stopped: receive_offloaded_task_mec()')\n break\n else:\n data, address = sock2.recvfrom(1024)\n if len(data.decode()) > 0:\n da = data.decode().split(' ')\n if (address[0] not in ip_set) and (da[0] == node_id): # send back to client\n # send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client\n if da[1] in task_record:\n del task_record[da[1]]\n task_new = '.'.join(da[1].split('.')[:-1])\n _client.publish(da[1].split('.')[2], str({task_new: get_time()+['mec']}), )\n count_task_sent(da[1])\n cooperate['mec'] += 1\n else:\n print('*'*30 + f'\\n{da[1]} Not in Task Record\\n' + '*'*30)\n elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):\n _received = ast.literal_eval(da[2] + da[3])\n shared_resource_lock.acquire()\n task = _received[0] + '*{}'.format(t_track)\n reoffload_list[0].append(task)\n reoffload_list[1][task] = _received[1]\n shared_resource_lock.release()\n t_track += 1\n _inward_mec += 
1\n\n\ndef call_execute_re_offload():\n global reoffload_list, outward_mec\n global offload_check\n\n while True:\n if stop == 1:\n print('Stopped: call_execute_re_offload()')\n break\n else:\n if len(reoffload_list[0]) == 1:\n t = reoffload_list[0][-1]\n time.sleep(reoffload_list[1][t] / 2)\n shared_resource_lock.acquire()\n reoffload_list[0].remove(t)\n del reoffload_list[1][t]\n shared_resource_lock.release()\n send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))\n outward_mec += 1\n offload_check[0] += 1\n elif len(reoffload_list[0]) > 1:\n o = reoffload_list.copy()\n offload_check[1] += len(o)\n execute_re_offloaded_task(o)\n for i in o[0]:\n shared_resource_lock.acquire()\n reoffload_list[0].remove(i)\n del reoffload_list[1][i]\n shared_resource_lock.release()\n\n\ndef send_email(msg):\n\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com')\n server.ehlo()\n server.login(config.email_address, config.password)\n subject = 'Deadlock results {}'.format(get_hostname())\n # msg = 'Attendance done for {}'.format(_timer)\n _message = 'Subject: {}\\n\\n{}\\n\\n SENT BY RIHANNA \\n\\n'.format(subject, msg)\n server.sendmail(config.email_address, config.send_email, _message)\n server.quit()\n print(\"Email sent!\")\n except Exception as e:\n print(e)\n\n\ndef send_offloaded_task_mec(msg):\n _multicast_group = ('224.5.5.55', 20000)\n try:\n sock2.sendto(str.encode(msg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef mec_id(client_ip):\n\n _id = client_ip.split('.')[-1]\n if len(_id) == 1:\n return '00' + _id\n elif len(_id) == 2:\n return '0' + _id\n else:\n return _id\n\n\ndef run_me():\n global discovering\n global hosts\n\n initialization()\n while True:\n if len(hosts) == mec_no:\n print('MEC Details: ', hosts)\n del hosts[get_hostname()]\n discovering = 1\n break\n time.sleep(2)\n\n start_loop()\n\n\ndef send_result(host_, data):\n try:\n c = paramiko.SSHClient()\n\n un = 'mec'\n pw = 'password'\n port = 22\n\n c.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n c.connect(host_, port, un, pw)\n for i in data:\n cmd = ('echo \"{}\" >> /home/mec/result/data.py'.format(i)) # task share : host ip task\n stdin, stdout, stderr = c.exec_command(cmd)\n except Exception as e:\n print(e)\n\n\ndef save_and_abort():\n global stop\n\n _id_ = get_hostname()[-1]\n result = f\"\\nwt{_id_}_16_{mec_no} = {mec_waiting_time} \" \\\n f\"\\nrtt{_id_}_16_{mec_no} = {mec_rtt} \\ncpu{_id_}_16_{mec_no} = {_cpu} \" \\\n f\"\\noff_mec{_id_}_16_{mec_no} = {_off_mec} \" \\\n f\"\\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} \" \\\n f\"\\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}\" \\\n f\"\\nloc{_id_}_16_{mec_no} = {_loc} \" \\\n f\"\\ndeadlock{_id_}_16_{mec_no} = {deadlock} \\nmemory{_id_}_16_{mec_no} = {memory}\" \\\n f\"\\ntask_received = {total_received_task} \\nsent_t = {clients_record}\" \\\n f\"\\ncooperate{_id_}_16_{mec_no} = {cooperate} \\ntask_record{_id_}_16_{mec_no} = {task_record}\" \\\n f\"\\noutward_mec{_id_}_16_{mec_no} = {outward_mec}\" \\\n f\"\\noffload_check{_id_}_16_{mec_no} = {offload_check}\"\n list_result = [\n f\"\\nwt{_id_}_16_{mec_no} = {mec_waiting_time} \",\n f\"\\nrtt{_id_}_16_{mec_no} = {mec_rtt} \\ncpu{_id_}_16_{mec_no} = {_cpu} \",\n f\"\\noff_mec{_id_}_16_{mec_no} = {_off_mec} \\noff_cloud{_id_}_16_{mec_no} = {_off_cloud} \",\n f\"\\ninward_mec{_id_}_16_{mec_no} = {_inward_mec}\",\n f\"\\nloc{_id_}_16_{mec_no} = {_loc} \",\n f\"\\ndeadlock{_id_}_16_{mec_no} = {deadlock} \\nmemory{_id_}_16_{mec_no} = {memory}\",\n 
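# Illustrative sketch: mec_id() above turns the last octet of a host IP into a
# fixed-width, three-character node identifier so multicast replies can address a
# specific MEC.  str.zfill() expresses the same padding in one line; this is only
# a sketch, the explicit if/elif version above is what the code actually uses.
def mec_id_zfill(client_ip):
    return client_ip.split('.')[-1].zfill(3)

assert mec_id_zfill('192.168.122.7') == '007'
assert mec_id_zfill('192.168.122.117') == '117'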
f\"\\ntask_received{_id_}_16_{mec_no} = {total_received_task} \\nsent_t{_id_}_16_{mec_no} = {clients_record}\",\n f\"\\ncooperate{_id_}_16_{mec_no} = {cooperate} \\ntask_record{_id_}_16_{mec_no} = {task_record} \"\n f\"\\noutward_mec{_id_}_16_{mec_no} = {outward_mec}\",\n f\"\\noffload_check{_id_}_16_{mec_no} = {offload_check}\"\n ]\n path_ = 'data/raw/'\n if os.path.exists(path_):\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datal.py\"\n os.system(cmd)\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datap.py\"\n os.system(cmd)\n else:\n os.mkdir(path_)\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datal.py\"\n os.system(cmd)\n cmd = f\"echo '' > {path_}{_id_}_16_{mec_no}datap.py\"\n os.system(cmd)\n\n file_ = open(f'{path_}{_id_}_16_{mec_no}datap.py', 'w')\n for i in list_result:\n cmd = f'echo \"{i}\" >> {path_}{_id_}_16_{mec_no}datal.py'\n file_.write(i)\n os.system(cmd)\n file_.close()\n sp.run(\n [\"scp\", f\"{path_}{_id_}_16_{mec_no}datap.py\", f\"mec@{hosts['osboxes-0']}:/home/mec/result/python\"])\n sp.run(\n [\"scp\", f\"{path_}{_id_}_16_{mec_no}datal.py\", f\"mec@{hosts['osboxes-0']}:/home/mec/result/linux\"])\n\n send_result(hosts['osboxes-0'], list_result)\n send_email(result)\n if len(task_record) > 0:\n for _task_ in task_record:\n task_new = '.'.join(_task_.split('.')[:-1])\n _client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )\n stop += 1\n '''\n for i in thread_record:\n i.join()\n '''\n _client.loop_stop()\n time.sleep(1)\n print('done')\n os.system('kill -9 {}'.format(os.getpid()))\n\n\ndef start_loop():\n global _loc\n global tasks\n global t_time\n global node_id\n global stop\n\n print('\\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\\n')\n\n node_id = mec_id(ip_address())\n # print('node id: ', node_id)\n _threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]\n for i in _threads_:\n Thread(target=i).daemon = True\n Thread(target=i).start()\n\n x = gp.getpass('Press any key to Start...').lower()\n if x != 'exit':\n print('========= Waiting for tasks ==========')\n _time_ = dt.datetime.now()\n while True:\n try:\n if len(received_task_queue) > 0:\n info = received_task_queue.pop(0)\n tasks, t_time = info\n\n print('EDF List of Processes: ', tasks, '\\n')\n\n print('\\n========= Running Deadlock Algorithm ===========')\n list_seq = get_exec_seq(edf())\n if len(list_seq) > 0: # do only when there is a task in safe sequence\n wait_list = calc_wait_time(list_seq)\n print('\\nWaiting Time List: ', wait_list)\n compare_result = compare_local_mec(wait_list)\n print('\\nExecute Locally: ', compare_result[1])\n _loc += len(compare_result[1]) # total number of tasks to be executed locally\n print('\\nExecute in MEC: ', compare_result[0])\n\n print('\\nSending to cooperative platform')\n if len(compare_result[0]) > 0:\n cooperative_mec(compare_result[0])\n execute(compare_result[1])\n generate_results()\n _time_ = dt.datetime.now()\n else:\n send_message(str('wt {} 0.0'.format(ip_address())))\n time.sleep(.4)\n now = dt.datetime.now()\n delta = now - _time_\n if delta > dt.timedelta(minutes=4):\n print('terminating programme 3 mins elapsed')\n save_and_abort()\n break\n except KeyboardInterrupt:\n print('\\nProgramme Terminated')\n save_and_abort()\n break\n\n\ndef initialization():\n global mec_no\n global host_ip\n global cloud_ip\n\n host_ip = ip_address()\n try:\n mec_no = int(input('Number of MECs: ').strip())\n cloud_ip = input('Cloud Server IP: ').strip()\n print('\\nCompiling 
MEC Details')\n h1 = Thread(target=receive_message)\n h2 = Thread(target=receive_offloaded_task_mec)\n h1.daemon = True\n h2.daemon = True\n h1.start()\n h2.start()\n while True:\n b = input('Send Hello Message (Y/N): ').strip().lower()\n if b == 'y':\n send_message('hello')\n break\n else:\n print('\\nPlease Type \"y\" to send Hello message\\n')\n except KeyboardInterrupt:\n print('\\nProgramme Terminated')\n exit(0)\n\n\ndef main():\n global algo\n\n os.system('clear')\n print('mec ip: ', ip_address())\n algo = psutil.Process()\n discovering_group()\n offloading_group()\n host_ip_set()\n run_me()\n\n\nif __name__ == \"__main__\":\n main()\n", "import matplotlib.pyplot as plt\nimport data as rd\n\nfig = plt.figure()\nax1 = fig.add_subplot(461)\nax2 = fig.add_subplot(462)\nax3 = fig.add_subplot(463)\nax4 = fig.add_subplot(464)\nax5 = fig.add_subplot(465)\nax6 = fig.add_subplot(466)\nax7 = fig.add_subplot(467)\nax8 = fig.add_subplot(468)\nax9 = fig.add_subplot(469)\nax10 = fig.add_subplot(4, 6, 10)\nax11 = fig.add_subplot(4, 6, 11)\nax12 = fig.add_subplot(4, 6, 12)\nax13 = fig.add_subplot(4, 6, 13)\nax14 = fig.add_subplot(4, 6, 14)\nax15 = fig.add_subplot(4, 6, 15)\nax16 = fig.add_subplot(4, 6, 16)\nax17 = fig.add_subplot(4, 6, 17)\nax18 = fig.add_subplot(4, 6, 18)\nax19 = fig.add_subplot(4, 6, 19)\nax20 = fig.add_subplot(4, 6, 20)\nax21 = fig.add_subplot(4, 6, 21)\nax22 = fig.add_subplot(4, 6, 22)\nax23 = fig.add_subplot(4, 6, 23)\nax24 = fig.add_subplot(4, 6, 24)\n\n_loc_ = {\n 24: [rd.loc0_2_4, rd.loc1_2_4, rd.loc2_2_4, rd.loc3_2_4],\n 25: [rd.loc0_2_5, rd.loc1_2_5, rd.loc2_2_5, rd.loc3_2_5, rd.loc4_2_5, ],\n 26: [rd.loc0_2_6, rd.loc1_2_6, rd.loc2_2_6, rd.loc3_2_6, rd.loc4_2_6, rd.loc5_2_6],\n 27: [rd.loc0_2_7, rd.loc1_2_7, rd.loc2_2_7, rd.loc3_2_7, rd.loc4_2_7, rd.loc5_2_7, rd.loc6_2_7],\n\n 34: [rd.loc0_3_4, rd.loc1_3_4, rd.loc2_3_4, rd.loc3_3_4],\n 35: [rd.loc0_3_5, rd.loc1_3_5, rd.loc2_3_5, rd.loc3_3_5, rd.loc4_3_5],\n 36: [rd.loc0_3_6, rd.loc1_3_6, rd.loc2_3_6, rd.loc3_3_6, rd.loc4_3_6, rd.loc5_3_6],\n 37: [rd.loc0_3_7, rd.loc1_3_7, rd.loc2_3_7, rd.loc3_3_7, rd.loc4_3_7, rd.loc5_3_7, rd.loc6_3_7],\n\n 74: [rd.loc0_7_4, rd.loc1_7_4, rd.loc2_7_4, rd.loc3_7_4],\n 75: [rd.loc0_7_5, rd.loc1_7_5, rd.loc2_7_5, rd.loc3_7_5, rd.loc4_7_5],\n 76: [rd.loc0_7_6, rd.loc1_7_6, rd.loc2_7_6, rd.loc3_7_6, rd.loc4_7_6, rd.loc5_7_6],\n 77: [rd.loc0_7_7, rd.loc1_7_7, rd.loc2_7_7, rd.loc3_7_7, rd.loc4_7_7, rd.loc5_7_7, rd.loc6_7_7],\n\n 104: [rd.loc0_10_4, rd.loc1_10_4, rd.loc2_10_4, rd.loc3_10_4],\n 105: [rd.loc0_10_5, rd.loc1_10_5, rd.loc2_10_5, rd.loc3_10_5, rd.loc4_10_5],\n 106: [rd.loc0_10_6, rd.loc1_10_6, rd.loc2_10_6, rd.loc3_10_6, rd.loc4_10_6, rd.loc5_10_6],\n 107: [rd.loc0_10_7, rd.loc1_10_7, rd.loc2_10_7, rd.loc3_10_7, rd.loc4_10_7, rd.loc5_10_7, rd.loc6_10_7],\n\n 124: [rd.loc0_12_4, rd.loc1_12_4, rd.loc2_12_4, rd.loc3_12_4],\n 125: [rd.loc0_12_5, rd.loc1_12_5, rd.loc2_12_5, rd.loc3_12_5, rd.loc4_12_5],\n 126: [rd.loc0_12_6, rd.loc1_12_6, rd.loc2_12_6, rd.loc3_12_6, rd.loc4_12_6, rd.loc5_12_6],\n 127: [rd.loc0_12_7, rd.loc1_12_7, rd.loc2_12_7, rd.loc3_12_7, rd.loc4_12_7, rd.loc5_12_7, rd.loc6_12_7],\n\n 164: [rd.loc0_16_4, rd.loc1_16_4, rd.loc2_16_4, rd.loc3_16_4],\n 165: [rd.loc0_16_5, rd.loc1_16_5, rd.loc2_16_5, rd.loc3_16_5, rd.loc4_16_5],\n 166: [rd.loc0_16_6, rd.loc1_16_6, rd.loc2_16_6, rd.loc3_16_6, rd.loc4_16_6, rd.loc5_16_6],\n 167: [rd.loc0_16_7, rd.loc1_16_7, rd.loc2_16_7, rd.loc3_16_7, rd.loc4_16_7, rd.loc5_16_7, rd.loc6_16_7],\n}\n\n_off_cloud_ = {\n 24: [rd.off_cloud0_2_4, 
rd.off_cloud1_2_4, rd.off_cloud2_2_4, rd.off_cloud3_2_4],\n 25: [rd.off_cloud0_2_5, rd.off_cloud1_2_5, rd.off_cloud2_2_5, rd.off_cloud3_2_5, rd.off_cloud4_2_5, ],\n 26: [rd.off_cloud0_2_6, rd.off_cloud1_2_6, rd.off_cloud2_2_6, rd.off_cloud3_2_6, rd.off_cloud4_2_6,\n rd.off_cloud5_2_6],\n 27: [rd.off_cloud0_2_7, rd.off_cloud1_2_7, rd.off_cloud2_2_7, rd.off_cloud3_2_7, rd.off_cloud4_2_7,\n rd.off_cloud5_2_7, rd.off_cloud6_2_7],\n\n 34: [rd.off_cloud0_3_4, rd.off_cloud1_3_4, rd.off_cloud2_3_4, rd.off_cloud3_3_4],\n 35: [rd.off_cloud0_3_5, rd.off_cloud1_3_5, rd.off_cloud2_3_5, rd.off_cloud3_3_5, rd.off_cloud4_3_5],\n 36: [rd.off_cloud0_3_6, rd.off_cloud1_3_6, rd.off_cloud2_3_6, rd.off_cloud3_3_6, rd.off_cloud4_3_6,\n rd.off_cloud5_3_6],\n 37: [rd.off_cloud0_3_7, rd.off_cloud1_3_7, rd.off_cloud2_3_7, rd.off_cloud3_3_7, rd.off_cloud4_3_7,\n rd.off_cloud5_3_7, rd.off_cloud6_3_7],\n\n 74: [rd.off_cloud0_7_4, rd.off_cloud1_7_4, rd.off_cloud2_7_4, rd.off_cloud3_7_4],\n 75: [rd.off_cloud0_7_5, rd.off_cloud1_7_5, rd.off_cloud2_7_5, rd.off_cloud3_7_5, rd.off_cloud4_7_5],\n 76: [rd.off_cloud0_7_6, rd.off_cloud1_7_6, rd.off_cloud2_7_6, rd.off_cloud3_7_6, rd.off_cloud4_7_6,\n rd.off_cloud5_7_6],\n 77: [rd.off_cloud0_7_7, rd.off_cloud1_7_7, rd.off_cloud2_7_7, rd.off_cloud3_7_7, rd.off_cloud4_7_7,\n rd.off_cloud5_7_7, rd.off_cloud6_7_7],\n\n 104: [rd.off_cloud0_10_4, rd.off_cloud1_10_4, rd.off_cloud2_10_4, rd.off_cloud3_10_4],\n 105: [rd.off_cloud0_10_5, rd.off_cloud1_10_5, rd.off_cloud2_10_5, rd.off_cloud3_10_5, rd.off_cloud4_10_5],\n 106: [rd.off_cloud0_10_6, rd.off_cloud1_10_6, rd.off_cloud2_10_6, rd.off_cloud3_10_6, rd.off_cloud4_10_6,\n rd.off_cloud5_10_6],\n 107: [rd.off_cloud0_10_7, rd.off_cloud1_10_7, rd.off_cloud2_10_7, rd.off_cloud3_10_7, rd.off_cloud4_10_7,\n rd.off_cloud5_10_7, rd.off_cloud6_10_7],\n\n 124: [rd.off_cloud0_12_4, rd.off_cloud1_12_4, rd.off_cloud2_12_4, rd.off_cloud3_12_4],\n 125: [rd.off_cloud0_12_5, rd.off_cloud1_12_5, rd.off_cloud2_12_5, rd.off_cloud3_12_5, rd.off_cloud4_12_5],\n 126: [rd.off_cloud0_12_6, rd.off_cloud1_12_6, rd.off_cloud2_12_6, rd.off_cloud3_12_6, rd.off_cloud4_12_6,\n rd.off_cloud5_12_6],\n 127: [rd.off_cloud0_12_7, rd.off_cloud1_12_7, rd.off_cloud2_12_7, rd.off_cloud3_12_7, rd.off_cloud4_12_7,\n rd.off_cloud5_12_7, rd.off_cloud6_12_7],\n\n 164: [rd.off_cloud0_16_4, rd.off_cloud1_16_4, rd.off_cloud2_16_4, rd.off_cloud3_16_4],\n 165: [rd.off_cloud0_16_5, rd.off_cloud1_16_5, rd.off_cloud2_16_5, rd.off_cloud3_16_5, rd.off_cloud4_16_5],\n 166: [rd.off_cloud0_16_6, rd.off_cloud1_16_6, rd.off_cloud2_16_6, rd.off_cloud3_16_6, rd.off_cloud4_16_6,\n rd.off_cloud5_16_6],\n 167: [rd.off_cloud0_16_7, rd.off_cloud1_16_7, rd.off_cloud2_16_7, rd.off_cloud3_16_7, rd.off_cloud4_16_7,\n rd.off_cloud5_16_7, rd.off_cloud6_16_7],\n}\n\n_off_mec_ = {\n 24: [rd.off_mec0_2_4, rd.off_mec1_2_4, rd.off_mec2_2_4, rd.off_mec3_2_4],\n 25: [rd.off_mec0_2_5, rd.off_mec1_2_5, rd.off_mec2_2_5, rd.off_mec3_2_5, rd.off_mec4_2_5, ],\n 26: [rd.off_mec0_2_6, rd.off_mec1_2_6, rd.off_mec2_2_6, rd.off_mec3_2_6, rd.off_mec4_2_6, rd.off_mec5_2_6],\n 27: [rd.off_mec0_2_7, rd.off_mec1_2_7, rd.off_mec2_2_7, rd.off_mec3_2_7, rd.off_mec4_2_7, rd.off_mec5_2_7,\n rd.off_mec6_2_7],\n\n 34: [rd.off_mec0_3_4, rd.off_mec1_3_4, rd.off_mec2_3_4, rd.off_mec3_3_4],\n 35: [rd.off_mec0_3_5, rd.off_mec1_3_5, rd.off_mec2_3_5, rd.off_mec3_3_5, rd.off_mec4_3_5],\n 36: [rd.off_mec0_3_6, rd.off_mec1_3_6, rd.off_mec2_3_6, rd.off_mec3_3_6, rd.off_mec4_3_6, rd.off_mec5_3_6],\n 37: [rd.off_mec0_3_7, rd.off_mec1_3_7, 
rd.off_mec2_3_7, rd.off_mec3_3_7, rd.off_mec4_3_7, rd.off_mec5_3_7,\n rd.off_mec6_3_7],\n\n 74: [rd.off_mec0_7_4, rd.off_mec1_7_4, rd.off_mec2_7_4, rd.off_mec3_7_4],\n 75: [rd.off_mec0_7_5, rd.off_mec1_7_5, rd.off_mec2_7_5, rd.off_mec3_7_5, rd.off_mec4_7_5],\n 76: [rd.off_mec0_7_6, rd.off_mec1_7_6, rd.off_mec2_7_6, rd.off_mec3_7_6, rd.off_mec4_7_6, rd.off_mec5_7_6],\n 77: [rd.off_mec0_7_7, rd.off_mec1_7_7, rd.off_mec2_7_7, rd.off_mec3_7_7, rd.off_mec4_7_7, rd.off_mec5_7_7,\n rd.off_mec6_7_7],\n\n 104: [rd.off_mec0_10_4, rd.off_mec1_10_4, rd.off_mec2_10_4, rd.off_mec3_10_4],\n 105: [rd.off_mec0_10_5, rd.off_mec1_10_5, rd.off_mec2_10_5, rd.off_mec3_10_5, rd.off_mec4_10_5],\n 106: [rd.off_mec0_10_6, rd.off_mec1_10_6, rd.off_mec2_10_6, rd.off_mec3_10_6, rd.off_mec4_10_6, rd.off_mec5_10_6],\n 107: [rd.off_mec0_10_7, rd.off_mec1_10_7, rd.off_mec2_10_7, rd.off_mec3_10_7, rd.off_mec4_10_7, rd.off_mec5_10_7,\n rd.off_mec6_10_7],\n\n 124: [rd.off_mec0_12_4, rd.off_mec1_12_4, rd.off_mec2_12_4, rd.off_mec3_12_4],\n 125: [rd.off_mec0_12_5, rd.off_mec1_12_5, rd.off_mec2_12_5, rd.off_mec3_12_5, rd.off_mec4_12_5],\n 126: [rd.off_mec0_12_6, rd.off_mec1_12_6, rd.off_mec2_12_6, rd.off_mec3_12_6, rd.off_mec4_12_6, rd.off_mec5_12_6],\n 127: [rd.off_mec0_12_7, rd.off_mec1_12_7, rd.off_mec2_12_7, rd.off_mec3_12_7, rd.off_mec4_12_7, rd.off_mec5_12_7,\n rd.off_mec6_12_7],\n\n 164: [rd.off_mec0_16_4, rd.off_mec1_16_4, rd.off_mec2_16_4, rd.off_mec3_16_4],\n 165: [rd.off_mec0_16_5, rd.off_mec1_16_5, rd.off_mec2_16_5, rd.off_mec3_16_5, rd.off_mec4_16_5],\n 166: [rd.off_mec0_16_6, rd.off_mec1_16_6, rd.off_mec2_16_6, rd.off_mec3_16_6, rd.off_mec4_16_6, rd.off_mec5_16_6],\n 167: [rd.off_mec0_16_7, rd.off_mec1_16_7, rd.off_mec2_16_7, rd.off_mec3_16_7, rd.off_mec4_16_7, rd.off_mec5_16_7,\n rd.off_mec6_16_7],\n}\n\n_inward_mec_ = {\n 24: [rd.inward_mec0_2_4, rd.inward_mec1_2_4, rd.inward_mec2_2_4, rd.inward_mec3_2_4],\n 25: [rd.inward_mec0_2_5, rd.inward_mec1_2_5, rd.inward_mec2_2_5, rd.inward_mec3_2_5, rd.inward_mec4_2_5, ],\n 26: [rd.inward_mec0_2_6, rd.inward_mec1_2_6, rd.inward_mec2_2_6, rd.inward_mec3_2_6, rd.inward_mec4_2_6,\n rd.inward_mec5_2_6],\n 27: [rd.inward_mec0_2_7, rd.inward_mec1_2_7, rd.inward_mec2_2_7, rd.inward_mec3_2_7, rd.inward_mec4_2_7,\n rd.inward_mec5_2_7, rd.inward_mec6_2_7],\n\n 34: [rd.inward_mec0_3_4, rd.inward_mec1_3_4, rd.inward_mec2_3_4, rd.inward_mec3_3_4],\n 35: [rd.inward_mec0_3_5, rd.inward_mec1_3_5, rd.inward_mec2_3_5, rd.inward_mec3_3_5, rd.inward_mec4_3_5],\n 36: [rd.inward_mec0_3_6, rd.inward_mec1_3_6, rd.inward_mec2_3_6, rd.inward_mec3_3_6, rd.inward_mec4_3_6,\n rd.inward_mec5_3_6],\n 37: [rd.inward_mec0_3_7, rd.inward_mec1_3_7, rd.inward_mec2_3_7, rd.inward_mec3_3_7, rd.inward_mec4_3_7,\n rd.inward_mec5_3_7, rd.inward_mec6_3_7],\n\n 74: [rd.inward_mec0_7_4, rd.inward_mec1_7_4, rd.inward_mec2_7_4, rd.inward_mec3_7_4],\n 75: [rd.inward_mec0_7_5, rd.inward_mec1_7_5, rd.inward_mec2_7_5, rd.inward_mec3_7_5, rd.inward_mec4_7_5],\n 76: [rd.inward_mec0_7_6, rd.inward_mec1_7_6, rd.inward_mec2_7_6, rd.inward_mec3_7_6, rd.inward_mec4_7_6,\n rd.inward_mec5_7_6],\n 77: [rd.inward_mec0_7_7, rd.inward_mec1_7_7, rd.inward_mec2_7_7, rd.inward_mec3_7_7, rd.inward_mec4_7_7,\n rd.inward_mec5_7_7, rd.inward_mec6_7_7],\n\n 104: [rd.inward_mec0_10_4, rd.inward_mec1_10_4, rd.inward_mec2_10_4, rd.inward_mec3_10_4],\n 105: [rd.inward_mec0_10_5, rd.inward_mec1_10_5, rd.inward_mec2_10_5, rd.inward_mec3_10_5, rd.inward_mec4_10_5],\n 106: [rd.inward_mec0_10_6, rd.inward_mec1_10_6, rd.inward_mec2_10_6, 
rd.inward_mec3_10_6, rd.inward_mec4_10_6,\n rd.inward_mec5_10_6],\n 107: [rd.inward_mec0_10_7, rd.inward_mec1_10_7, rd.inward_mec2_10_7, rd.inward_mec3_10_7, rd.inward_mec4_10_7,\n rd.inward_mec5_10_7, rd.inward_mec6_10_7],\n\n 124: [rd.inward_mec0_12_4, rd.inward_mec1_12_4, rd.inward_mec2_12_4, rd.inward_mec3_12_4],\n 125: [rd.inward_mec0_12_5, rd.inward_mec1_12_5, rd.inward_mec2_12_5, rd.inward_mec3_12_5, rd.inward_mec4_12_5],\n 126: [rd.inward_mec0_12_6, rd.inward_mec1_12_6, rd.inward_mec2_12_6, rd.inward_mec3_12_6, rd.inward_mec4_12_6,\n rd.inward_mec5_12_6],\n 127: [rd.inward_mec0_12_7, rd.inward_mec1_12_7, rd.inward_mec2_12_7, rd.inward_mec3_12_7, rd.inward_mec4_12_7,\n rd.inward_mec5_12_7, rd.inward_mec6_12_7],\n\n 164: [rd.inward_mec0_16_4, rd.inward_mec1_16_4, rd.inward_mec2_16_4, rd.inward_mec3_16_4],\n 165: [rd.inward_mec0_16_5, rd.inward_mec1_16_5, rd.inward_mec2_16_5, rd.inward_mec3_16_5, rd.inward_mec4_16_5],\n 166: [rd.inward_mec0_16_6, rd.inward_mec1_16_6, rd.inward_mec2_16_6, rd.inward_mec3_16_6, rd.inward_mec4_16_6,\n rd.inward_mec5_16_6],\n 167: [rd.inward_mec0_16_7, rd.inward_mec1_16_7, rd.inward_mec2_16_7, rd.inward_mec3_16_7, rd.inward_mec4_16_7,\n rd.inward_mec5_16_7, rd.inward_mec6_16_7],\n}\n\n_data_ = [_off_mec_, _loc_, _off_cloud_] # _inward_mec_]\n\n\ndef sum_data():\n off_mec = {}\n off_cloud = {}\n loc = {}\n inward_mec = {}\n d_list = [off_mec, loc, off_cloud] # inward_mec]\n t = 0\n for data in _data_:\n name = d_list[t]\n for key in data:\n name[key] = sum(data[key])\n t += 1\n\n # print(d_list)\n return d_list\n\n\ndef format_data(d_dict):\n t_data = {}\n _keys = list(d_dict.keys())\n s4 = 0\n s5 = 1\n s6 = 2\n s7 = 3\n for i in range(len(_keys)):\n j = _keys[i]\n if i == s4:\n if 4 in t_data:\n t_data[4].append(d_dict[j])\n\n s4 += 4\n else:\n t_data[4] = [d_dict[j]]\n\n s4 += 4\n elif i == s5:\n if 5 in t_data:\n t_data[5].append(d_dict[j])\n\n s5 += 4\n else:\n t_data[5] = [d_dict[j]]\n\n s5 += 4\n elif i == s6:\n if 6 in t_data:\n t_data[6].append(d_dict[j])\n\n s6 += 4\n else:\n t_data[6] = [d_dict[j]]\n\n s6 += 4\n elif i == s7:\n if 7 in t_data:\n t_data[7].append(d_dict[j])\n\n s7 += 4\n else:\n t_data[7] = [d_dict[j]]\n\n s7 += 4\n\n return t_data\n\n\ndef group_format(data_list):\n format_list = []\n for i in data_list:\n format_list.append(format_data(i))\n\n group_list = {4: {},\n 5: {},\n 6: {},\n 7: {}\n }\n\n for i in format_list:\n for j in i:\n _list_ = i[j]\n for key in range(len(_list_)):\n value = _list_[key]\n d_dict = group_list[j]\n if key in d_dict:\n d_dict[key].append(value)\n else:\n d_dict[key] = [value]\n\n # print(\"Grouplist: \", group_list)\n # print(f\"format_list: {format_list}\")\n\n return group_list\n\n\ndef percent(value, total):\n if value > 0:\n return round((value / total) * 100, 2)\n else:\n return 0\n\n\ndef plot_offloaded_remote(data_list, ax, _id_):\n # data_list = [off_mec, off_cloud, loc, inward_mec]\n ax_list = (ax1, ax7, ax13, ax19)\n axx_list = {ax6: 4, ax12: 5, ax18: 6, ax24: 7}\n title = [ax1, ax2, ax3, ax4, ax5, ax6]\n names = ('RMS+Bankers',\n 'EDF+Bankers',\n 'RMS+wound wait',\n 'RMS+wait die',\n 'EDF+wound wait',\n 'EDF+wait die')\n algo_dict = {'RMS+Bankers': r'$ALG_1$',\n 'EDF+Bankers': r'$ALG_2$',\n 'RMS+wound wait': r'$ALG_3$',\n 'RMS+wait die': r'$ALG_4$',\n 'EDF+wound wait': r'$ALG_5$',\n 'EDF+wait die': r'$ALG_6$'}\n font = {\n 'weight': 'medium',\n 'size': 15,\n }\n\n keys = ['Off-mec', 'Local', 'Cloud'] # , 'O-In']\n total = sum(data_list)\n\n val = [percent(data_list[0], total),\n 
percent(data_list[1], total),\n percent(data_list[2], total)] # percent(data_list[3], total)]\n cols = ['r', 'g', 'b'] # , 'm']\n ypos = ([0, 1, 2]) # , 3])\n\n values = data_list\n # print(values)\n axx = ax.twinx()\n # axx.yaxis.set_label_position(\"right\")\n # axx.yaxis.tick_right()\n # axx.set_axis_off()\n axx.set_yticklabels([])\n axx.set_yticks([])\n if ax in axx_list:\n axx.set_ylabel(f'{axx_list[ax]} MECs', rotation=0, fontsize=15, labelpad=30)\n for i in values:\n j = values.index(i)\n # print(j)\n ax.text(j - 0.1, values[j], '{}%'.format(val[j]), rotation=0,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))\n ax.set_xticks(ypos)\n ax.set_xticklabels(keys, fontdict={'weight': 'medium', 'size': 12})\n ax.bar(ypos, values, align='center', color=cols, alpha=0.3)\n\n if ax in ax_list:\n ax.set_ylabel('No of Processes', fontdict={'weight': 'medium', 'size': 12})\n\n # ax.set_ylabel('RTT ')\n # ax.legend()\n\n\n if ax in title:\n ax.set_title(algo_dict[names[_id_]], fontdict=font)\n #plt.subplot(ax)\n\n\ndef plot_av_times():\n axes = [ax1, ax2, ax3, ax4, ax5, ax6,\n ax7, ax8, ax9, ax10, ax11, ax12,\n ax13, ax14, ax15, ax16, ax17, ax18,\n ax19, ax20, ax21, ax22, ax23, ax24]\n _data = group_format(sum_data())\n # plot_offloaded_remote(data_list, ax, _id_)\n no = 0\n for i in _data:\n # i = keys 4 5 6 7\n for j in _data[i]:\n # _data[i] = dictionary => {0: [], 1: [] ...}\n data_plot = _data[i][j]\n plot_offloaded_remote(data_plot, axes[no], j)\n no += 1\n #fig.suptitle('MEC CPU Utilization During Deadlock Experiment')\n plt.subplots_adjust(wspace=0.3, hspace=0.2)\n plt.show()\n\n\nplot_av_times()\n# group_format(sum_data())\n# print(\"data: \", _data_)", "from functools import reduce\nimport numpy as np\nimport random as r\nimport socket\nimport struct\nimport subprocess as sp\nimport threading\nfrom threading import Thread\nimport ast\nimport time\nimport datetime as dt\nimport os\nimport argparse\nimport psutil\nfrom drawnow import *\nfrom matplotlib import pyplot as plt\nfrom netifaces import interfaces, ifaddresses, AF_INET\nimport paho.mqtt.client as mqtt\nimport smtplib\nimport config\nimport matplotlib\nimport pickle\n\nmatplotlib.use('TkAgg')\n\nhosts = {} # {hostname: ip}\n\n_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},\n 't2': {'wcet': 1, 'period': 5, 'deadline': 4},\n 't3': {'wcet': 2, 'period': 10, 'deadline': 8},\n 't4': {'wcet': 1, 'period': 10, 'deadline': 9},\n 't5': {'wcet': 3, 'period': 15, 'deadline': 12}\n }\n\n# mat = {'p0': ['cpu', 'mem', 'storage']}\n_need = {\n 't1': [7, 4, 3],\n 't2': [1, 2, 2],\n 't3': [6, 0, 0],\n 't4': [0, 1, 1],\n 't5': [4, 3, 1]\n\n}\nallocation = {\n 't1': [0, 1, 0],\n 't2': [2, 0, 0],\n 't3': [3, 0, 2],\n 't4': [2, 1, 1],\n 't5': [0, 0, 2]\n}\n\ntest = []\n_time = []\ncolor_code = ['orange', 'brown', 'purple', 'pink', 'blue']\nstyle = ['g--^', 'r:o', 'b-.s', 'm--*', 'k-.>', 'c-.s']\nstyle1 = [{'color': 'g', 'marker': '^'}, {'color': 'aqua', 'marker': '*'}, {'color': 'purple', 'marker': 'X'},\n {'color': 'r', 'marker': 'v'}, {'color': 'k', 'marker': '>'}, {'color': 'brown', 'marker': 'D'},\n {'color': 'b', 'marker': 's'}, {'color': 'c', 'marker': '1'}, {'color': 'olive', 'marker': 'p'}, ]\nmec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}\n\noffload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload\nreoffload_list = [[], {}]\ndiscovering = 0\nmec_rtt = {} # {ip: [RTT]}\nthread_record = [] # keeps track of threads\nprev_t = 0 # variable 
for cpu util\n_cpu = [] # cpu plot list\n\n_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec\n_off_cloud = 0 # used to keep a count of tasks offloaded to cloud\n_loc = 0 # used to keep a count of tasks executed locally\n_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec\ndeadlock = [1] # keeps count of how many deadlock is resolved\n_pos = 0\n\nreceived_task_queue = [] # [[(task_list,wait_time), host_ip], ....]\n_port_ = 64000\ncloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud\ncloud_port = 63000\nmemory = []\nt_track = 1\nreceived_time = []\ntask_record = {} # keeps record of task reoffloaded\ntask_id = 0 # id for each task reoffloaded\nshared_resource_lock = threading.Lock()\n\nfig = plt.figure()\nax1 = fig.add_subplot(231)\nax2 = fig.add_subplot(232)\nax3 = fig.add_subplot(233)\nax4 = fig.add_subplot(234)\nax5 = fig.add_subplot(235)\nax6 = fig.add_subplot(236)\n\n\ndef discovering_group():\n global sock1\n\n multicast_group = '224.3.29.71'\n server_address = ('', 10000)\n\n # Create the socket\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock1.bind(server_address)\n # Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef offloading_group():\n global sock2\n\n multicast_group = '224.5.5.55'\n server_address = ('', 20000)\n\n # Create the socket\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind to the server address\n sock2.bind(server_address)\n # Tell the operating system to add the socket to the multicast group\n # on all interfaces.\n group = socket.inet_aton(multicast_group)\n mreq = struct.pack('4sL', group, socket.INADDR_ANY)\n sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n\ndef _mov_avg(a1):\n ma1 = [] # moving average list\n avg1 = 0 # moving average pointwise\n count = 0\n for i in range(len(a1)):\n count += 1\n avg1 = ((count - 1) * avg1 + a1[i]) / count\n ma1.append(round(avg1, 4)) # cumulative average formula\n # μ_n=((n-1) μ_(n-1) + x_n)/n\n return ma1\n\n\ndef percent(value, total):\n if value > 0:\n return round((value / total) * 100, 2)\n else:\n return 0\n\n\ndef plot_offloaded_remote():\n keys = ['O-Out', 'Cloud', 'Local', 'O-In']\n total = _off_mec + _off_cloud + _loc + _inward_mec\n\n val = [percent(_off_mec, total),\n percent(_off_cloud, total),\n percent(_loc, total),\n percent(_inward_mec, total)]\n cols = ['r', 'g', 'b', 'm']\n ypos = ([0, 1, 2, 3])\n '''\n explode = []\n for i in val:\n if i == max(val):\n explode.append(0.1)\n else:\n explode.append(0)\n\n ax2.pie(val, labels=keys, autopct='%.3f%%', wedgeprops=dict(width=0.5), \n startangle=-40, shadow=True, explode=explode, colors=cols)\n '''\n values = [_off_mec, _off_cloud, _loc, _inward_mec]\n for i in values:\n j = values.index(i)\n ax2.text(j - 0.1, values[j], '{}%'.format(val[j]), rotation=0,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))\n ax2.set_xticks(ypos)\n ax2.set_xticklabels(keys)\n ax2.bar(ypos, values, align='center', color=cols, alpha=0.3)\n ax2.set_title('Local/Remote Execution Report')\n plt.subplot(ax2)\n\n\n# color=color_code[list(hosts.values()).index(i)]\n\n\ndef plot_deadlock():\n # cols = ['r']\n text = str(deadlock[-1] - 1) + 
\" Deadlock Resolved\"\n '''\n wedges, texts, autotexts = ax5.pie(deadlock, shadow=True, autopct=text,\n textprops=dict(rotation_mode='anchor', color=\"w\", ha='left'), colors=cols)\n\n plt.setp(autotexts, size=9, weight=\"bold\")\n '''\n ax5.text(0.5, 0.6, text, rotation=0, size=10,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(0., 0., 0.), fc=(0.7, 0.9, 1.)))\n ax5.text(0.5, 0.45, '{} Tasks Received'.format(_loc + _inward_mec), rotation=0, size=10,\n ha=\"center\", va=\"center\", bbox=dict(boxstyle=\"round\", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))\n # ax5.set_title(\"Deadlock Resolved Counter\")\n ax5.set_axis_off()\n plt.subplot(ax5)\n\n\ndef plot_memory():\n global memory\n\n memory.append(round(my_algo.memory_percent(), 4))\n\n ax6.grid(True)\n ax6.plot(list(range(len(_mov_avg(memory)))), _mov_avg(memory), linewidth=2, label='Memory', color='m')\n # ax6.set_title('Moving Memory Utilization')\n ax6.set_ylabel('Moving Memory')\n ax6.set_xlabel('Time (seconds)')\n ax6.fill_between(list(range(len(_mov_avg(memory)))), _mov_avg(memory), 0, alpha=0.5, color='m')\n ax6.legend()\n plt.subplot(ax6)\n\n\ndef plot_wait_time():\n ax1.grid(True)\n\n for i in mec_waiting_time:\n mv = _mov_avg(mec_waiting_time[i])\n pt = mv[0:len(mv):int((len(mv) / 7)) + 1]\n if pt[-1] != mv[-1]:\n pt.append(mv[-1])\n d = list(range(len(mv)))\n ptx = d[0:len(d):int((len(d) / 7)) + 1]\n if ptx[-1] != d[-1]:\n ptx.append(d[-1])\n if len(ptx) > len(pt):\n ptx = ptx[:-1]\n elif len(ptx) < len(pt):\n pt = pt[:-1]\n ax1.plot(ptx,\n pt,\n **style1[list(hosts.values()).index(i)],\n linestyle=(0, (3, 1, 1, 1, 1, 1)),\n linewidth=2,\n label=i)\n ax1.set_title('Waiting Time Queue')\n ax1.set_ylabel('Moving Wait + RTT')\n # ax2.set_xlabel('Time (seconds)')\n ax1.legend()\n plt.subplot(ax1)\n\n\ndef get_mec_rtts():\n for i in mec_rtt:\n mec_rtt[i].append(get_rtt(i))\n\n\ndef plot_rtts():\n get_mec_rtts()\n ax3.grid(True)\n for i in mec_rtt:\n mv = _mov_avg(mec_rtt[i])\n pt = mv[0:len(mv):int((len(mv) / 7)) + 1]\n if pt[-1] != mv[-1]:\n pt.append(mv[-1])\n d = list(range(len(mv)))\n ptx = d[0:len(d):int((len(d) / 7)) + 1]\n if ptx[-1] != d[-1]:\n ptx.append(d[-1])\n if len(ptx) > len(pt):\n ptx = ptx[:-1]\n elif len(ptx) < len(pt):\n pt = pt[:-1]\n ax3.plot(ptx,\n pt,\n **style1[list(hosts.values()).index(i)],\n linestyle=(0, (3, 1, 1, 1, 1, 1)),\n linewidth=2,\n label=i)\n ax3.set_title('RTT Utilization over Time')\n ax3.set_ylabel('Moving RTT')\n # ax3.set_xlabel('Time (seconds)')\n ax3.legend()\n plt.subplot(ax3)\n\n\ndef plot_cpu():\n global prev_t\n\n # get cpu\n next_t = psutil.cpu_percent(percpu=False)\n delta = abs(prev_t - next_t)\n prev_t = next_t\n _cpu.append(round(delta, 4))\n\n # plot graph\n ax4.grid(True)\n ax4.plot(list(range(len(_mov_avg(_cpu)))), _mov_avg(_cpu), linewidth=2, label='CPU')\n # ax4.set_title('Moving CPU Utilization')\n ax4.set_ylabel('Moving CPU')\n ax4.set_xlabel('Time (seconds)')\n ax4.fill_between(list(range(len(_mov_avg(_cpu)))), _mov_avg(_cpu), 0, alpha=0.5)\n ax4.legend()\n plt.subplot(ax4)\n\n\ndef plot_graphs():\n plot_offloaded_remote()\n plot_wait_time()\n plot_rtts()\n plot_cpu()\n plot_deadlock()\n plot_memory()\n fig.suptitle('MEC Performance During Deadlock Experiment')\n\n\ndef show_graphs():\n drawnow(plot_graphs)\n\n\ndef ip_address():\n try:\n # cmd = ['ifconfig eth1 | grep inet | cut -d \":\" -f 2 | cut -d \" \" -f 1']\n cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d \"t\" -f 2 | cut -d \" \" -f 2']\n address = str(sp.check_output(cmd, shell=True), 
'utf-8')[0:-1]\n if len(address.strip().split('.')) == 4:\n return address.strip()\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n except Exception as e:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n\n\ndef host_ip_set():\n global ip_set\n\n ip_set = set()\n for ifaceName in interfaces():\n addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]\n ip_set.add(', '.join(addresses))\n\n\ndef ping(host):\n cmd = [f'ping -c 1 {host}']\n output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\\n')\n try:\n value = float(output[-2].split('=')[-1].split('/')[0])\n except ValueError:\n value = None\n return value\n\n\ndef get_rtt(host):\n rtt = ping(host)\n if rtt:\n return round(rtt, 4)\n else:\n return get_rtt(host)\n\n\ndef get_time():\n _time_ = []\n d = str(dt.datetime.utcnow()).split()\n _time_ += d[0].split('-')\n g = d[1].split('.')\n _time_ += g[0].split(':')\n try:\n _time_.append(g[1])\n except IndexError:\n _time_.append('0')\n return _time_\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\ndef _lcm(a, b):\n return int(a * b / gcd(a, b))\n\n\ndef lcm(_list):\n return reduce(_lcm, _list)\n\n\ndef gosh_dist(_range):\n return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range\n\n\ndef on_connect(connect_client, userdata, flags, rc):\n # print(\"Connected with Code :\" +str(rc))\n # Subscribe Topic from here\n connect_client.subscribe(node_id)\n\n\n# Callback Function on Receiving the Subscribed Topic/Message\ndef on_message(message_client, userdata, msg):\n data = str(msg.payload, 'utf-8')\n if data[0] == 'c': # receive from cloud\n received_task = data[2:]\n # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])\n if received_task in task_record:\n del task_record[received_task]\n received_task = '.'.join(received_task.split('.')[:-1])\n _client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )\n cooperate['cloud'] += 1\n count_task_sent(received_task)\n\n elif data[0] == 't': # receive from client\n received_task = ast.literal_eval(data[2:])\n received_task_queue.append(received_task)\n received_time.append(time.time())\n\n else:\n print('data: ', data)\n\n\ndef connect_to_broker(stop):\n global _client\n\n username = 'mec'\n password = 'password'\n broker_port_no = 1883\n\n _client = mqtt.Client()\n _client.on_connect = on_connect\n _client.on_message = on_message\n\n _client.username_pw_set(username, password)\n _client.connect(broker_ip, broker_port_no, 60)\n _client.loop_start()\n while True:\n if stop():\n _client.loop_stop()\n _client.disconnect()\n print('broker loop terminated')\n break\n\n\ndef task_time_map(seq, process):\n exe_seq = []\n capacity_sum = 0\n for job in process:\n capacity_sum += process[job]['wcet']\n while capacity_sum > 0:\n for job in seq:\n if process[job]['wcet'] > 0:\n exe_seq.append(job)\n process[job]['wcet'] -= 1\n capacity_sum -= 1\n\n return exe_seq\n\n\ntotal_received_task = 0\n\n\ndef edf():\n global total_received_task\n t_lcm = lcm([tasks[i]['period'] for i in tasks])\n\n t_dead = {i: tasks[i]['deadline'] for i in tasks}\n\n sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))\n # print(sorted_dead)\n\n ready_task = []\n for i in sorted_dead:\n period = tasks[i[0]]['period']\n # print('lcm: ', t_lcm, ' period: ', period)\n 
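# Illustrative sketch: the loop below releases one job of every task each
# `period` time units within the hyperperiod and tags it with its absolute
# deadline (release time + relative deadline); serving jobs in non-decreasing
# absolute-deadline order is the EDF rule.  A compact standalone version of the
# same idea, with a hypothetical two-task set:
def edf_ready_jobs(task_set, horizon):
    jobs = []
    for name, t in task_set.items():
        release = 0
        while release < horizon:
            jobs.append((name, release + t['deadline']))   # (task, absolute deadline)
            release += t['period']
    return sorted(jobs, key=lambda j: j[1])

demo = {'a': {'period': 5, 'deadline': 4}, 'b': {'period': 10, 'deadline': 8}}
# hyperperiod of (5, 10) is 10, giving jobs [('a', 4), ('b', 8), ('a', 9)]
assert edf_ready_jobs(demo, 10) == [('a', 4), ('b', 8), ('a', 9)]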
t_range = int(t_lcm / period)\n last_dead = 0\n for j in range(t_range):\n ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))\n last_dead += period\n\n ready_task = sorted(ready_task, key=lambda t: t[1])\n print(ready_task)\n\n t_time_ = 0\n schedule = []\n missed = []\n register = {i: 0 for i in tasks.keys()} # {ti : amount executed}\n for i in ready_task:\n if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:\n while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:\n t_time_ += 1\n # schedule.append(('idle', t_time))\n if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:\n if t_time_ + tasks[i[0]]['wcet'] <= i[1]:\n register[i[0]] += 1\n t_time_ += tasks[i[0]]['wcet']\n schedule.append(i[0])\n else:\n print('Deadline missed: ', i)\n missed.append(i[0])\n\n # print('s : ', schedule)\n # print('r: ', register)\n if len(missed) > 0:\n # print('missed deadline: ', missed)\n cooperative_mec(missed)\n _edf_ = task_time_map(schedule, tasks)\n total_received_task += len(_edf_)\n return _edf_\n\n\n# generate execution sequence using wound wait algorithm\ndef wound_wait(processes, avail, n_need, allocat):\n global deadlock\n\n offload = []\n\n # To store execution sequence\n exec_seq = []\n\n # Make a copy of available resources\n work = [0] * len(processes)\n\n # While all processes are not finished\n # or system is not in safe state.\n while 0 in work:\n ind = work.index(0)\n i = processes[ind]\n # print('comparing| process: ', i, n_need[i], 'work: ', avail)\n if not (False in list(np.greater_equal(avail, n_need[i]))):\n exec_seq.append(i)\n avail = np.add(avail, allocat[i])\n work[ind] = 1\n\n else:\n a = list(set(processes) - set(exec_seq) - set(offload))\n n = {}\n for j in a:\n n[j] = sum(allocat[j])\n _max = max(n, key=n.get)\n # print('work: ', work, 'need: ', _need[_max])\n if not (False in list(np.greater_equal(np.array(avail) + np.array(allocat[_max]), n_need[i]))):\n offload.append(_max)\n avail = np.array(avail) + np.array(allocat[_max])\n work[processes.index(_max)] = 1\n else:\n offload.append(i)\n avail = np.array(avail) + np.array(allocat[i])\n work[processes.index(i)] = 1\n\n if len(offload) > 0:\n print('offloading tasks: ', offload)\n cooperative_mec(offload)\n deadlock[0] += 1\n\n print('Execution seq: ', exec_seq)\n\n return exec_seq\n\n\ndef get_exec_seq(pro):\n # Number of processes\n # p = len(pro)\n\n processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]\n\n # Available instances of resources\n avail = [6, 5, 5]\n n_need = {i: _need[i[:2]] for i in processes}\n # print('need', n_need)\n # Resources allocated to processes\n allot = {i: allocation[i[:2]] for i in processes}\n\n # return execution sequence\n return wound_wait(processes, avail, n_need, allot)\n\n\ndef calc_wait_time(list_seq):\n pre = 0\n time_dic = {}\n for i in list_seq:\n j = i.split('_')[0] # i = 't5_3_3', j = 't5_3'\n time_dic[i] = round(t_time[j][0] + pre, 3)\n pre += t_time[j][0]\n # waiting time = total waiting time ÷ 2 average waiting time might be too tight\n w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)\n\n send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs\n return time_dic\n\n\ndef compare_local_mec(list_seq):\n time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}\n print('local vs MEC comparison: ', time_compare_dict)\n execute_mec = []\n execute_locally = []\n for i in time_compare_dict:\n if time_compare_dict[i]:\n execute_locally.append(i)\n 
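# Illustrative sketch: the feasibility test used by wound_wait() above,
# `not (False in list(np.greater_equal(avail, need)))`, asks whether the available
# vector covers the need vector on every resource (CPU, memory, storage).
# np.all() states the same element-wise check more directly; the values below are
# taken from _need and the avail vector used in get_exec_seq().
import numpy as np

avail = np.array([6, 5, 5])
need_t1 = np.array([7, 4, 3])   # _need['t1']
need_t4 = np.array([0, 1, 1])   # _need['t4']

assert not np.all(np.greater_equal(avail, need_t1))   # t1 does not fit: needs 7 CPU, only 6 available
assert np.all(np.greater_equal(avail, need_t4))       # t4 fits within the available resources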
else:\n execute_mec.append(i)\n\n return execute_mec, execute_locally\n\n\ndef calculate_mov_avg(ma1, a1):\n if ma1 in mec_waiting_time:\n _count = len(mec_waiting_time[ma1])\n avg1 = mec_waiting_time[ma1][-1]\n else:\n _count = 0\n avg1 = 0\n _count += 1\n avg1 = ((_count - 1) * avg1 + a1) / _count\n # ma1.append(avg1) #cumulative average formula\n # μ_n=((n-1) μ_(n-1) + x_n)/n\n return round(avg1, 4)\n\n\ndef send_message(mg):\n _multicast_group = ('224.3.29.71', 10000)\n try:\n\n # Send data to the multicast group\n if mg == 'hello':\n smg = mg + ' ' + str([get_hostname(), ip_address()])\n sock1.sendto(str.encode(smg), _multicast_group)\n print('\\nHello message sent')\n else:\n sock1.sendto(str.encode(mg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef get_hostname():\n cmd = ['cat /etc/hostname']\n hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]\n return hostname\n\n\ndef receive_message(stop): # used for multi-cast message exchange among MEC\n global hosts\n\n while True:\n if stop():\n print('Stopped: receive_message()')\n break\n else:\n data, address = sock1.recvfrom(1024)\n _d = data.decode()\n if _d[:5] == 'hello':\n _data = ast.literal_eval(_d[6:])\n hosts[_data[0]] = _data[1]\n # print('received: ', hosts)\n if _data[1] != host_ip:\n mec_rtt[_data[1]] = []\n\n elif (data.decode()[:6] == 'update') and (discovering == 0):\n hosts = ast.literal_eval(data.decode()[7:])\n for i in hosts:\n if i != host_ip:\n mec_rtt[i] = []\n\n elif _d[:2] == 'wt':\n split_data = _d.split()\n if split_data[1] != host_ip:\n # calcuate moving average of mec wait time => w_time = wait time + rtt\n w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(address[0]))\n if split_data[1] in mec_waiting_time:\n mec_waiting_time[split_data[1]].append(w_time)\n else:\n mec_waiting_time[split_data[1]] = [w_time]\n\n\ndef mec_comparison():\n # returns min average waiting for all mecs\n if len(mec_waiting_time) == 0:\n return 0\n min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}\n min_wt = min(min_mec, key=min_mec.get)\n return min_wt\n\n\ndef cooperative_mec(mec_list):\n global _off_cloud\n global _off_mec\n global task_id, task_record\n\n for i in mec_list:\n _host = mec_comparison()\n if _host == 0:\n # send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]\n _send_task = f\"{i.split('_')[0]}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # cloud_register[i.split('_')[0].split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n else:\n j = i.split('_')[0]\n _max = np.array([6, 5, 5])\n send = 'false'\n if not (False in list(np.greater_equal(_max, _need[j[:2]]))):\n send = 'true'\n # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY\n if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):\n _send_task = f\"{j}.{task_id}\"\n send_offloaded_task_mec('{} {} {}'.format('ex', 
mec_id(_host), [_send_task, t_time[j][0]]))\n task_record[_send_task] = 'mec'\n task_id += 1\n _off_mec += 1\n # SENDS TASK TO MEC FOR EXECUTION\n w_send = mec_waiting_time[_host][-1] + 0.001\n mec_waiting_time[_host].append(w_send) # adds a new average waiting time\n print('\\n======SENDING {} TO MEC {}========='.format(i, _host))\n\n else:\n _send_task = f\"{j}.{task_id}\"\n _client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )\n task_record[_send_task] = 'cloud'\n task_id += 1\n _off_cloud += 1\n # send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]\n\n # cloud_register[j.split('.')[2]] = send_back_host\n\n print('\\n=========SENDING {} TO CLOUD==========='.format(i))\n\n\noutward_mec = 0\noffload_check = [0, 0]\n\n\ndef execute_re_offloaded_task(offloaded_task):\n global outward_mec, offload_check\n exec_list = get_exec_seq(offloaded_task[0])\n outward_mec += len(exec_list)\n for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'\n j = i.split('_')[0]\n time.sleep(offloaded_task[1][j] / 2)\n # print('j task: ', j)\n send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))\n\n\nclients_record = {}\n\n\ndef count_task_sent(task):\n global clients_record\n c_id = task.split('.')[2]\n if c_id in clients_record:\n clients_record[c_id] += 1\n else:\n clients_record[c_id] = 1\n\n\ndef execute(local):\n print('\\nExecuting :', local)\n\n for i in local:\n j = i.split('_')[0]\n _t = t_time[j][0] / 2\n time.sleep(_t)\n print('#{}'.format(local.index(i) + 1), ' Executed: ', i)\n _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )\n count_task_sent(j)\n print('============== EXECUTION DONE ===============')\n\n\ncooperate = {'mec': 0, 'cloud': 0}\n\n\ndef receive_offloaded_task_mec(stop): # run as a thread\n global _inward_mec\n global t_track\n\n while True:\n if stop():\n print('Stopped: receive_offloaded_task_mec()')\n break\n else:\n data, address = sock2.recvfrom(1024)\n if len(data.decode()) > 0:\n da = data.decode().split(' ')\n if (address[0] not in ip_set) and (da[0] == node_id): # send back to client\n # send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client\n if da[1] in task_record:\n del task_record[da[1]]\n task_new = '.'.join(da[1].split('.')[:-1])\n _client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )\n count_task_sent(da[1])\n cooperate['mec'] += 1\n else:\n print('*' * 30 + f'\\n{da[1]} Not in Task Record\\n' + '*' * 30)\n elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):\n _received = ast.literal_eval(da[2] + da[3])\n shared_resource_lock.acquire()\n task = _received[0] + '*{}'.format(t_track)\n reoffload_list[0].append(task)\n reoffload_list[1][task] = _received[1]\n shared_resource_lock.release()\n t_track += 1\n _inward_mec += 1\n\n\ndef call_execute_re_offload(stop):\n global reoffload_list, outward_mec\n global offload_check\n while True:\n if stop():\n print('Stopped: call_execute_re_offload()')\n break\n else:\n if len(reoffload_list[0]) == 1:\n t = reoffload_list[0][-1]\n time.sleep(reoffload_list[1][t] / 2)\n shared_resource_lock.acquire()\n reoffload_list[0].remove(t)\n del reoffload_list[1][t]\n shared_resource_lock.release()\n send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))\n outward_mec += 1\n offload_check[0] += 1\n elif len(reoffload_list[0]) > 1:\n o = reoffload_list.copy()\n offload_check[1] += len(o)\n execute_re_offloaded_task(o)\n for i in o[0]:\n shared_resource_lock.acquire()\n reoffload_list[0].remove(i)\n del 
reoffload_list[1][i]\n shared_resource_lock.release()\n\n\ndef send_offloaded_task_mec(msg):\n _multicast_group = ('224.5.5.55', 20000)\n try:\n sock2.sendto(str.encode(msg), _multicast_group)\n\n except Exception as e:\n print(e)\n\n\ndef send_email(msg, send_path):\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com')\n server.ehlo()\n server.login(config.email_address, config.password)\n subject = 'Deadlock results edf+wound_wait {}, {}'.format(get_hostname(), send_path)\n # msg = 'Attendance done for {}'.format(_timer)\n _message = 'Subject: {}\\n\\n{}\\n\\n SENT BY RIHANNA \\n\\n'.format(subject, msg)\n server.sendmail(config.email_address, config.send_email, _message)\n server.quit()\n print(\"Email sent!\")\n except Exception as e:\n print(e)\n\n\ndef mec_id(client_ip):\n _id = client_ip.split('.')[-1]\n if len(_id) == 1:\n return '00' + _id\n elif len(_id) == 2:\n return '0' + _id\n else:\n return _id\n\n\ndef save_and_send(send_path):\n _id_ = get_hostname()[-1]\n result = f\"\\nwt{_id_}_12_{mec_no} = {mec_waiting_time} \" \\\n f\"\\nrtt{_id_}_12_{mec_no} = {mec_rtt} \\ncpu{_id_}_12_{mec_no} = {_cpu} \" \\\n f\"\\noff_mec{_id_}_12_{mec_no} = {_off_mec} \" \\\n f\"\\noff_cloud{_id_}_12_{mec_no} = {_off_cloud} \" \\\n f\"\\ninward_mec{_id_}_12_{mec_no} = {_inward_mec}\" \\\n f\"\\nloc{_id_}_12_{mec_no} = {_loc} \" \\\n f\"\\ndeadlock{_id_}_12_{mec_no} = {deadlock} \\nmemory{_id_}_12_{mec_no} = {memory}\" \\\n f\"\\ntask_received{_id_}_12_{mec_no} = {total_received_task} \\nsent_t{_id_}_12_{mec_no} = {clients_record}\" \\\n f\"\\ncooperate{_id_}_12_{mec_no} = {cooperate} \\ntask_record{_id_}_12_{mec_no} = {task_record}\" \\\n f\"\\noutward_mec{_id_}_12_{mec_no} = {outward_mec}\" \\\n f\"\\noffload_check{_id_}_12_{mec_no} = {offload_check}\"\n list_result = [\n f\"\\nwt{_id_}_12_{mec_no} = {mec_waiting_time} \",\n f\"\\nrtt{_id_}_12_{mec_no} = {mec_rtt} \\ncpu{_id_}_12_{mec_no} = {_cpu} \",\n f\"\\noff_mec{_id_}_12_{mec_no} = {_off_mec} \\noff_cloud{_id_}_12_{mec_no} = {_off_cloud} \",\n f\"\\ninward_mec{_id_}_12_{mec_no} = {_inward_mec}\",\n f\"\\nloc{_id_}_12_{mec_no} = {_loc} \",\n f\"\\ndeadlock{_id_}_12_{mec_no} = {deadlock} \\nmemory{_id_}_12_{mec_no} = {memory}\",\n f\"\\ntask_received{_id_}_12_{mec_no} = {total_received_task} \\nsent_t{_id_}_12_{mec_no} = {clients_record}\",\n f\"\\ncooperate{_id_}_12_{mec_no} = {cooperate} \\ntask_record{_id_}_12_{mec_no} = {task_record} \"\n f\"\\noutward_mec{_id_}_12_{mec_no} = {outward_mec}\",\n f\"\\noffload_check{_id_}_12_{mec_no} = {offload_check}\"\n ]\n file_ = open(f'{_id_}_12_{mec_no}datap.py', 'w')\n for i in list_result:\n file_.write(i)\n file_.close()\n cmd = f'mv {_id_}_12_{mec_no}datap.py {send_path}'\n os.system(cmd)\n\n send_email(result, send_path)\n if len(task_record) > 0:\n for _task_ in task_record:\n task_new = '.'.join(_task_.split('.')[:-1])\n _client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )\n\n\ndef terminate_process():\n global prev_t, _loc, _off_mec, _off_cloud, _inward_mec, outward_mec, deadlock, memory, mec_waiting_time, mec_rtt\n global offload_register, reoffload_list, discovering, test, _time, _pos, received_task_queue, received_time\n global cloud_register, t_track, task_record, task_id, cooperate, clients_record, offload_check\n global timed_out_tasks, total_received_task, _cpu\n\n # reinitialize #\n _cpu = [] # cpu plot list\n prev_t = 0 # variable for cpu util\n _off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec\n _off_cloud = 
0 # used to keep a count of tasks offloaded to cloud\n _loc = 0 # used to keep a count of tasks executed locally\n _inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec\n outward_mec = 0 # keeps count of tasks sent back to another mec after executing\n deadlock = [1] # keeps count of how many deadlock is resolved\n memory = []\n mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}\n mec_rtt = {} # {ip: [RTT]}\n offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload\n reoffload_list = [[], {}] # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.\n discovering = 0 # if discovering == 0 update host\n test = []\n _time = []\n _pos = 0\n received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]\n received_time = []\n cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud\n t_track = 1\n task_record = {} # keeps record of task reoffloaded\n task_id = 0 # id for each task reoffloaded\n\n cooperate = {'mec': 0, 'cloud': 0}\n clients_record = {}\n offload_check = [0, 0]\n timed_out_tasks = 0\n total_received_task = 0\n\n time.sleep(1)\n\n\nrun = 1 # tell agents child when to stop\n\n\ndef start_loop():\n global _loc\n global tasks\n global t_time\n global node_id\n\n print('\\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\\n')\n\n node_id = mec_id(ip_address())\n # print('node id: ', node_id)\n func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]\n threads_ = []\n stop = False\n for i in func_to_thread:\n threads_.append(Thread(target=i, args=(lambda: stop,)))\n threads_[-1].daemon = True\n threads_[-1].start()\n\n input('start..')\n print('========= Waiting for tasks ==========')\n _time_ = dt.datetime.now()\n while True:\n try:\n if len(received_task_queue) > 0:\n info = received_task_queue.pop(0)\n tasks, t_time = info\n\n print('EDF List of Processes: ', tasks, '\\n')\n\n print('\\n========= Running Deadlock Algorithm ===========')\n\n list_seq = get_exec_seq(edf())\n if len(list_seq) > 0: # do only when there is a task in safe sequence\n wait_list = calc_wait_time(list_seq)\n print('\\nWaiting Time List: ', wait_list)\n compare_result = compare_local_mec(wait_list)\n print('\\nExecute Locally: ', compare_result[1])\n _loc += len(compare_result[1]) # total number of tasks to be executed locally\n print('\\nExecute in MEC: ', compare_result[0])\n\n print('\\nSending to cooperative platform')\n if len(compare_result[0]) > 0:\n cooperative_mec(compare_result[0])\n execute(compare_result[1])\n show_graphs()\n _time_ = dt.datetime.now()\n else:\n send_message(str('wt {} 0.0'.format(ip_address())))\n time.sleep(.5)\n now = dt.datetime.now()\n delta = now - _time_\n if delta > dt.timedelta(minutes=4):\n print('terminating programme 3 mins elapsed')\n stop = False\n break\n\n except KeyboardInterrupt:\n print('\\nProgramme Terminated')\n break\n print('algo stopped!')\n\n\nclass BrokerSend:\n def __init__(self, user, pw, ip, sub_topic, data):\n self.user = user\n self.pw = pw\n self.ip = ip\n self.port = 1883\n self.topic = sub_topic\n self.response = None\n self.client = mqtt.Client()\n self.client.username_pw_set(self.user, self.pw)\n self.client.connect(self.ip, self.port, 60)\n self.data = data\n\n def publish(self):\n self.client.publish(self.topic, self.data)\n\n def __del__(self):\n print('BrokerSend Object Deleted!')\n\n\ndef run_me(mec_no_, send_path, broker_ip_): # call this 
from agent\n global discovering\n global mec_no\n global host_ip\n global my_algo\n global broker_ip\n\n print('mec ip: ', ip_address())\n my_algo = psutil.Process()\n discovering_group()\n offloading_group()\n host_ip_set()\n\n mec_no = mec_no_\n broker_ip = broker_ip_\n\n host_ip = ip_address()\n print('MEC Details: ', hosts)\n discovering = 1\n time.sleep(2)\n for host in hosts:\n if hosts[host] != host_ip:\n mec_rtt[hosts[host]] = []\n os.system(f'echo {mec_no}/{send_path} >> /home/mec/deadlock_project/started.txt')\n start_loop()\n print('saving data')\n save_and_send(send_path)\n print('send alert to control')\n time.sleep(r.uniform(1, 30))\n data = pickle.dumps([get_hostname(), host_ip])\n broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111', 'data': data}\n BrokerSend(**broker_dict).publish()\n print('Terminating process')\n cmd = 'kill -9 {}'.format(os.getpid())\n os.system(cmd)\n\n\ndef main():\n global hosts\n global cloud_ip\n # (--n, --mec_no_, --cloud_ip, --s_path, --b_ip) send_path = f'/home/mec/result/{kind}/{count}'\n mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',\n 'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',\n 'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',\n }\n gui = {'osboxes-0': '192.168.122.110'}\n cloud_ips = ['192.168.200.11', '192.168.200.12']\n b_ip = '192.168.122.111'\n parser = argparse.ArgumentParser()\n parser.add_argument('--n', type=int, default=1.0, help='Number of MEC nodes')\n parser.add_argument('--p', type=str, default='/home/mec/result/python', help='Path to send result: homo_1')\n args = parser.parse_args()\n\n kind, count = args.p.split('_')\n send_path = f'/home/mec/result/{kind}/{count}'\n\n ho = sorted(list(mec_nodes))[:args.n - 1]\n hosts = {**{host: mec_nodes[host] for host in ho if ho != get_hostname()}, **gui}\n\n ho += ['osboxes-0']\n cloud_ip = cloud_ips[ho.index(get_hostname()) % 2]\n os.system('clear')\n run_me(mec_no_=args.n, send_path=send_path, broker_ip_=b_ip)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.greater_equal", "numpy.add", "numpy.array" ], [ "numpy.sign", "numpy.deg2rad", "matplotlib.pyplot.title" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.use", "numpy.greater_equal", "matplotlib.pyplot.subplot", "numpy.add", "numpy.array", "matplotlib.pyplot.figure" ], [ "numpy.greater_equal", "numpy.add", "numpy.array" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.figure" ], [ "matplotlib.use", "numpy.greater_equal", "matplotlib.pyplot.subplot", "numpy.add", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Geonhee-LEE/PythonLinearNonlinearControl
[ "2a2467098108641483778c09ceb7906cb49f6cee" ]
[ "PythonLinearNonlinearControl/envs/first_order_lag.py" ]
[ "import numpy as np\nimport scipy\nfrom scipy import integrate\nfrom .env import Env\n\n\nclass FirstOrderLagEnv(Env):\n \"\"\" First Order Lag System Env\n \"\"\"\n\n def __init__(self, tau=0.63):\n \"\"\"\n \"\"\"\n self.config = {\"state_size\": 4,\n \"input_size\": 2,\n \"dt\": 0.05,\n \"max_step\": 500,\n \"input_lower_bound\": [-0.5, -0.5],\n \"input_upper_bound\": [0.5, 0.5],\n }\n\n super(FirstOrderLagEnv, self).__init__(self.config)\n\n # to get discrete system matrix\n self.A, self.B = self._to_state_space(tau, dt=self.config[\"dt\"])\n\n @staticmethod\n def _to_state_space(tau, dt=0.05):\n \"\"\"\n Args:\n tau (float): time constant\n dt (float): discrte time\n Returns:\n A (numpy.ndarray): discrete A matrix \n B (numpy.ndarray): discrete B matrix \n \"\"\"\n # continuous\n Ac = np.array([[-1./tau, 0., 0., 0.],\n [0., -1./tau, 0., 0.],\n [1., 0., 0., 0.],\n [0., 1., 0., 0.]])\n Bc = np.array([[1./tau, 0.],\n [0., 1./tau],\n [0., 0.],\n [0., 0.]])\n # to discrete system\n A = scipy.linalg.expm(dt*Ac)\n # B = np.matmul(np.matmul(scipy.linalg.expm(Ac*dt) -\n # scipy.linalg.expm(Ac*0.), np.linalg.inv(Ac)),\\\n # Bc)\n B = np.zeros_like(Bc)\n for m in range(Bc.shape[0]):\n for n in range(Bc.shape[1]):\n integrate_fn =\\\n lambda tau: np.matmul(scipy.linalg.expm(Ac*tau), Bc)[m, n]\n sol = integrate.quad(integrate_fn, 0, dt)\n B[m, n] = sol[0]\n\n return A, B\n\n def reset(self, init_x=None):\n \"\"\" reset state\n Returns:\n init_x (numpy.ndarray): initial state, shape(state_size, ) \n info (dict): information\n \"\"\"\n self.step_count = 0\n\n self.curr_x = np.zeros(self.config[\"state_size\"])\n\n if init_x is not None:\n self.curr_x = init_x\n\n # goal\n self.g_x = np.array([0., 0, -2., 3.])\n\n # clear memory\n self.history_x = []\n self.history_g_x = []\n\n return self.curr_x, {\"goal_state\": self.g_x}\n\n def step(self, u):\n \"\"\"\n Args:\n u (numpy.ndarray) : input, shape(input_size, )\n Returns:\n next_x (numpy.ndarray): next state, shape(state_size, ) \n cost (float): costs\n done (bool): end the simulation or not\n info (dict): information \n \"\"\"\n # clip action\n u = np.clip(u,\n self.config[\"input_lower_bound\"],\n self.config[\"input_upper_bound\"])\n\n next_x = np.matmul(self.A, self.curr_x[:, np.newaxis]) \\\n + np.matmul(self.B, u[:, np.newaxis])\n\n # cost\n cost = 0\n cost = np.sum(u**2)\n cost += np.sum((self.curr_x - self.g_x)**2)\n\n # save history\n self.history_x.append(next_x.flatten())\n self.history_g_x.append(self.g_x.flatten())\n\n # update\n self.curr_x = next_x.flatten()\n # update costs\n self.step_count += 1\n\n return next_x.flatten(), cost, \\\n self.step_count > self.config[\"max_step\"], \\\n {\"goal_state\": self.g_x}\n\n def plot_func(self, to_plot, i=None, history_x=None, history_g_x=None):\n \"\"\"\n \"\"\"\n raise ValueError(\"FirstOrderLag does not have animation\")\n" ]
[ [ "numpy.clip", "numpy.matmul", "scipy.linalg.expm", "numpy.zeros_like", "scipy.integrate.quad", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
mbsariyildiz/autoencoder-pytorch
[ "986ad3f84bfd7671a350ba0d2edd72ded5154142" ]
[ "src/config.py" ]
[ "import torch\nimport dataset\nimport encoder\nimport decoder\nimport torchvision\nimport torchvision.transforms as transforms\n\ndef load_dataset(args):\n\n if args.dataset == 'celebA':\n train_transform = transforms.Compose([\n transforms.Resize([64, 64]),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()])\n test_transform = transforms.Compose([\n transforms.Resize([64, 64]),\n transforms.ToTensor()])\n\n celeba = dataset.celebA(\n args.data_dir, args.red_rate, args.test_split, args.validation_split)\n train_set = dataset.celebA_Subset(celeba.train_images, train_transform)\n test_set = dataset.celebA_Subset(celeba.test_images, test_transform)\n\n return train_set, test_set\n\ndef load_model(args):\n\n if args.dataset == 'celebA':\n enc = encoder.celebA_Encoder(args.d_latent, args.device, args.exp_dir)\n dec = decoder.celebA_Decoder(args.d_latent, args.device, args.exp_dir)\n\n if (args.device == 'cuda') and ('multi_gpu' in args) and (args.multi_gpu == True):\n print ('replicating the model on multiple gpus ... ')\n enc = torch.nn.DataParallel(enc)\n dec = torch.nn.DataParallel(dec)\n\n return enc, dec" ]
[ [ "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
accessnash/pandas
[ "366241386282272328333256b6e267a68f8133a4", "366241386282272328333256b6e267a68f8133a4" ]
[ "pandas/tests/indexes/test_numeric.py", "pandas/tests/types/test_inference.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom pandas import compat\nfrom pandas.compat import range, lrange, u, PY3\n\nimport numpy as np\n\nfrom pandas import (date_range, Series, DataFrame,\n Index, Float64Index, Int64Index, RangeIndex)\nfrom pandas.util.testing import assertRaisesRegexp\n\nimport pandas.util.testing as tm\nimport pandas.core.config as cf\n\nimport pandas as pd\nfrom pandas.lib import Timestamp\n\nfrom .common import Base\n\n\ndef full_like(array, value):\n \"\"\"Compatibility for numpy<1.8.0\n \"\"\"\n ret = np.empty(array.shape, dtype=np.array(value).dtype)\n ret.fill(value)\n return ret\n\n\nclass Numeric(Base):\n\n def test_numeric_compat(self):\n\n idx = self.create_index()\n didx = idx * idx\n\n result = idx * 1\n tm.assert_index_equal(result, idx)\n\n result = 1 * idx\n tm.assert_index_equal(result, idx)\n\n # in general not true for RangeIndex\n if not isinstance(idx, RangeIndex):\n result = idx * idx\n tm.assert_index_equal(result, idx ** 2)\n\n # truediv under PY3\n result = idx / 1\n expected = idx\n if PY3:\n expected = expected.astype('float64')\n tm.assert_index_equal(result, expected)\n\n result = idx / 2\n if PY3:\n expected = expected.astype('float64')\n expected = Index(idx.values / 2)\n tm.assert_index_equal(result, expected)\n\n result = idx // 1\n tm.assert_index_equal(result, idx)\n\n result = idx * np.array(5, dtype='int64')\n tm.assert_index_equal(result, idx * 5)\n\n result = idx * np.arange(5, dtype='int64')\n tm.assert_index_equal(result, didx)\n\n result = idx * Series(np.arange(5, dtype='int64'))\n tm.assert_index_equal(result, didx)\n\n result = idx * Series(np.arange(5, dtype='float64') + 0.1)\n expected = Float64Index(np.arange(5, dtype='float64') *\n (np.arange(5, dtype='float64') + 0.1))\n tm.assert_index_equal(result, expected)\n\n # invalid\n self.assertRaises(TypeError,\n lambda: idx * date_range('20130101', periods=5))\n self.assertRaises(ValueError, lambda: idx * idx[0:3])\n self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))\n\n result = divmod(idx, 2)\n with np.errstate(all='ignore'):\n div, mod = divmod(idx.values, 2)\n expected = Index(div), Index(mod)\n for r, e in zip(result, expected):\n tm.assert_index_equal(r, e)\n\n result = divmod(idx, full_like(idx.values, 2))\n with np.errstate(all='ignore'):\n div, mod = divmod(idx.values, full_like(idx.values, 2))\n expected = Index(div), Index(mod)\n for r, e in zip(result, expected):\n tm.assert_index_equal(r, e)\n\n result = divmod(idx, Series(full_like(idx.values, 2)))\n with np.errstate(all='ignore'):\n div, mod = divmod(\n idx.values,\n full_like(idx.values, 2),\n )\n expected = Index(div), Index(mod)\n for r, e in zip(result, expected):\n tm.assert_index_equal(r, e)\n\n # test power calculations both ways, GH 14973\n expected = pd.Float64Index(2.0**idx.values)\n result = 2.0**idx\n tm.assert_index_equal(result, expected)\n\n expected = pd.Float64Index(idx.values**2.0)\n result = idx**2.0\n tm.assert_index_equal(result, expected)\n\n def test_explicit_conversions(self):\n\n # GH 8608\n # add/sub are overriden explicity for Float/Int Index\n idx = self._holder(np.arange(5, dtype='int64'))\n\n # float conversions\n arr = np.arange(5, dtype='int64') * 3.2\n expected = Float64Index(arr)\n fidx = idx * 3.2\n tm.assert_index_equal(fidx, expected)\n fidx = 3.2 * idx\n tm.assert_index_equal(fidx, expected)\n\n # interops with numpy arrays\n expected = Float64Index(arr)\n a = np.zeros(5, dtype='float64')\n result = fidx - a\n tm.assert_index_equal(result, 
expected)\n\n expected = Float64Index(-arr)\n a = np.zeros(5, dtype='float64')\n result = a - fidx\n tm.assert_index_equal(result, expected)\n\n def test_ufunc_compat(self):\n idx = self._holder(np.arange(5, dtype='int64'))\n result = np.sin(idx)\n expected = Float64Index(np.sin(np.arange(5, dtype='int64')))\n tm.assert_index_equal(result, expected)\n\n def test_index_groupby(self):\n int_idx = Index(range(6))\n float_idx = Index(np.arange(0, 0.6, 0.1))\n obj_idx = Index('A B C D E F'.split())\n dt_idx = pd.date_range('2013-01-01', freq='M', periods=6)\n\n for idx in [int_idx, float_idx, obj_idx, dt_idx]:\n to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1])\n tm.assert_dict_equal(idx.groupby(to_groupby),\n {1.0: idx[[0, 5]], 2.0: idx[[1, 4]]})\n\n to_groupby = Index([datetime(2011, 11, 1),\n datetime(2011, 12, 1),\n pd.NaT,\n pd.NaT,\n datetime(2011, 12, 1),\n datetime(2011, 11, 1)],\n tz='UTC').values\n\n ex_keys = [Timestamp('2011-11-01'), Timestamp('2011-12-01')]\n expected = {ex_keys[0]: idx[[0, 5]],\n ex_keys[1]: idx[[1, 4]]}\n tm.assert_dict_equal(idx.groupby(to_groupby), expected)\n\n def test_modulo(self):\n # GH 9244\n index = self.create_index()\n expected = Index(index.values % 2)\n self.assert_index_equal(index % 2, expected)\n\n\nclass TestFloat64Index(Numeric, tm.TestCase):\n _holder = Float64Index\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.indices = dict(mixed=Float64Index([1.5, 2, 3, 4, 5]),\n float=Float64Index(np.arange(5) * 2.5))\n self.setup_indices()\n\n def create_index(self):\n return Float64Index(np.arange(5, dtype='float64'))\n\n def test_repr_roundtrip(self):\n for ind in (self.mixed, self.float):\n tm.assert_index_equal(eval(repr(ind)), ind)\n\n def check_is_index(self, i):\n self.assertIsInstance(i, Index)\n self.assertNotIsInstance(i, Float64Index)\n\n def check_coerce(self, a, b, is_float_index=True):\n self.assertTrue(a.equals(b))\n self.assert_index_equal(a, b, exact=False)\n if is_float_index:\n self.assertIsInstance(b, Float64Index)\n else:\n self.check_is_index(b)\n\n def test_constructor(self):\n\n # explicit construction\n index = Float64Index([1, 2, 3, 4, 5])\n self.assertIsInstance(index, Float64Index)\n expected = np.array([1, 2, 3, 4, 5], dtype='float64')\n self.assert_numpy_array_equal(index.values, expected)\n index = Float64Index(np.array([1, 2, 3, 4, 5]))\n self.assertIsInstance(index, Float64Index)\n index = Float64Index([1., 2, 3, 4, 5])\n self.assertIsInstance(index, Float64Index)\n index = Float64Index(np.array([1., 2, 3, 4, 5]))\n self.assertIsInstance(index, Float64Index)\n self.assertEqual(index.dtype, float)\n\n index = Float64Index(np.array([1., 2, 3, 4, 5]), dtype=np.float32)\n self.assertIsInstance(index, Float64Index)\n self.assertEqual(index.dtype, np.float64)\n\n index = Float64Index(np.array([1, 2, 3, 4, 5]), dtype=np.float32)\n self.assertIsInstance(index, Float64Index)\n self.assertEqual(index.dtype, np.float64)\n\n # nan handling\n result = Float64Index([np.nan, np.nan])\n self.assertTrue(pd.isnull(result.values).all())\n result = Float64Index(np.array([np.nan]))\n self.assertTrue(pd.isnull(result.values).all())\n result = Index(np.array([np.nan]))\n self.assertTrue(pd.isnull(result.values).all())\n\n def test_constructor_invalid(self):\n\n # invalid\n self.assertRaises(TypeError, Float64Index, 0.)\n self.assertRaises(TypeError, Float64Index, ['a', 'b', 0.])\n self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])\n\n def test_constructor_coerce(self):\n\n self.check_coerce(self.mixed, 
Index([1.5, 2, 3, 4, 5]))\n self.check_coerce(self.float, Index(np.arange(5) * 2.5))\n self.check_coerce(self.float, Index(np.array(\n np.arange(5) * 2.5, dtype=object)))\n\n def test_constructor_explicit(self):\n\n # these don't auto convert\n self.check_coerce(self.float,\n Index((np.arange(5) * 2.5), dtype=object),\n is_float_index=False)\n self.check_coerce(self.mixed, Index(\n [1.5, 2, 3, 4, 5], dtype=object), is_float_index=False)\n\n def test_astype(self):\n\n result = self.float.astype(object)\n self.assertTrue(result.equals(self.float))\n self.assertTrue(self.float.equals(result))\n self.check_is_index(result)\n\n i = self.mixed.copy()\n i.name = 'foo'\n result = i.astype(object)\n self.assertTrue(result.equals(i))\n self.assertTrue(i.equals(result))\n self.check_is_index(result)\n\n # GH 12881\n # a float astype int\n for dtype in ['int16', 'int32', 'int64']:\n i = Float64Index([0, 1, 2])\n result = i.astype(dtype)\n expected = Int64Index([0, 1, 2])\n tm.assert_index_equal(result, expected)\n\n i = Float64Index([0, 1.1, 2])\n result = i.astype(dtype)\n expected = Int64Index([0, 1, 2])\n tm.assert_index_equal(result, expected)\n\n for dtype in ['float32', 'float64']:\n i = Float64Index([0, 1, 2])\n result = i.astype(dtype)\n expected = i\n tm.assert_index_equal(result, expected)\n\n i = Float64Index([0, 1.1, 2])\n result = i.astype(dtype)\n expected = Index(i.values.astype(dtype))\n tm.assert_index_equal(result, expected)\n\n # invalid\n for dtype in ['M8[ns]', 'm8[ns]']:\n self.assertRaises(TypeError, lambda: i.astype(dtype))\n\n # GH 13149\n for dtype in ['int16', 'int32', 'int64']:\n i = Float64Index([0, 1.1, np.NAN])\n self.assertRaises(ValueError, lambda: i.astype(dtype))\n\n def test_equals_numeric(self):\n\n i = Float64Index([1.0, 2.0])\n self.assertTrue(i.equals(i))\n self.assertTrue(i.identical(i))\n\n i2 = Float64Index([1.0, 2.0])\n self.assertTrue(i.equals(i2))\n\n i = Float64Index([1.0, np.nan])\n self.assertTrue(i.equals(i))\n self.assertTrue(i.identical(i))\n\n i2 = Float64Index([1.0, np.nan])\n self.assertTrue(i.equals(i2))\n\n def test_get_indexer(self):\n idx = Float64Index([0.0, 1.0, 2.0])\n tm.assert_numpy_array_equal(idx.get_indexer(idx),\n np.array([0, 1, 2], dtype=np.intp))\n\n target = [-0.1, 0.5, 1.1]\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),\n np.array([-1, 0, 1], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),\n np.array([0, 1, 2], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),\n np.array([0, 1, 1], dtype=np.intp))\n\n def test_get_loc(self):\n idx = Float64Index([0.0, 1.0, 2.0])\n for method in [None, 'pad', 'backfill', 'nearest']:\n self.assertEqual(idx.get_loc(1, method), 1)\n if method is not None:\n self.assertEqual(idx.get_loc(1, method, tolerance=0), 1)\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n self.assertEqual(idx.get_loc(1.1, method), loc)\n self.assertEqual(idx.get_loc(1.1, method, tolerance=0.9), loc)\n\n self.assertRaises(KeyError, idx.get_loc, 'foo')\n self.assertRaises(KeyError, idx.get_loc, 1.5)\n self.assertRaises(KeyError, idx.get_loc, 1.5, method='pad',\n tolerance=0.1)\n\n with tm.assertRaisesRegexp(ValueError, 'must be numeric'):\n idx.get_loc(1.4, method='nearest', tolerance='foo')\n\n def test_get_loc_na(self):\n idx = Float64Index([np.nan, 1, 2])\n self.assertEqual(idx.get_loc(1), 1)\n self.assertEqual(idx.get_loc(np.nan), 0)\n\n idx = Float64Index([np.nan, 1, np.nan])\n self.assertEqual(idx.get_loc(1), 1)\n\n # 
representable by slice [0:2:2]\n # self.assertRaises(KeyError, idx.slice_locs, np.nan)\n sliced = idx.slice_locs(np.nan)\n self.assertTrue(isinstance(sliced, tuple))\n self.assertEqual(sliced, (0, 3))\n\n # not representable by slice\n idx = Float64Index([np.nan, 1, np.nan, np.nan])\n self.assertEqual(idx.get_loc(1), 1)\n self.assertRaises(KeyError, idx.slice_locs, np.nan)\n\n def test_contains_nans(self):\n i = Float64Index([1.0, 2.0, np.nan])\n self.assertTrue(np.nan in i)\n\n def test_contains_not_nans(self):\n i = Float64Index([1.0, 2.0, np.nan])\n self.assertTrue(1.0 in i)\n\n def test_doesnt_contain_all_the_things(self):\n i = Float64Index([np.nan])\n self.assertFalse(i.isin([0]).item())\n self.assertFalse(i.isin([1]).item())\n self.assertTrue(i.isin([np.nan]).item())\n\n def test_nan_multiple_containment(self):\n i = Float64Index([1.0, np.nan])\n tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False]))\n tm.assert_numpy_array_equal(i.isin([2.0, np.pi]),\n np.array([False, False]))\n tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, True]))\n tm.assert_numpy_array_equal(i.isin([1.0, np.nan]),\n np.array([True, True]))\n i = Float64Index([1.0, 2.0])\n tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, False]))\n\n def test_astype_from_object(self):\n index = Index([1.0, np.nan, 0.2], dtype='object')\n result = index.astype(float)\n expected = Float64Index([1.0, np.nan, 0.2])\n self.assertEqual(result.dtype, expected.dtype)\n tm.assert_index_equal(result, expected)\n\n def test_fillna_float64(self):\n # GH 11343\n idx = Index([1.0, np.nan, 3.0], dtype=float, name='x')\n # can't downcast\n exp = Index([1.0, 0.1, 3.0], name='x')\n self.assert_index_equal(idx.fillna(0.1), exp)\n\n # downcast\n exp = Float64Index([1.0, 2.0, 3.0], name='x')\n self.assert_index_equal(idx.fillna(2), exp)\n\n # object\n exp = Index([1.0, 'obj', 3.0], name='x')\n self.assert_index_equal(idx.fillna('obj'), exp)\n\n def test_take_fill_value(self):\n # GH 12631\n idx = pd.Float64Index([1., 2., 3.], name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.Float64Index([2., 1., 3.], name='xxx')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.Float64Index([2., 1., np.nan], name='xxx')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.Float64Index([2., 1., 3.], name='xxx')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with tm.assertRaises(IndexError):\n idx.take(np.array([1, -5]))\n\n\nclass TestInt64Index(Numeric, tm.TestCase):\n _holder = Int64Index\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.indices = dict(index=Int64Index(np.arange(0, 20, 2)))\n self.setup_indices()\n\n def create_index(self):\n return Int64Index(np.arange(5, dtype='int64'))\n\n def test_too_many_names(self):\n def testit():\n self.index.names = [\"roger\", \"harold\"]\n\n assertRaisesRegexp(ValueError, \"^Length\", testit)\n\n def test_constructor(self):\n # pass list, coerce fine\n index = Int64Index([-5, 0, 1, 2])\n expected = Index([-5, 0, 1, 2], dtype=np.int64)\n tm.assert_index_equal(index, 
expected)\n\n # from iterable\n index = Int64Index(iter([-5, 0, 1, 2]))\n tm.assert_index_equal(index, expected)\n\n # scalar raise Exception\n self.assertRaises(TypeError, Int64Index, 5)\n\n # copy\n arr = self.index.values\n new_index = Int64Index(arr, copy=True)\n tm.assert_index_equal(new_index, self.index)\n val = arr[0] + 3000\n\n # this should not change index\n arr[0] = val\n self.assertNotEqual(new_index[0], val)\n\n # interpret list-like\n expected = Int64Index([5, 0])\n for cls in [Index, Int64Index]:\n for idx in [cls([5, 0], dtype='int64'),\n cls(np.array([5, 0]), dtype='int64'),\n cls(Series([5, 0]), dtype='int64')]:\n tm.assert_index_equal(idx, expected)\n\n def test_constructor_corner(self):\n arr = np.array([1, 2, 3, 4], dtype=object)\n index = Int64Index(arr)\n self.assertEqual(index.values.dtype, np.int64)\n self.assert_index_equal(index, Index(arr))\n\n # preventing casting\n arr = np.array([1, '2', 3, '4'], dtype=object)\n with tm.assertRaisesRegexp(TypeError, 'casting'):\n Int64Index(arr)\n\n arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]\n with tm.assertRaisesRegexp(TypeError, 'casting'):\n Int64Index(arr_with_floats)\n\n def test_copy(self):\n i = Int64Index([], name='Foo')\n i_copy = i.copy()\n self.assertEqual(i_copy.name, 'Foo')\n\n def test_view(self):\n super(TestInt64Index, self).test_view()\n\n i = Int64Index([], name='Foo')\n i_view = i.view()\n self.assertEqual(i_view.name, 'Foo')\n\n i_view = i.view('i8')\n tm.assert_index_equal(i, Int64Index(i_view, name='Foo'))\n\n i_view = i.view(Int64Index)\n tm.assert_index_equal(i, Int64Index(i_view, name='Foo'))\n\n def test_coerce_list(self):\n # coerce things\n arr = Index([1, 2, 3, 4])\n tm.assertIsInstance(arr, Int64Index)\n\n # but not if explicit dtype passed\n arr = Index([1, 2, 3, 4], dtype=object)\n tm.assertIsInstance(arr, Index)\n\n def test_dtype(self):\n self.assertEqual(self.index.dtype, np.int64)\n\n def test_is_monotonic(self):\n self.assertTrue(self.index.is_monotonic)\n self.assertTrue(self.index.is_monotonic_increasing)\n self.assertFalse(self.index.is_monotonic_decreasing)\n\n index = Int64Index([4, 3, 2, 1])\n self.assertFalse(index.is_monotonic)\n self.assertTrue(index.is_monotonic_decreasing)\n\n index = Int64Index([1])\n self.assertTrue(index.is_monotonic)\n self.assertTrue(index.is_monotonic_increasing)\n self.assertTrue(index.is_monotonic_decreasing)\n\n def test_is_monotonic_na(self):\n examples = [Index([np.nan]),\n Index([np.nan, 1]),\n Index([1, 2, np.nan]),\n Index(['a', 'b', np.nan]),\n pd.to_datetime(['NaT']),\n pd.to_datetime(['NaT', '2000-01-01']),\n pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),\n pd.to_timedelta(['1 day', 'NaT']), ]\n for index in examples:\n self.assertFalse(index.is_monotonic_increasing)\n self.assertFalse(index.is_monotonic_decreasing)\n\n def test_equals(self):\n same_values = Index(self.index, dtype=object)\n self.assertTrue(self.index.equals(same_values))\n self.assertTrue(same_values.equals(self.index))\n\n def test_logical_compat(self):\n idx = self.create_index()\n self.assertEqual(idx.all(), idx.values.all())\n self.assertEqual(idx.any(), idx.values.any())\n\n def test_identical(self):\n i = Index(self.index.copy())\n self.assertTrue(i.identical(self.index))\n\n same_values_different_type = Index(i, dtype=object)\n self.assertFalse(i.identical(same_values_different_type))\n\n i = self.index.copy(dtype=object)\n i = i.rename('foo')\n same_values = Index(i, dtype=object)\n self.assertTrue(same_values.identical(i))\n\n 
self.assertFalse(i.identical(self.index))\n self.assertTrue(Index(same_values, name='foo', dtype=object).identical(\n i))\n\n self.assertFalse(self.index.copy(dtype=object)\n .identical(self.index.copy(dtype='int64')))\n\n def test_get_indexer(self):\n target = Int64Index(np.arange(10))\n indexer = self.index.get_indexer(target)\n expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp)\n tm.assert_numpy_array_equal(indexer, expected)\n\n def test_get_indexer_pad(self):\n target = Int64Index(np.arange(10))\n indexer = self.index.get_indexer(target, method='pad')\n expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp)\n tm.assert_numpy_array_equal(indexer, expected)\n\n def test_get_indexer_backfill(self):\n target = Int64Index(np.arange(10))\n indexer = self.index.get_indexer(target, method='backfill')\n expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)\n tm.assert_numpy_array_equal(indexer, expected)\n\n def test_join_outer(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n # guarantee of sortedness\n res, lidx, ridx = self.index.join(other, how='outer',\n return_indexers=True)\n noidx_res = self.index.join(other, how='outer')\n self.assert_index_equal(res, noidx_res)\n\n eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])\n elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],\n dtype=np.intp)\n eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],\n dtype=np.intp)\n\n tm.assertIsInstance(res, Int64Index)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='outer',\n return_indexers=True)\n noidx_res = self.index.join(other_mono, how='outer')\n self.assert_index_equal(res, noidx_res)\n\n elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],\n dtype=np.intp)\n eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],\n dtype=np.intp)\n tm.assertIsInstance(res, Int64Index)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n def test_join_inner(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n res, lidx, ridx = self.index.join(other, how='inner',\n return_indexers=True)\n\n # no guarantee of sortedness, so sort for comparison purposes\n ind = res.argsort()\n res = res.take(ind)\n lidx = lidx.take(ind)\n ridx = ridx.take(ind)\n\n eres = Int64Index([2, 12])\n elidx = np.array([1, 6], dtype=np.intp)\n eridx = np.array([4, 1], dtype=np.intp)\n\n tm.assertIsInstance(res, Int64Index)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='inner',\n return_indexers=True)\n\n res2 = self.index.intersection(other_mono)\n self.assert_index_equal(res, res2)\n\n elidx = np.array([1, 6], dtype=np.intp)\n eridx = np.array([1, 4], dtype=np.intp)\n tm.assertIsInstance(res, Int64Index)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n def test_join_left(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n res, lidx, ridx = self.index.join(other, how='left',\n 
return_indexers=True)\n eres = self.index\n eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],\n dtype=np.intp)\n\n tm.assertIsInstance(res, Int64Index)\n self.assert_index_equal(res, eres)\n self.assertIsNone(lidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='left',\n return_indexers=True)\n eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],\n dtype=np.intp)\n tm.assertIsInstance(res, Int64Index)\n self.assert_index_equal(res, eres)\n self.assertIsNone(lidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n # non-unique\n idx = Index([1, 1, 2, 5])\n idx2 = Index([1, 2, 5, 7, 9])\n res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)\n eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2\n eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)\n elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n def test_join_right(self):\n other = Int64Index([7, 12, 25, 1, 2, 5])\n other_mono = Int64Index([1, 2, 5, 7, 12, 25])\n\n # not monotonic\n res, lidx, ridx = self.index.join(other, how='right',\n return_indexers=True)\n eres = other\n elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp)\n\n tm.assertIsInstance(other, Int64Index)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n self.assertIsNone(ridx)\n\n # monotonic\n res, lidx, ridx = self.index.join(other_mono, how='right',\n return_indexers=True)\n eres = other_mono\n elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp)\n tm.assertIsInstance(other, Int64Index)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n self.assertIsNone(ridx)\n\n # non-unique\n idx = Index([1, 1, 2, 5])\n idx2 = Index([1, 2, 5, 7, 9])\n res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)\n eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2\n elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)\n eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)\n self.assert_index_equal(res, eres)\n tm.assert_numpy_array_equal(lidx, elidx)\n tm.assert_numpy_array_equal(ridx, eridx)\n\n def test_join_non_int_index(self):\n other = Index([3, 6, 7, 8, 10], dtype=object)\n\n outer = self.index.join(other, how='outer')\n outer2 = other.join(self.index, how='outer')\n expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])\n self.assert_index_equal(outer, outer2)\n self.assert_index_equal(outer, expected)\n\n inner = self.index.join(other, how='inner')\n inner2 = other.join(self.index, how='inner')\n expected = Index([6, 8, 10])\n self.assert_index_equal(inner, inner2)\n self.assert_index_equal(inner, expected)\n\n left = self.index.join(other, how='left')\n self.assert_index_equal(left, self.index.astype(object))\n\n left2 = other.join(self.index, how='left')\n self.assert_index_equal(left2, other)\n\n right = self.index.join(other, how='right')\n self.assert_index_equal(right, other)\n\n right2 = other.join(self.index, how='right')\n self.assert_index_equal(right2, self.index.astype(object))\n\n def test_join_non_unique(self):\n left = Index([4, 4, 3, 3])\n\n joined, lidx, ridx = left.join(left, return_indexers=True)\n\n exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])\n self.assert_index_equal(joined, exp_joined)\n\n exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(lidx, exp_lidx)\n\n exp_ridx = np.array([2, 3, 2, 3, 
0, 1, 0, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(ridx, exp_ridx)\n\n def test_join_self(self):\n kinds = 'outer', 'inner', 'left', 'right'\n for kind in kinds:\n joined = self.index.join(self.index, how=kind)\n self.assertIs(self.index, joined)\n\n def test_intersection(self):\n other = Index([1, 2, 3, 4, 5])\n result = self.index.intersection(other)\n expected = Index(np.sort(np.intersect1d(self.index.values,\n other.values)))\n tm.assert_index_equal(result, expected)\n\n result = other.intersection(self.index)\n expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,\n other.values))))\n tm.assert_index_equal(result, expected)\n\n def test_intersect_str_dates(self):\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n i1 = Index(dt_dates, dtype=object)\n i2 = Index(['aa'], dtype=object)\n res = i2.intersection(i1)\n\n self.assertEqual(len(res), 0)\n\n def test_union_noncomparable(self):\n from datetime import datetime, timedelta\n # corner case, non-Int64Index\n now = datetime.now()\n other = Index([now + timedelta(i) for i in range(4)], dtype=object)\n result = self.index.union(other)\n expected = Index(np.concatenate((self.index, other)))\n tm.assert_index_equal(result, expected)\n\n result = other.union(self.index)\n expected = Index(np.concatenate((other, self.index)))\n tm.assert_index_equal(result, expected)\n\n def test_cant_or_shouldnt_cast(self):\n # can't\n data = ['foo', 'bar', 'baz']\n self.assertRaises(TypeError, Int64Index, data)\n\n # shouldn't\n data = ['0', '1', '2']\n self.assertRaises(TypeError, Int64Index, data)\n\n def test_view_Index(self):\n self.index.view(Index)\n\n def test_prevent_casting(self):\n result = self.index.astype('O')\n self.assertEqual(result.dtype, np.object_)\n\n def test_take_preserve_name(self):\n index = Int64Index([1, 2, 3, 4], name='foo')\n taken = index.take([3, 0, 1])\n self.assertEqual(index.name, taken.name)\n\n def test_take_fill_value(self):\n # GH 12631\n idx = pd.Int64Index([1, 2, 3], name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.Int64Index([2, 1, 3], name='xxx')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n msg = \"Unable to fill values because Int64Index cannot contain NA\"\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.take(np.array([1, 0, -1]), fill_value=True)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.Int64Index([2, 1, 3], name='xxx')\n tm.assert_index_equal(result, expected)\n\n msg = \"Unable to fill values because Int64Index cannot contain NA\"\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with tm.assertRaises(IndexError):\n idx.take(np.array([1, -5]))\n\n def test_int_name_format(self):\n index = Index(['a', 'b', 'c'], name=0)\n s = Series(lrange(3), index)\n df = DataFrame(lrange(3), index=index)\n repr(s)\n repr(df)\n\n def test_print_unicode_columns(self):\n df = pd.DataFrame({u(\"\\u05d0\"): [1, 2, 3],\n \"\\u05d1\": [4, 5, 6],\n \"c\": [7, 8, 9]})\n repr(df.columns) # should not raise UnicodeDecodeError\n\n def test_repr_summary(self):\n with cf.option_context('display.max_seq_items', 10):\n r = repr(pd.Index(np.arange(1000)))\n self.assertTrue(len(r) < 200)\n self.assertTrue(\"...\" in r)\n\n def test_repr_roundtrip(self):\n tm.assert_index_equal(eval(repr(self.index)), self.index)\n\n def 
test_unicode_string_with_unicode(self):\n idx = Index(lrange(1000))\n\n if PY3:\n str(idx)\n else:\n compat.text_type(idx)\n\n def test_bytestring_with_unicode(self):\n idx = Index(lrange(1000))\n if PY3:\n bytes(idx)\n else:\n str(idx)\n\n def test_slice_keep_name(self):\n idx = Int64Index([1, 2], name='asdf')\n self.assertEqual(idx.name, idx[1:].name)\n\n def test_ufunc_coercions(self):\n idx = Int64Index([1, 2, 3, 4, 5], name='x')\n\n result = np.sqrt(idx)\n tm.assertIsInstance(result, Float64Index)\n exp = Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x')\n tm.assert_index_equal(result, exp)\n\n result = np.divide(idx, 2.)\n tm.assertIsInstance(result, Float64Index)\n exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')\n tm.assert_index_equal(result, exp)\n\n # _evaluate_numeric_binop\n result = idx + 2.\n tm.assertIsInstance(result, Float64Index)\n exp = Float64Index([3., 4., 5., 6., 7.], name='x')\n tm.assert_index_equal(result, exp)\n\n result = idx - 2.\n tm.assertIsInstance(result, Float64Index)\n exp = Float64Index([-1., 0., 1., 2., 3.], name='x')\n tm.assert_index_equal(result, exp)\n\n result = idx * 1.\n tm.assertIsInstance(result, Float64Index)\n exp = Float64Index([1., 2., 3., 4., 5.], name='x')\n tm.assert_index_equal(result, exp)\n\n result = idx / 2.\n tm.assertIsInstance(result, Float64Index)\n exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x')\n tm.assert_index_equal(result, exp)\n", "# -*- coding: utf-8 -*-\n\n\"\"\"\nThese the test the public routines exposed in types/common.py\nrelated to inference and not otherwise tested in types/test_common.py\n\n\"\"\"\n\nimport nose\nimport collections\nimport re\nfrom datetime import datetime, date, timedelta, time\nimport numpy as np\nimport pytz\n\nimport pandas as pd\nfrom pandas import lib, tslib\nfrom pandas import (Series, Index, DataFrame, Timedelta,\n DatetimeIndex, TimedeltaIndex, Timestamp,\n Panel, Period, Categorical)\nfrom pandas.compat import u, PY2, lrange\nfrom pandas.types import inference\nfrom pandas.types.common import (is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n is_number,\n is_integer,\n is_float,\n is_bool,\n is_scalar,\n _ensure_int32,\n _ensure_categorical)\nfrom pandas.types.missing import isnull\nfrom pandas.util import testing as tm\n\n_multiprocess_can_split_ = True\n\n\ndef test_is_sequence():\n is_seq = inference.is_sequence\n assert (is_seq((1, 2)))\n assert (is_seq([1, 2]))\n assert (not is_seq(\"abcd\"))\n assert (not is_seq(u(\"abcd\")))\n assert (not is_seq(np.int64))\n\n class A(object):\n\n def __getitem__(self):\n return 1\n\n assert (not is_seq(A()))\n\n\ndef test_is_list_like():\n passes = ([], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),\n Series([]), Series(['a']).str)\n fails = (1, '2', object())\n\n for p in passes:\n assert inference.is_list_like(p)\n\n for f in fails:\n assert not inference.is_list_like(f)\n\n\ndef test_is_dict_like():\n passes = [{}, {'A': 1}, Series([1])]\n fails = ['1', 1, [1, 2], (1, 2), range(2), Index([1])]\n\n for p in passes:\n assert inference.is_dict_like(p)\n\n for f in fails:\n assert not inference.is_dict_like(f)\n\n\ndef test_is_named_tuple():\n passes = (collections.namedtuple('Test', list('abc'))(1, 2, 3), )\n fails = ((1, 2, 3), 'a', Series({'pi': 3.14}))\n\n for p in passes:\n assert inference.is_named_tuple(p)\n\n for f in fails:\n assert not inference.is_named_tuple(f)\n\n\ndef test_is_hashable():\n\n # all new-style classes are hashable by default\n class HashableClass(object):\n pass\n\n class 
UnhashableClass1(object):\n __hash__ = None\n\n class UnhashableClass2(object):\n\n def __hash__(self):\n raise TypeError(\"Not hashable\")\n\n hashable = (1,\n 3.14,\n np.float64(3.14),\n 'a',\n tuple(),\n (1, ),\n HashableClass(), )\n not_hashable = ([], UnhashableClass1(), )\n abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )\n\n for i in hashable:\n assert inference.is_hashable(i)\n for i in not_hashable:\n assert not inference.is_hashable(i)\n for i in abc_hashable_not_really_hashable:\n assert not inference.is_hashable(i)\n\n # numpy.array is no longer collections.Hashable as of\n # https://github.com/numpy/numpy/pull/5326, just test\n # is_hashable()\n assert not inference.is_hashable(np.array([]))\n\n # old-style classes in Python 2 don't appear hashable to\n # collections.Hashable but also seem to support hash() by default\n if PY2:\n\n class OldStyleClass():\n pass\n\n c = OldStyleClass()\n assert not isinstance(c, collections.Hashable)\n assert inference.is_hashable(c)\n hash(c) # this will not raise\n\n\ndef test_is_re():\n passes = re.compile('ad'),\n fails = 'x', 2, 3, object()\n\n for p in passes:\n assert inference.is_re(p)\n\n for f in fails:\n assert not inference.is_re(f)\n\n\ndef test_is_recompilable():\n passes = (r'a', u('x'), r'asdf', re.compile('adsf'), u(r'\\u2233\\s*'),\n re.compile(r''))\n fails = 1, [], object()\n\n for p in passes:\n assert inference.is_re_compilable(p)\n\n for f in fails:\n assert not inference.is_re_compilable(f)\n\n\nclass TestInference(tm.TestCase):\n\n def test_infer_dtype_bytes(self):\n compare = 'string' if PY2 else 'bytes'\n\n # string array of bytes\n arr = np.array(list('abc'), dtype='S1')\n self.assertEqual(lib.infer_dtype(arr), compare)\n\n # object array of bytes\n arr = arr.astype(object)\n self.assertEqual(lib.infer_dtype(arr), compare)\n\n def test_isinf_scalar(self):\n # GH 11352\n self.assertTrue(lib.isposinf_scalar(float('inf')))\n self.assertTrue(lib.isposinf_scalar(np.inf))\n self.assertFalse(lib.isposinf_scalar(-np.inf))\n self.assertFalse(lib.isposinf_scalar(1))\n self.assertFalse(lib.isposinf_scalar('a'))\n\n self.assertTrue(lib.isneginf_scalar(float('-inf')))\n self.assertTrue(lib.isneginf_scalar(-np.inf))\n self.assertFalse(lib.isneginf_scalar(np.inf))\n self.assertFalse(lib.isneginf_scalar(1))\n self.assertFalse(lib.isneginf_scalar('a'))\n\n def test_maybe_convert_numeric_infinities(self):\n # see gh-13274\n infinities = ['inf', 'inF', 'iNf', 'Inf',\n 'iNF', 'InF', 'INf', 'INF']\n na_values = set(['', 'NULL', 'nan'])\n\n pos = np.array(['inf'], dtype=np.float64)\n neg = np.array(['-inf'], dtype=np.float64)\n\n msg = \"Unable to parse string\"\n\n for infinity in infinities:\n for maybe_int in (True, False):\n out = lib.maybe_convert_numeric(\n np.array([infinity], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, pos)\n\n out = lib.maybe_convert_numeric(\n np.array(['-' + infinity], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, neg)\n\n out = lib.maybe_convert_numeric(\n np.array([u(infinity)], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, pos)\n\n out = lib.maybe_convert_numeric(\n np.array(['+' + infinity], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, pos)\n\n # too many characters\n with tm.assertRaisesRegexp(ValueError, msg):\n lib.maybe_convert_numeric(\n np.array(['foo_' + infinity], dtype=object),\n na_values, maybe_int)\n\n def test_maybe_convert_numeric_post_floatify_nan(self):\n # see 
gh-13314\n data = np.array(['1.200', '-999.000', '4.500'], dtype=object)\n expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)\n nan_values = set([-999, -999.0])\n\n for coerce_type in (True, False):\n out = lib.maybe_convert_numeric(data, nan_values, coerce_type)\n tm.assert_numpy_array_equal(out, expected)\n\n def test_convert_infs(self):\n arr = np.array(['inf', 'inf', 'inf'], dtype='O')\n result = lib.maybe_convert_numeric(arr, set(), False)\n self.assertTrue(result.dtype == np.float64)\n\n arr = np.array(['-inf', '-inf', '-inf'], dtype='O')\n result = lib.maybe_convert_numeric(arr, set(), False)\n self.assertTrue(result.dtype == np.float64)\n\n def test_scientific_no_exponent(self):\n # See PR 12215\n arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')\n result = lib.maybe_convert_numeric(arr, set(), False, True)\n self.assertTrue(np.all(np.isnan(result)))\n\n def test_convert_non_hashable(self):\n # GH13324\n # make sure that we are handing non-hashables\n arr = np.array([[10.0, 2], 1.0, 'apple'])\n result = lib.maybe_convert_numeric(arr, set(), False, True)\n tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))\n\n def test_convert_numeric_uint64(self):\n arr = np.array([2**63], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)\n\n arr = np.array([str(2**63)], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)\n\n arr = np.array([np.uint64(2**63)], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)\n\n def test_convert_numeric_uint64_nan(self):\n msg = 'uint64 array detected'\n cases = [(np.array([2**63, np.nan], dtype=object), set()),\n (np.array([str(2**63), np.nan], dtype=object), set()),\n (np.array([np.nan, 2**63], dtype=object), set()),\n (np.array([np.nan, str(2**63)], dtype=object), set()),\n (np.array([2**63, 2**63 + 1], dtype=object), set([2**63])),\n (np.array([str(2**63), str(2**63 + 1)],\n dtype=object), set([2**63]))]\n\n for coerce in (True, False):\n for arr, na_values in cases:\n if coerce:\n with tm.assertRaisesRegexp(ValueError, msg):\n lib.maybe_convert_numeric(arr, na_values,\n coerce_numeric=coerce)\n else:\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(\n arr, na_values), arr)\n\n def test_convert_numeric_int64_uint64(self):\n msg = 'uint64 and negative values detected'\n cases = [np.array([2**63, -1], dtype=object),\n np.array([str(2**63), -1], dtype=object),\n np.array([str(2**63), str(-1)], dtype=object),\n np.array([-1, 2**63], dtype=object),\n np.array([-1, str(2**63)], dtype=object),\n np.array([str(-1), str(2**63)], dtype=object)]\n\n for coerce in (True, False):\n for case in cases:\n if coerce:\n with tm.assertRaisesRegexp(ValueError, msg):\n print(case)\n lib.maybe_convert_numeric(case, set(),\n coerce_numeric=coerce)\n else:\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(\n case, set()), case)\n\n def test_maybe_convert_objects_uint64(self):\n # see gh-4471\n arr = np.array([2**63], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n # NumPy bug: can't compare uint64 to int64, as that\n # results in both casting to float64, so we should\n # make sure that this function is robust against it\n arr = np.array([np.uint64(2**63)], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n 
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n arr = np.array([2, -1], dtype=object)\n exp = np.array([2, -1], dtype=np.int64)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n arr = np.array([2**63, -1], dtype=object)\n exp = np.array([2**63, -1], dtype=object)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n def test_mixed_dtypes_remain_object_array(self):\n # GH14956\n array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],\n dtype=object)\n result = lib.maybe_convert_objects(array, convert_datetime=1)\n tm.assert_numpy_array_equal(result, array)\n\n\nclass TestTypeInference(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def test_length_zero(self):\n result = lib.infer_dtype(np.array([], dtype='i4'))\n self.assertEqual(result, 'integer')\n\n result = lib.infer_dtype([])\n self.assertEqual(result, 'empty')\n\n def test_integers(self):\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'integer')\n\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed-integer')\n\n arr = np.array([1, 2, 3, 4, 5], dtype='i4')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'integer')\n\n def test_bools(self):\n arr = np.array([True, False, True, True, True], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'boolean')\n\n arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'boolean')\n\n arr = np.array([True, False, True, 'foo'], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed')\n\n arr = np.array([True, False, True], dtype=bool)\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'boolean')\n\n def test_floats(self):\n arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'floating')\n\n arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],\n dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed-integer')\n\n arr = np.array([1, 2, 3, 4, 5], dtype='f4')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'floating')\n\n arr = np.array([1, 2, 3, 4, 5], dtype='f8')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'floating')\n\n def test_string(self):\n pass\n\n def test_unicode(self):\n pass\n\n def test_datetime(self):\n\n dates = [datetime(2012, 1, x) for x in range(1, 20)]\n index = Index(dates)\n self.assertEqual(index.inferred_type, 'datetime64')\n\n def test_infer_dtype_datetime(self):\n\n arr = np.array([Timestamp('2011-01-01'),\n Timestamp('2011-01-02')])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n arr = np.array([np.datetime64('2011-01-01'),\n np.datetime64('2011-01-01')], dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'datetime64')\n\n arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n # starts with nan\n for n in [pd.NaT, np.nan]:\n arr = np.array([n, pd.Timestamp('2011-01-02')])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n arr = np.array([n, np.datetime64('2011-01-02')])\n self.assertEqual(lib.infer_dtype(arr), 'datetime64')\n\n arr = np.array([n, datetime(2011, 1, 1)])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n arr = np.array([n, pd.Timestamp('2011-01-02'), n])\n self.assertEqual(lib.infer_dtype(arr), 
'datetime')\n\n arr = np.array([n, np.datetime64('2011-01-02'), n])\n self.assertEqual(lib.infer_dtype(arr), 'datetime64')\n\n arr = np.array([n, datetime(2011, 1, 1), n])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n # different type of nat\n arr = np.array([np.timedelta64('nat'),\n np.datetime64('2011-01-02')], dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([np.datetime64('2011-01-02'),\n np.timedelta64('nat')], dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n # mixed datetime\n arr = np.array([datetime(2011, 1, 1),\n pd.Timestamp('2011-01-02')])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n # should be datetime?\n arr = np.array([np.datetime64('2011-01-01'),\n pd.Timestamp('2011-01-02')])\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([pd.Timestamp('2011-01-02'),\n np.datetime64('2011-01-01')])\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])\n self.assertEqual(lib.infer_dtype(arr), 'mixed-integer')\n\n arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n def test_infer_dtype_timedelta(self):\n\n arr = np.array([pd.Timedelta('1 days'),\n pd.Timedelta('2 days')])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([np.timedelta64(1, 'D'),\n np.timedelta64(2, 'D')], dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([timedelta(1), timedelta(2)])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n # starts with nan\n for n in [pd.NaT, np.nan]:\n arr = np.array([n, Timedelta('1 days')])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([n, np.timedelta64(1, 'D')])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([n, timedelta(1)])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([n, pd.Timedelta('1 days'), n])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([n, np.timedelta64(1, 'D'), n])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([n, timedelta(1), n])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n # different type of nat\n arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],\n dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],\n dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n def test_infer_dtype_period(self):\n # GH 13664\n arr = np.array([pd.Period('2011-01', freq='D'),\n pd.Period('2011-02', freq='D')])\n self.assertEqual(pd.lib.infer_dtype(arr), 'period')\n\n arr = np.array([pd.Period('2011-01', freq='D'),\n pd.Period('2011-02', freq='M')])\n self.assertEqual(pd.lib.infer_dtype(arr), 'period')\n\n # starts with nan\n for n in [pd.NaT, np.nan]:\n arr = np.array([n, pd.Period('2011-01', freq='D')])\n self.assertEqual(pd.lib.infer_dtype(arr), 'period')\n\n arr = np.array([n, pd.Period('2011-01', freq='D'), n])\n self.assertEqual(pd.lib.infer_dtype(arr), 'period')\n\n # different type of nat\n arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],\n dtype=object)\n self.assertEqual(pd.lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],\n dtype=object)\n 
self.assertEqual(pd.lib.infer_dtype(arr), 'mixed')\n\n def test_infer_dtype_all_nan_nat_like(self):\n arr = np.array([np.nan, np.nan])\n self.assertEqual(lib.infer_dtype(arr), 'floating')\n\n # nan and None mix are result in mixed\n arr = np.array([np.nan, np.nan, None])\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([None, np.nan, np.nan])\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n # pd.NaT\n arr = np.array([pd.NaT])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n arr = np.array([pd.NaT, np.nan])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n arr = np.array([np.nan, pd.NaT])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n arr = np.array([np.nan, pd.NaT, np.nan])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n arr = np.array([None, pd.NaT, None])\n self.assertEqual(lib.infer_dtype(arr), 'datetime')\n\n # np.datetime64(nat)\n arr = np.array([np.datetime64('nat')])\n self.assertEqual(lib.infer_dtype(arr), 'datetime64')\n\n for n in [np.nan, pd.NaT, None]:\n arr = np.array([n, np.datetime64('nat'), n])\n self.assertEqual(lib.infer_dtype(arr), 'datetime64')\n\n arr = np.array([pd.NaT, n, np.datetime64('nat'), n])\n self.assertEqual(lib.infer_dtype(arr), 'datetime64')\n\n arr = np.array([np.timedelta64('nat')], dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n for n in [np.nan, pd.NaT, None]:\n arr = np.array([n, np.timedelta64('nat'), n])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])\n self.assertEqual(lib.infer_dtype(arr), 'timedelta')\n\n # datetime / timedelta mixed\n arr = np.array([pd.NaT, np.datetime64('nat'),\n np.timedelta64('nat'), np.nan])\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],\n dtype=object)\n self.assertEqual(lib.infer_dtype(arr), 'mixed')\n\n def test_is_datetimelike_array_all_nan_nat_like(self):\n arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])\n self.assertTrue(lib.is_datetime_array(arr))\n self.assertTrue(lib.is_datetime64_array(arr))\n self.assertFalse(lib.is_timedelta_array(arr))\n self.assertFalse(lib.is_timedelta64_array(arr))\n self.assertFalse(lib.is_timedelta_or_timedelta64_array(arr))\n\n arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])\n self.assertFalse(lib.is_datetime_array(arr))\n self.assertFalse(lib.is_datetime64_array(arr))\n self.assertTrue(lib.is_timedelta_array(arr))\n self.assertTrue(lib.is_timedelta64_array(arr))\n self.assertTrue(lib.is_timedelta_or_timedelta64_array(arr))\n\n arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),\n np.timedelta64('nat')])\n self.assertFalse(lib.is_datetime_array(arr))\n self.assertFalse(lib.is_datetime64_array(arr))\n self.assertFalse(lib.is_timedelta_array(arr))\n self.assertFalse(lib.is_timedelta64_array(arr))\n self.assertFalse(lib.is_timedelta_or_timedelta64_array(arr))\n\n arr = np.array([np.nan, pd.NaT])\n self.assertTrue(lib.is_datetime_array(arr))\n self.assertTrue(lib.is_datetime64_array(arr))\n self.assertTrue(lib.is_timedelta_array(arr))\n self.assertTrue(lib.is_timedelta64_array(arr))\n self.assertTrue(lib.is_timedelta_or_timedelta64_array(arr))\n\n arr = np.array([np.nan, np.nan], dtype=object)\n self.assertFalse(lib.is_datetime_array(arr))\n self.assertFalse(lib.is_datetime64_array(arr))\n self.assertFalse(lib.is_timedelta_array(arr))\n self.assertFalse(lib.is_timedelta64_array(arr))\n self.assertFalse(lib.is_timedelta_or_timedelta64_array(arr))\n\n def 
test_date(self):\n\n dates = [date(2012, 1, x) for x in range(1, 20)]\n index = Index(dates)\n self.assertEqual(index.inferred_type, 'date')\n\n def test_to_object_array_tuples(self):\n r = (5, 6)\n values = [r]\n result = lib.to_object_array_tuples(values)\n\n try:\n # make sure record array works\n from collections import namedtuple\n record = namedtuple('record', 'x y')\n r = record(5, 6)\n values = [r]\n result = lib.to_object_array_tuples(values) # noqa\n except ImportError:\n pass\n\n def test_object(self):\n\n # GH 7431\n # cannot infer more than this as only a single element\n arr = np.array([None], dtype='O')\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'mixed')\n\n def test_to_object_array_width(self):\n # see gh-13320\n rows = [[1, 2, 3], [4, 5, 6]]\n\n expected = np.array(rows, dtype=object)\n out = lib.to_object_array(rows)\n tm.assert_numpy_array_equal(out, expected)\n\n expected = np.array(rows, dtype=object)\n out = lib.to_object_array(rows, min_width=1)\n tm.assert_numpy_array_equal(out, expected)\n\n expected = np.array([[1, 2, 3, None, None],\n [4, 5, 6, None, None]], dtype=object)\n out = lib.to_object_array(rows, min_width=5)\n tm.assert_numpy_array_equal(out, expected)\n\n def test_is_period(self):\n self.assertTrue(lib.is_period(pd.Period('2011-01', freq='M')))\n self.assertFalse(lib.is_period(pd.PeriodIndex(['2011-01'], freq='M')))\n self.assertFalse(lib.is_period(pd.Timestamp('2011-01')))\n self.assertFalse(lib.is_period(1))\n self.assertFalse(lib.is_period(np.nan))\n\n def test_categorical(self):\n\n # GH 8974\n from pandas import Categorical, Series\n arr = Categorical(list('abc'))\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'categorical')\n\n result = lib.infer_dtype(Series(arr))\n self.assertEqual(result, 'categorical')\n\n arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)\n result = lib.infer_dtype(arr)\n self.assertEqual(result, 'categorical')\n\n result = lib.infer_dtype(Series(arr))\n self.assertEqual(result, 'categorical')\n\n\nclass TestNumberScalar(tm.TestCase):\n\n def test_is_number(self):\n\n self.assertTrue(is_number(True))\n self.assertTrue(is_number(1))\n self.assertTrue(is_number(1.1))\n self.assertTrue(is_number(1 + 3j))\n self.assertTrue(is_number(np.bool(False)))\n self.assertTrue(is_number(np.int64(1)))\n self.assertTrue(is_number(np.float64(1.1)))\n self.assertTrue(is_number(np.complex128(1 + 3j)))\n self.assertTrue(is_number(np.nan))\n\n self.assertFalse(is_number(None))\n self.assertFalse(is_number('x'))\n self.assertFalse(is_number(datetime(2011, 1, 1)))\n self.assertFalse(is_number(np.datetime64('2011-01-01')))\n self.assertFalse(is_number(Timestamp('2011-01-01')))\n self.assertFalse(is_number(Timestamp('2011-01-01',\n tz='US/Eastern')))\n self.assertFalse(is_number(timedelta(1000)))\n self.assertFalse(is_number(Timedelta('1 days')))\n\n # questionable\n self.assertFalse(is_number(np.bool_(False)))\n self.assertTrue(is_number(np.timedelta64(1, 'D')))\n\n def test_is_bool(self):\n self.assertTrue(is_bool(True))\n self.assertTrue(is_bool(np.bool(False)))\n self.assertTrue(is_bool(np.bool_(False)))\n\n self.assertFalse(is_bool(1))\n self.assertFalse(is_bool(1.1))\n self.assertFalse(is_bool(1 + 3j))\n self.assertFalse(is_bool(np.int64(1)))\n self.assertFalse(is_bool(np.float64(1.1)))\n self.assertFalse(is_bool(np.complex128(1 + 3j)))\n self.assertFalse(is_bool(np.nan))\n self.assertFalse(is_bool(None))\n self.assertFalse(is_bool('x'))\n self.assertFalse(is_bool(datetime(2011, 1, 1)))\n 
self.assertFalse(is_bool(np.datetime64('2011-01-01')))\n self.assertFalse(is_bool(Timestamp('2011-01-01')))\n self.assertFalse(is_bool(Timestamp('2011-01-01',\n tz='US/Eastern')))\n self.assertFalse(is_bool(timedelta(1000)))\n self.assertFalse(is_bool(np.timedelta64(1, 'D')))\n self.assertFalse(is_bool(Timedelta('1 days')))\n\n def test_is_integer(self):\n self.assertTrue(is_integer(1))\n self.assertTrue(is_integer(np.int64(1)))\n\n self.assertFalse(is_integer(True))\n self.assertFalse(is_integer(1.1))\n self.assertFalse(is_integer(1 + 3j))\n self.assertFalse(is_integer(np.bool(False)))\n self.assertFalse(is_integer(np.bool_(False)))\n self.assertFalse(is_integer(np.float64(1.1)))\n self.assertFalse(is_integer(np.complex128(1 + 3j)))\n self.assertFalse(is_integer(np.nan))\n self.assertFalse(is_integer(None))\n self.assertFalse(is_integer('x'))\n self.assertFalse(is_integer(datetime(2011, 1, 1)))\n self.assertFalse(is_integer(np.datetime64('2011-01-01')))\n self.assertFalse(is_integer(Timestamp('2011-01-01')))\n self.assertFalse(is_integer(Timestamp('2011-01-01',\n tz='US/Eastern')))\n self.assertFalse(is_integer(timedelta(1000)))\n self.assertFalse(is_integer(Timedelta('1 days')))\n\n # questionable\n self.assertTrue(is_integer(np.timedelta64(1, 'D')))\n\n def test_is_float(self):\n self.assertTrue(is_float(1.1))\n self.assertTrue(is_float(np.float64(1.1)))\n self.assertTrue(is_float(np.nan))\n\n self.assertFalse(is_float(True))\n self.assertFalse(is_float(1))\n self.assertFalse(is_float(1 + 3j))\n self.assertFalse(is_float(np.bool(False)))\n self.assertFalse(is_float(np.bool_(False)))\n self.assertFalse(is_float(np.int64(1)))\n self.assertFalse(is_float(np.complex128(1 + 3j)))\n self.assertFalse(is_float(None))\n self.assertFalse(is_float('x'))\n self.assertFalse(is_float(datetime(2011, 1, 1)))\n self.assertFalse(is_float(np.datetime64('2011-01-01')))\n self.assertFalse(is_float(Timestamp('2011-01-01')))\n self.assertFalse(is_float(Timestamp('2011-01-01',\n tz='US/Eastern')))\n self.assertFalse(is_float(timedelta(1000)))\n self.assertFalse(is_float(np.timedelta64(1, 'D')))\n self.assertFalse(is_float(Timedelta('1 days')))\n\n def test_is_timedelta(self):\n self.assertTrue(is_timedelta64_dtype('timedelta64'))\n self.assertTrue(is_timedelta64_dtype('timedelta64[ns]'))\n self.assertFalse(is_timedelta64_ns_dtype('timedelta64'))\n self.assertTrue(is_timedelta64_ns_dtype('timedelta64[ns]'))\n\n tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64')\n self.assertTrue(is_timedelta64_dtype(tdi))\n self.assertTrue(is_timedelta64_ns_dtype(tdi))\n self.assertTrue(is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]')))\n\n # Conversion to Int64Index:\n self.assertFalse(is_timedelta64_ns_dtype(tdi.astype('timedelta64')))\n self.assertFalse(is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]')))\n\n\nclass Testisscalar(tm.TestCase):\n\n def test_isscalar_builtin_scalars(self):\n self.assertTrue(is_scalar(None))\n self.assertTrue(is_scalar(True))\n self.assertTrue(is_scalar(False))\n self.assertTrue(is_scalar(0.))\n self.assertTrue(is_scalar(np.nan))\n self.assertTrue(is_scalar('foobar'))\n self.assertTrue(is_scalar(b'foobar'))\n self.assertTrue(is_scalar(u('efoobar')))\n self.assertTrue(is_scalar(datetime(2014, 1, 1)))\n self.assertTrue(is_scalar(date(2014, 1, 1)))\n self.assertTrue(is_scalar(time(12, 0)))\n self.assertTrue(is_scalar(timedelta(hours=1)))\n self.assertTrue(is_scalar(pd.NaT))\n\n def test_isscalar_builtin_nonscalars(self):\n self.assertFalse(is_scalar({}))\n 
self.assertFalse(is_scalar([]))\n self.assertFalse(is_scalar([1]))\n self.assertFalse(is_scalar(()))\n self.assertFalse(is_scalar((1, )))\n self.assertFalse(is_scalar(slice(None)))\n self.assertFalse(is_scalar(Ellipsis))\n\n def test_isscalar_numpy_array_scalars(self):\n self.assertTrue(is_scalar(np.int64(1)))\n self.assertTrue(is_scalar(np.float64(1.)))\n self.assertTrue(is_scalar(np.int32(1)))\n self.assertTrue(is_scalar(np.object_('foobar')))\n self.assertTrue(is_scalar(np.str_('foobar')))\n self.assertTrue(is_scalar(np.unicode_(u('foobar'))))\n self.assertTrue(is_scalar(np.bytes_(b'foobar')))\n self.assertTrue(is_scalar(np.datetime64('2014-01-01')))\n self.assertTrue(is_scalar(np.timedelta64(1, 'h')))\n\n def test_isscalar_numpy_zerodim_arrays(self):\n for zerodim in [np.array(1), np.array('foobar'),\n np.array(np.datetime64('2014-01-01')),\n np.array(np.timedelta64(1, 'h')),\n np.array(np.datetime64('NaT'))]:\n self.assertFalse(is_scalar(zerodim))\n self.assertTrue(is_scalar(lib.item_from_zerodim(zerodim)))\n\n def test_isscalar_numpy_arrays(self):\n self.assertFalse(is_scalar(np.array([])))\n self.assertFalse(is_scalar(np.array([[]])))\n self.assertFalse(is_scalar(np.matrix('1; 2')))\n\n def test_isscalar_pandas_scalars(self):\n self.assertTrue(is_scalar(Timestamp('2014-01-01')))\n self.assertTrue(is_scalar(Timedelta(hours=1)))\n self.assertTrue(is_scalar(Period('2014-01-01')))\n\n def test_lisscalar_pandas_containers(self):\n self.assertFalse(is_scalar(Series()))\n self.assertFalse(is_scalar(Series([1])))\n self.assertFalse(is_scalar(DataFrame()))\n self.assertFalse(is_scalar(DataFrame([[1]])))\n self.assertFalse(is_scalar(Panel()))\n self.assertFalse(is_scalar(Panel([[[1]]])))\n self.assertFalse(is_scalar(Index([])))\n self.assertFalse(is_scalar(Index([1])))\n\n\ndef test_datetimeindex_from_empty_datetime64_array():\n for unit in ['ms', 'us', 'ns']:\n idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))\n assert (len(idx) == 0)\n\n\ndef test_nan_to_nat_conversions():\n\n df = DataFrame(dict({\n 'A': np.asarray(\n lrange(10), dtype='float64'),\n 'B': Timestamp('20010101')\n }))\n df.iloc[3:6, :] = np.nan\n result = df.loc[4, 'B'].value\n assert (result == tslib.iNaT)\n\n s = df['B'].copy()\n s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)\n assert (isnull(s[8]))\n\n # numpy < 1.7.0 is wrong\n from distutils.version import LooseVersion\n if LooseVersion(np.__version__) >= '1.7.0':\n assert (s[8].value == np.datetime64('NaT').astype(np.int64))\n\n\ndef test_ensure_int32():\n values = np.arange(10, dtype=np.int32)\n result = _ensure_int32(values)\n assert (result.dtype == np.int32)\n\n values = np.arange(10, dtype=np.int64)\n result = _ensure_int32(values)\n assert (result.dtype == np.int32)\n\n\ndef test_ensure_categorical():\n values = np.arange(10, dtype=np.int32)\n result = _ensure_categorical(values)\n assert (result.dtype == 'category')\n\n values = Categorical(values)\n result = _ensure_categorical(values)\n tm.assert_categorical_equal(result, values)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "pandas.to_datetime", "pandas.util.testing.assertIsInstance", "numpy.sqrt", "pandas.Series", "numpy.concatenate", "pandas.util.testing.assert_index_equal", "numpy.divide", "pandas.util.testing.assert_numpy_array_equal", "numpy.arange", "pandas.compat.text_type", "pandas.Index", "numpy.sin", "numpy.intersect1d", "pandas.core.config.option_context", "pandas.Int64Index", "numpy.zeros", "pandas.compat.u", "pandas.Float64Index", "pandas.date_range", "numpy.errstate", "numpy.array", "pandas.isnull", "pandas.util.testing.assertRaisesRegexp", "pandas.util.testing.assertRaises", "pandas.lib.Timestamp", "pandas.to_timedelta", "pandas.compat.lrange", "pandas.compat.range" ], [ "numpy.matrix", "pandas.lib.is_period", "numpy.complex128", "numpy.object_", "pandas.Series", "pandas.PeriodIndex", "pandas.types.missing.isnull", "numpy.bool", "pandas.types.common.is_float", "pandas.DataFrame", "pandas.lib.maybe_convert_objects", "pandas.types.common.is_timedelta64_dtype", "numpy.str_", "pandas.types.inference.is_re", "numpy.bool_", "pandas.util.testing.assert_numpy_array_equal", "pandas.lib.isposinf_scalar", "pandas.util.testing.assert_categorical_equal", "numpy.arange", "pandas.Panel", "pandas.types.inference.is_named_tuple", "pandas.lib.is_datetime64_array", "pandas.Index", "pandas.types.inference.is_dict_like", "pandas.types.inference.is_hashable", "pandas.types.common.is_number", "numpy.float32", "numpy.bytes_", "pandas.types.common.is_timedelta64_ns_dtype", "pandas.types.common.is_integer", "pandas.lib.item_from_zerodim", "pandas.lib.is_timedelta_array", "pandas.lib.is_timedelta_or_timedelta64_array", "pandas.types.common.is_scalar", "pandas.lib.to_object_array_tuples", "pandas.compat.u", "pandas.lib.maybe_convert_numeric", "pandas.Categorical", "numpy.isnan", "pandas.types.common.is_bool", "pandas.Timedelta", "numpy.timedelta64", "pandas.lib.isneginf_scalar", "numpy.int64", "numpy.array", "pandas.lib.is_timedelta64_array", "pandas.TimedeltaIndex", "pandas.lib.to_object_array", "pandas.types.inference.is_list_like", "pandas.lib.is_datetime_array", "pandas.util.testing.assertRaisesRegexp", "numpy.int32", "numpy.datetime64", "pandas.types.inference.is_re_compilable", "pandas.lib.infer_dtype", "numpy.uint64", "numpy.float64", "pandas.types.common._ensure_categorical", "pandas.Period", "pandas.compat.lrange", "pandas.Timestamp", "pandas.types.common._ensure_int32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gourie/AE
[ "5e094b5874aadccafa7ad4160e6f9dd273d2654c" ]
[ "Bayes_VAE.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\n\nfrom keras.layers import Input, Dense, Lambda, Layer, Conv2D, MaxPooling2D, UpSampling2D\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import metrics\nfrom keras.callbacks import TensorBoard\n\nclass BayesVAE(object):\n ''' Class to build a simple variational autoencoder with Keras.\n #Reference\n - Auto-Encoding Variational Bayes\n https://arxiv.org/abs/1312.6114\n '''\n\n def __init__(self, original_dim, intermediate_dim, batch_size=32, epsilon_std=1.0, latent_dim=2):\n self.input_img = Input(batch_shape=(batch_size, original_dim))\n self.original_dim = original_dim\n self.intermediate_dim = intermediate_dim\n self.latent_dim = latent_dim\n self.batch_size = batch_size\n self.epsilon_std = epsilon_std\n self.setup_dense_training_model()\n\n def setup_dense_training_model(self):\n\n x = self.input_img\n h = Dense(self.intermediate_dim, activation='relu')(x)\n self.z_mean = Dense(self.latent_dim)(h)\n # self.z_log_sigma = Dense(self.latent_dim)(h)\n self.z_log_var = Dense(self.latent_dim)(h)\n # sample latent variable z assuming normal distribution\n z = Lambda(self.normal_sampling, output_shape=(self.latent_dim,))([self.z_mean, self.z_log_var])\n self.decoder_h = Dense(self.intermediate_dim, activation='relu')\n self.decoder_mean = Dense(self.original_dim, activation='sigmoid')\n h_decoded = self.decoder_h(z)\n x_decoded_mean = self.decoder_mean(h_decoded)\n\n y = CustomVariationalLayer(self.original_dim, self.z_mean, self.z_log_var)([x, x_decoded_mean])\n self.vae = Model(x, y)\n\n def normal_sampling(self, args):\n z_mean, z_log_var = args\n epsilon = K.random_normal(shape=(K.shape(self.z_mean)[0], self.latent_dim), mean=0.,\n stddev=self.epsilon_std)\n return z_mean + K.exp(z_log_var / 2) * epsilon\n\n def encoder_model(self):\n # model mapping input to its encoded representation\n return Model(self.input_img, self.z_mean)\n\n def decoder_model(self):\n # decoder that samples from the learned distribution and decodes the mean back to input space\n decoder_input = Input(shape=(self.latent_dim,))\n _h_decoded = self.decoder_h(decoder_input)\n _x_decoded_mean = self.decoder_mean(_h_decoded)\n return Model(decoder_input, _x_decoded_mean)\n\n def train(self, x_train, x_test, nb_epochs=50):\n self.vae.compile(optimizer='rmsprop', loss=None)\n self.nb_epochs = nb_epochs\n self.vae.fit(x_train,\n shuffle=True,\n epochs=self.nb_epochs,\n batch_size=self.batch_size,\n validation_data=(x_test, None),\n callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])\n\n# Custom loss layer\nclass CustomVariationalLayer(Layer):\n def __init__(self, original_dim, z_mean, z_log_var, **kwargs):\n self.is_placeholder = True\n self.original_dim = original_dim\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n super(CustomVariationalLayer, self).__init__(**kwargs)\n\n def vae_loss(self, x, x_decoded_mean):\n xent_loss = self.original_dim * metrics.binary_crossentropy(x, x_decoded_mean)\n kl_loss = - 0.5 * K.sum(1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var), axis=-1)\n return K.mean(xent_loss + kl_loss)\n\n def call(self, inputs):\n x = inputs[0]\n x_decoded_mean = inputs[1]\n loss = self.vae_loss(x, x_decoded_mean)\n self.add_loss(loss, inputs=inputs)\n # We won't actually use the output.\n return x\n\n\ndef scatterplot_latent_space(encoder, x_test, y_test, batch_size):\n \"\"\"\n Display a 2D plot of the digit classes in the latent space learned with VAE\n :return: None\n 
\"\"\"\n x_test_encoded = encoder.predict(x_test, batch_size=batch_size)\n plt.figure(figsize=(6, 6))\n plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)\n plt.colorbar()\n plt.show()\n\ndef plot_manifold(generator):\n \"\"\"\n Display a 2D scatterplot of the input manifold learned with VAE\n :param generator:\n :return:\n \"\"\"\n n = 15 # figure with 15x15 digits\n digit_size = 28\n figure = np.zeros((digit_size * n, digit_size * n))\n # linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian\n # to produce values of the latent variables z, since the prior of the latent space is Gaussian\n grid_x = norm.ppf(np.linspace(0.05, 0.95, n))\n grid_y = norm.ppf(np.linspace(0.05, 0.95, n))\n\n for i, yi in enumerate(grid_x):\n for j, xi in enumerate(grid_y):\n z_sample = np.array([[xi, yi]])\n x_decoded = generator.predict(z_sample)\n digit = x_decoded[0].reshape(digit_size, digit_size)\n figure[i * digit_size: (i + 1) * digit_size,\n j * digit_size: (j + 1) * digit_size] = digit\n\n plt.figure(figsize=(10, 10))\n plt.imshow(figure, cmap='Greys_r')\n plt.show()\n\ndef load_mnist_data():\n (x_train, _), (x_test, _) = mnist.load_data()\n x_train = x_train.astype('float32') / 255.\n x_test = x_test.astype('float32') / 255.\n x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\n x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\n print(x_train.shape)\n print(x_test.shape)\n return x_train, x_test" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.scatter", "numpy.linspace", "matplotlib.pyplot.colorbar", "numpy.prod", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nichollskc/biclust_comp
[ "1b01d44e8a4ab4f40cf015e60bd39afb8616f89d" ]
[ "biclust_comp/analysis/enrichment.py" ]
[ "import logging\nfrom pathlib import Path\nimport re\n\nimport scipy.stats as ss\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport statsmodels.stats.multitest as multitest\nimport sklearn.metrics\n\nfrom intermine.webservice import Service\n\nimport biclust_comp.utils as utils\n\ndef plot_sample_enrichment_impc(X_file, max_factors=None, max_traits=None):\n sample_info = read_sample_info_IMPC(\"data/real/IMPC/sample_info.txt\")\n\n X = utils.read_matrix_tsv(X_file)\n\n trait_dummies = pd.get_dummies(sample_info[['tissue', 'genotype']])\n return plot_enrichment(trait_dummies, X, max_factors, max_traits)\n\n\ndef plot_pathway_enrichment(B_file, gene_ensembl_ids_file,\n full_pathways_file=\"analysis/IMPC/full_pathways.tsv\",\n max_factors=None, max_pathways=None):\n with open(gene_ensembl_ids_file) as f:\n gene_ensembl_ids = [line.strip() for line in f.readlines()]\n\n B = pd.read_csv(B_file, sep=\"\\t\")\n full_pathways_df = pd.read_csv(full_pathways_file, sep=\"\\t\")\n\n pathways_df = construct_pathways_df(gene_ensembl_ids, full_pathways_df)\n\n return plot_enrichment(pathways_df, B, max_factors, max_pathways)\n\n\ndef construct_ko_pathways_df():\n sample_info = read_sample_info_IMPC(\"data/real/IMPC/sample_info.txt\")\n\n service = Service(\"http://www.mousemine.org/mousemine/service\")\n knocked_out_genes = []\n for genotype in sample_info.genotype.unique():\n match = re.match(r\"(.*) knockout\", genotype)\n if match:\n knocked_out_genes.append(match[1])\n\n ko_genes_pathways = {}\n pathway_names_dict = {}\n\n for knocked_out_gene in knocked_out_genes:\n query = service.new_query(\"ProteinCodingGene\")\n query.add_view(\"pathways.identifier\", \"pathways.name\", \"symbol\")\n query.add_constraint(\"symbol\", \"=\", knocked_out_gene)\n pathways = [f\"{row['pathways.name']}_-_{row['pathways.identifier']}\" for row in query.rows()]\n ko_genes_pathways[knocked_out_gene] = pathways\n\n for row in query.rows():\n pathway_names_dict[row[\"pathways.identifier\"]] = row[\"pathways.name\"]\n\n ko_genes_pathways_df = utils.transform_dict_to_count_df(ko_genes_pathways)\n return ko_genes_pathways_df, pathway_names_dict\n\n\ndef construct_full_pathways_df(pathways):\n service = Service(\"http://www.mousemine.org/mousemine/service\")\n\n pathways_dict = {}\n for pathway in pathways:\n query = service.new_query(\"Pathway\")\n query.add_view(\n \"genes.primaryIdentifier\", \"genes.symbol\", \"genes.name\",\n \"genes.sequenceOntologyTerm.name\", \"genes.chromosome.primaryIdentifier\"\n )\n query.add_constraint(\"identifier\", \"=\", pathway)\n pathways_dict[pathway] = [row[\"genes.primaryIdentifier\"]\n for row in query.rows()]\n\n pathways_df = utils.transform_dict_to_count_df(pathways_dict).T\n return pathways_df\n\n\ndef construct_pathways_df(gene_ensembl_ids, full_pathways_df,\n ensembl_to_mgi_file=\"analysis/mart_export.txt\"):\n ensembl_to_mgi = pd.read_csv(ensembl_to_mgi_file,\n sep=\"\\t\",\n index_col=0)\n pathways_df = pd.DataFrame(index=gene_ensembl_ids,\n columns=full_pathways_df.columns,\n dtype=int,\n data=0)\n\n for ensembl_id in gene_ensembl_ids:\n unversioned_id = ensembl_id.split('.')[0]\n try:\n mgi_id = ensembl_to_mgi.loc[unversioned_id, 'MGI ID']\n if isinstance(mgi_id, str) and mgi_id.startswith('MGI'):\n pass\n else:\n raise KeyError\n except KeyError as e:\n print(f\"Unable to translate ID {ensembl_id}\")\n try:\n pathways_df.loc[ensembl_id, :] = full_pathways_df.loc[mgi_id, :]\n except KeyError as e:\n print(f\"MGI ID not found in pathways matrix 
{mgi_id}\")\n\n return pathways_df\n\n\ndef plot_enrichment(trait_df, factor_df, max_factors, max_traits):\n f1_scores, intersections, _fisher_pvals = calculate_trait_enrichment(factor_df, trait_df)\n if max_factors:\n num_factors = min(factor_df.shape[1], max_factors)\n else:\n num_factors = factor_df.shape[1]\n\n if max_traits:\n num_traits = min(trait_df.shape[1], max_traits)\n else:\n num_traits = trait_df.shape[1]\n\n # Sort the columns and rows by maximum f1 score, so that the factors with\n # best enrichment will be left-most in the chart, and traits with best\n # enrichment will be highest in the chart\n ordered_columns = sorted(list(f1_scores.columns),\n key=lambda k: f1_scores.iloc[:, k].max(),\n reverse=True)\n ordered_rows = sorted(list(f1_scores.index),\n key=lambda row: f1_scores.loc[row, :].max(),\n reverse=True)\n\n intersections.loc['total', :] = (factor_df != 0).sum()\n f1_scores.loc['total', :] = 0\n ordered_rows.insert(0, 'total')\n\n ordered_intersections = intersections.loc[ordered_rows, ordered_columns]\n ordered_intersections.insert(0, 'total', trait_df.sum())\n ordered_f1_scores = f1_scores.loc[ordered_rows, ordered_columns]\n ordered_f1_scores.insert(0, 'total', 0)\n\n fig, ax = plt.subplots(figsize=(num_factors * 0.7 + 3,\n num_traits * 0.7))\n\n # Colour each square by the F1 score\n plt.imshow(ordered_f1_scores.iloc[:num_traits + 1, :num_factors + 1],\n aspect='auto',\n cmap='Blues')\n\n # Sort out axis labels\n ax.set_yticks(np.arange(num_traits + 1))\n ax.set_xticks(np.arange(num_factors + 1))\n ax.set_yticklabels(ordered_f1_scores.index)\n ax.set_xticklabels(ordered_f1_scores.columns)\n\n # Add text that notes the number of samples in intersection of trait and factor\n threshold_black = 0.5\n for j in range(num_factors + 1):\n for i in range(num_traits + 1):\n value = ordered_intersections.iloc[i, j]\n opacity = ordered_f1_scores.iloc[i, j]\n if opacity < threshold_black and value != 0:\n color=\"black\"\n else:\n color=\"white\"\n text = ax.text(j, i, value,\n ha=\"center\", va=\"center\", color=color)\n\n plt.axvline(x=0.5, color='black')\n plt.axhline(y=0.5, color='black')\n plt.colorbar()\n fig.tight_layout()\n plt.show()\n return ordered_f1_scores, ordered_intersections\n\n\ndef calculate_trait_enrichment(factor_df, trait_df):\n f1_scores = pd.DataFrame(index=trait_df.columns,\n columns=factor_df.columns,\n dtype=float)\n fisher_pvals = pd.DataFrame(index=trait_df.columns,\n columns=factor_df.columns,\n dtype=float)\n odds_ratios = pd.DataFrame(index=trait_df.columns,\n columns=factor_df.columns,\n dtype=float)\n intersections = pd.DataFrame(index=trait_df.columns,\n columns=factor_df.columns,\n dtype=int)\n\n for trait_name, trait_column in trait_df.items():\n for factor_index, factor_column in factor_df.items():\n total_from_trait = trait_column.sum()\n total_population = len(trait_column)\n factor_size = (factor_column != 0).sum()\n trait_non_zero = np.where(trait_column)[0]\n intersection_size = ((factor_column.iloc[trait_non_zero]) != 0).sum()\n trait_size = trait_column.sum()\n\n intersections.loc[trait_name, factor_index] = intersection_size\n f1_scores.loc[trait_name, factor_index] = sklearn.metrics.f1_score(trait_column,\n factor_column != 0)\n # sf is the 'survival' function i.e. 
1 - cdf\n # So we are finding the probability that the intersection size is at least\n # equal to the intersection size we have observed, under the assumption that this\n # has Hypergeometric distribution with M=total_population, n=trait_size and N=factor_size\n # where M is 'total number of objects in the bin', N is 'number of objects we pick'\n # n is 'total number of objects which are successes' and\n # m is 'number of objects we pick which are successes'\n fisher_pvals.loc[trait_name, factor_index] = ss.hypergeom.sf(intersection_size - 1,\n total_population,\n trait_size,\n factor_size)\n\n odds_in_factor = intersection_size / (factor_size - intersection_size)\n notfactor_nottrait = total_population - trait_size - factor_size + intersection_size\n odds_out_of_factor = (trait_size - intersection_size) / notfactor_nottrait\n odds_ratios.loc[trait_name, factor_index] = odds_in_factor / odds_out_of_factor\n\n _reject, corrected_fisher_pvals = utils.correct_multiple_testing(fisher_pvals)\n\n return f1_scores, intersections, corrected_fisher_pvals, odds_ratios\n\n\ndef summarise_enrichment(sort_measure_name, measures_dict, factor_df, trait_df):\n trait_enrichment_dicts = []\n sort_measure_df = measures_dict[sort_measure_name]\n\n for trait in sort_measure_df.index:\n best_factor = sort_measure_df.loc[trait, :].argmax()\n trait_enrichment_dict = {'trait': trait,\n 'best factor (by F1 score)': best_factor,\n 'factor size': (factor_df.loc[:, best_factor] != 0).sum(),\n 'trait size': (trait_df.loc[:, trait] != 0).sum()}\n for measure, measure_df in measures_dict.items():\n trait_enrichment_dict[measure] = measure_df.loc[trait, best_factor]\n trait_enrichment_dicts.append(trait_enrichment_dict)\n\n return pd.DataFrame(trait_enrichment_dicts)\n\n\ndef read_sample_info_IMPC(filename, read_ID=False):\n sample_info = pd.read_csv(filename, sep=\"\\t\")\n sample_info['genotype'] = sample_info['Factor Value[genotype]']\n sample_info['tissue'] = sample_info['Factor Value[organism part]']\n if read_ID:\n sample_info['ID'] = sample_info['Comment[ENA_SAMPLE]']\n return sample_info\n\n\ndef summarise_pathways_summary_IMPC(folder, postprocessing='*'):\n logging.info(f\"Looking in folder {folder} for files of the form 'pathways_summary{postprocessing}.tsv'\")\n files = [str(filename) for filename in Path(folder).rglob(f\"pathways_summary{postprocessing}.tsv\")]\n logging.info(f\"Found {len(files)} files\")\n file_pattern = re.compile(r'analysis/IMPC/(\\w+)/real/IMPC/([\\w/]+)/(run_.+)/pathways_summary(.*).tsv')\n run_info_dicts = []\n\n for file in files:\n logging.info(f\"Processing file {file}\")\n match = re.match(file_pattern, str(file))\n if match:\n run_info = {'method': match[1],\n 'dataset': match[2],\n 'run_id': match[3],\n 'postprocessing': match[4]}\n try:\n pathways = pd.read_csv(str(file), sep=\"\\t\", header=0)\n # Mean (over factors) of log10 of the smallest p-value\n run_info['factors_pathways_mean_min_pval'] = np.log10(pathways['min_pval']).mean()\n for alpha_col in pathways.columns[pathways.columns.str.startswith('alpha')]:\n # For each threshold, the mean (over factors) number of pathways significant at that threshold and\n # the proportion of factors that had at least one pathway significant at that threshold\n run_info[f\"factors_pathways_mean_{alpha_col}\"] = pathways[alpha_col].mean()\n run_info[f\"factors_pathways_nz_{alpha_col}\"] = (pathways[alpha_col] != 0).mean()\n except pd.errors.EmptyDataError as e:\n logging.warning(f\"Empty file: {file}\")\n except KeyError as e:\n 
logging.warning(f\"Required columns not found: {file}\")\n\n run_info_dicts.append(run_info)\n else:\n logging.warning(f\"Failed to decode file name: {file}\")\n\n return pd.DataFrame(run_info_dicts)\n\n\ndef summarise_traits_summary_IMPC(folder, postprocessing='*'):\n logging.info(f\"Looking in folder {folder} for files of the form 'traits_summary{postprocessing}.tsv'\")\n files = [str(filename) for filename in Path(folder).rglob(f\"traits_summary{postprocessing}.tsv\")]\n logging.info(f\"Found {len(files)} files\")\n file_pattern = re.compile(r'analysis/IMPC/(\\w+)/real/IMPC/([\\w/]+)/(run_.+)/traits_summary(.*).tsv')\n run_info_dicts = []\n\n for file in files:\n logging.info(f\"Processing file {file}\")\n match = re.match(file_pattern, str(file))\n if match:\n run_info = {'method': match[1],\n 'dataset': match[2],\n 'run_id': match[3],\n 'postprocessing': match[4]}\n try:\n traits = pd.read_csv(str(file), sep=\"\\t\", header=0)\n tissue_rows = traits['trait'].str.startswith('tissue')\n genotype_rows = traits['trait'].str.startswith('genotype')\n # Mean (over traits) of f1 score from best factor, mean (over traits) of log of Fisher exact p-value\n # (again from best factor), min p-value (min over traits, of p-value from best factor), max (over traits)\n # of f1 score from best factor\n run_info['traits_mean_f1_score'] = traits.loc[:, 'F1 score'].mean()\n run_info['traits_mean_log10_pval'] = np.log10(traits.loc[:, 'Fisher\\'s exact test']).mean()\n run_info['traits_min_pval'] = traits.loc[:, 'Fisher\\'s exact test'].min()\n run_info['traits_max_f1_score'] = traits.loc[:, 'F1 score'].max()\n # Same as above, but only for 'genotype traits'\n run_info['traits_genotype_mean_f1_score'] = traits.loc[genotype_rows, 'F1 score'].mean()\n run_info['traits_genotype_mean_log10_pval'] = np.log10(traits.loc[genotype_rows, 'Fisher\\'s exact test']).mean()\n run_info['traits_genotype_min_pval'] = traits.loc[genotype_rows, 'Fisher\\'s exact test'].min()\n run_info['traits_genotype_max_f1_score'] = traits.loc[genotype_rows, 'F1 score'].max()\n # Same as above, but only for 'tissue traits'\n run_info['traits_tissue_mean_f1_score'] = traits.loc[tissue_rows, 'F1 score'].mean()\n run_info['traits_tissue_mean_log10_pval'] = np.log10(traits.loc[tissue_rows, 'Fisher\\'s exact test']).mean()\n run_info['traits_tissue_min_pval'] = traits.loc[tissue_rows, 'Fisher\\'s exact test'].min()\n run_info['traits_tissue_max_f1_score'] = traits.loc[tissue_rows, 'F1 score'].max()\n\n # Proportion of traits which have a factor significant for them, with threshold 0.01 and 0.05 resp.\n run_info['traits_sig_traits 0.01'] = (traits.loc[:, 'Fisher\\'s exact test'] < 0.01).sum() / len(traits)\n run_info['traits_sig_traits 0.05'] = (traits.loc[:, 'Fisher\\'s exact test'] < 0.05).sum() / len(traits)\n except pd.errors.EmptyDataError as e:\n logging.warning(f\"Empty file: {file}\")\n except KeyError as e:\n logging.warning(f\"Required columns not found: {file}\")\n\n run_info_dicts.append(run_info)\n else:\n logging.warning(f\"Failed to decode file name: {file}\")\n\n return pd.DataFrame(run_info_dicts)\n\n\ndef summarise_traits_fisherpvals_IMPC(folder, postprocessing='*'):\n logging.info(f\"Looking in folder {folder} for files of the form 'traits_fisherpvals{postprocessing}.tsv'\")\n files = [str(filename) for filename in Path(folder).rglob(f\"traits_fisherpvals{postprocessing}.tsv\")]\n logging.info(f\"Found {len(files)} files\")\n file_pattern = 
re.compile(r'analysis/IMPC/(\\w+)/real/IMPC/([\\w/]+)/(run_.+)/traits_fisherpvals(.*).tsv')\n run_info_dicts = []\n\n for file in files:\n logging.info(f\"Processing file {file}\")\n match = re.match(file_pattern, str(file))\n if match:\n run_info = {'method': match[1],\n 'dataset': match[2],\n 'run_id': match[3],\n 'postprocessing': match[4]}\n try:\n traits_pvals = pd.read_csv(str(file), header=0, index_col=0, sep=\"\\t\")\n min_pvals_per_factor = traits_pvals.min(axis=0)\n # For each threshold, the proportion of factors that are enriched for at least one trait\n for threshold in [1, 0.1, 0.05, 0.01, 0.001, 0.0001, 0.00001]:\n run_info[f\"traits_factors_alpha {threshold}\"] = (min_pvals_per_factor < threshold).mean()\n except pd.errors.EmptyDataError as e:\n logging.warning(f\"Empty file: {file}\")\n except KeyError as e:\n logging.warning(f\"Required columns not found: {file}\")\n\n run_info_dicts.append(run_info)\n else:\n logging.warning(f\"Failed to decode file name: {file}\")\n\n return pd.DataFrame(run_info_dicts)\n\n\ndef summarise_traits_f1scores_IMPC(folder, postprocessing='*'):\n logging.info(f\"Looking in folder {folder} for files of the form 'traits_f1scores{postprocessing}.tsv'\")\n files = [str(filename) for filename in Path(folder).rglob(f\"traits_f1scores{postprocessing}.tsv\")]\n logging.info(f\"Found {len(files)} files\")\n file_pattern = re.compile(r'analysis/IMPC/(\\w+)/real/IMPC/([\\w/]+)/(run_.+)/traits_f1scores(.*).tsv')\n run_info_dicts = []\n\n for file in files:\n logging.info(f\"Processing file {file}\")\n match = re.match(file_pattern, str(file))\n if match:\n run_info = {'method': match[1],\n 'dataset': match[2],\n 'run_id': match[3],\n 'postprocessing': match[4]}\n try:\n traits_f1scores = pd.read_csv(str(file), header=0, index_col=0, sep=\"\\t\")\n # Mean (over factors) of the best F1 score that factor attains (across all traits)\n run_info['traits_factors_mean_max_f1_score'] = traits_f1scores.max(axis=0).mean()\n except pd.errors.EmptyDataError as e:\n logging.warning(f\"Empty file: {file}\")\n except KeyError as e:\n logging.warning(f\"Required columns not found: {file}\")\n\n run_info_dicts.append(run_info)\n else:\n logging.warning(f\"Failed to decode file name: {file}\")\n\n return pd.DataFrame(run_info_dicts)\n\n\ndef summarise_ko_enrichment_summary_IMPC(folder, postprocessing='*'):\n logging.info(f\"Looking in folder {folder} for files of the form 'ko_enrichment_summary{postprocessing}.tsv'\")\n files = [str(filename) for filename in Path(folder).rglob(f\"ko_enrichment_summary{postprocessing}.tsv\")]\n logging.info(f\"Found {len(files)} files\")\n file_pattern = re.compile(r'analysis/IMPC/(\\w+)/real/IMPC/([\\w/]+)/(run_.+)/ko_enrichment_summary(.*).tsv')\n run_info_dicts = []\n\n for file in files:\n logging.info(f\"Processing file {file}\")\n match = re.match(file_pattern, str(file))\n if match:\n run_info = {'method': match[1],\n 'dataset': match[2],\n 'run_id': match[3],\n 'postprocessing': match[4]}\n try:\n ko_enrichment = pd.read_csv(str(file), sep=\"\\t\", header=0)\n # Mean (over traits - only knockout genes) of the best F1 score obtained by any factor on that trait,\n # also minimum pvalue\n run_info['ko_traits_mean_f1_score'] = ko_enrichment['f1_score (trait)'].mean()\n run_info['ko_traits_mean_min_pval'] = np.log10(ko_enrichment['min_pval']).mean()\n # For the threshold 0.05, the mean of precision and recall, considering the set of pathways\n # significantly enriched at that threshold as the set of predictions, and the set\n # of pathways 
that contained the gene knocked out as successes\n run_info['ko_traits_mean_precision_0.05'] = (ko_enrichment['alpha 0.05'] / ko_enrichment['all_pathways alpha 0.05']).mean()\n run_info['ko_traits_mean_recall_0.05'] = (ko_enrichment['alpha 0.05'] / ko_enrichment['pathways']).mean()\n for alpha_col in ko_enrichment.columns[ko_enrichment.columns.str.startswith('alpha')]:\n # Mean recall, as above but for different thresholds\n run_info[f\"ko_traits_mean_recall_{alpha_col}\"] = (ko_enrichment[alpha_col] / ko_enrichment['pathways']).mean()\n # Proportion of traits (only ko genotype traits) that had at least one relevant pathway\n # (i.e. one containing this knocked out gene) significant at this threshold\n run_info[f\"ko_traits_nz_{alpha_col}\"] = (ko_enrichment[alpha_col] != 0).mean()\n except pd.errors.EmptyDataError as e:\n logging.warning(f\"Empty file: {file}\")\n except KeyError as e:\n logging.warning(f\"Required columns not found: {file}\")\n\n run_info_dicts.append(run_info)\n else:\n logging.warning(f\"Failed to decode file name: {file}\")\n\n return pd.DataFrame(run_info_dicts)\n\n\ndef summarise_factor_info_IMPC(folder, postprocessing='*'):\n logging.info(f\"Looking in folder {folder} for files of the form 'factor_info{postprocessing}.tsv'\")\n files = [str(filename) for filename in Path(folder).rglob(f\"factor_info{postprocessing}.tsv\")]\n logging.info(f\"Found {len(files)} files\")\n files = Path(folder).rglob(f\"factor_info{postprocessing}.tsv\")\n file_pattern = re.compile(r'analysis/IMPC/(\\w+)/real/IMPC/([\\w/]+)/(run_.+)/factor_info(.*).tsv')\n run_info_dicts = []\n\n for file in files:\n logging.info(f\"Processing file {file}\")\n match = re.match(file_pattern, str(file))\n if match:\n run_info = {'method': match[1],\n 'dataset': match[2],\n 'run_id': match[3],\n 'postprocessing': match[4]}\n try:\n factor_info = pd.read_csv(str(file), sep=\"\\t\", index_col=0, header=0)\n # Number of factors, mean number of genes and samples in factor,\n # mean of genes*samples (over factors), which I'm calling number of cells\n run_info['recovered_K'] = factor_info.shape[0]\n run_info['mean_num_genes'] = factor_info['num_genes'].mean()\n run_info['mean_num_samples'] = factor_info['num_samples'].mean()\n run_info['mean_num_cells'] = (factor_info['num_samples'] * factor_info['num_genes']).mean()\n # Mean (over factors) of the maximum (over other factors) Jaccard similarity\n run_info['mean_redundancy_max'] = factor_info['redundancy_max'].mean()\n # Mean (over factors) of the mean (over other factors) Jaccard similarity\n run_info['mean_redundancy_mean'] = factor_info['redundancy_mean'].mean()\n except pd.errors.EmptyDataError as e:\n logging.warning(f\"Empty file: {file}\")\n except KeyError as e:\n logging.warning(f\"Required columns not found: {file}\")\n\n run_info_dicts.append(run_info)\n else:\n logging.warning(f\"Failed to decode file name: {file}\")\n\n return pd.DataFrame(run_info_dicts)\n\ndef get_number_unique_pathways_mdr(method_dataset_run_id, enrich_thresholds=[0.001, 0.01, 0.05]):\n if 'Plaid' in method_dataset_run_id:\n thresh = \"0e+0\"\n else:\n thresh = \"1e-2\"\n pathway_pvals = pd.read_csv(f\"analysis/IMPC/{method_dataset_run_id}/pathways_fisherpvals_thresh_{thresh}.tsv\",\n sep='\\t',\n index_col=0)\n main_pathways = (pathway_pvals.values.argmin(axis=0))\n\n results = {'method_dataset_run_id': method_dataset_run_id}\n results[\"unique_best_pathways\"] = len(set(main_pathways))\n for threshold in enrich_thresholds:\n results[f\"pathways_{threshold}\"] = 
sum(pathway_pvals.min(axis=1) < threshold)\n\n return results\n\ndef get_number_unique_pathways(error_df_file):\n error_df = pd.read_csv(error_df_file)\n\n pathways_dicts = []\n for mdr in error_df[error_df['run_complete']]['method_dataset_run_id'].unique():\n try:\n results = get_number_unique_pathways_mdr(mdr)\n pathways_dicts.append(results)\n except FileNotFoundError:\n logging.warn(f\"Skipping mdr {mdr}\")\n continue\n\n return pd.DataFrame(pathways_dicts)\n" ]
[ [ "matplotlib.pyplot.imshow", "pandas.read_csv", "matplotlib.pyplot.axvline", "matplotlib.pyplot.axhline", "numpy.arange", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.colorbar", "scipy.stats.hypergeom.sf", "numpy.log10", "matplotlib.pyplot.show", "numpy.where", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
yongma/keras-yolo3
[ "7ea23719a054ac116ac36b182fba3fd5186c1652" ]
[ "yolo3/utils.py" ]
[ "\"\"\"Miscellaneous utility functions.\"\"\"\n\nfrom functools import reduce\n\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n\ndef compose(*funcs):\n \"\"\"Compose arbitrarily many functions, evaluated left to right.\n\n Reference: https://mathieularose.com/function-composition-in-python/\n \"\"\"\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')\n\ndef letterbox_image(image, size):\n '''resize image with unchanged aspect ratio using padding'''\n iw, ih = image.size\n w, h = size\n scale = min(1.0 * w/iw, 1.0 * h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', size, (128,128,128))\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\n return new_image\n\ndef rand(a=0, b=1):\n return np.random.rand()*(b-a) + a\n\ndef get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):\n '''random preprocessing for real-time data augmentation'''\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\n\n if not random:\n # resize image\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n dx = (w-nw)//2\n dy = (h-nh)//2\n image_data=0\n if proc_img:\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_image.paste(image, (dx, dy))\n image_data = np.array(new_image)/255.\n\n # correct boxes\n box_data = np.zeros((max_boxes,5))\n if len(box)>0:\n np.random.shuffle(box)\n if len(box)>max_boxes: box = box[:max_boxes]\n box[:, [0,2]] = box[:, [0,2]]*scale + dx\n box[:, [1,3]] = box[:, [1,3]]*scale + dy\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n # resize image\n new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)\n scale = rand(.25, 2)\n if new_ar < 1:\n nh = int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = image.resize((nw,nh), Image.BICUBIC)\n\n # place image\n dx = int(rand(0, w-nw))\n dy = int(rand(0, h-nh))\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # flip image or not\n flip = rand()<.5\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)\n val = rand(1, val) if rand()<.5 else 1/rand(1, val)\n x = rgb_to_hsv(np.array(image)/255.)\n x[..., 0] += hue\n x[..., 0][x[..., 0]>1] -= 1\n x[..., 0][x[..., 0]<0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x>1] = 1\n x[x<0] = 0\n image_data = hsv_to_rgb(x) # numpy array, 0 to 1\n\n # correct boxes\n box_data = np.zeros((max_boxes,5))\n if len(box)>0:\n np.random.shuffle(box)\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\n if flip: box[:, [0,2]] = w - box[:, [2,0]]\n box[:, 0:2][box[:, 0:2]<0] = 0\n box[:, 2][box[:, 2]>w] = w\n box[:, 3][box[:, 3]>h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box\n if len(box)>max_boxes: box = box[:max_boxes]\n box_data[:len(box)] = box\n\n return image_data, box_data\n" ]
[ [ "numpy.logical_and", "matplotlib.colors.hsv_to_rgb", "numpy.random.shuffle", "numpy.random.rand", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YutouTaro/poselstm-pytorch
[ "640af6667fb28d4e0bc47a54a33f28662dfc63aa" ]
[ "data/unaligned_posenet_dataset.py" ]
[ "import os.path\nimport torchvision.transforms as transforms\nfrom data.base_dataset import BaseDataset, get_posenet_transform\nfrom data.image_folder import make_dataset\nfrom PIL import Image\nimport PIL\nimport random\nimport numpy\n# from util.util import scale\n\nclass UnalignedPoseNetDataset(BaseDataset):\n def initialize(self, opt):\n self.opt = opt\n self.root = opt.dataroot\n\n split_file = os.path.join(self.root , 'dataset_'+opt.phase+'.txt')\n self.A_paths = numpy.loadtxt(split_file, dtype=str, delimiter=' ', skiprows=3, usecols=(0))\n self.A_paths = [os.path.join(self.root, path) for path in self.A_paths]\n self.A_poses = numpy.loadtxt(split_file, dtype=float, delimiter=' ', skiprows=3, usecols=(1,2,3,4,5,6,7))\n # if opt.isTrain:\n # # scale values of location to defined range\n # self.A_poses[:, :3], opt.position_range = scale(self.A_poses[:, :3], self.opt.scale_range)\n # # TODO find a better way to store position_range\n # file_name = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'opt_'+self.opt.phase+'.txt')\n # with open(file_name, 'at') as opt_file:\n # opt_file.write('position_range: {}, {}\\n'.format(opt.position_range[0], opt.position_range[1]))\n # else:\n # # read the position_range used for training from opt_train.txt\n # path_train_file = os.path.join(opt.checkpoints_dir, opt.name, 'opt_train.txt')\n # with open(path_train_file, 'rt') as ftrain:\n # for line in ftrain:\n # l = line.split(':')\n # if 'position_range' == l[0]:\n # opt.position_range = tuple(map(float, l[1].split(',')))\n\n if opt.model == \"poselstm\":\n self.mean_image = None\n print(\"mean image subtraction is deactivated\")\n else:\n self.mean_image = numpy.load(os.path.join(self.root, 'mean_image.npy'))\n\n self.A_size = len(self.A_paths)\n self.transform = get_posenet_transform(opt, self.mean_image)\n\n def __getitem__(self, index):\n A_path = self.A_paths[index % self.A_size]\n # index_A = index % self.A_size\n # print('(A, B) = (%d, %d)' % (index_A, index_B))\n A_img = Image.open(A_path).convert('RGB')\n A_pose = self.A_poses[index % self.A_size]\n\n A = self.transform(A_img)\n\n return {'A': A, 'B': A_pose,\n 'A_paths': A_path}\n\n def __len__(self):\n return self.A_size\n\n def name(self):\n return 'UnalignedPoseNetDataset'\n" ]
[ [ "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
oori/OrganizedPointFilters
[ "9030badec776e4bb4d98a397767630efb9dc078f" ]
[ "examples/python/example-laplacian.py" ]
[ "\"\"\"\nDemonstrates Laplacian Smoothing using CPU and GPU acceleration\n\"\"\"\nfrom os import path\nimport numpy as np\nimport logging\n\nimport organizedpointfilters as opf\nfrom organizedpointfilters import Matrix3f, Matrix3fRef\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(\"PPB\")\nlogger.setLevel(logging.INFO)\n\nfrom .utility.helper import load_pcd_file, laplacian_opc, create_mesh_from_organized_point_cloud_with_o3d, laplacian_opc_cuda\nfrom .utility.o3d_util import plot_meshes\n\n\ndef main():\n THIS_DIR = path.dirname(path.realpath(__file__))\n PCD_DIR = path.join(THIS_DIR, '..', '..', 'fixtures', 'pcd')\n mesh_file = path.join(PCD_DIR, 'pc_01.pcd')\n pc, pc_image = load_pcd_file(\n mesh_file, stride=2)\n\n # Not Smooth Mesh\n tri_mesh_noisy, tri_mesh_noisy_o3d = create_mesh_from_organized_point_cloud_with_o3d(\n np.ascontiguousarray(pc[:, :3]))\n\n kwargs = dict(loops=5, _lambda=1.0, kernel_size=3)\n opc_smooth = laplacian_opc(pc_image, **kwargs, max_dist=0.25)\n tri_mesh_opc, tri_mesh_opc_o3d = create_mesh_from_organized_point_cloud_with_o3d(opc_smooth)\n\n opc_smooth_gpu = laplacian_opc_cuda(pc_image, **kwargs)\n tri_mesh_opc_gpu, tri_mesh_opc_gpu_o3d = create_mesh_from_organized_point_cloud_with_o3d(opc_smooth_gpu)\n\n kwargs['kernel_size'] = 5\n opc_smooth_gpu_k5 = laplacian_opc_cuda(pc_image, **kwargs)\n tri_mesh_opc_gpu_k5, tri_mesh_opc_gpu_o3d_k5 = create_mesh_from_organized_point_cloud_with_o3d(opc_smooth_gpu_k5)\n\n print(\"Meshes from left to right - Input Noisy Mesh, Smoothed with CPU Laplacian, Smoothed with GPU Laplacian, Smoothed with GPU Laplacian with kernel=5\")\n\n plot_meshes(tri_mesh_noisy_o3d, tri_mesh_opc_o3d, tri_mesh_opc_gpu_o3d, tri_mesh_opc_gpu_o3d_k5)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.ascontiguousarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nakazono1011/pj_horidasimono
[ "5dcbd804b29066954b62b35c78fb324a53fdf493" ]
[ "code/model_ridge.py" ]
[ "from util import Util\nimport numpy as np\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import StandardScaler\n\nclass ModelRidge():\n \n def __init__(self, run_name=\"Ridge\"):\n self.run_name = run_name\n self.params = {\n }\n self.model = None\n \n def fit(self, tr_x, tr_y, va_x=None, va_y=None):\n y_scaler = StandardScaler() \n tr_y_std = y_scaler.fit_transform(np.log1p(tr_y.values.reshape(-1, 1)))\n\n # モデルの構築\n model = Ridge()\n model.fit(tr_x, tr_y_std)\n\n # モデル・スケーラーの保持\n self.model = model\n self.scaler = y_scaler\n\n def predict(self, te_x):\n pred = self.model.predict(te_x).reshape(-1, 1)\n pred = self.scaler.inverse_transform(pred)[:, 0]\n return pred\n \n def save_model(self):\n model_path = os.path.join('../model/model', f'{self.run_name}.h5')\n scaler_path = os.path.join('../model/model', f'{self.run_name}-scaler.pkl')\n os.makedirs(os.path.dirname(model_path), exist_ok=True)\n self.model.save(model_path)\n Util.dump(self.scaler, scaler_path)\n\n def load_model(self):\n model_path = os.path.join('../model/model', f'{self.run_name}.h5')\n scaler_path = os.path.join('../model/model', f'{self.run_name}-scaler.pkl')\n self.model = load_model(model_path)\n self.scaler = Util.load(scaler_path)\n" ]
[ [ "sklearn.linear_model.Ridge", "sklearn.preprocessing.StandardScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
byewokko/opengl-snippets
[ "d6b8439298b45197b2e204df80f1c632f0266348" ]
[ "glittercurtain.py" ]
[ "import numpy as np\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\n\nclass Grid:\n def __init__(self):\n self.size = (30, 20, 3)\n self.grid = np.random.random(self.size)\n self.xspacing = 8\n self.yspacing = 8\n self.width_ratio = 1\n self.height_ratio = 1\n self.color = np.array((0.5, 0, 0.2))\n self.flash_color = np.array((0.5, 0.3, 0.3))\n self.color_smoothing = 3\n\n def update(self, t):\n flash = (np.random.random(self.size) > 0.999) * self.flash_color\n self.grid = (\n (self.color_smoothing*self.grid + np.random.random(self.size) * self.color) / (self.color_smoothing+1)\n ) + flash\n\n def draw(self, t):\n glPolygonMode(GL_FRONT, GL_FILL)\n for (i, j) in np.ndindex(self.size[:2]):\n glPushMatrix()\n glScale(self.xspacing, self.yspacing, 1)\n glTranslate(i, j, 0)\n glRotate(5*(12*t + 5*i + 2*j) % 360, 0, 0, 1)\n glColor3f(*self.grid[i, j, :])\n\n glBegin(GL_QUADS)\n glVertex2fv((0, 0))\n glVertex2fv((self.height_ratio, 0))\n glVertex2fv((self.height_ratio, self.width_ratio))\n glVertex2fv((0, self.width_ratio))\n glEnd()\n\n glPopMatrix()\n\n\nif __name__ == \"__main__\":\n import pygame as pg\n pg.init()\n pg.display.set_mode((800, 640), pg.DOUBLEBUF | pg.OPENGL)\n display_compensation = (1, 800/640, 1)\n clock = pg.time.Clock()\n grid = Grid()\n\n stop = False\n while not stop:\n t = pg.time.get_ticks() / 1000\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n stop = True\n if event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:\n stop = True\n\n grid.update(t)\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glPushMatrix()\n\n # Scale to fit the whole field\n glScale(1/100, 1/100, 1)\n # Translate so that 0, 0 is bottom left\n glTranslate(-100, -100, 0)\n # Compensate display ratio distortion\n glScale(*display_compensation)\n\n grid.draw(t)\n\n glPopMatrix()\n pg.display.flip()\n\n clock.tick(40)\n" ]
[ [ "numpy.ndindex", "numpy.array", "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AaronLeong/cvlib
[ "5afe9804df2c162d8132f18ad0d9c9f7c2220dd0" ]
[ "cvlib/nn/instconv2d.py" ]
[ "# coding=utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules import conv\nfrom torch.nn.modules.utils import _pair\n\n\nclass InstConv2d(conv._ConvNd):\n\n r\"\"\"Applies a 2D convolution over an input signal composed of several input\n planes.\n\n In the simplest case, the output value of the layer with input size\n :math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`\n can be precisely described as:\n\n .. math::\n\n \\begin{array}{ll}\n out(N_i, C_{out_j}) = bias(C_{out_j})\n + \\sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \\star input(N_i, k)\n \\end{array}\n\n where :math:`\\star` is the valid 2D `cross-correlation`_ operator,\n :math:`N` is a batch size, :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n | :attr:`stride` controls the stride for the cross-correlation, a single\n number or a tuple.\n | :attr:`padding` controls the amount of implicit zero-paddings on both\n | sides for :attr:`padding` number of points for each dimension.\n | :attr:`dilation` controls the spacing between the kernel points; also\n known as the à trous algorithm. It is harder to describe, but this `link`_\n has a nice visualization of what :attr:`dilation` does.\n | :attr:`groups` controls the connections between inputs and outputs.\n `in_channels` and `out_channels` must both be divisible by `groups`.\n | At groups=1, all inputs are convolved to all outputs.\n | At groups=2, the operation becomes equivalent to having two conv\n layers side by side, each seeing half the input channels,\n and producing half the output channels, and both subsequently\n concatenated.\n At groups=`in_channels`, each input channel is convolved with its\n own set of filters (of size `out_channels // in_channels`).\n\n The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n\n - a single ``int`` -- in which case the same value is used for the height and width dimension\n - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,\n and the second `int` for the width dimension\n\n .. note::\n\n Depending of the size of your kernel, several (of the last)\n columns of the input might be lost, because it is a valid `cross-correlation`_,\n and not a full `cross-correlation`_.\n It is up to the user to add proper padding.\n\n .. note::\n\n The configuration when `groups == in_channels` and `out_channels = K * in_channels`\n where `K` is a positive integer is termed in literature as depthwise convolution.\n\n In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`, if you want a\n depthwise convolution with a depthwise multiplier `K`,\n then you use the constructor arguments\n :math:`(in\\_channels=C_{in}, out\\_channels=C_{in} * K, ..., groups=C_{in})`\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``\n\n Shape:\n - Input: :math:`(N, C_{in}, H_{in}, W_{in})`\n - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where\n :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\\_size[0] - 1) - 1) / stride[0] + 1)`\n :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\\_size[1] - 1) - 1) / stride[1] + 1)`\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape\n (out_channels, in_channels,\n kernel_size[0], kernel_size[1])\n bias (Tensor): the learnable bias of the module of shape (out_channels)\n\n W(Tensor): Spectrally normalized weight\n\n u (Tensor): the right largest singular value of W.\n\n .. _cross-correlation:\n https://en.wikipedia.org/wiki/Cross-correlation\n\n .. _link:\n https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(InstConv2d, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding, dilation,\n False, _pair(0), groups, bias)\n # self.register_buffer('dropout_mask', torch.Tensor(1, out_channels).normal_())\n \n self.activation = nn.ReLU()\n self.weight_size = (1, in_channels) + kernel_size\n\n \n\n self.out_channels = out_channels\n self.in_channels = in_channels\n self.num_features = in_channels * out_channels\n #Affine transform parameters\n self.dropout_mask = nn.Parameter(torch.Tensor(\n out_channels, in_channels)) \n # self.weight = Parameter(torch.Tensor(\n # in_channels, out_channels // groups, *kernel_size)) \n \n \n #Parameter dropout initilization\n self.set_dropout_parameters()\n\n def set_dropout_parameters(self):\n self.dropout_mask.data.uniform_()\n\n def _inst_dropout(self,input,channel_mask):\n # input size n c h w\n # print('input', input.size())\n # print(channel_mask)\n channel_mask = self.activation(channel_mask)\n channel_mask = channel_mask.view(1,self.in_channels,1,1).expand(input.size())\n # print('channel_mask', channel_mask.size())\n # print(channel_mask)\n # channel_mask = channel_mask\n # print(channel_mask)\n # print('channel_mask', channel_mask.size())\n # x = F.dropout(input)\n return input * channel_mask\n # return input.mul_(channel_mask)\n\n def forward(self, input):\n # print('weight',self.weight.data)\n # print('size',self.weight.size())\n # w_ = torch.split(self.weight.data,3,dim=0)\n # print('w_',w_[0].size())\n # print(self.weight.size())\n # print(self.weight[0].view(1,2,3,3).size())\n # print(input.size())\n x = self._inst_dropout(input, self.dropout_mask[0])\n channel_output = F.conv2d(x, self.weight[0].view(self.weight_size), self.bias[0].view(1), self.stride,\n self.padding, self.dilation, self.groups)\n # self.weight.size(0): output channel size \n # self.weight.size(0)\n # W_ = torch.split(self.weight,1)\n for i in range(1, self.out_channels):\n # print(i)\n x = self._inst_dropout(input, self.dropout_mask[i])\n channel_output = torch.cat([channel_output,F.conv2d(x, self.weight[i].view(self.weight_size), self.bias[i].view(1), self.stride,\n self.padding, self.dilation, self.groups)], 1)\n\n # print('channel_output', channel_output.size())\n return channel_output\n # return F.conv2d(input, self.weight[0].view(1,2,3,3), self.bias[0].view(1), self.stride,\n # self.padding, self.dilation, 
self.groups)\n\nclass InstConv2dv2(conv._ConvNd):\n r'''\n _inst_dropout weight\n '''\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(InstConv2dv2, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding, dilation,\n False, _pair(0), groups, bias)\n # self.register_buffer('dropout_mask', torch.Tensor(1, out_channels).normal_())\n \n self.activation = nn.ReLU(True)\n self.weight_size = (1, in_channels) + kernel_size\n\n \n\n self.out_channels = out_channels\n self.in_channels = in_channels\n self.num_features = in_channels * out_channels\n #Affine transform parameters\n self.dropout_mask = nn.Parameter(torch.Tensor(\n out_channels, in_channels)) \n # self.weight = Parameter(torch.Tensor(\n # in_channels, out_channels // groups, *kernel_size)) \n \n \n #Parameter dropout initilization\n self.set_dropout_parameters()\n\n def set_dropout_parameters(self):\n self.dropout_mask.data.fill_(1)\n\n def _inst_dropout(self,input,channel_mask):\n # input size n c h w\n # print('input', input.size())\n # print(channel_mask)\n channel_mask = self.activation(channel_mask)\n channel_mask = channel_mask.view(1,self.in_channels,1,1).expand(input.size())\n # print('channel_mask', channel_mask.size())\n # print(channel_mask)\n # channel_mask = channel_mask\n # print(channel_mask)\n # print('channel_mask', channel_mask.size())\n # x = F.dropout(input)\n return input * channel_mask\n # return input.mul_(channel_mask)\n\n def forward(self, input):\n # print('weight',self.weight.data)\n # print('size',self.weight.size())\n # w_ = torch.split(self.weight.data,3,dim=0)\n # print('w_',w_[0].size())\n # print(self.weight.size())\n # print(self.weight[0].view(1,2,3,3).size())\n # print(input.size())\n x = self._inst_dropout(input, self.dropout_mask[0])\n channel_output = F.conv2d(x, self.weight[0].view(self.weight_size), self.bias[0].view(1), self.stride,\n self.padding, self.dilation, self.groups)\n # self.weight.size(0): output channel size \n # self.weight.size(0)\n # W_ = torch.split(self.weight,1)\n for i in range(1, self.out_channels):\n # print(i)\n x = self._inst_dropout(input, self.dropout_mask[i])\n channel_output = torch.cat([channel_output,F.conv2d(x, self.weight[i].view(self.weight_size), self.bias[i].view(1), self.stride,\n self.padding, self.dilation, self.groups)], 1)\n\n print('channel_output', channel_output.size())\n return channel_output\n # return F.conv2d(input, self.weight[0].view(1,2,3,3), self.bias[0].view(1), self.stride,\n # self.padding, self.dilation, self.groups)\n\nclass InstConv2dv3(nn.Module):\n r'''\n __setattr__ conv\n '''\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n super(InstConv2dv3, self).__init__()\n if in_channels % groups != 0:\n raise ValueError('in_channels must be divisible by groups')\n if out_channels % groups != 0:\n raise ValueError('out_channels must be divisible by groups')\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.groups = groups\n self.activation = nn.ReLU()\n self.weight = Parameter(torch.Tensor(\n out_channels, in_channels))\n\n \n \n for i in range(out_channels):\n self.__setattr__('conv_%d'%i,nn.Conv2d(in_channels, 
1,kernel_size,stride,padding))\n \n # self.reset_parameters()\n\n # for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # nn.init.xavier_uniform_(m.weight)\n # nn.init.constant_(m.bias, 0.1)\n\n def reset_parameters(self):\n self.weight.data.fill_(1)\n\n def _dropout(self,input,_weight):\n \n # print(_weight.size())\n _w0 = self.activation(_weight)\n _w1 = _w0.view(1,_weight.size(0),1,1).expand_as(input)\n return input.mul_(_w1)\n # return input*_weight\n\n def forward(self, input):\n ouput = self.__getattr__('conv_0')(input)\n for i in range(1, self.out_channels):\n x = self._dropout(input, self.weight[i])\n ouput = torch.cat((ouput,self.__getattr__('conv_%d'%i)(x)),1)\n return ouput\n\n\nclass InstConv2dv5(conv._ConvNd):\n\n r\"\"\"Applies a 2D convolution over an input signal composed of several input\n planes.\n\n In the simplest case, the output value of the layer with input size\n :math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`\n can be precisely described as:\n\n .. math::\n\n \\begin{array}{ll}\n out(N_i, C_{out_j}) = bias(C_{out_j})\n + \\sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \\star input(N_i, k)\n \\end{array}\n\n where :math:`\\star` is the valid 2D `cross-correlation`_ operator,\n :math:`N` is a batch size, :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n | :attr:`stride` controls the stride for the cross-correlation, a single\n number or a tuple.\n | :attr:`padding` controls the amount of implicit zero-paddings on both\n | sides for :attr:`padding` number of points for each dimension.\n | :attr:`dilation` controls the spacing between the kernel points; also\n known as the à trous algorithm. It is harder to describe, but this `link`_\n has a nice visualization of what :attr:`dilation` does.\n | :attr:`groups` controls the connections between inputs and outputs.\n `in_channels` and `out_channels` must both be divisible by `groups`.\n | At groups=1, all inputs are convolved to all outputs.\n | At groups=2, the operation becomes equivalent to having two conv\n layers side by side, each seeing half the input channels,\n and producing half the output channels, and both subsequently\n concatenated.\n At groups=`in_channels`, each input channel is convolved with its\n own set of filters (of size `out_channels // in_channels`).\n\n The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n\n - a single ``int`` -- in which case the same value is used for the height and width dimension\n - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,\n and the second `int` for the width dimension\n\n .. note::\n\n Depending of the size of your kernel, several (of the last)\n columns of the input might be lost, because it is a valid `cross-correlation`_,\n and not a full `cross-correlation`_.\n It is up to the user to add proper padding.\n\n .. 
note::\n\n The configuration when `groups == in_channels` and `out_channels = K * in_channels`\n where `K` is a positive integer is termed in literature as depthwise convolution.\n\n In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`, if you want a\n depthwise convolution with a depthwise multiplier `K`,\n then you use the constructor arguments\n :math:`(in\\_channels=C_{in}, out\\_channels=C_{in} * K, ..., groups=C_{in})`\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``\n\n Shape:\n - Input: :math:`(N, C_{in}, H_{in}, W_{in})`\n - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where\n :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\\_size[0] - 1) - 1) / stride[0] + 1)`\n :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\\_size[1] - 1) - 1) / stride[1] + 1)`\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape\n (out_channels, in_channels, kernel_size[0], kernel_size[1])\n bias (Tensor): the learnable bias of the module of shape (out_channels)\n\n W(Tensor): Spectrally normalized weight\n\n u (Tensor): the right largest singular value of W.\n\n .. _cross-correlation:\n https://en.wikipedia.org/wiki/Cross-correlation\n\n .. _link:\n https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(InstConv2dv5, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding, dilation,\n False, _pair(0), groups, bias)\n self.register_buffer('dropout_mask',nn.Parameter(torch.Tensor(\n out_channels, in_channels, 1, 1)))\n # self.dropout_mask = nn.Parameter(torch.Tensor(\n # out_channels, in_channels, 1, 1)) \n # self.weight = Parameter(torch.Tensor(\n # in_channels, out_channels // groups, *kernel_size)) \n self.activation = nn.ReLU(True)\n \n #Parameter dropout initilization\n self.set_dropout_parameters()\n\n def set_dropout_parameters(self):\n self.dropout_mask.data.fill_(1)\n \n @property\n def W_(self):\n _m = self.activation(self.self.dropout_mask.data)\n w_mask = _m.expand_as(self.weight.data)\n return self.weight.data.mul_(w_mask)\n\n def forward(self, input):\n return F.conv2d(input, self.W_, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n\n\n\nclass InstConv2dv4(conv._ConvNd):\n\n r\"\"\"Applies a 2D convolution over an input signal composed of several input\n planes.\n\n In the simplest case, the output value of the layer with input size\n :math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`\n can be precisely described as:\n\n .. 
math::\n\n \\begin{array}{ll}\n out(N_i, C_{out_j}) = bias(C_{out_j})\n + \\sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \\star input(N_i, k)\n \\end{array}\n\n where :math:`\\star` is the valid 2D `cross-correlation`_ operator,\n :math:`N` is a batch size, :math:`C` denotes a number of channels,\n :math:`H` is a height of input planes in pixels, and :math:`W` is\n width in pixels.\n\n | :attr:`stride` controls the stride for the cross-correlation, a single\n number or a tuple.\n | :attr:`padding` controls the amount of implicit zero-paddings on both\n | sides for :attr:`padding` number of points for each dimension.\n | :attr:`dilation` controls the spacing between the kernel points; also\n known as the à trous algorithm. It is harder to describe, but this `link`_\n has a nice visualization of what :attr:`dilation` does.\n | :attr:`groups` controls the connections between inputs and outputs.\n `in_channels` and `out_channels` must both be divisible by `groups`.\n | At groups=1, all inputs are convolved to all outputs.\n | At groups=2, the operation becomes equivalent to having two conv\n layers side by side, each seeing half the input channels,\n and producing half the output channels, and both subsequently\n concatenated.\n At groups=`in_channels`, each input channel is convolved with its\n own set of filters (of size `out_channels // in_channels`).\n\n The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n\n - a single ``int`` -- in which case the same value is used for the height and width dimension\n - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,\n and the second `int` for the width dimension\n\n .. note::\n\n Depending of the size of your kernel, several (of the last)\n columns of the input might be lost, because it is a valid `cross-correlation`_,\n and not a full `cross-correlation`_.\n It is up to the user to add proper padding.\n\n .. note::\n\n The configuration when `groups == in_channels` and `out_channels = K * in_channels`\n where `K` is a positive integer is termed in literature as depthwise convolution.\n\n In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`, if you want a\n depthwise convolution with a depthwise multiplier `K`,\n then you use the constructor arguments\n :math:`(in\\_channels=C_{in}, out\\_channels=C_{in} * K, ..., groups=C_{in})`\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the output. 
Default: ``True``\n\n Shape:\n - Input: :math:`(N, C_{in}, H_{in}, W_{in})`\n - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where\n :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\\_size[0] - 1) - 1) / stride[0] + 1)`\n :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\\_size[1] - 1) - 1) / stride[1] + 1)`\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape\n (out_channels, in_channels, kernel_size[0], kernel_size[1])\n bias (Tensor): the learnable bias of the module of shape (out_channels)\n\n W(Tensor): Spectrally normalized weight\n\n u (Tensor): the right largest singular value of W.\n\n .. _cross-correlation:\n https://en.wikipedia.org/wiki/Cross-correlation\n\n .. _link:\n https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(InstConv2dv4, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding, dilation,\n False, _pair(0), groups, bias)\n \n self.dropout_mask = nn.Parameter(torch.Tensor(\n out_channels, in_channels,1,1)) \n self.size = out_channels* in_channels\n self.activation = nn.ReLU(True)\n \n #Parameter dropout initilization\n self.set_dropout_parameters()\n self.m = torch.Tensor(out_channels, in_channels,1,1).fill_(1).cuda(0)\n\n def set_dropout_parameters(self):\n self.dropout_mask.data.uniform_()\n def getinfo(self):\n # print(self.dropout_mask.data)\n idx = self.dropout_mask.data<=0\n idx = idx.resize_(self.size).type_as(self.m)\n # print(idx.size())\n # print(idx)\n m1 = self.m.clone().resize_(self.size)\n # print(m1.size())\n # print(m1)\n print('idx:',torch.dot(idx,m1),self.dropout_mask.data[0])\n @property\n def W_(self): \n\n # print(self.dropout_mask.size())\n # print(self.m.size())\n y = self.dropout_mask * self.m\n\n # print(y.size())\n return self.weight * self.activation(y).expand_as(self.weight)\n # return self.weight * self.activation(self.dropout_mask.data).expand_as(self.weight)\n\n\n def forward(self, input):\n self.getinfo()\n return F.conv2d(input, self.W_, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\nfrom torch.autograd import Variable\nif __name__ == '__main__':\n\n model = InstConv2dv4(3,1,1,1,0,bias=False)\n # print(model)\n x = torch.ones([1,3,3,3])\n # x = torch.split(x,2,dim=1)\n # print('x',x,x[0].size())\n x = Variable(x)\n \n print(model(x))\n\n # m1 = torch.Tensor(2,2,1,1).fill_(2)\n # m2 = torch.Tensor(2,2,1,1).fill_(2)\n # print(m1.resize_(4))\n # print(torch.dot(m1.resize_(1),m2.resize_(1)))\n" ]
[ [ "torch.ones", "torch.Tensor", "torch.nn.functional.conv2d", "torch.nn.Conv2d", "torch.nn.modules.utils._pair", "torch.nn.ReLU", "torch.dot", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TomWagg/gala
[ "1d207b746aba93cbdd26733e38ec312d7e9310bf" ]
[ "gala/potential/scf/tests/test_computecoeff_discrete.py" ]
[ "# coding: utf-8\n\nimport os\n\n# Third-party\nimport numpy as np\nfrom astropy.utils.data import get_pkg_data_filename\nfrom astropy.constants import G\nimport pytest\n\n# Project\nimport gala.potential as gp\nfrom gala.units import galactic\nfrom gala._cconfig import GSL_ENABLED\nfrom ..core import compute_coeffs_discrete\nfrom .._bfe import potential\n\n_G = G.decompose(galactic).value\n\nif not GSL_ENABLED:\n pytest.skip(\n \"skipping SCF tests: they depend on GSL\", allow_module_level=True\n )\n\n\ndef test_plummer():\n pos_path = os.path.abspath(get_pkg_data_filename(\"data/plummer-pos.dat.gz\"))\n\n scfbi = scfbi = np.loadtxt(pos_path)\n m_k = scfbi[:, 0] * 10 # masses sum to 0.1\n xyz = scfbi[:, 1:4]\n\n G = 1.0\n r_s = 1.0\n M = m_k.sum()\n pot = gp.PlummerPotential(m=1 / _G, b=r_s, units=galactic)\n\n nmax = 10\n lmax = 0\n\n Snlm, Tnlm = compute_coeffs_discrete(\n xyz, m_k, nmax=nmax, lmax=lmax, r_s=r_s\n )\n\n x = np.logspace(-2, 1, 512)\n xyz = np.zeros((len(x), 3))\n xyz[:, 0] = x\n\n # plot discrete vs. analytic potential\n true_pot = pot.energy(xyz.T).value\n bfe_pot = potential(xyz, Snlm, Tnlm, G, M, r_s)\n\n assert np.allclose(true_pot, bfe_pot, rtol=1e-2)\n\n\ndef test_coefficients():\n pos_path = os.path.abspath(get_pkg_data_filename(\"data/plummer-pos.dat.gz\"))\n coeff_path = os.path.abspath(\n get_pkg_data_filename(\"data/plummer_coeff_nmax10_lmax5.txt\")\n )\n scfbi = np.loadtxt(pos_path)\n m_k = scfbi[:, 0] # masses sum to 0.1\n xyz = scfbi[:, 1:4]\n\n scfcoeff = np.loadtxt(coeff_path)\n Snlm_true = scfcoeff[:, 0]\n Tnlm_true = scfcoeff[:, 1]\n\n r_s = 1.0\n nmax = 10\n lmax = 5\n\n Snlm, Tnlm = compute_coeffs_discrete(\n xyz, m_k, nmax=nmax, lmax=lmax, r_s=r_s\n )\n\n assert np.allclose(Snlm_true, Snlm.flatten(), rtol=1e-3)\n assert np.allclose(Tnlm_true, Tnlm.flatten(), rtol=1e-3)\n\n\ndef test_coeff_variances():\n pos_path = os.path.abspath(get_pkg_data_filename(\"data/plummer-pos.dat.gz\"))\n coeff_path = os.path.abspath(\n get_pkg_data_filename(\"data/plummer_coeff_var_nmax10_lmax5.txt\")\n )\n scfbi = np.loadtxt(pos_path)\n m_k = scfbi[:, 0] # masses sum to 0.1\n xyz = scfbi[:, 1:4]\n\n scfcoeff = np.loadtxt(coeff_path)\n Snlm_var_true = scfcoeff[:, 0]\n Tnlm_var_true = scfcoeff[:, 1]\n STnlm_var_true = scfcoeff[:, 2]\n\n r_s = 1.0\n nmax = 10\n lmax = 5\n\n *_, STnlm_Cov = compute_coeffs_discrete(\n xyz, m_k, nmax=nmax, lmax=lmax, r_s=r_s, compute_var=True\n )\n assert np.allclose(Snlm_var_true, STnlm_Cov[0, 0].flatten(), rtol=1e-3)\n assert np.allclose(Tnlm_var_true, STnlm_Cov[1, 1].flatten(), rtol=1e-3)\n assert np.allclose(STnlm_var_true, STnlm_Cov[0, 1].flatten(), rtol=1e-3)\n" ]
[ [ "numpy.logspace", "numpy.allclose", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
watsonjj/CHECLabPySB
[ "91330d3a6f510a392f635bd7f4abd2f77871322c", "91330d3a6f510a392f635bd7f4abd2f77871322c", "91330d3a6f510a392f635bd7f4abd2f77871322c", "91330d3a6f510a392f635bd7f4abd2f77871322c" ]
[ "sstcam_sandbox/old/mc_config/fadc_noise.py", "sstcam_sandbox/d190209_spectra/spe_c.py", "sstcam_sandbox/d190506_astri_publicity/create_gifs.py", "sstcam_sandbox/d190117_trigger_stability/extract_trigger.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nfrom CHECLabPy.plotting.setup import Plotter\nfrom CHECLabPy.spectrum_fitters.gentile import pedestal_signal, K\nfrom CHECLabPy.waveform_reducers.cross_correlation import CrossCorrelation\nfrom target_calib import CameraConfiguration\n\n\nclass SPEHist(Plotter):\n def plot(self, hist, edges, between, x, y, label):\n self.ax.hist(between, bins=edges, weights=hist, alpha=0.2)\n self.ax.plot(x, y, label=label)\n\n def finish(self):\n self.ax.set_xlabel(\"Amplitude (mV)\")\n self.ax.set_ylabel(\"N\")\n self.add_legend()\n\n\ndef main():\n input_path = \"/Volumes/gct-jason/thesis_data/checs/lab/dynrange/tf/tf_poly/spe.h5\"\n file_dir = os.path.dirname(os.path.abspath(__file__))\n output_dir = os.path.join(file_dir, \"outputs\")\n\n dead = [677, 293, 27, 1925]\n\n store = pd.HDFStore(input_path)\n df = store['coeff_pixel']\n df_array = store['array_camera']\n df = df.loc[~df['pixel'].isin(dead)]\n df_mean = df.mean().to_dict()\n\n norm = (df_mean['norm0'] + df_mean['norm1'] + df_mean['norm2'])/3\n\n config = CameraConfiguration(\"1.1.0\")\n ref_path = config.GetReferencePulsePath()\n cc = CrossCorrelation(1, 96, reference_pulse_path=ref_path)\n\n d = dict(\n norm=1,\n eped=cc.get_pulse_height(df_mean['eped']),\n eped_sigma=cc.get_pulse_height(df_mean['eped_sigma']),\n lambda_=1\n )\n\n hist = df_array.loc[0, 'hist'] / (norm * 1000)\n edges = cc.get_pulse_height(df_array.loc[0, 'edges'])\n between = cc.get_pulse_height(df_array.loc[0, 'between'])\n\n x = np.linspace(-5, 15, 1000)\n y = pedestal_signal(x, **d)\n\n p_hist = SPEHist()\n label = \"fadc_noise = eped_sigma = {:.3f}\".format(d['eped_sigma'])\n p_hist.plot(hist, edges, between, x, y, label)\n output_path = os.path.join(output_dir, \"checs_fadc_noise.pdf\")\n p_hist.save(output_path)\n\n\nif __name__ == '__main__':\n main()\n", "import ctypes\nimport numpy as np\nimport os\n\n\nlib = np.ctypeslib.load_library(\"spe_functions\", os.path.dirname(__file__))\nmapm_c = lib.mapm\nmapm_c.restype = None\nmapm_c.argtypes = [\n np.ctypeslib.ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n np.ctypeslib.ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n]\n\nsipm_c = lib.sipm\nsipm_c.restype = None\nsipm_c.argtypes = [\n np.ctypeslib.ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n np.ctypeslib.ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n]\n\n\ndef mapm(x, norm, eped, eped_sigma, spe, spe_sigma, lambda_, **kwargs):\n y = np.zeros(x.size, dtype=np.double)\n mapm_c(x, y, y.size, norm, eped, eped_sigma, spe, spe_sigma, lambda_)\n return y\n\n\ndef sipm(x, norm, eped, eped_sigma, spe, spe_sigma, lambda_, opct, pap, dap, **kwargs):\n y = np.zeros(x.size, dtype=np.double)\n sipm_c(x, y, y.size, norm, eped, eped_sigma, spe, spe_sigma,\n lambda_, opct, pap, dap)\n return y\n", "from CHECLabPy.plotting.setup import Plotter\nfrom CHECLabPy.plotting.camera import CameraImage\nfrom CHECLabPy.utils.mapping import get_ctapipe_camera_geometry\nfrom CHECLabPy.core.io import TIOReader\nfrom CHECLabPy.calib import TimeCalibrator\nfrom CHECOnsky.calib import OnskyAmplitudeCalibrator, \\\n get_nudge_and_temperature_from_reader, OnskyExtractor\nfrom CHECOnsky.calib import 
obtain_cleaning_mask\nimport argparse\nfrom argparse import ArgumentDefaultsHelpFormatter as Formatter\nfrom os.path import dirname, splitext, join\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom matplotlib import animation\nfrom matplotlib import pyplot as plt\n\n\nclass CameraAnimation(Plotter):\n def __init__(self, mapping, **kwargs):\n super().__init__(sidebyside=True, **kwargs)\n\n figsize = self.get_figsize()\n\n self.fig_image = plt.figure(figsize=figsize)\n self.ax_image = self.fig_image.add_subplot(1, 1, 1)\n self.ci_image = CameraImage.from_mapping(mapping, ax=self.ax_image)\n self.ci_image.add_colorbar(\"Pixel Amplitude (p.e.)\", pad=-0.2)\n\n self.fig_gf = plt.figure(figsize=figsize)\n self.ax_gf = self.fig_gf.add_subplot(1, 1, 1)\n self.ci_gf = CameraImage.from_mapping(mapping, ax=self.ax_gf)\n self.ci_gf.add_colorbar(\"Pixel Amplitude (mV)\", pad=-0.2)\n\n figsize_combined = figsize\n figsize_combined[0] *= 1.5\n self.fig_combined = plt.figure(figsize=figsize_combined)\n self.ax_cgf = self.fig_combined.add_subplot(1, 2, 1)\n self.ax_cimage = self.fig_combined.add_subplot(1, 2, 2)\n self.ci_cgf = CameraImage.from_mapping(mapping, ax=self.ax_cgf)\n # self.ci_cgf.add_colorbar(\"Pixel Amplitude (mV)\", pad=0)\n self.ci_cimage = CameraImage.from_mapping(mapping, ax=self.ax_cimage)\n self.ci_cimage.add_colorbar(\"Pixel Amplitude (p.e.)\", pad=0)\n self.fig_combined.subplots_adjust(\n left=0.01, right=0.95, top=0.90, bottom=0.05, wspace=0, hspace=0\n )\n\n self.meta = None\n self.image = None\n self.waveforms = None\n\n def set_meta(self, i, iobs, iev, tduration):\n self.meta = (i, iobs, iev, tduration)\n\n def set_image(self, image, min_, max_):\n self.image = (image, min_, max_)\n\n def set_waveforms(self, waveforms, min_, max_):\n self.waveforms = (waveforms, min_, max_)\n\n @property\n def n_timeslices(self):\n return self.waveforms[0].shape[1]\n\n def animate(self, output_dir, interval=20):\n dpi = 115\n\n dir_image = join(output_dir, \"image\")\n dir_goldfish = join(output_dir, \"goldfish\")\n dir_combined = join(output_dir, \"combined\")\n\n self.create_directory(dir_image)\n self.create_directory(dir_goldfish)\n self.create_directory(dir_combined)\n\n index, iobs, iev, tduration = self.meta\n filename = f\"i{index}_r{iobs}_e{iev}\"\n\n image, imin_, imax_ = self.image\n self.ci_image.set_limits_minmax(imin_, imax_)\n self.ci_image.image = image\n self.ci_image.save(join(dir_image, f\"{filename}.png\"), dpi=dpi)\n\n waveforms, fmin_, fmax_ = self.waveforms\n self.ci_gf.set_limits_minmax(fmin_, fmax_)\n self.ci_cgf.set_limits_minmax(fmin_, fmax_)\n self.ci_cimage.set_limits_minmax(imin_, imax_)\n self.ci_cimage.image = image\n self.ci_cimage.ax.set_title(f\"Duration = {tduration:.0f} ns\")\n with tqdm(total=self.n_timeslices*2, desc=\"Animating\") as pbar:\n def animate_goldfish(i):\n pbar.update(1)\n frame = waveforms[:, i]\n self.ci_gf.image = frame\n\n def animate_combined(i):\n pbar.update(1)\n frame = waveforms[:, i]\n self.ci_cgf.image = frame\n self.ci_cgf.ax.set_title(f\"T= {i*4} ns\")\n\n anim_goldfish = animation.FuncAnimation(\n self.fig_gf, animate_goldfish, frames=self.n_timeslices,\n interval=interval\n )\n anim_goldfish.save(\n join(dir_goldfish, f\"{filename}.gif\"),\n writer='imagemagick', dpi=dpi\n )\n\n anim_combined = animation.FuncAnimation(\n self.fig_combined, animate_combined, frames=self.n_timeslices,\n interval=interval\n )\n anim_combined.save(\n join(dir_combined, f\"{filename}.gif\"),\n writer='imagemagick', dpi=dpi\n 
)\n\n\ndef main():\n description = 'Loop over R0 or R1 file and plot camera images'\n parser = argparse.ArgumentParser(description=description,\n formatter_class=Formatter)\n parser.add_argument('-f', '--file', dest='input_path', required=True,\n help='path to a hillas list file created by '\n 'generate_list_from_hillas')\n parser.add_argument('-n', '--max_events', dest='max_events', type=int,\n help='number of events to plot')\n args = parser.parse_args()\n\n input_path = args.input_path\n output_dir = splitext(input_path)[0]\n max_events = args.max_events\n\n df = pd.read_csv(input_path, sep='\\t')\n\n first_path = df.iloc[0]['path'].replace(\"_hillas.h5\", \"_r1.tio\")\n first_reader = TIOReader(first_path)\n n_pixels = first_reader.n_pixels\n n_samples = first_reader.n_samples\n mapping = first_reader.mapping\n mapping.metadata['size'] *= 1.01 # TODO: WHY?!\n reference_pulse_path = first_reader.reference_pulse_path\n geom = get_ctapipe_camera_geometry(\n mapping, plate_scale=37.56e-3\n )\n charge_extractor = OnskyExtractor(\n n_pixels, n_samples,\n mapping=mapping,\n reference_pulse_path=reference_pulse_path,\n )\n time_calibrator = TimeCalibrator()\n\n # Open all files\n hillas_paths = set()\n for _, row in df.iterrows():\n hillas_paths.add(row['path'])\n readers = dict()\n amplitude_calibrators = dict()\n for path in hillas_paths:\n r1_path = path.replace(\"_hillas.h5\", \"_r1.tio\")\n reader = TIOReader(r1_path)\n nudge, temperature = get_nudge_and_temperature_from_reader(reader)\n amplitude_calibrator = OnskyAmplitudeCalibrator(nudge, temperature)\n readers[path] = reader\n amplitude_calibrators[path] = amplitude_calibrator\n\n p_animation = CameraAnimation(mapping)\n\n n_events = df.index.size\n if max_events is not None and n_events > max_events:\n n_events = max_events\n\n desc = \"Looping over events\"\n for i, row in tqdm(df.iterrows(), total=n_events, desc=desc):\n if i >= n_events:\n break\n\n hillas_path = row['path']\n iev = row['iev']\n iobs = row['iobs']\n tduration = row['tduration']\n\n reader = readers[hillas_path]\n amplitude_calibrator = amplitude_calibrators[hillas_path]\n\n waveforms = reader[iev]\n\n shifted = time_calibrator(waveforms)\n extracted = charge_extractor.process(shifted)\n charge = extracted['charge_onsky']\n time = extracted['t_onsky']\n photons = amplitude_calibrator(charge, np.arange(n_pixels))\n pe = photons * 0.25\n\n mask = obtain_cleaning_mask(geom, photons, time)\n if not mask.any():\n msg = f\"No pixels survived cleaning for: RUN {iobs} IEV {iev}\"\n raise ValueError(msg)\n\n photons_ma = np.ma.masked_array(photons, mask=~mask)\n\n min_pixel = photons_ma.argmin()\n max_pixel = photons_ma.argmax()\n\n min_image = -4\n max_image = 0.7 * pe.max()\n\n min_gf = shifted[max_pixel, :20].min()\n max_gf = shifted[max_pixel].max() * 0.8\n\n st = int(np.min(time[mask]) - 3)\n et = int(np.max(time[mask]) + 6)\n st = st if st > 0 else 0\n et = et if et < n_samples else n_samples\n\n p_animation.set_meta(i, iobs, iev, tduration)\n p_animation.set_image(pe, min_image, max_image)\n p_animation.set_waveforms(shifted[:, st:et:4], min_gf, max_gf)\n p_animation.animate(output_dir, interval=50)\n\n\nif __name__ == '__main__':\n main()\n", "from sstcam_sandbox.d190117_trigger_stability import *\nfrom sstcam_sandbox import get_data, HDF5Writer\nimport numpy as np\nimport pandas as pd\nfrom IPython import embed\n\n\ndef read_trigger_file(path):\n sp_data = np.loadtxt(path, skiprows=1)\n superpixels = np.arange(512)\n\n df_list = []\n\n for iev, entry in 
enumerate(sp_data):\n time = pd.to_datetime(entry[0], unit='s')\n counts = entry[1:513]\n\n df_list.append(pd.DataFrame(dict(\n iev=iev,\n t_cpu=time,\n superpixel=superpixels,\n count=counts,\n )))\n\n df = pd.concat(df_list, ignore_index=True)\n return df\n\n\ndef process(trigger_path, output_path):\n df = read_trigger_file(trigger_path)\n\n with HDF5Writer(output_path) as writer:\n writer.write(data=df)\n\n\ndef process_file(file):\n trigger_path = file.trigger_path\n name = file.__class__.__name__\n output_path = get_data(\"d190117_trigger_stability/{}/trigger.h5\".format(name))\n process(trigger_path, output_path)\n\n\ndef main():\n files = [\n # d190111(),\n d190118(),\n ]\n [process_file(file) for file in files]\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.HDFStore", "numpy.linspace" ], [ "numpy.ctypeslib.ndpointer", "numpy.zeros" ], [ "pandas.read_csv", "numpy.min", "numpy.arange", "numpy.max", "matplotlib.animation.FuncAnimation", "numpy.ma.masked_array", "matplotlib.pyplot.figure" ], [ "numpy.arange", "pandas.concat", "pandas.to_datetime", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
toniklenk/pyins
[ "db18a6083dbd7397315095d9a5096cd515f7e248" ]
[ "pyins/integrate.py" ]
[ "\"\"\"Compute a navigation solution by integration of inertial readings.\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom . import earth\nfrom . import dcm\nfrom ._integrate import integrate_fast, integrate_fast_stationary\n\n\ndef coning_sculling(gyro, accel, order=1):\n \"\"\"Apply coning and sculling corrections to inertial readings.\n\n The algorithm assumes a polynomial model for the angular velocity and the\n specific force, fitting coefficients by considering previous time\n intervals. The algorithm for a linear approximation is well known and\n described in [1]_ and [2]_.\n\n The accelerometer readings are also corrected for body frame rotation\n during a sampling period.\n\n Parameters\n ----------\n gyro : array_like, shape (n_readings, 3)\n Gyro readings.\n accel : array_like, shape (n_readings, 3)\n Accelerometer readings.\n order : {0, 1, 2}, optional\n Angular velocity and specific force polynomial model order.\n Note that 0 means not applying non-commutative corrections at all.\n Default is 1.\n\n Returns\n -------\n theta : ndarray, shape (n_readings, 3)\n Estimated rotation vectors.\n dv : ndarray, shape (n_readings, 3)\n Estimated velocity increments.\n\n References\n ----------\n .. [1] P. G. Savage, \"Strapdown Inertial Navigation Integration Algorithm\n Design Part 1: Attitude Algorithms\", Journal of Guidance, Control,\n and Dynamics 1998, Vol. 21, no. 2.\n .. [2] P. G. Savage, \"Strapdown Inertial Navigation Integration Algorithm\n Design Part 2: Velocity and Position Algorithms\", Journal of\n Guidance, Control, and Dynamics 1998, Vol. 21, no. 2.\n \"\"\"\n if order not in [0, 1, 2]:\n raise ValueError(\"`order` must be 1, 2 or 3.\")\n\n gyro = np.asarray(gyro)\n accel = np.asarray(accel)\n\n if order == 0:\n coning = 0\n sculling = 0\n elif order == 1:\n coning = np.vstack((np.zeros(3), np.cross(gyro[:-1], gyro[1:]) / 12))\n sculling = np.vstack((np.zeros(3),\n (np.cross(gyro[:-1], accel[1:]) +\n np.cross(accel[:-1], gyro[1:])) / 12))\n elif order == 2:\n coning = (-121 * np.cross(gyro[2:], gyro[1:-1]) +\n 31 * np.cross(gyro[2:], gyro[:-2]) -\n np.cross(gyro[1:-1], gyro[:-2])) / 720\n sculling = (-121 * np.cross(gyro[2:], accel[1:-1]) +\n 31 * np.cross(gyro[2:], accel[:-2]) -\n np.cross(gyro[1:-1], accel[:-2]) -\n 121 * np.cross(accel[2:], gyro[1:-1]) +\n 31 * np.cross(accel[2:], gyro[:-2]) -\n np.cross(accel[1:-1], gyro[:-2])) / 720\n coning = np.vstack((np.zeros((2, 3)), coning))\n sculling = np.vstack((np.zeros((2, 3)), sculling))\n else:\n assert False\n\n rc = 0.5 * np.cross(gyro, accel)\n\n return gyro + coning, accel + sculling + rc\n\n\ndef integrate(dt, lat, lon, VE, VN, h, p, r, theta, dv, stamp=0):\n \"\"\"Integrate inertial readings.\n\n The algorithm described in [1]_ and [2]_ is used with slight\n simplifications. The position is updated using the trapezoid rule.\n\n Parameters\n ----------\n dt : float\n Sensors sampling period.\n lat, lon : float\n Initial latitude and longitude.\n VE, VN : float\n Initial East and North velocity.\n h, p, r : float\n Initial heading, pitch and roll.\n theta, dv : array_like, shape (n_readings, 3)\n Rotation vectors and velocity increments computed from gyro and\n accelerometer readings after applying coning and sculling\n corrections.\n stamp : int, optional\n Stamp of the initial point.\n\n Returns\n -------\n traj : DataFrame\n Computed trajectory.\n\n See Also\n --------\n coning_sculling : Apply coning and sculling corrections.\n\n References\n ----------\n .. [1] P. G. 
Savage, \"Strapdown Inertial Navigation Integration Algorithm\n Design Part 1: Attitude Algorithms\", Journal of Guidance, Control,\n and Dynamics 1998, Vol. 21, no. 2.\n .. [2] P. G. Savage, \"Strapdown Inertial Navigation Integration Algorithm\n Design Part 2: Velocity and Position Algorithms\", Journal of\n Guidance, Control, and Dynamics 1998, Vol. 21, no. 2.\n \"\"\"\n n_readings = theta.shape[0]\n lat_arr = np.empty(n_readings + 1)\n lon_arr = np.empty(n_readings + 1)\n VE_arr = np.empty(n_readings + 1)\n VN_arr = np.empty(n_readings + 1)\n Cnb_arr = np.empty((n_readings + 1, 3, 3))\n lat_arr[0] = np.deg2rad(lat)\n lon_arr[0] = np.deg2rad(lon)\n VE_arr[0] = VE\n VN_arr[0] = VN\n Cnb_arr[0] = dcm.from_hpr(h, p, r)\n\n earth_model = 0\n integrate_fast(dt, lat_arr, lon_arr, VE_arr, VN_arr, Cnb_arr, theta, dv,\n earth_model)\n\n lat_arr = np.rad2deg(lat_arr)\n lon_arr = np.rad2deg(lon_arr)\n h, p, r = dcm.to_hpr(Cnb_arr)\n\n index = pd.Index(stamp + np.arange(n_readings + 1), name='stamp')\n traj = pd.DataFrame(index=index)\n traj['lat'] = lat_arr\n traj['lon'] = lon_arr\n traj['VE'] = VE_arr\n traj['VN'] = VN_arr\n traj['h'] = h\n traj['p'] = p\n traj['r'] = r\n\n return traj\n\n\nclass Integrator:\n \"\"\"Class interface for integration of inertial readings.\n\n The algorithm described in [1]_ and [2]_ is used with slight simplifications.\n The position is updated using the trapezoid rule.\n\n Parameters\n ----------\n dt : float\n Sensors sampling period.\n lat, lon : float\n Initial latitude and longitude.\n VE, VN : float\n Initial East and North velocity.\n h, p, r : float\n Initial heading, pitch and roll.\n stamp : int, optional\n Time stamp of the initial point. Default is 0.\n\n Attributes\n ----------\n traj : DataFrame\n Computed trajectory so far.\n\n See Also\n --------\n coning_sculling : Apply coning and sculling corrections.\n\n References\n ----------\n .. [1] P. G. Savage, \"Strapdown Inertial Navigation Integration Algorithm\n Design Part 1: Attitude Algorithms\", Journal of Guidance, Control,\n and Dynamics 1998, Vol. 21, no. 2.\n .. [2] P. G. Savage, \"Strapdown Inertial Navigation Integration Algorithm\n Design Part 2: Velocity and Position Algorithms\", Journal of\n Guidance, Control, and Dynamics 1998, Vol. 21, no. 
2.\n \"\"\"\n INITIAL_SIZE = 10000\n\n def __init__(self, dt, lat, lon, VE, VN, h, p, r, stamp=0):\n self.dt = dt\n\n self.lat_arr = np.empty(self.INITIAL_SIZE)\n self.lon_arr = np.empty(self.INITIAL_SIZE)\n self.VE_arr = np.empty(self.INITIAL_SIZE)\n self.VN_arr = np.empty(self.INITIAL_SIZE)\n self.Cnb_arr = np.empty((self.INITIAL_SIZE, 3, 3))\n self.traj = None\n\n self._init_values = [lat, lon, VE, VN, h, p, r, stamp]\n self.reset()\n\n def reset(self):\n \"\"\"Clear computed trajectory except the initial point.\"\"\"\n lat, lon, VE, VN, h, p, r, stamp = self._init_values\n\n self.lat_arr[0] = np.deg2rad(lat)\n self.lon_arr[0] = np.deg2rad(lon)\n self.VE_arr[0] = VE\n self.VN_arr[0] = VN\n self.Cnb_arr[0] = dcm.from_hpr(h, p, r)\n\n self.traj = pd.DataFrame(index=pd.Index([stamp], name='stamp'))\n self.traj['lat'] = [lat]\n self.traj['lon'] = [lon]\n self.traj['VE'] = [VE]\n self.traj['VN'] = [VN]\n self.traj['h'] = [h]\n self.traj['p'] = [p]\n self.traj['r'] = [r]\n\n def integrate(self, theta, dv):\n \"\"\"Integrate inertial readings.\n\n The integration continues from the last computed value.\n\n Parameters\n ----------\n theta, dv : array_like, shape (n_readings, 3)\n Rotation vectors and velocity increments computed from gyro and\n accelerometer readings after applying coning and sculling\n corrections.\n\n Returns\n -------\n traj_last : DataFrame\n Added chunk of the trajectory. It contains n_readings + 1 rows\n including the last point before `theta` and `dv` where integrated.\n \"\"\"\n theta = np.asarray(theta)\n dv = np.asarray(dv)\n\n n_data = self.traj.shape[0]\n if n_data == 0:\n raise ValueError(\"No point to start integration from. \"\n \"Call `init` first.\")\n\n n_readings = theta.shape[0]\n size = self.lat_arr.shape[0]\n\n required_size = n_data + n_readings\n if required_size > self.lat_arr.shape[0]:\n new_size = max(2 * size, required_size)\n self.lat_arr.resize(new_size)\n self.lon_arr.resize(new_size)\n self.VE_arr.resize(new_size)\n self.VN_arr.resize(new_size)\n self.Cnb_arr.resize((new_size, 3, 3))\n\n earth_model = 0\n integrate_fast(self.dt, self.lat_arr, self.lon_arr, self.VE_arr,\n self.VN_arr, self.Cnb_arr, theta, dv, earth_model,\n offset=n_data-1)\n\n lat_arr = np.rad2deg(self.lat_arr[n_data: n_data + n_readings])\n lon_arr = np.rad2deg(self.lon_arr[n_data: n_data + n_readings])\n VE_arr = self.VE_arr[n_data: n_data + n_readings]\n VN_arr = self.VN_arr[n_data: n_data + n_readings]\n h, p, r = dcm.to_hpr(self.Cnb_arr[n_data: n_data + n_readings])\n\n index = pd.Index(self.traj.index[-1] + 1 + np.arange(n_readings),\n name='stamp')\n traj = pd.DataFrame(index=index)\n traj['lat'] = lat_arr\n traj['lon'] = lon_arr\n traj['VE'] = VE_arr\n traj['VN'] = VN_arr\n traj['h'] = h\n traj['p'] = p\n traj['r'] = r\n\n self.traj = self.traj.append(traj)\n\n return self.traj.iloc[-n_readings - 1:]\n\n def _correct(self, x):\n i = self.traj.shape[0] - 1\n d_lat = x[1] / earth.R0\n d_lon = x[0] / (earth.R0 * np.cos(self.lat_arr[i]))\n self.lat_arr[i] -= d_lat\n self.lon_arr[i] -= d_lon\n\n phi = x[4:7]\n phi[2] += d_lon * np.sin(self.lat_arr[i])\n\n VE_new = self.VE_arr[i] - x[2]\n VN_new = self.VN_arr[i] - x[3]\n\n self.VE_arr[i] = VE_new - phi[2] * VN_new\n self.VN_arr[i] = VN_new + phi[2] * VE_new\n\n self.Cnb_arr[i] = dcm.from_rv(phi).dot(self.Cnb_arr[i])\n h, p, r = dcm.to_hpr(self.Cnb_arr[i])\n\n self.traj.iloc[-1] = [np.rad2deg(self.lat_arr[i]),\n np.rad2deg(self.lon_arr[i]),\n self.VE_arr[i], self.VN_arr[i], h, p, r]\n\n\ndef integrate_stationary(dt, lat, Cnb, 
theta, dv):\n n_readings = theta.shape[0]\n V_arr = np.empty((n_readings + 1, 3))\n Cnb_arr = np.empty((n_readings + 1, 3, 3))\n lat = np.deg2rad(lat)\n V_arr[0] = 0\n Cnb_arr[0] = Cnb\n integrate_fast_stationary(dt, lat, Cnb_arr, V_arr, theta, dv)\n\n return Cnb_arr, V_arr\n" ]
[ [ "numpy.asarray", "numpy.arange", "pandas.Index", "pandas.DataFrame", "numpy.rad2deg", "numpy.sin", "numpy.cos", "numpy.deg2rad", "numpy.cross", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ConnectedSystems/pyapprox
[ "4f405654c707cba83d211f327c0f0fdbc95efa29", "4f405654c707cba83d211f327c0f0fdbc95efa29", "4f405654c707cba83d211f327c0f0fdbc95efa29" ]
[ "pyapprox/tests/test_utilities.py", "pyapprox/sparse_grid.py", "pyapprox/benchmarks/sensitivity_benchmarks.py" ]
[ "import unittest\nimport numpy as np\nfrom scipy.linalg import lu_factor, lu as scipy_lu, qr as qr_factorization\n\nfrom pyapprox.utilities import *\nfrom pyapprox.univariate_quadrature import gauss_jacobi_pts_wts_1D\n\n\nclass TestUtilities(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n\n def test_total_degree_space_dimension(self):\n nvars, degree = 2, 3\n nterms = total_degree_space_dimension(nvars, degree)\n assert nterms == 10\n\n nvars, degree = 3, 2\n nterms = total_degree_space_dimension(nvars, degree)\n assert nterms == 10\n\n for nvars in range(1, 5):\n for degree in range(1, 5):\n nterms_kk = total_degree_subspace_dimension(nvars, degree)\n assert nterms_kk == total_degree_space_dimension(\n nvars, degree)-total_degree_space_dimension(\n nvars, degree-1)\n\n def test_cartesian_product(self):\n # test when num elems = 1\n s1 = np.arange(0, 3)\n s2 = np.arange(3, 5)\n\n sets = np.array([[0, 3], [1, 3], [2, 3], [0, 4],\n [1, 4], [2, 4]], np.int)\n output_sets = cartesian_product([s1, s2], 1)\n assert np.array_equal(output_sets.T, sets)\n\n # # test when num elems > 1\n # s1 = np.arange( 0, 6 )\n # s2 = np.arange( 6, 10 )\n\n # sets = np.array( [[ 0, 1, 6, 7], [ 2, 3, 6, 7],\n # [ 4, 5, 6, 7], [ 0, 1, 8, 9],\n # [ 2, 3, 8, 9], [ 4, 5, 8, 9]], np.int )\n # output_sets = cartesian_product( [s1,s2], 2 )\n # assert np.array_equal( output_sets.T, sets )\n\n def test_outer_product(self):\n s1 = np.arange(0, 3)\n s2 = np.arange(3, 5)\n\n test_vals = np.array([0., 3., 6., 0., 4., 8.])\n output = outer_product([s1, s2])\n assert np.allclose(test_vals, output)\n\n output = outer_product([s1])\n assert np.allclose(output, s1)\n\n def test_truncated_pivoted_lu_factorization(self):\n np.random.seed(2)\n # test truncated_pivoted lu factorization\n A = np.random.normal(0, 1, (4, 4))\n scipy_LU, scipy_p = lu_factor(A)\n scipy_pivots = get_final_pivots_from_sequential_pivots(scipy_p)\n num_pivots = 3\n L, U, pivots = truncated_pivoted_lu_factorization(A, num_pivots)\n assert np.allclose(pivots, scipy_pivots[:num_pivots])\n assert np.allclose(A[pivots, :num_pivots], np.dot(L, U))\n P = get_pivot_matrix_from_vector(pivots, A.shape[0])\n assert np.allclose(P.dot(A[:, :num_pivots]), np.dot(L, U))\n\n # test truncated_pivoted lu factorization which enforces first\n # n rows to be chosen in exact order\n # mess up array so that if pivots are not enforced correctly a different\n # pivot order would be returne, Put best pivot in last place in matrix\n # and worst in first row, then enforce first and second rows to be chosen\n # first.\n tmp = A[pivots[0], :].copy()\n A[pivots[0], :] = A[pivots[-1], :].copy()\n A[pivots[-1], :] = tmp\n num_pivots = 3\n num_initial_rows = np.array([0, 1])\n L, U, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, num_initial_rows)\n assert np.allclose(A[pivots, :num_pivots], np.dot(L, U))\n assert np.allclose(pivots, [0, 1, 3])\n\n # test truncated_pivoted lu factorization which enforces first\n # n rows to be chosen in any order\n tmp = A[pivots[0], :].copy()\n A[pivots[0], :] = A[0, :].copy()\n A[0, :] = tmp\n num_pivots = 3\n num_initial_rows = 1\n L, U, pivots = truncated_pivoted_lu_factorization(A, num_pivots,\n num_initial_rows)\n assert np.allclose(A[pivots, :num_pivots], np.dot(L, U))\n assert np.allclose(pivots, [0, 3, 1])\n\n # Modify the above test to first factorize 4,3 A then factorize\n # B = [A; C] where C is 2*3 and if B was factorized without enforcing\n # A then the factors would be different. 
Then check that first\n # 4 rows of LU factors of B are the same as when A was factored.\n\n def test_tensor_product_quadrature(self):\n num_vars = 2\n alpha_poly = 1\n beta_poly = 2\n\n def univariate_quadrature_rule(n):\n x, w = gauss_jacobi_pts_wts_1D(n, alpha_poly, beta_poly)\n x = (x+1)/2.\n return x, w\n\n x, w = get_tensor_product_quadrature_rule(\n 100, num_vars, univariate_quadrature_rule)\n\n def function(x): return np.sum(x**2, axis=0)\n assert np.allclose(np.dot(function(x), w), 0.8)\n\n # samples = np.random.beta(beta_poly+1,alpha_poly+1,(num_vars,10000))\n # print function(samples).mean()\n\n def test_canonical_piecewise_quadratic_interpolation(self):\n num_mesh_points = 101\n mesh = np.linspace(0., 1., 3)\n mesh_vals = mesh**2\n # do not compare at right boundary because it will be zero\n interp_mesh = np.linspace(0., 1., num_mesh_points)[:-1]\n interp_vals = canonical_piecewise_quadratic_interpolation(\n interp_mesh, mesh_vals)\n assert np.allclose(interp_vals, interp_mesh**2)\n\n def test_piecewise_quadratic_interpolation(self):\n def function(x):\n return (x-0.5)**3\n num_mesh_points = 301\n mesh = np.linspace(0., 1., num_mesh_points)\n mesh_vals = function(mesh)\n # interp_mesh = np.random.uniform(0.,1.,101)\n interp_mesh = np.linspace(0., 1., 1001)\n ranges = [0, 1]\n interp_vals = piecewise_quadratic_interpolation(\n interp_mesh, mesh, mesh_vals, ranges)\n # print np.linalg.norm(interp_vals-function(interp_mesh))\n # import pylab as plt\n # I= np.argsort(interp_mesh)\n # plt.plot(interp_mesh[I],interp_vals[I],'k-')\n # plt.plot(mesh,mesh_vals,'o')\n # plt.show()\n assert np.linalg.norm(interp_vals-function(interp_mesh)) < 1e-6\n\n def test_add_columns_to_pivoted_lu_factorization(self):\n \"\"\"\n Let\n A = [1 2 4]\n [2 1 3]\n [3 2 4]\n\n Recursive Algorithm\n -------------------\n The following Permutation swaps the thrid and first rows\n P1 = [0 0 1]\n [0 1 0]\n [1 0 0]\n\n Gives\n P1*A = [3 2 4]\n [2 1 3]\n [1 2 4]\n\n Conceptually partition matrix into block matrix\n P1*A = [A11 A12]\n [A21 A22]\n\n = [1 0 ][u11 U12]\n [L21 L22][ 0 U22]\n = [u11 U12 ]\n [u11*L21 L21*U12+L22*U22]\n\n Then\n u11 = a11\n L21 = 1/a11 A21\n U12 = A12\n\n e.g.\n a11 = 3 L21 = [2/3] U12 = [2 4] u11 = 3\n [1/3]\n\n Because A22 = L21*U12+L22*U22\n L22*U22 = A22-L21*U12\n We also know L22=I\n\n LU sublock after 1 step is\n S1 = L22*U22 = A22-L21*U12\n\n = [1 3]-[4/3 8/3] = [-1/3 1/3]\n [2 4] [2/3 4/3] [ 4/3 8/3]\n\n LU after 1 step is\n LU1 = [u11 U12]\n [L21 S1 ]\n\n [3 2 4 ]\n = [1/3 -1/3 1/3 ]\n [2/3 4/3 8/32]\n\n The following Permutation swaps the first and second rows of S1\n P2 = [0 1]\n [1 0]\n\n Conceptually partition matrix into block matrix\n P2*S1 = [ 4/3 8/3] = [A11 A12]\n [-1/3 1/3] = [A21 A22]\n\n L21 = 1/a11 A21\n U12 = A12\n\n e.g.\n a11 = 4/3 L21 = [-1/4] U12 = [8/3] u11 = 4/3\n\n LU sublock after 1 step is\n S2 = A22-L21*U12\n = 1/3 + 1/4*8/3 = 1\n\n LU after 2 step is\n LU2 = [ 3 2 4 ]\n [1/3 u11 U12]\n [2/3 L21 S2 ]\n\n = [ 3 2 4 ]\n [1/3 4/3 8/3]\n [2/3 -1/4 S2 ]\n\n\n Matrix multiplication algorithm\n -------------------------------\n The following Permutation swaps the thrid and first rows\n P1 = [0 0 1]\n [0 1 0]\n [1 0 0]\n\n Gives\n P1*A = [3 2 4]\n [2 1 3]\n [1 2 4]\n\n Use Matrix M1 to eliminate entries in second and third row of column 1\n [ 1 0 1]\n M1 = [-2/3 1 0]\n [-1/3 0 1]\n\n So U factor after step 1 is\n U1 = M1*P1*A\n\n [3 2 4 ]\n = [0 -1/3 1/3 ]\n [0 4/3 8/32]\n\n The following Permutation swaps the third and second rows\n P2 = [1 0 0]\n [0 0 1]\n [0 1 
0]\n\n M2 = [1 0 0]\n [0 1 0]\n [0 1/4 1]\n\n U factor after step 2 is\n U2 = M2*P2*M1*P1*A\n\n [3 2 4 ]\n = [0 4/3 8/3 ]\n [0 0 1 ]\n\n L2 = (M2P2M1P1)^{-1}\n = [ 1 0 0]\n [1/3 1 0]\n [2/3 -1/4 1]\n\n P*A = P2*P1*A = L2U2\n \"\"\"\n A = np.random.normal(0, 1, (6, 6))\n\n num_pivots = 6\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False)\n\n LU_factor_init, pivots_init = \\\n truncated_pivoted_lu_factorization(\n A[:, :num_pivots], num_pivots, truncate_L_factor=False)\n\n new_cols = A[:, LU_factor_init.shape[1]:].copy()\n\n LU_factor_final = add_columns_to_pivoted_lu_factorization(\n LU_factor_init, new_cols, pivots_init[:num_pivots])\n assert np.allclose(LU_factor_final, LU_factor)\n\n A = np.random.normal(0, 1, (6, 6))\n\n num_pivots = 2\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False)\n\n LU_factor_init, pivots_init = \\\n truncated_pivoted_lu_factorization(\n A[:, :num_pivots], num_pivots, truncate_L_factor=False)\n\n new_cols = A[:, LU_factor_init.shape[1]:].copy()\n\n LU_factor_final = add_columns_to_pivoted_lu_factorization(\n LU_factor_init, new_cols, pivots_init[:num_pivots])\n assert np.allclose(LU_factor_final, LU_factor)\n\n def test_split_lu_factorization_matrix(self):\n A = np.random.normal(0, 1, (4, 4))\n num_pivots = A.shape[0]\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False)\n L_factor, U_factor = split_lu_factorization_matrix(LU_factor)\n assert np.allclose(L_factor.dot(U_factor),\n pivot_rows(pivots, A, False))\n\n A = np.random.normal(0, 1, (4, 4))\n num_pivots = 2\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False)\n\n L_factor, U_factor = split_lu_factorization_matrix(\n LU_factor, num_pivots)\n assert np.allclose(L_factor.dot(U_factor),\n pivot_rows(pivots, A, False))\n\n def test_add_rows_to_pivoted_lu_factorization(self):\n\n np.random.seed(3)\n A = np.random.normal(0, 1, (10, 3))\n\n num_pivots = A.shape[1]\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False)\n\n # create matrix for which pivots do not matter\n A = pivot_rows(pivots, A, False)\n # check no pivoting is necessary\n L, U, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=True)\n assert np.allclose(pivots, np.arange(num_pivots))\n\n LU_factor_init, pivots_init = \\\n truncated_pivoted_lu_factorization(\n A[:num_pivots, :], num_pivots, truncate_L_factor=False)\n\n new_rows = A[num_pivots:, :].copy()\n\n LU_factor_final = add_rows_to_pivoted_lu_factorization(\n LU_factor_init, new_rows, num_pivots)\n assert np.allclose(LU_factor_final, LU_factor)\n\n #######\n # only pivot some of the rows\n\n A = np.random.normal(0, 1, (10, 5))\n\n num_pivots = 3\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False)\n\n # create matrix for which pivots do not matter\n A = pivot_rows(pivots, A, False)\n # print(A.shape)\n # check no pivoting is necessary\n L, U, pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=True)\n assert np.allclose(pivots, np.arange(num_pivots))\n\n LU_factor_init, pivots_init = \\\n truncated_pivoted_lu_factorization(\n A[:num_pivots, :], num_pivots, truncate_L_factor=False)\n\n new_rows = A[num_pivots:, :].copy()\n\n LU_factor_final = add_rows_to_pivoted_lu_factorization(\n LU_factor_init, new_rows, num_pivots)\n assert np.allclose(LU_factor_final, 
LU_factor)\n\n def test_unprecondition_LU_factor(self):\n A = np.random.normal(0, 1, (4, 4))\n num_pivots = A.shape[0]\n precond_weights = 1/np.linalg.norm(A, axis=1)[:, np.newaxis]\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A*precond_weights, num_pivots, truncate_L_factor=False)\n\n unprecond_LU_factor, unprecond_pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False,\n num_initial_rows=pivots)\n L_unprecond, U_unprecond = split_lu_factorization_matrix(\n unprecond_LU_factor)\n assert np.allclose(unprecond_pivots, pivots)\n assert np.allclose(\n L_unprecond.dot(U_unprecond), pivot_rows(unprecond_pivots, A, False))\n\n precond_weights = pivot_rows(pivots, precond_weights, False)\n\n L, U = split_lu_factorization_matrix(LU_factor)\n W = np.diag(precond_weights[:, 0])\n Wi = np.linalg.inv(W)\n assert np.allclose(Wi.dot(L).dot(U), pivot_rows(pivots, A, False))\n assert np.allclose(\n (L/precond_weights).dot(U), pivot_rows(pivots, A, False))\n # inv(W)*L*W*inv(W)*U\n L = L/precond_weights*precond_weights.T\n U = U/precond_weights\n assert np.allclose(L.dot(U), pivot_rows(pivots, A, False))\n assert np.allclose(L, L_unprecond)\n assert np.allclose(U, U_unprecond)\n\n LU_factor = unprecondition_LU_factor(LU_factor, precond_weights)\n assert np.allclose(LU_factor, unprecond_LU_factor)\n\n A = np.random.normal(0, 1, (4, 4))\n num_pivots = 2\n precond_weights = 1/np.linalg.norm(A, axis=1)[:, np.newaxis]\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A*precond_weights, num_pivots, truncate_L_factor=False)\n L, U = split_lu_factorization_matrix(LU_factor, num_pivots)\n assert np.allclose(\n L.dot(U), pivot_rows(pivots[:num_pivots], A*precond_weights, False))\n\n unprecond_LU_factor, unprecond_pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False,\n num_initial_rows=pivots)\n L_unprecond, U_unprecond = split_lu_factorization_matrix(\n unprecond_LU_factor, num_pivots)\n assert np.allclose(unprecond_pivots, pivots)\n assert np.allclose(\n L_unprecond.dot(U_unprecond),\n pivot_rows(unprecond_pivots[:num_pivots], A, False))\n\n precond_weights = pivot_rows(pivots, precond_weights, False)\n LU_factor = unprecondition_LU_factor(\n LU_factor, precond_weights, num_pivots)\n assert np.allclose(LU_factor, unprecond_LU_factor)\n\n A = np.random.normal(0, 1, (5, 4))\n num_pivots = 3\n precond_weights = 1/np.linalg.norm(A, axis=1)[:, np.newaxis]\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A*precond_weights, num_pivots, truncate_L_factor=False)\n L, U = split_lu_factorization_matrix(LU_factor, num_pivots)\n assert np.allclose(\n L.dot(U), pivot_rows(pivots[:num_pivots], A*precond_weights, False))\n\n unprecond_LU_factor, unprecond_pivots = truncated_pivoted_lu_factorization(\n A, num_pivots, truncate_L_factor=False,\n num_initial_rows=pivots)\n L_unprecond, U_unprecond = split_lu_factorization_matrix(\n unprecond_LU_factor, num_pivots)\n assert np.allclose(unprecond_pivots, pivots)\n assert np.allclose(\n L_unprecond.dot(U_unprecond),\n pivot_rows(unprecond_pivots[:num_pivots], A, False))\n\n precond_weights = pivot_rows(pivots, precond_weights, False)\n LU_factor = unprecondition_LU_factor(\n LU_factor, precond_weights, num_pivots)\n assert np.allclose(LU_factor, unprecond_LU_factor)\n\n def check_LU_factor(self, LU_factor, pivots, num_pivots, A):\n L, U = split_lu_factorization_matrix(LU_factor, num_pivots)\n return np.allclose(L.dot(U), pivot_rows(pivots, A, False))\n\n def 
test_update_christoffel_preconditioned_lu_factorization(self):\n np.random.seed(3)\n A = np.random.normal(0, 1, (4, 4))\n\n precond_weights = 1/np.linalg.norm(A, axis=1)[:, np.newaxis]\n\n num_pivots = A.shape[1]\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A*precond_weights, num_pivots, truncate_L_factor=False)\n\n # create matrix for which pivots do not matter\n A_precond = pivot_rows(pivots, A*precond_weights, False)\n # check no pivoting is necessary\n L, U, pivots = truncated_pivoted_lu_factorization(\n A_precond, num_pivots, truncate_L_factor=True)\n assert np.allclose(pivots, np.arange(num_pivots))\n\n ii = 1\n A_sub = A[:, :ii].copy()\n precond_weights = 1/np.linalg.norm(A_sub, axis=1)[:, np.newaxis]\n A_sub *= precond_weights\n LU_factor, pivots = truncated_pivoted_lu_factorization(\n A_sub, num_pivots, truncate_L_factor=False)\n for ii in range(2, A.shape[1]):\n A_sub = A[:, :ii].copy()\n precond_weights_prev = precond_weights.copy()\n precond_weights = 1/np.linalg.norm(A_sub, axis=1)[:, np.newaxis]\n pivots_prev = pivots.copy()\n pivoted_precond_weights_prev = pivot_rows(\n pivots_prev, precond_weights_prev, False)\n pivoted_precond_weights = pivot_rows(\n pivots, precond_weights, False)\n\n # what is factorization using old precond weights but with\n # extra column\n true_LU_factor_extra_cols, p = truncated_pivoted_lu_factorization(\n A_sub*precond_weights_prev, ii-1, truncate_L_factor=False,\n num_initial_rows=pivots_prev)\n assert np.allclose(p, pivots_prev)\n assert self.check_LU_factor(\n true_LU_factor_extra_cols, pivots_prev, ii-1,\n A_sub*precond_weights_prev)\n new_cols = A_sub[:, ii-1:ii].copy()\n new_cols *= precond_weights_prev\n LU_factor = add_columns_to_pivoted_lu_factorization(\n LU_factor.copy(), new_cols, pivots_prev[:ii-1])\n assert np.allclose(LU_factor, true_LU_factor_extra_cols)\n assert self.check_LU_factor(\n LU_factor, pivots_prev, ii-1, A_sub*precond_weights_prev)\n\n # what is factorization with extra column but no preconditioning\n true_LU_factor_extra_cols_unprecond, p = \\\n truncated_pivoted_lu_factorization(\n A_sub, ii-1, truncate_L_factor=False,\n num_initial_rows=pivots_prev)\n assert np.allclose(p, pivots_prev)\n assert self.check_LU_factor(\n true_LU_factor_extra_cols_unprecond, pivots_prev, ii-1, A_sub)\n LU_factor_unprecond = unprecondition_LU_factor(\n LU_factor, pivoted_precond_weights_prev, ii-1)\n assert self.check_LU_factor(\n LU_factor_unprecond, pivots_prev, ii-1, A_sub)\n assert np.allclose(\n LU_factor_unprecond, true_LU_factor_extra_cols_unprecond)\n\n # what is factorization using new precond weights and\n # extra column\n true_LU_factor_extra_cols, _ = truncated_pivoted_lu_factorization(\n A_sub*precond_weights, ii-1, truncate_L_factor=False,\n num_initial_rows=pivots_prev)\n LU_factor = unprecondition_LU_factor(\n LU_factor, pivoted_precond_weights_prev/pivoted_precond_weights,\n ii-1)\n assert np.allclose(LU_factor, true_LU_factor_extra_cols)\n\n max_iters = A_sub.shape[1]\n LU_factor, pivots, it = continue_pivoted_lu_factorization(\n LU_factor.copy(), pivots_prev, ii-1, max_iters, num_initial_rows=0)\n\n true_LU_factor, _ = truncated_pivoted_lu_factorization(\n A_sub*precond_weights, num_pivots, truncate_L_factor=False,\n num_initial_rows=pivots)\n assert np.allclose(LU_factor, true_LU_factor)\n\n def test_cholesky_decomposition(self):\n nrows = 4\n A = np.random.normal(0., 1., (nrows, nrows))\n A = A.T.dot(A)\n L_np = np.linalg.cholesky(A)\n L = cholesky_decomposition(A)\n assert np.allclose(L, L_np)\n\n def 
test_pivoted_cholesky_decomposition(self):\n nrows, npivots = 4, 4\n A = np.random.normal(0., 1., (nrows, nrows))\n A = A.T.dot(A)\n L, pivots, error, flag = pivoted_cholesky_decomposition(A, npivots)\n assert np.allclose(L.dot(L.T), A)\n\n nrows, npivots = 4, 2\n A = np.random.normal(0., 1., (npivots, nrows))\n A = A.T.dot(A)\n L, pivots, error, flag = pivoted_cholesky_decomposition(A, npivots)\n assert L.shape == (nrows, npivots)\n assert pivots.shape[0] == npivots\n assert np.allclose(L.dot(L.T), A)\n\n # check init_pivots are enforced\n nrows, npivots = 4, 2\n A = np.random.normal(0., 1., (npivots+1, nrows))\n A = A.T.dot(A)\n L, pivots, error, flag = pivoted_cholesky_decomposition(A, npivots+1)\n L, new_pivots, error, flag = pivoted_cholesky_decomposition(\n A, npivots+1, init_pivots=pivots[1:2])\n assert np.allclose(new_pivots[:npivots+1], pivots[[1, 0, 2]])\n\n L = L[pivots, :]\n assert np.allclose(A[pivots, :][:, pivots], L.dot(L.T))\n\n assert np.allclose(A[np.ix_(pivots, pivots)], L.dot(L.T))\n\n P = get_pivot_matrix_from_vector(pivots, nrows)\n assert np.allclose(P.dot(A).dot(P.T), L.dot(L.T))\n\n A = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98.]])\n L, pivots, error, flag = pivoted_cholesky_decomposition(A, A.shape[0])\n\n # reorder entries of A so that cholesky requires pivoting\n true_pivots = np.array([2, 1, 0])\n A_no_pivots = A[true_pivots, :][:, true_pivots]\n L_np = np.linalg.cholesky(A_no_pivots)\n assert np.allclose(L[pivots, :], L_np)\n\n # Create A with which needs cholesky with certain pivots\n A = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98.]])\n true_pivots = np.array([1, 0, 2])\n A = A[true_pivots, :][:, true_pivots]\n L, pivots, error, flag = pivoted_cholesky_decomposition(A, A.shape[0])\n assert np.allclose(L[pivots, :], L_np)\n\n def test_restart_pivoted_cholesky(self):\n nrows = 10\n A = np.random.normal(0, 1, (nrows, nrows))\n A = A.T.dot(A)\n\n pivot_weights = np.random.uniform(1, 2, A.shape[0])\n L, pivots, error, flag = pivoted_cholesky_decomposition(\n A, A.shape[0], pivot_weights=pivot_weights)\n\n npivots = A.shape[0]-2\n full_L, full_pivots, full_error, flag, diag, init_error, \\\n ncompleted_pivots = pivoted_cholesky_decomposition(\n A, npivots, return_full=True, pivot_weights=pivot_weights)\n assert ncompleted_pivots == npivots\n\n npivots = A.shape[0]\n full_L, full_pivots, diag, chol_flag, ii, error = \\\n continue_pivoted_cholesky_decomposition(\n A, full_L, npivots, None, 0, True, pivot_weights,\n full_pivots, diag, ncompleted_pivots, init_error, econ=True)\n\n assert np.allclose(L, full_L)\n assert np.allclose(pivots, full_pivots)\n\n def test_update_cholesky_decomposition(self):\n nvars = 5\n B = np.random.normal(0, 1, (nvars, nvars))\n A = B.T.dot(B)\n\n L = np.linalg.cholesky(A)\n A_11 = A[:nvars-2, :nvars-2]\n A_12 = A[:nvars-2, nvars-2:]\n A_22 = A[nvars-2:, nvars-2:]\n assert np.allclose(np.block([[A_11, A_12], [A_12.T, A_22]]), A)\n L_11 = np.linalg.cholesky(A_11)\n L_up = update_cholesky_factorization(L_11, A_12, A_22)\n assert np.allclose(L, L_up)\n\n L_inv = np.linalg.inv(L)\n L_11_inv = np.linalg.inv(L_11)\n L_12_T = L[L_11.shape[0]:, :L_11.shape[1]]\n L_12 = L_12_T.T\n L_22 = L[L_11.shape[0]:, L_11.shape[0]:]\n assert np.allclose(\n L_inv, update_cholesky_factorization_inverse(L_11_inv, L_12, L_22))\n\n L_22_inv = np.linalg.inv(L_22)\n C = -np.dot(L_22_inv.dot(L_12.T), L_11_inv)\n A_inv = np.block(\n [[L_11_inv.T.dot(L_11_inv)+C.T.dot(C), C.T.dot(L_22_inv)],\n [L_22_inv.T.dot(C), L_22_inv.T.dot(L_22_inv)]])\n 
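# A_inv above is assembled blockwise from the Cholesky pieces, using\n        # C = -L_22^{-1} L_12^T L_11^{-1}, so that\n        # A^{-1} = [[L_11^{-T}L_11^{-1} + C^T C, C^T L_22^{-1}],\n        #           [L_22^{-T}C,                 L_22^{-T}L_22^{-1}]].\n        # The assertion below checks this against np.linalg.inv(A) directly.\n        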
assert np.allclose(A_inv, np.linalg.inv(A))\n\n N = np.random.normal(0, 1, A.shape)\n assert np.allclose(np.trace(np.linalg.inv(A).dot(B)), np.sum(A_inv*B))\n\n B_11 = B[:A_11.shape[0], :A_11.shape[1]]\n prev_trace = np.trace(np.linalg.inv(A_11).dot(B_11))\n trace = update_trace_involving_cholesky_inverse(\n L_11_inv, L_12, L_22_inv, B, prev_trace)\n assert np.allclose(trace, np.trace(np.linalg.inv(A).dot(B)))\n\n x = np.random.normal(0, 1, (nvars))\n y = solve_triangular(L, x, lower=True)\n z = solve_triangular(L.T, y, lower=False)\n\n x_1 = x[:L_11.shape[0]]\n y_1 = solve_triangular(L_11, x_1, lower=True)\n z_1 = solve_triangular(L_11.T, y_1, lower=False)\n\n x_up_2 = x[L_11.shape[0]:]\n y_up_1 = y_1\n y_up_2 = solve_triangular(L_22, x_up_2-L_12_T.dot(y_up_1), lower=True)\n assert np.allclose(y_up_1, y[:L_11.shape[0]])\n assert np.allclose(y_up_2, y[L_11.shape[0]:])\n z_up_2 = solve_triangular(L_22.T, y_up_2, lower=False)\n z_up_1 = solve_triangular(\n L_11.T, y_up_1 - L_12.dot(z_up_2), lower=False)\n assert np.allclose(z_up_2, z[L_11.shape[0]:])\n assert np.allclose(z_up_1, z[:L_11.shape[0]])\n assert np.allclose(\n z_up_1,\n z_1 - solve_triangular(L_11.T, L_12.dot(z_up_2), lower=False))\n\n def test_cholesky_decomposition_minimizing_trace_norm(self):\n \"\"\"\n Test how to compute pivot that minimizes trace norm\n \"\"\"\n n = 6\n B = np.random.normal(0, 1, (n, n))\n A = B.T.dot(B)\n # pivots = [1,2,0] causes issues with pya.pivoted_cholesky_decomposition\n\n if n == 4:\n a1 = A[2, 2]\n b1 = np.array([[A[1, 2], A[0, 2], A[3, 2]]]).T\n C1 = np.array([[A[1, 1], A[1, 0], A[1, 3]],\n [A[0, 1], A[0, 0], A[0, 3]],\n [A[3, 1], A[3, 0], A[3, 3]]])\n S1 = C1 - b1.dot(b1.T)/a1\n L1 = np.zeros((n, n))\n pivots1 = np.array([2, 1, 0, 3])\n L1[pivots1, 0] = A[pivots1[0], :]/np.sqrt(a1)\n assert np.allclose(A[np.ix_(pivots1, pivots1)][1:, 1:], C1)\n assert np.allclose(L1[:1, :1].dot(L1[:1, :1].T), A[2, 2])\n\n raw_pivots2 = np.array([1, 0, 2]) # choose first remaining pivot\n S2 = S1[np.ix_(raw_pivots2, raw_pivots2)]\n a2 = S2[0, 0]\n b2 = S2[1:, 0:1]\n C2 = S2[1:, 1:]\n S2 = C2 - b2.dot(b2.T)/a2\n L2 = L1.copy()\n swap_rows(L2, 1, raw_pivots2[0]+1)\n L2[1:, 1:2] = np.vstack([[[a2]], b2])/np.sqrt(a2)\n pivots = np.hstack([pivots1[0], pivots1[1:][raw_pivots2]])\n assert np.allclose(\n L2.dot(L2.T)[:2, :2], A[np.ix_(pivots[:2], pivots[:2])])\n\n a_list = [a1, a2]\n b_list = [b1, b2]\n C_list = [C1, C2]\n S_list = [S1, S2]\n\n trace_A = np.trace(A)\n S = A.copy()\n lvecs = np.zeros(A.shape)\n Smats = []\n traces = [0]\n pivots = np.arange(n)\n # use_pivoting = False\n use_pivoting = True\n for ii in range(n):\n # Given a new l vector we have\n # A_ii = [L l ] [L.T] = L.dot(L.T)+l.dot(l.T)\n # [l.T]\n # Thus trace(A_ii) = trace(L.dot(L.T)+l.dot(l.T))\n # = trace(L.dot(L.T))+trace(l.dot(l.T))\n\n pivot_vals = np.linalg.norm(S[:, :], axis=0)**2/np.diag(S)\n if use_pivoting is True:\n raw_pivot = np.argmax(pivot_vals)\n else:\n raw_pivot = 0 # do not pivot\n\n traces.append(traces[-1]+pivot_vals[raw_pivot])\n pivot = raw_pivot+ii\n S_pivots = np.arange(S.shape[0])\n swap_rows(S_pivots, 0, raw_pivot)\n S = S[np.ix_(S_pivots, S_pivots)]\n\n a = S[0, 0]\n indices = np.arange(1, S.shape[0])\n b = S[1:, 0:1].copy()\n C = S[1:, 1:].copy()\n nonzero_l = np.vstack([[[a]], b])/np.sqrt(a)\n swap_rows(lvecs, ii, pivot)\n lvecs[ii:, ii:ii+1] = nonzero_l\n swap_rows(pivots, ii, pivot)\n\n L_ii = lvecs[:, :ii+1]\n trace_S = trace_A - (\n np.trace(L_ii[:, :-1].dot(L_ii[:, :-1].T)) +\n np.linalg.norm(S[:, 0])**2/S[0, 
0])\n\n S = C-1/a*(b.dot(b.T))\n if ii < 2 and n == 4:\n assert np.allclose(C, C_list[ii])\n assert np.allclose(b, b_list[ii])\n assert np.allclose(a, a_list[ii])\n assert np.allclose(S, S_list[ii])\n assert np.allclose(S, S.T)\n Smats.append(S)\n A_ii = L_ii.dot(L_ii.T)\n assert np.allclose(\n A[np.ix_(pivots[:ii+1], pivots[:ii+1])], A_ii[:ii+1, :ii+1])\n assert np.allclose(\n (A[np.ix_(pivots, pivots)]-A_ii)[ii+1:, ii+1:], S)\n assert np.allclose(\n np.trace(A), np.trace(A_ii)+np.trace(S))\n assert np.allclose(\n np.trace(S), np.trace(A)-np.trace(A_ii))\n assert np.allclose(trace_S, np.trace(S))\n assert np.allclose(trace_A-traces[-1], np.trace(S))\n\n L_chol = np.linalg.cholesky(A[np.ix_(pivots, pivots)])\n assert np.allclose(L_chol, lvecs)\n\n L1, pivots1, error1, flag1 = pivoted_cholesky_decomposition(\n A, A.shape[0], econ=True, init_pivots=pivots)\n assert np.allclose(L_chol, L1[pivots])\n\n if use_pivoting is False:\n # This check only good if pivoting is not enforced\n for ii in range(n):\n E = A-L_chol[:, :ii+1].dot(L_chol[:, :ii+1].T)\n assert np.allclose(E[ii+1:, ii+1:], Smats[ii])\n L2, pivots2, error2, flag2 = pivoted_cholesky_decomposition(\n A, A.shape[0], econ=False)\n assert np.allclose(L2[pivots2], L_chol)\n assert np.allclose(pivots1, pivots2)\n\n def test_beta_pdf_on_ab(self):\n from scipy.stats import beta as beta_rv\n alpha_stat, beta_stat = 5, 2\n lb, ub = -2, 1\n xx = np.linspace(lb, ub, 100)\n vals = beta_pdf_on_ab(alpha_stat, beta_stat, lb, ub, xx)\n true_vals = beta_rv.pdf((xx-lb)/(ub-lb), alpha_stat, beta_stat)/(ub-lb)\n #true_vals = beta_rv.pdf(xx,alpha_stat,beta_stat,loc=lb,scale=ub-lb)\n assert np.allclose(vals, true_vals)\n\n import sympy as sp\n x = sp.Symbol('x')\n assert np.allclose(1,\n float(sp.integrate(beta_pdf_on_ab(alpha_stat, beta_stat, lb, ub, x),\n (x, [lb, ub]))))\n\n alpha_stat, beta_stat = 5, 2\n lb, ub = 0, 1\n xx = np.linspace(lb, ub, 100)\n vals = beta_pdf_on_ab(alpha_stat, beta_stat, lb, ub, xx)\n true_vals = beta_rv.pdf((xx-lb)/(ub-lb), alpha_stat, beta_stat)/(ub-lb)\n assert np.allclose(vals, true_vals)\n\n import sympy as sp\n x = sp.Symbol('x')\n assert np.allclose(\n 1,\n float(sp.integrate(beta_pdf_on_ab(alpha_stat, beta_stat, lb, ub, x),\n (x, [lb, ub]))))\n\n eps = 1e-7\n x = 0.5\n deriv = beta_pdf_derivative(alpha_stat, beta_stat, x)\n fd_deriv = (beta_pdf_on_ab(alpha_stat, beta_stat, 0, 1, x) -\n beta_pdf_on_ab(alpha_stat, beta_stat, 0, 1, x-eps))/eps\n assert np.allclose(deriv, fd_deriv)\n\n eps = 1e-7\n x = np.array([0.5, 0, -0.25])\n from functools import partial\n pdf_deriv = partial(beta_pdf_derivative, alpha_stat, beta_stat)\n deriv = pdf_derivative_under_affine_map(\n pdf_deriv, -1, 2, x)\n fd_deriv = (beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x) -\n beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x-eps))/eps\n assert np.allclose(deriv, fd_deriv)\n\n def test_compute_f_divergence(self):\n # KL divergence\n from scipy.stats import multivariate_normal\n nvars = 1\n mean = np.random.uniform(-0.1, 0.1, nvars)\n cov = np.diag(np.random.uniform(.5, 1, nvars))\n rv1 = multivariate_normal(mean, cov)\n rv2 = multivariate_normal(np.zeros(nvars), np.eye(nvars))\n def density1(x): return rv1.pdf(x.T)\n def density2(x): return rv2.pdf(x.T)\n\n # Integrate on [-radius,radius]\n # Note this induces small error by truncating domain\n radius = 10\n from pyapprox import get_tensor_product_quadrature_rule\n x, w = get_tensor_product_quadrature_rule(\n 400, nvars, np.polynomial.legendre.leggauss,\n transform_samples=lambda x: x*radius,\n 
density_function=lambda x: radius*np.ones(x.shape[1]))\n quad_rule = x, w\n div = compute_f_divergence(density1, density2, quad_rule, 'KL',\n normalize=False)\n true_div = 0.5*(np.diag(cov)+mean**2-np.log(np.diag(cov))-1).sum()\n assert np.allclose(div, true_div, rtol=1e-12)\n\n # Hellinger divergence\n from scipy.stats import beta\n a1, b1, a2, b2 = 1, 1, 2, 3\n rv1, rv2 = beta(a1, b1), beta(a2, b2)\n true_div = 2*(1-beta_fn((a1+a2)/2, (b1+b2)/2)/np.sqrt(\n beta_fn(a1, b1)*beta_fn(a2, b2)))\n\n x, w = get_tensor_product_quadrature_rule(\n 500, nvars, np.polynomial.legendre.leggauss,\n transform_samples=lambda x: (x+1)/2,\n density_function=lambda x: 0.5*np.ones(x.shape[1]))\n quad_rule = x, w\n div = compute_f_divergence(rv1.pdf, rv2.pdf, quad_rule, 'hellinger',\n normalize=False)\n assert np.allclose(div, true_div, rtol=1e-10)\n\n def test_num_entries_triangular_matrix(self):\n M = 4\n A = np.ones([M, M])\n L = np.tril(A)\n nentries = num_entries_square_triangular_matrix(\n M, include_diagonal=True)\n assert nentries == np.count_nonzero(L)\n\n M, N = 4, 3\n A = np.ones([M, N])\n L = np.tril(A)\n nentries = num_entries_rectangular_triangular_matrix(\n M, N, upper=False)\n assert nentries == np.count_nonzero(L)\n\n A = np.ones([M, N])\n U = np.triu(A)\n nentries = num_entries_rectangular_triangular_matrix(\n M, N, upper=True)\n assert nentries == np.count_nonzero(U)\n\n def test_flattened_rectangular_lower_triangular_matrix_index(self):\n\n M, N = 4, 3\n tril_indices = np.tril_indices(M, m=N)\n for nn in range(tril_indices[0].shape[0]):\n ii, jj = tril_indices[0][nn], tril_indices[1][nn]\n kk = flattened_rectangular_lower_triangular_matrix_index(\n ii, jj, M, N)\n assert kk == nn\n\n def test_evaluate_quadratic_form(self):\n nvars, nsamples = 3, 10\n A = np.random.normal(0, 1, nvars)\n A = A.T.dot(A)\n samples = np.random.uniform(0, 1, (nvars, nsamples))\n values1 = evaluate_quadratic_form(A, samples)\n\n values2 = np.zeros(samples.shape[1])\n for ii in range(samples.shape[1]):\n values2[ii] = samples[:, ii:ii+1].T.dot(A).dot(samples[:, ii:ii+1])\n\n assert np.allclose(values1, values2)\n\n def test_weighted_pivoted_cholesky(self):\n nrows, npivots = 4, 3\n A = np.random.normal(0., 1., (nrows, nrows))\n A = A.T.dot(A)\n weights = np.random.uniform(1, 2, (nrows))\n L, pivots, error, flag = pivoted_cholesky_decomposition(\n A, npivots, pivot_weights=weights)\n\n B = np.diag(np.sqrt(weights)).dot(A.dot(np.diag(np.sqrt(weights))))\n C = np.sqrt(weights)[:, np.newaxis]*A*np.sqrt(weights)\n assert np.allclose(B, C)\n L2, pivots2, error2, flag2 = pivoted_cholesky_decomposition(\n C, npivots, pivot_weights=None)\n\n # check pivots are the same\n assert np.allclose(pivots, pivots2)\n\n # check cholesky factors are the same\n # we have L2.dot(L2.T)=S.dot(A).dot(S)= S.dot(L.dot(L.T)).dot(S)\n # where S = np.diag(np.sqrt(weights)). 
So L2=S.dot(L)\n assert np.allclose(\n np.sqrt(weights[pivots, np.newaxis])*L[pivots, :npivots],\n L2[pivots, :npivots])\n\n def cholesky_qr_pivoting_equivalence(self):\n nrows, npivots = 4, 4\n A = np.random.normal(0., 1., (nrows, nrows))\n B = A.T.dot(A)\n cholL, chol_pivots, error, flag = pivoted_cholesky_decomposition(\n B, npivots)\n\n import scipy\n Q, R, P = scipy.linalg.qr(A, pivoting=True)\n assert np.allclose(P, chol_pivots)\n\n # print(R.T,'\\n',cholL[chol_pivots])\n assert np.allclose(np.absolute(R.T), np.absolute(cholL[chol_pivots]))\n\n def test_least_sqaures_loo_cross_validation(self):\n degree = 2\n alpha = 1e-3\n nsamples = 2*(degree+1)\n samples = np.random.uniform(-1, 1, (1, nsamples))\n basis_mat = samples.T**np.arange(degree+1)\n values = np.exp(samples).T\n cv_errors, cv_score, coef = leave_one_out_lsq_cross_validation(\n basis_mat, values, alpha)\n true_cv_errors = np.empty_like(cv_errors)\n for ii in range(nsamples):\n samples_ii = np.hstack((samples[:, :ii], samples[:, ii+1:]))\n basis_mat_ii = samples_ii.T**np.arange(degree+1)\n values_ii = np.vstack((values[:ii], values[ii+1:]))\n coef_ii = np.linalg.lstsq(\n basis_mat_ii.T.dot(basis_mat_ii) +\n alpha*np.eye(basis_mat.shape[1]\n ), basis_mat_ii.T.dot(values_ii),\n rcond=None)[0]\n true_cv_errors[ii] = (basis_mat[ii].dot(coef_ii)-values[ii])\n assert np.allclose(cv_errors, true_cv_errors)\n assert np.allclose(\n cv_score, np.sqrt(np.sum(true_cv_errors**2, axis=0)/nsamples))\n\n def test_leave_many_out_lsq_cross_validation(self):\n degree = 2\n nsamples = 2*(degree+1)\n samples = np.random.uniform(-1, 1, (1, nsamples))\n basis_mat = samples.T**np.arange(degree+1)\n values = np.exp(samples).T*100\n alpha = 1e-3 # ridge regression regularization parameter value\n\n assert nsamples % 2 == 0\n nfolds = nsamples//3\n fold_sample_indices = get_random_k_fold_sample_indices(\n nsamples, nfolds)\n cv_errors, cv_score, coef = leave_many_out_lsq_cross_validation(\n basis_mat, values, fold_sample_indices, alpha)\n\n true_cv_errors = np.empty_like(cv_errors)\n for kk in range(len(fold_sample_indices)):\n K = np.ones(nsamples, dtype=bool)\n K[fold_sample_indices[kk]] = False\n basis_mat_kk = basis_mat[K, :]\n gram_mat_kk = basis_mat_kk.T.dot(basis_mat_kk) + np.eye(\n basis_mat_kk.shape[1])*alpha\n values_kk = basis_mat_kk.T.dot(values[K, :])\n coef_kk = np.linalg.lstsq(gram_mat_kk, values_kk, rcond=None)[0]\n true_cv_errors[kk] = basis_mat[fold_sample_indices[kk], :].dot(\n coef_kk)-values[fold_sample_indices[kk]]\n # print(cv_errors, true_cv_errors)\n assert np.allclose(cv_errors, true_cv_errors)\n true_cv_score = np.sqrt((true_cv_errors**2).sum(axis=(0, 1))/nsamples)\n assert np.allclose(true_cv_score, cv_score)\n\n rsq = get_cross_validation_rsquared_coefficient_of_variation(\n cv_score, values)\n\n print(rsq)\n\n def test_integrate_using_univariate_gauss_legendre_quadrature_unbounded(self):\n from scipy.stats import norm, gamma, beta\n\n # unbounded\n rv = norm(0, 1)\n\n def integrand(x):\n return rv.pdf(x)[:, None]\n lb, ub = rv.interval(1)\n nquad_samples = 100\n res = integrate_using_univariate_gauss_legendre_quadrature_unbounded(\n integrand, lb, ub, nquad_samples, interval_size=2)\n assert np.allclose(res, 1)\n\n # left bounded\n rv = gamma(1)\n\n def integrand(x):\n return rv.pdf(x)[:, None]\n lb, ub = rv.interval(1)\n nquad_samples = 100\n res = integrate_using_univariate_gauss_legendre_quadrature_unbounded(\n integrand, lb, ub, nquad_samples, interval_size=2)\n assert np.allclose(res, 1)\n\n # bounded\n rv = beta(20, 
10, -2, 5)\n\n def integrand(x):\n return rv.pdf(x)[:, None]\n lb, ub = rv.interval(1)\n nquad_samples = 100\n res = integrate_using_univariate_gauss_legendre_quadrature_unbounded(\n integrand, lb, ub, nquad_samples, interval_size=2)\n assert np.allclose(res, 1)\n\n # multiple qoi\n rv = norm(2, 3)\n\n def integrand(x):\n return rv.pdf(x)[:, None]*x[:, None]**np.arange(3)[None, :]\n lb, ub = rv.interval(1)\n nquad_samples = 100\n res = integrate_using_univariate_gauss_legendre_quadrature_unbounded(\n integrand, lb, ub, nquad_samples, interval_size=2)\n assert np.allclose(res, [1, 2, 3**2+2**2])\n\n def test_qr_solve(self):\n nrows = 3\n rhs = np.random.normal(0., 1., (nrows, 1))\n A = np.random.normal(0., 1., (nrows, nrows))\n A = A.T.dot(A)\n Q, R = qr_factorization(A)\n sol = qr_solve(Q, R, rhs)\n assert np.allclose(sol, np.linalg.solve(A, rhs))\n\nif __name__ == \"__main__\":\n utilities_test_suite = unittest.TestLoader().loadTestsFromTestCase(\n TestUtilities)\n unittest.TextTestRunner(verbosity=2).run(utilities_test_suite)\n", "from pyapprox.orthonormal_polynomials_1d import gauss_quadrature,\\\n evaluate_orthonormal_polynomial_1d\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyapprox.utilities import cartesian_product, outer_product, hash_array\nfrom pyapprox.indexing import nchoosek, compute_hyperbolic_level_indices, \\\n argsort_indices_lexiographically_by_row\nfrom pyapprox.barycentric_interpolation import compute_barycentric_weights_1d,\\\n multivariate_barycentric_lagrange_interpolation, \\\n multivariate_hierarchical_barycentric_lagrange_interpolation\n\n\ndef get_1d_samples_weights(quad_rules, growth_rules,\n levels, config_variables_idx=None,\n unique_rule_indices=None):\n levels = np.asarray(levels)\n num_vars = levels.shape[0]\n # [x]*n syntax, what you get is a list of n many x objects,\n # but they're all references to the same object, e.g.\n # samples_1d,weights_1d=[[]]*num_vars,[[]]*num_vars\n # instead use\n samples_1d = [[] for i in range(num_vars)]\n weights_1d = [[] for i in range(num_vars)]\n return update_1d_samples_weights(\n quad_rules, growth_rules, levels,\n samples_1d, weights_1d, config_variables_idx, unique_rule_indices)\n\n\ndef update_1d_samples_weights_economical(\n quad_rules, growth_rules, levels, samples_1d, weights_1d,\n config_variables_idx, unique_rule_indices):\n \"\"\"\n Sometimes it is computationally time consuming to construct quadrature\n rules for each dimension, e.g. for numerically generated Leja rules.\n So only update unique rules and store result in all variable dimensions \n which use that rule.\n\n This function will update samples_1d of all variables\n with the same quad rule even if levels of some of those variables\n do not need to be updated (but it will only compute the rule\n once). This is in contrast to default update\n which computes the rule of every variable if and only if\n the level for that variable is insufficient ie needs to be updated. \n The former is economical in computaiontal cost the later in memory.\n\n TODO: ideally this function should only store samples_1d for the unique\n quadrature rules. 
But currently sparse grid assumes that there is a \n quadrature rule for each variable.\n \"\"\"\n assert len(quad_rules) == len(growth_rules)\n assert len(quad_rules) == len(unique_rule_indices)\n cnt = 0\n levels = np.asarray(levels)\n for dd in range(len(unique_rule_indices)):\n unique_rule_indices[dd] = np.asarray(unique_rule_indices[dd])\n cnt += unique_rule_indices[dd].shape[0]\n\n num_vars = levels.shape[0]\n if config_variables_idx is None:\n config_variables_idx = num_vars\n\n if cnt != config_variables_idx:\n msg = 'unique_rule_indices inconsistent with num_random_vars '\n msg += '(config_variable_idx)'\n raise Exception(msg)\n\n from inspect import signature\n for dd in range(len(unique_rule_indices)):\n # use first instance of quad_rule\n # assumes samples_1d stored for every dimension not just for unique\n # quadrature rules\n index = unique_rule_indices[dd][0]\n max_level_dd = levels[unique_rule_indices[dd]].max()\n current_level = len(samples_1d[index])\n if current_level <= max_level_dd:\n sig = signature(quad_rules[dd])\n keyword_args = [p.name for p in sig.parameters.values()\n if ((p.kind == p.POSITIONAL_OR_KEYWORD) or\n (p.kind == p.KEYWORD_ONLY))]\n if current_level > 0 and 'initial_points' in keyword_args:\n # useful for updating Leja rules\n x, w = quad_rules[dd](\n max_level_dd,\n initial_points=samples_1d[index][-1][np.newaxis, :])\n else:\n x, w = quad_rules[dd](max_level_dd)\n assert x.ndim == 1 and len(w) == max_level_dd+1\n for ll in range(current_level, max_level_dd+1):\n for kk in unique_rule_indices[dd]:\n # use weights ordered according to polynomial index ordering\n # not typical ascending order\n # Check if user specifies growth rule which is incompatible\n # with quad rule.\n assert w[ll].shape[0] == growth_rules[dd](ll)\n weights_1d[kk].append(w[ll][:growth_rules[dd](ll)])\n # following assumes nestedness of x\n samples_1d[kk].append(x[:growth_rules[dd](ll)])\n return samples_1d, weights_1d\n\n\ndef update_1d_samples_weights(quad_rules, growth_rules,\n levels, samples_1d, weights_1d,\n config_variables_idx,\n unique_rule_indices=None):\n if unique_rule_indices is not None:\n return update_1d_samples_weights_economical(\n quad_rules, growth_rules, levels, samples_1d, weights_1d,\n config_variables_idx, unique_rule_indices)\n\n num_vars = len(samples_1d)\n\n for dd in range(num_vars):\n current_level = len(samples_1d[dd])\n if current_level <= levels[dd]:\n x, w = quad_rules[dd](levels[dd])\n assert x.ndim == 1 and len(w) == levels[dd]+1\n for ll in range(current_level, levels[dd]+1):\n # Check if user specifies growth rule which is incompatible\n # with quad rule.\n assert w[ll].shape[0] == growth_rules[dd](ll)\n # use weights ordered according to polynomial index ordering\n # not typical ascending order\n weights_1d[dd].append(w[ll])\n # following assumes nestedness of x\n samples_1d[dd].append(x[:growth_rules[dd](ll)])\n return samples_1d, weights_1d\n\n\ndef get_hierarchical_sample_indices(subspace_index, poly_indices,\n samples_1d, config_variables_idx):\n \"\"\"\n This function is useful for obtaining the hierarhical function values of \n a subspace\n\n Use this function in the following way\n\n hier_indices = get_hierarchical_sample_indices()\n samples = get_subspace_samples(unique_samples_only=False)\n hier_samples = samples[:,hier_indices]\n \"\"\"\n num_vars, num_indices = poly_indices.shape\n if config_variables_idx is None:\n config_variables_idx = num_vars\n\n assert len(samples_1d) == config_variables_idx\n\n active_vars = 
np.where(subspace_index > 0)[0]\n hier_indices = np.empty((num_indices), dtype=int)\n kk = 0\n for ii in range(num_indices):\n index = poly_indices[:, ii]\n found = True\n for jj in range(len(active_vars)):\n subspace_level = subspace_index[active_vars[jj]]\n if active_vars[jj] < config_variables_idx:\n if subspace_level > 0:\n idx = samples_1d[active_vars[jj]\n ][subspace_level-1].shape[0]\n else:\n idx = 0\n else:\n idx = 0\n if index[active_vars[jj]] < idx:\n found = False\n break\n if found:\n hier_indices[kk] = ii\n kk += 1\n return hier_indices[:kk]\n\n\ndef get_subspace_samples(subspace_index, poly_indices, samples_1d,\n config_variables_idx=None, unique_samples_only=False):\n \"\"\"\n Compute the samples of a subspace. \n\n Parameters\n ----------\n unique_samples_only : boolean\n If true only return the samples that exist in this subspace\n and not in any ancestor subspace, i.e. the heirachical samples\n \"\"\"\n assert len(samples_1d) == poly_indices.shape[0]\n subspace_samples = get_sparse_grid_samples(\n poly_indices, samples_1d, config_variables_idx)\n if unique_samples_only:\n I = get_hierarchical_sample_indices(\n subspace_index, poly_indices, samples_1d,\n config_variables_idx)\n subspace_samples = subspace_samples[:, I]\n return subspace_samples\n\n\ndef get_subspace_polynomial_indices(subspace_index, growth_rule_1d,\n config_variables_idx=None):\n \"\"\"\n Get the polynomial indices of a tensor-product nodal subspace.\n\n Parameters\n ----------\n subspace index : np.ndarray (num_vars)\n The subspace index [l_1,...,l_d]\n\n growth_rule_1d : list of callable functions\n Function which takes a level l_i as its only argument and returns\n the number of samples in the 1D quadrature rule of the specified level.\n\n Return\n ------\n poly_indices : np.ndarray (num_vars x num_subspace_samples)\n The polynomial indices of the tensor-product subspace.\n \"\"\"\n subspace_index = np.asarray(subspace_index)\n num_vars = subspace_index.shape[0]\n if np.all(subspace_index == 0):\n return np.zeros((num_vars, 1), dtype=int)\n\n if config_variables_idx is None:\n config_variables_idx = num_vars\n assert len(growth_rule_1d) == config_variables_idx\n\n poly_indices_1d = []\n for ii in range(num_vars):\n if ii < config_variables_idx:\n poly_indices_1d.append(\n np.arange(growth_rule_1d[ii](subspace_index[ii])))\n else:\n # for config variables just set value equal to subspace index value\n poly_indices_1d.append(np.asarray([subspace_index[ii]]))\n\n poly_indices = cartesian_product(poly_indices_1d, 1)\n return poly_indices\n\n\ndef get_subspace_weights(subspace_index, weights_1d, config_variables_idx=None):\n \"\"\"\n Get the quadrature weights of a tensor-product nodal subspace.\n\n Parameters\n ----------\n subspace index : np.ndarray (num_vars)\n The subspace index [l_1,...,l_d]\n\n weights_1d : [[np.ndarray]*num_vars]\n List of quadrature weights for each level and each variable\n Each element of inner list is np.ndarray with ndim=1. 
which meaans only \n homogenous sparse grids are supported, i.e grids with same quadrature\n rule used in each dimension (level can be different per dimension \n though).\n\n Return\n ------\n subspace_weights : np.ndarray (num_subspace_samples)\n The quadrature weights of the tensor-product quadrature rule of the \n subspace.\n \"\"\"\n assert subspace_index.ndim == 1\n num_vars = subspace_index.shape[0]\n if config_variables_idx is None:\n config_variables_idx = num_vars\n assert len(weights_1d) == config_variables_idx\n\n subspace_weights_1d = []\n constant_term = 1.\n I = np.where(subspace_index[:config_variables_idx] > 0)[0]\n subspace_weights_1d = [weights_1d[ii][subspace_index[ii]] for ii in I]\n\n # for all cases I have tested so far the quadrature rules weights\n # are always 1 for level 0. Using loop below takes twice as long as\n # above pythonic loop without error checks.\n\n # for dd in range(config_variables_idx):\n # # integrate only over random variables. i.e. do not compute\n # # tensor product over config variables.\n\n # # only compute outer product over variables with non-zero index\n # if subspace_index[dd]>0:\n # # assumes level zero weight is constant\n # subspace_weights_1d.append(weights_1d[dd][subspace_index[dd]])\n # else:\n # assert len(weights_1d[dd][subspace_index[dd]])==1\n # constant_term *= weights_1d[dd][subspace_index[dd]][0]\n if len(subspace_weights_1d) > 0:\n subspace_weights = outer_product(subspace_weights_1d)*constant_term\n else:\n subspace_weights = np.ones(1)*constant_term\n return subspace_weights\n\n\ndef get_sparse_grid_samples(poly_indices, samples_1d, config_variables_idx=None):\n \"\"\"\n Compute the unique sparse grid samples from a set of polynomial indices.\n\n The function assumes the sparse grid is isotropic, i.e the same level\n is used for each variable. 
This function can also only be used\n for nested quadrature rules.\n\n Parameters\n ----------\n poly_indices : np.ndarray (num_vars x num_sparse_grid_samples)\n The unique polynomial indices of the sparse grid.\n\n level : integer\n The level of the isotropic sparse grid.\n\n samples_1d : np.ndarray (num_poly_indices)\n samples of the univariate quadrature for maximum level in grid\n\n Return\n ------\n samples : np.ndarray (num_vars x num_sparse_grid_samples)\n The unique samples of the sparse grid.\n \"\"\"\n # assume samples list for each variable has same length\n samples_1d = [samples_1d[dd][-1] for dd in range(len(samples_1d))]\n poly_indices_max = poly_indices.max(axis=1)\n for dd in range(len(samples_1d)):\n assert samples_1d[dd].shape[0] >= poly_indices_max[dd]\n num_vars, num_indices = poly_indices.shape\n if config_variables_idx is not None:\n assert num_vars > config_variables_idx\n samples = np.empty((num_vars, num_indices))\n for ii in range(num_indices):\n index = poly_indices[:, ii]\n for jj in range(num_vars):\n if config_variables_idx is None or jj < config_variables_idx:\n samples[jj, ii] = samples_1d[jj][index[jj]]\n else:\n samples[jj, ii] = index[jj]\n return samples\n\n\ndef get_smolyak_coefficients(subspace_indices):\n \"\"\"\n Given an arbitrary set of downward close indices determine the \n smolyak coefficients.\n \"\"\"\n num_vars, num_subspace_indices = subspace_indices.shape\n I = argsort_indices_lexiographically_by_row(subspace_indices)\n sorted_subspace_indices = subspace_indices[:, I]\n levels, level_change_indices = np.unique(\n sorted_subspace_indices[0, :], return_index=True)\n level_change_indices = np.append(\n level_change_indices[2:], [num_subspace_indices, num_subspace_indices])\n\n try:\n from pyapprox.cython.sparse_grid import get_smolyak_coefficients_pyx\n return get_smolyak_coefficients_pyx(\n sorted_subspace_indices, levels, level_change_indices)[I.argsort()]\n except:\n print('get_smolyak_coefficients extention failed')\n\n idx = 0\n smolyak_coeffs = np.zeros((num_subspace_indices), dtype=float)\n for ii in range(num_subspace_indices):\n index = sorted_subspace_indices[:, ii]\n if idx < levels.shape[0] and index[0] > levels[idx]:\n idx += 1\n for jj in range(ii, level_change_indices[idx]):\n diff = sorted_subspace_indices[:, jj]-index\n if diff.max() <= 1 and diff.min() >= 0:\n smolyak_coeffs[ii] += (-1.)**diff.sum()\n return smolyak_coeffs[I.argsort()]\n\n # try:\n # from pyapprox.cython.sparse_grid import \\\n # get_smolyak_coefficients_without_sorting_pyx\n # return get_smolyak_coefficients_without_sorting_pyx(subspace_indices)\n # except:\n # print ('get_smolyak_coefficients_without_sorting extention failed')\n\n # num_vars, num_subspace_indices = subspace_indices.shape\n # smolyak_coeffs = np.zeros((num_subspace_indices),dtype=float)\n # for ii in range(num_subspace_indices):\n # for jj in range(num_subspace_indices):\n # diff = subspace_indices[:,jj]-subspace_indices[:,ii]\n # if diff.max()<=1 and diff.min()>=0:\n # smolyak_coeffs[ii]+=(-1.)**diff.sum()\n # return smolyak_coeffs\n\n\ndef get_isotropic_sparse_grid_subspace_indices(num_vars, level):\n smolyak_coefficients = np.empty((0), dtype=float)\n sparse_grid_subspace_indices = np.empty((num_vars, 0), dtype=int)\n for dd in range(min(num_vars, level+1)):\n subspace_indices_dd = compute_hyperbolic_level_indices(\n num_vars, level-dd, 1.0)\n sparse_grid_subspace_indices = np.hstack(\n (sparse_grid_subspace_indices, subspace_indices_dd))\n subspace_coefficient = 
(-1.0)**(dd)*nchoosek(num_vars-1, dd)\n smolyak_coefficients = np.hstack((\n smolyak_coefficients,\n subspace_coefficient*np.ones(subspace_indices_dd.shape[1])))\n return sparse_grid_subspace_indices, smolyak_coefficients\n\n\ndef get_sparse_grid_samples_and_weights(num_vars, level,\n quad_rules,\n growth_rules,\n sparse_grid_subspace_indices=None):\n \"\"\"\n Compute the quadrature weights and samples of a isotropic sparse grid.\n\n Parameters\n ----------\n num_vars : integer\n The number of variables in (dimension of) the sparse grid\n\n level : integer\n The level of the isotropic sparse grid.\n\n quad_rules : callable univariate quadrature_rule(ll) or list\n Function used to compute univariate quadrature samples and weights \n for a given level (ll). The weights and samples must be returned\n with polynomial ordering. If list then argument is list of quadrature\n rules\n\n growth_rules :callable growth_rules(ll) or list\n Function that returns the number of samples in the univariate\n quadrature rule of a given level (ll). If list then argument if list\n of growth rules.\n\n Return\n ------\n samples : np.ndarray (num_vars x num_sparse_grid_samples)\n The unique samples of the sparse grid.\n\n weights : np.ndarray (num_sparse_grid_samples)\n The quadrature weights of the sparse grid.\n \"\"\"\n #subspace_indices = []\n #subspace_coefficients = []\n\n if callable(quad_rules):\n quad_rules = [quad_rules]*num_vars\n growth_rules = [growth_rules]*num_vars\n\n assert len(quad_rules) == len(growth_rules)\n assert len(quad_rules) == num_vars\n\n samples_1d, weights_1d = get_1d_samples_weights(\n quad_rules, growth_rules, [level]*num_vars)\n\n poly_indices_dict = dict()\n num_sparse_grid_samples = 0\n weights = []\n poly_indices = []\n sparse_grid_subspace_poly_indices_list = []\n sparse_grid_subspace_values_indices_list = []\n\n if sparse_grid_subspace_indices is None:\n sparse_grid_subspace_indices, smolyak_coefficients =\\\n get_isotropic_sparse_grid_subspace_indices(num_vars, level)\n else:\n smolyak_coefficients = get_smolyak_coefficients(\n sparse_grid_subspace_indices)\n I = np.where(np.absolute(smolyak_coefficients) > 1e-8)[0]\n smolyak_coefficients = smolyak_coefficients[I]\n sparse_grid_subspace_indices = sparse_grid_subspace_indices[:, I]\n\n for ii in range(sparse_grid_subspace_indices.shape[1]):\n subspace_index = sparse_grid_subspace_indices[:, ii]\n subspace_poly_indices = get_subspace_polynomial_indices(\n subspace_index, growth_rules)\n sparse_grid_subspace_poly_indices_list.append(subspace_poly_indices)\n subspace_weights = get_subspace_weights(\n subspace_index, weights_1d)*smolyak_coefficients[ii]\n assert subspace_weights.shape[0] == subspace_poly_indices.shape[1]\n subspace_values_indices = np.empty(\n (subspace_poly_indices.shape[1]), dtype=int)\n for jj in range(subspace_poly_indices.shape[1]):\n poly_index = subspace_poly_indices[:, jj]\n key = hash_array(poly_index)\n if key in poly_indices_dict:\n weights[poly_indices_dict[key]] += subspace_weights[jj]\n subspace_values_indices[jj] = poly_indices_dict[key]\n else:\n poly_indices.append(poly_index)\n poly_indices_dict[key] = num_sparse_grid_samples\n weights.append(subspace_weights[jj])\n subspace_values_indices[jj] = num_sparse_grid_samples\n num_sparse_grid_samples += 1\n sparse_grid_subspace_values_indices_list.append(\n subspace_values_indices)\n\n # get list of unique polynomial indices\n poly_indices = np.asarray(poly_indices).T\n samples = get_sparse_grid_samples(poly_indices, samples_1d)\n data_structures = 
[poly_indices_dict, poly_indices,\n sparse_grid_subspace_indices, np.asarray(\n smolyak_coefficients),\n sparse_grid_subspace_poly_indices_list, samples_1d, weights_1d,\n sparse_grid_subspace_values_indices_list]\n # subspace_poly_indices can be recomputed but return here to save\n # computations at the expense of more memory\n return samples, np.asarray(weights), data_structures\n\n\ndef get_subspace_values(values, subspace_values_indices):\n num_qoi = values.shape[1]\n num_subspace_samples = subspace_values_indices.shape[0]\n subspace_values = values[subspace_values_indices, :]\n # subspace_values = np.empty((num_subspace_samples,num_qoi),dtype=float)\n # for jj in range(num_subspace_samples):\n # values_index = subspace_values_indices[jj]\n # subspace_values[jj,:] = values[values_index,:]\n return subspace_values\n\n\ndef get_subspace_values_using_dictionary(values, subspace_poly_indices,\n poly_indices_dict):\n num_qoi = values.shape[1]\n num_subspace_samples = subspace_poly_indices.shape[1]\n subspace_values = np.empty((num_subspace_samples, num_qoi), dtype=float)\n for jj in range(num_subspace_samples):\n poly_index = subspace_poly_indices[:, jj]\n # could reduce number of hash based lookups by simply storing\n # replicate of values for each subspace, to reduce data storage\n # I can simply store index into an array which stores the unique values\n key = hash_array(poly_index)\n subspace_values[jj, :] = values[poly_indices_dict[key], :]\n return subspace_values\n\n\ndef evaluate_sparse_grid_subspace(samples, subspace_index, subspace_values,\n samples_1d, config_variables_idx, output):\n if config_variables_idx is None:\n config_variables_idx = samples.shape[0]\n\n active_sample_vars = np.where(subspace_index[:config_variables_idx] > 0)[0]\n num_active_sample_vars = active_sample_vars.shape[0]\n\n abscissa_1d = []\n barycentric_weights_1d = []\n for dd in range(num_active_sample_vars):\n active_idx = active_sample_vars[dd]\n abscissa_1d.append(samples_1d[active_idx][subspace_index[active_idx]])\n interval_length = 2\n if abscissa_1d[dd].shape[0] > 1:\n interval_length = abscissa_1d[dd].max()-abscissa_1d[dd].min()\n barycentric_weights_1d.append(\n compute_barycentric_weights_1d(\n abscissa_1d[dd], interval_length=interval_length))\n\n if num_active_sample_vars == 0:\n return np.tile(subspace_values, (samples.shape[1], 1))\n poly_vals = multivariate_barycentric_lagrange_interpolation(\n samples, abscissa_1d, barycentric_weights_1d, subspace_values,\n active_sample_vars)\n return poly_vals\n\n\ndef evaluate_sparse_grid(samples, values,\n poly_indices_dict, # not needed with new implementation\n sparse_grid_subspace_indices,\n sparse_grid_subspace_poly_indices_list,\n smolyak_coefficients, samples_1d,\n sparse_grid_subspace_values_indices_list,\n config_variables_idx=None, output=False):\n\n num_vars, num_samples = samples.shape\n assert values.ndim == 2\n assert values.shape[0] == len(poly_indices_dict)\n assert sparse_grid_subspace_indices.shape[1] == \\\n smolyak_coefficients.shape[0]\n\n # max_level_samples_1d_min = [\n # samples_1d[dd][-1].min() for dd in range(len(samples_1d))]\n # max_level_samples_1d_max = [\n # samples_1d[dd][-1].max() for dd in range(len(samples_1d))]\n # if (np.any(samples.min(axis=1)<max_level_samples_1d_min) or\n # np.any(samples.max(axis=1)>max_level_samples_1d_max)):\n # print ('warning extrapolating outside abscissa')\n # print(samples.min(axis=1),max_level_samples_1d_min)\n # print(samples.max(axis=1),max_level_samples_1d_max)\n # # this can be true for 
univariate quadrature rules that are not closed\n # # i.e on bounded domain and with samples on both boundaries\n # # need to make this check better\n\n num_qoi = values.shape[1]\n # must initialize to zero\n approx_values = np.zeros((num_samples, num_qoi), dtype=float)\n for ii in range(sparse_grid_subspace_indices.shape[1]):\n if (abs(smolyak_coefficients[ii]) > np.finfo(float).eps):\n subspace_index = sparse_grid_subspace_indices[:, ii]\n subspace_poly_indices = sparse_grid_subspace_poly_indices_list[ii]\n # subspace_values = get_subspace_values_using_dictionary(\n # values,subspace_poly_indices,poly_indices_dict)\n subspace_values = get_subspace_values(\n values, sparse_grid_subspace_values_indices_list[ii])\n subspace_approx_vals = evaluate_sparse_grid_subspace(\n samples, subspace_index, subspace_values,\n samples_1d, config_variables_idx, output)\n approx_values += smolyak_coefficients[ii]*subspace_approx_vals\n return approx_values\n\n\ndef integrate_sparse_grid_subspace(subspace_index, subspace_values,\n weights_1d, config_variables_idx):\n subspace_weights = get_subspace_weights(\n subspace_index, weights_1d, config_variables_idx)\n mean = np.dot(subspace_weights, subspace_values)\n variance = np.dot(subspace_weights, subspace_values**2)-mean**2\n return np.vstack((mean[np.newaxis, :], variance[np.newaxis, :]))\n\n\ndef integrate_sparse_grid(values,\n poly_indices_dict, # not needed with new implementation\n sparse_grid_subspace_indices,\n sparse_grid_subspace_poly_indices_list,\n smolyak_coefficients, weights_1d,\n sparse_grid_subspace_values_indices_list,\n config_variables_idx=None):\n assert values.ndim == 2\n assert values.shape[0] == len(poly_indices_dict)\n assert sparse_grid_subspace_indices.shape[1] == smolyak_coefficients.shape[0]\n\n num_qoi = values.shape[1]\n # must initialize to zero\n integral_values = np.zeros((2, num_qoi), dtype=float)\n for ii in range(sparse_grid_subspace_indices.shape[1]):\n if (abs(smolyak_coefficients[ii]) > np.finfo(float).eps):\n subspace_index = sparse_grid_subspace_indices[:, ii]\n subspace_values = get_subspace_values(\n values, sparse_grid_subspace_values_indices_list[ii])\n subspace_integral_vals = integrate_sparse_grid_subspace(\n subspace_index, subspace_values, weights_1d, config_variables_idx)\n integral_values += smolyak_coefficients[ii]*subspace_integral_vals\n return integral_values\n\n\ndef integrate_sparse_grid_from_subspace_moments(\n sparse_grid_subspace_indices,\n smolyak_coefficients, subspace_moments):\n assert sparse_grid_subspace_indices.shape[1] == \\\n smolyak_coefficients.shape[0]\n assert subspace_moments.shape[0] == sparse_grid_subspace_indices.shape[1]\n\n num_qoi = subspace_moments.shape[1]\n # must initialize to zero\n integral_values = np.zeros((num_qoi, 2), dtype=float)\n for ii in range(sparse_grid_subspace_indices.shape[1]):\n if (abs(smolyak_coefficients[ii]) > np.finfo(float).eps):\n integral_values += smolyak_coefficients[ii]*subspace_moments[ii]\n # keep shape consistent with shape returned by integrate_sparse_grid\n return integral_values.T\n\n\ndef evaluate_sparse_grid_from_subspace_values(\n sparse_grid_subspace_indices,\n smolyak_coefficients, subspace_interrogation_values):\n \"\"\"\n Some times you may want to evaluate a sparse grid repeatedly at the\n same set of samples. If so use this function. It avoids recomputing the\n subspace interpolants each time the sparse grid is interrogated. 
\n Note the reduced time complexity requires more storage\n \"\"\"\n assert sparse_grid_subspace_indices.shape[1] == \\\n smolyak_coefficients.shape[0]\n assert len(subspace_interrogation_values) == \\\n sparse_grid_subspace_indices.shape[1]\n\n # must initialize to zero\n values = 0\n for ii in range(sparse_grid_subspace_indices.shape[1]):\n if (abs(smolyak_coefficients[ii]) > np.finfo(float).eps):\n values += smolyak_coefficients[ii] * \\\n subspace_interrogation_values[ii]\n return values\n\n\ndef get_num_sparse_grid_samples(\n sparse_grid_subspace_poly_indices_list,\n smolyak_coefficients):\n \"\"\"\n This only works if there are no config variables. Specifically it \n will underestimate the number of model evaluations when config variables \n are present For example, if the smolyak coefficient of subspace is 1 and the\n coefficient its backwards neighbor is -1 this function will subtract off \n the number of samples from the backward neighbor to avoid double counting. \n But if config variables are present then the backward neighbour index may\n only vary in the config variables and thus the samples in each of the \n two subspaces come from different models and thus we actually want to \n count the samples of both subspaces.\n \"\"\"\n num_samples = 0\n for ii in range(smolyak_coefficients.shape[0]):\n if (abs(smolyak_coefficients[ii]) > np.finfo(float).eps):\n subspace_poly_indices = sparse_grid_subspace_poly_indices_list[ii]\n num_subspace_evals = subspace_poly_indices.shape[1]\n num_samples += smolyak_coefficients[ii]*num_subspace_evals\n return num_samples\n\n\ndef plot_sparse_grid_2d(samples, weights, poly_indices=None, subspace_indices=None,\n axs=None, active_samples=None, active_subspace_indices=None,\n config_variables_idx=None):\n \"\"\"\n Plot the sparse grid samples and color the samples by their quadrature \n weight.\n\n Parameters\n ---------\n samples : np.ndarray (num_vars x num_sparse_grid_samples)\n The unique samples of the sparse grid.\n\n weights : np.ndarray (num_sparse_grid_samples)\n The quadrature weights of the sparse grid.\n\n poly_indices : np.ndarray (num_vars x num_sparse_grid_samples)\n The unique polynomial indices of the sparse grid.\n\n \"\"\"\n from pyapprox.visualization import plot_2d_indices\n if samples.shape[0] != 2:\n return\n\n nplots = 1 + int(poly_indices is not None) + \\\n int(subspace_indices is not None)\n if axs is None:\n fig, axs = plt.subplots(1, nplots, figsize=(nplots*8, 6))\n if type(axs) != np.ndarray:\n axs = [axs]\n assert len(axs) == nplots\n\n if config_variables_idx is None:\n plot = axs[0].scatter(samples[0, :], samples[1, :], s=100, c=weights,\n cmap=plt.get_cmap('Greys'), edgecolors='black')\n plt.colorbar(plot, ax=axs[0])\n if active_samples is not None:\n axs[0].plot(active_samples[0, :], active_samples[1, :], 'ro')\n else:\n for ii in range(samples.shape[0]):\n axs[0].plot(samples[0, ii], samples[1, ii], 'ko')\n for ii in range(active_samples.shape[1]):\n axs[0].plot(active_samples[0, ii], active_samples[1, ii], 'ro')\n from matplotlib.pyplot import MaxNLocator\n ya = axs[0].get_yaxis()\n ya.set_major_locator(MaxNLocator(integer=True))\n # axs[0].set_ylabel(r'$\\alpha_1$',rotation=0)\n axs[0].set_xlabel('$z_1$', rotation=0)\n\n ii = 1\n if poly_indices is not None:\n plot_2d_indices(poly_indices, ax=axs[ii])\n ii += 1\n\n if subspace_indices is not None:\n plot_2d_indices(subspace_indices, active_subspace_indices, ax=axs[ii])\n ii += 1\n\n return axs\n\n\ndef plot_sparse_grid_3d(samples, weights, poly_indices=None, 
subspace_indices=None,\n active_samples=None, active_subspace_indices=None):\n from pyapprox.visualization import plot_3d_indices\n if samples.shape[0] != 3:\n return\n\n nplots = 1 + int(poly_indices is not None) + \\\n int(subspace_indices is not None)\n fig = plt.figure(figsize=(2*8, 6))\n axs = []\n ax = fig.add_subplot(1, nplots, 1, projection='3d')\n ax.plot(samples[0, :], samples[1, :], samples[2, :], 'ko')\n if active_samples is not None:\n ax.plot(active_samples[0, :], active_samples[1, :],\n active_samples[2, :], 'ro')\n axs.append(ax)\n\n angle = 45\n ax.view_init(30, angle)\n # ax.set_axis_off()\n ax.grid(False)\n # Hide axes ticks\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n\n ii = 2\n if poly_indices is not None:\n ax = fig.add_subplot(1, nplots, ii, projection='3d')\n plot_3d_indices(poly_indices, ax=ax)\n axs.append(ax)\n ii += 1\n\n if subspace_indices is not None:\n ax = fig.add_subplot(1, nplots, ii, projection='3d')\n plot_3d_indices(subspace_indices, ax, active_subspace_indices)\n axs.append(ax)\n ii += 1\n\n return axs\n\n\ndef evaluate_sparse_grid_subspace_hierarchically(samples, values, subspace_index,\n subspace_values_indices,\n samples_1d,\n subspace_poly_indices,\n config_variables_idx):\n if config_variables_idx is None:\n config_variables_idx = samples.shape[0]\n\n abscissa_1d = []\n barycentric_weights_1d = []\n hier_indices_1d = []\n active_vars = np.where(subspace_index > 0)[0]\n num_active_vars = active_vars.shape[0]\n\n subspace_values = values[subspace_values_indices, :]\n\n if num_active_vars == 0:\n return subspace_values\n\n for dd in range(num_active_vars):\n subspace_level = subspace_index[active_vars[dd]]\n if subspace_level > 0:\n idx1 = samples_1d[subspace_level-1].shape[0]\n else:\n idx1 = 0\n idx2 = samples_1d[subspace_level].shape[0]\n hier_indices_1d.append(np.arange(idx1, idx2))\n abscissa_1d.append(samples_1d[subspace_level])\n barycentric_weights_1d.append(\n compute_barycentric_weights_1d(abscissa_1d[dd]))\n\n hier_indices = get_hierarchical_sample_indices(\n subspace_index, subspace_poly_indices,\n samples_1d, config_variables_idx)\n\n hier_subspace_values = subspace_values[hier_indices, :]\n\n values = multivariate_hierarchical_barycentric_lagrange_interpolation(\n samples, abscissa_1d, barycentric_weights_1d, hier_subspace_values,\n active_vars, hier_indices_1d)\n\n return values\n\n\ndef evaluate_sparse_grid_hierarchically(\n samples, values,\n poly_indices_dict, # not needed with new implementation\n sparse_grid_subspace_indices,\n sparse_grid_subspace_poly_indices_list,\n smolyak_coefficients, samples_1d,\n sparse_grid_subspace_values_indices_list,\n config_variables_idx=None):\n \"\"\"\n This will not currently work as it requires the function argument values to\n be hierarchical surpluses not raw function values.\n \"\"\"\n num_vars, num_samples = samples.shape\n assert values.ndim == 2\n num_qoi = values.shape[1]\n approx_values = np.zeros((num_samples, num_qoi), dtype=float)\n\n for ii in range(sparse_grid_subspace_indices.shape[1]):\n approx_values += evaluate_sparse_grid_subspace_hierarchically(\n samples, values, sparse_grid_subspace_indices[:, ii],\n sparse_grid_subspace_values_indices_list[ii], samples_1d,\n sparse_grid_subspace_poly_indices_list[ii], config_variables_idx)\n return approx_values\n\n\ndef convert_univariate_lagrange_basis_to_orthonormal_polynomials(\n samples_1d, get_recursion_coefficients):\n \"\"\"\n Returns\n -------\n coeffs_1d : list [np.ndarray(num_terms_i,num_terms_i)]\n The 
coefficients of the orthonormal polynomial representation of\n each Lagrange basis. The columns are the coefficients of each \n lagrange basis. The rows are the coefficient of the degree i \n orthonormalbasis\n \"\"\"\n # Get the maximum number of terms in the orthonormal polynomial that\n # are need to interpolate all the interpolation nodes in samples_1d\n max_num_terms = samples_1d[-1].shape[0]\n num_quad_points = max_num_terms+1\n # Get the recursion coefficients of the orthonormal basis\n recursion_coeffs = get_recursion_coefficients(num_quad_points)\n # compute the points and weights of the correct quadrature rule\n x_quad, w_quad = gauss_quadrature(recursion_coeffs, num_quad_points)\n # evaluate the orthonormal basis at the quadrature points. This can\n # be computed once for all degrees up to the maximum degree\n ortho_basis_matrix = evaluate_orthonormal_polynomial_1d(\n x_quad, max_num_terms, recursion_coeffs)\n\n # compute coefficients of orthonormal basis using pseudo spectral projection\n coeffs_1d = []\n w_quad = w_quad[:, np.newaxis]\n for ll in range(len(samples_1d)):\n num_terms = samples_1d[ll].shape[0]\n # evaluate the lagrange basis at the quadrature points\n barycentric_weights_1d = [\n compute_barycentric_weights_1d(samples_1d[ll])]\n values = np.eye((num_terms), dtype=float)\n # Sometimes the following function will cause the erro\n # interpolation abscissa are not unique. This can be due to x_quad\n # not abscissa. E.g. x_quad may have points far enough outside\n # range of abscissa, e.g. abscissa are clenshaw curtis points and\n # x_quad points are Gauss-Hermite quadrature points\n lagrange_basis_vals = multivariate_barycentric_lagrange_interpolation(\n x_quad[np.newaxis, :], samples_1d[ll][np.newaxis, :],\n barycentric_weights_1d, values, np.zeros(1, dtype=int))\n # compute fourier like coefficients\n basis_coeffs = []\n for ii in range(num_terms):\n basis_coeffs.append(np.dot(\n w_quad.T,\n lagrange_basis_vals*ortho_basis_matrix[:, ii:ii+1])[0, :])\n coeffs_1d.append(np.asarray(basis_coeffs))\n return coeffs_1d\n\n\ndef convert_multivariate_lagrange_polys_to_orthonormal_polys(\n subspace_index, subspace_values, coeffs_1d, poly_indices,\n config_variables_idx):\n\n if config_variables_idx is None:\n config_variables_idx = subspace_index.shape[0]\n\n active_sample_vars = np.where(subspace_index[:config_variables_idx] > 0)[0]\n num_active_sample_vars = active_sample_vars.shape[0]\n\n if num_active_sample_vars == 0:\n coeffs = subspace_values\n return coeffs\n\n num_indices = poly_indices.shape[1]\n num_qoi = subspace_values.shape[1]\n coeffs = np.zeros((num_indices, num_qoi), dtype=float)\n for ii in range(num_indices):\n poly_coeffs_1d = \\\n [coeffs_1d[dd][subspace_index[dd]][:, poly_indices[dd, ii]]\n for dd in active_sample_vars]\n poly_coeffs = outer_product(poly_coeffs_1d)\n coeffs += subspace_values[ii, :]*poly_coeffs[:, np.newaxis]\n\n return coeffs\n\n\ndef get_num_model_evaluations_from_samples(samples, num_config_vars):\n config_vars_dict = dict()\n num_samples = samples.shape[1]\n sample_count = []\n unique_config_vars = []\n for ii in range(num_samples):\n config_vars = samples[-num_config_vars:, ii]\n key = hash_array(config_vars)\n if key in config_vars_dict:\n sample_count[config_vars_dict[key]] += 1\n else:\n config_vars_dict[key] = len(sample_count)\n sample_count.append(1)\n unique_config_vars.append(config_vars)\n unique_config_vars = np.array(unique_config_vars).T\n sample_count = np.array(sample_count)\n I = np.argsort(sample_count)[::-1]\n 
sample_count = sample_count[I]\n unique_config_vars = unique_config_vars[:, I]\n return np.vstack((sample_count[np.newaxis, :], unique_config_vars))\n\n\ndef get_equivalent_cost(cost_function, model_level_evals, model_ids):\n \"\"\"\n Returns\n -------\n equivalent_costs : np.ndarray\n Fraction of total work. equivalent_costs.sum()=1\n \"\"\"\n equivalent_costs = []\n model_costs = cost_function(model_ids)\n equivalent_costs = model_costs*model_level_evals\n total_cost = equivalent_costs.sum()\n equivalent_costs /= float(total_cost)\n return equivalent_costs, total_cost\n", "import numpy as np\n\nfrom pyapprox.utilities import evaluate_quadratic_form\n\n\ndef variance_linear_combination_of_indendent_variables(coef, variances):\n assert coef.shape[0] == variances.shape[0]\n return np.sum(coef**2*variances)\n\n\ndef get_oakley_function_data():\n r\"\"\"\n Get the data :math:`a_1,a_2,a_3` and :math:`M` of the Oakley function\n\n .. math:: f(z) = a_1^Tz + a_2^T\\sin(z) + a_3^T\\cos(z) + z^TMz\n\n Returns\n -------\n a1 : np.ndarray (15)\n The vector :math:`a_1` of the Oakley function\n\n a2 : np.ndarray (15)\n The vector :math:`a_2` of the Oakley function\n\n a3 : np.ndarray (15)\n The vector :math:`a_3` of the Oakley function\n\n M : np.ndarray (15,15)\n The non-symmetric matrix :math:`M` of the Oakley function\n\n Examples\n --------\n\n >>> from pyapprox.benchmarks.sensitivity_benchmarks import get_oakley_function_data\n >>> a1,a2,a3,M=get_oakley_function_data()\n >>> print(a1)\n [0.0118 0.0456 0.2297 0.0393 0.1177 0.3865 0.3897 0.6061 0.6159 0.4005\n 1.0741 1.1474 0.788 1.1242 1.1982]\n >>> print(a2)\n [0.4341 0.0887 0.0512 0.3233 0.1489 1.036 0.9892 0.9672 0.8977 0.8083\n 1.8426 2.4712 2.3946 2.0045 2.2621]\n >>> print(a3)\n [0.1044 0.2057 0.0774 0.273 0.1253 0.7526 0.857 1.0331 0.8388 0.797\n 2.2145 2.0382 2.4004 2.0541 1.9845]\n >>> print(M)\n [[-0.02248289 -0.18501666 0.13418263 0.36867264 0.17172785 0.13651143\n -0.44034404 -0.08142285 0.71321025 -0.44361072 0.50383394 -0.02410146\n -0.04593968 0.21666181 0.05588742]\n [ 0.2565963 0.05379229 0.25800381 0.23795905 -0.59125756 -0.08162708\n -0.28749073 0.41581639 0.49752241 0.08389317 -0.11056683 0.03322235\n -0.13979497 -0.03102056 -0.22318721]\n [-0.05599981 0.19542252 0.09552901 -0.2862653 -0.14441303 0.22369356\n 0.14527412 0.28998481 0.2310501 -0.31929879 -0.29039128 -0.20956898\n 0.43139047 0.02442915 0.04490441]\n [ 0.66448103 0.43069872 0.29924645 -0.16202441 -0.31479544 -0.39026802\n 0.17679822 0.05795266 0.17230342 0.13466011 -0.3527524 0.25146896\n -0.01881053 0.36482392 -0.32504618]\n [-0.121278 0.12463327 0.10656519 0.0465623 -0.21678617 0.19492172\n -0.06552113 0.02440467 -0.09682886 0.19366196 0.33354757 0.31295994\n -0.08361546 -0.25342082 0.37325717]\n [-0.2837623 -0.32820154 -0.10496068 -0.22073452 -0.13708154 -0.14426375\n -0.11503319 0.22424151 -0.03039502 -0.51505615 0.01725498 0.03895712\n 0.36069184 0.30902452 0.05003019]\n [-0.07787589 0.00374566 0.88685604 -0.26590028 -0.07932536 -0.04273492\n -0.18653782 -0.35604718 -0.17497421 0.08869996 0.40025886 -0.05597969\n 0.13724479 0.21485613 -0.0112658 ]\n [-0.09229473 0.59209563 0.03133829 -0.03308086 -0.24308858 -0.09979855\n 0.03446019 0.09511981 -0.3380162 0.006386 -0.61207299 0.08132542\n 0.88683114 0.14254905 0.14776204]\n [-0.13189434 0.52878496 0.12652391 0.04511362 0.58373514 0.37291503\n 0.11395325 -0.29479222 -0.57014085 0.46291592 -0.09405018 0.13959097\n -0.38607402 -0.4489706 -0.14602419]\n [ 0.05810766 -0.32289338 0.09313916 0.07242723 -0.56919401 
0.52554237\n 0.23656926 -0.01178202 0.0718206 0.07827729 -0.13355752 0.22722721\n 0.14369455 -0.45198935 -0.55574794]\n [ 0.66145875 0.34633299 0.14098019 0.51882591 -0.28019898 -0.1603226\n -0.06841334 -0.20428242 0.06967217 0.23112577 -0.04436858 -0.16455425\n 0.21620977 0.00427021 -0.08739901]\n [ 0.31599556 -0.02755186 0.13434254 0.13497371 0.05400568 -0.17374789\n 0.17525393 0.06025893 -0.17914162 -0.31056619 -0.25358691 0.02584754\n -0.43006001 -0.62266361 -0.03399688]\n [-0.29038151 0.03410127 0.03490341 -0.12121764 0.02603071 -0.33546274\n -0.41424111 0.05324838 -0.27099455 -0.0262513 0.41024137 0.26636349\n 0.15582891 -0.18666254 0.01989583]\n [-0.24388652 -0.44098852 0.01261883 0.24945112 0.07110189 0.24623792\n 0.17484502 0.00852868 0.2514707 -0.14659862 -0.08462515 0.36931333\n -0.29955293 0.1104436 -0.75690139]\n [ 0.04149432 -0.25980564 0.46402128 -0.36112127 -0.94980789 -0.16504063\n 0.00309433 0.05279294 0.22523648 0.38390366 0.45562427 -0.18631744\n 0.0082334 0.16670803 0.16045688]]\n \"\"\"\n a1 = np.array([0.0118, 0.0456, 0.2297, 0.0393, 0.1177, 0.3865, 0.3897, 0.6061, 0.6159, 0.4005,\n 1.0741, 1.1474, 0.7880, 1.1242, 1.1982])\n a2 = np.array([0.4341, 0.0887, 0.0512, 0.3233, 0.1489, 1.0360, 0.9892, 0.9672, 0.8977, 0.8083,\n 1.8426, 2.4712, 2.3946, 2.0045, 2.2621])\n a3 = np.array([0.1044, 0.2057, 0.0774, 0.2730, 0.1253, 0.7526, 0.8570, 1.0331, 0.8388, 0.7970,\n 2.2145, 2.0382, 2.4004, 2.0541, 1.9845])\n M = np.array([[-2.2482886e-002, -1.8501666e-001, 1.3418263e-001, 3.6867264e-001, 1.7172785e-001, 1.3651143e-001, -4.4034404e-001, -8.1422854e-002, 7.1321025e-001, -4.4361072e-001, 5.0383394e-001, -2.4101458e-002, -4.5939684e-002, 2.1666181e-001, 5.5887417e-002],\n [2.5659630e-001, 5.3792287e-002, 2.5800381e-001, 2.3795905e-001, -5.9125756e-001, -8.1627077e-002, -2.8749073e-001, 4.1581639e-001,\n 4.9752241e-001, 8.3893165e-002, -1.1056683e-001, 3.3222351e-002, -1.3979497e-001, -3.1020556e-002, -2.2318721e-001],\n [-5.5999811e-002, 1.9542252e-001, 9.5529005e-002, -2.8626530e-001, -1.4441303e-001, 2.2369356e-001, 1.4527412e-001, 2.8998481e-001,\n 2.3105010e-001, -3.1929879e-001, -2.9039128e-001, -2.0956898e-001, 4.3139047e-001, 2.4429152e-002, 4.4904409e-002],\n [6.6448103e-001, 4.3069872e-001, 2.9924645e-001, -1.6202441e-001, -3.1479544e-001, -3.9026802e-001, 1.7679822e-001, 5.7952663e-002,\n 1.7230342e-001, 1.3466011e-001, -3.5275240e-001, 2.5146896e-001, -1.8810529e-002, 3.6482392e-001, -3.2504618e-001],\n [-1.2127800e-001, 1.2463327e-001, 1.0656519e-001, 4.6562296e-002, -2.1678617e-001, 1.9492172e-001, -6.5521126e-002,\n 2.4404669e-002, -9.6828860e-002, 1.9366196e-001, 3.3354757e-001, 3.1295994e-001, -8.3615456e-002, -2.5342082e-001, 3.7325717e-001],\n [-2.8376230e-001, -3.2820154e-001, -1.0496068e-001, -2.2073452e-001, -1.3708154e-001, -1.4426375e-001, -1.1503319e-001,\n 2.2424151e-001, -3.0395022e-002, -5.1505615e-001, 1.7254978e-002, 3.8957118e-002, 3.6069184e-001, 3.0902452e-001, 5.0030193e-002],\n [-7.7875893e-002, 3.7456560e-003, 8.8685604e-001, -2.6590028e-001, -7.9325357e-002, -4.2734919e-002, -1.8653782e-001, -\n 3.5604718e-001, -1.7497421e-001, 8.8699956e-002, 4.0025886e-001, -5.5979693e-002, 1.3724479e-001, 2.1485613e-001, -1.1265799e-002],\n [-9.2294730e-002, 5.9209563e-001, 3.1338285e-002, -3.3080861e-002, -2.4308858e-001, -9.9798547e-002, 3.4460195e-002,\n 9.5119813e-002, -3.3801620e-001, 6.3860024e-003, -6.1207299e-001, 8.1325416e-002, 8.8683114e-001, 1.4254905e-001, 1.4776204e-001],\n [-1.3189434e-001, 5.2878496e-001, 1.2652391e-001, 4.5113625e-002, 
5.8373514e-001, 3.7291503e-001, 1.1395325e-001, -2.9479222e-001, -\n 5.7014085e-001, 4.6291592e-001, -9.4050179e-002, 1.3959097e-001, -3.8607402e-001, -4.4897060e-001, -1.4602419e-001],\n [5.8107658e-002, -3.2289338e-001, 9.3139162e-002, 7.2427234e-002, -5.6919401e-001, 5.2554237e-001, 2.3656926e-001, -1.1782016e-002,\n 7.1820601e-002, 7.8277291e-002, -1.3355752e-001, 2.2722721e-001, 1.4369455e-001, -4.5198935e-001, -5.5574794e-001],\n [6.6145875e-001, 3.4633299e-001, 1.4098019e-001, 5.1882591e-001, -2.8019898e-001, -1.6032260e-001, -6.8413337e-002, -\n 2.0428242e-001, 6.9672173e-002, 2.3112577e-001, -4.4368579e-002, -1.6455425e-001, 2.1620977e-001, 4.2702105e-003, -8.7399014e-002],\n [3.1599556e-001, -2.7551859e-002, 1.3434254e-001, 1.3497371e-001, 5.4005680e-002, -1.7374789e-001, 1.7525393e-001, 6.0258929e-002, -\n 1.7914162e-001, -3.1056619e-001, -2.5358691e-001, 2.5847535e-002, -4.3006001e-001, -6.2266361e-001, -3.3996882e-002],\n [-2.9038151e-001, 3.4101270e-002, 3.4903413e-002, -1.2121764e-001, 2.6030714e-002, -3.3546274e-001, -4.1424111e-001,\n 5.3248380e-002, -2.7099455e-001, -2.6251302e-002, 4.1024137e-001, 2.6636349e-001, 1.5582891e-001, -1.8666254e-001, 1.9895831e-002],\n [-2.4388652e-001, -4.4098852e-001, 1.2618825e-002, 2.4945112e-001, 7.1101888e-002, 2.4623792e-001, 1.7484502e-001, 8.5286769e-003,\n 2.5147070e-001, -1.4659862e-001, -8.4625150e-002, 3.6931333e-001, -2.9955293e-001, 1.1044360e-001, -7.5690139e-001],\n [4.1494323e-002, -2.5980564e-001, 4.6402128e-001, -3.6112127e-001, -9.4980789e-001, -1.6504063e-001, 3.0943325e-003, 5.2792942e-002, 2.2523648e-001, 3.8390366e-001, 4.5562427e-001, -1.8631744e-001, 8.2333995e-003, 1.6670803e-001, 1.6045688e-001]])\n return a1, a2, a3, M\n\n\ndef oakley_function(samples):\n a1, a2, a3, M = get_oakley_function_data()\n term1, term2 = a1.T.dot(samples), a2.T.dot(np.sin(samples))\n term3, term4 = a3.T.dot(\n np.cos(samples)), evaluate_quadratic_form(M, samples)\n vals = term1+term2+term3+term4\n return vals[:, np.newaxis]\n\n\ndef oakley_function_statistics():\n e = np.exp(1)\n a1, a2, a3, M = get_oakley_function_data()\n nvars = M.shape[0]\n\n term1_mean, term2_mean = 0, 0\n term3_mean, term4_mean = np.sum(a3/np.sqrt(e)), np.trace(M)\n mean = term1_mean+term2_mean+term3_mean+term4_mean\n\n term1_var = variance_linear_combination_of_indendent_variables(\n a1, np.ones(a1.shape[0]))\n variances_1d = np.ones(a2.shape[0])*(0.5*(1-1/e**2))\n term2_var = variance_linear_combination_of_indendent_variables(\n a2, variances_1d)\n variances_1d = np.ones(a3.shape[0])*(0.5*(1+1/e**2)-1.0/e)\n term3_var = variance_linear_combination_of_indendent_variables(\n a3, variances_1d)\n A = 0.5*(M.T+M) # needed because M is not symmetric\n term4_var = 2*np.trace(A.dot(A))\n\n cov_xsinx = 1/np.sqrt(e)\n covar13, covar14, covar23, covar24 = 0, 0, 0, 0\n covar12 = np.sum(a1*a2*cov_xsinx)\n covar34 = np.sum(-1/np.sqrt(e)*a3*np.diag(M))\n\n variance = term1_var+term2_var+term3_var+term4_var\n variance += 2*(covar12+covar13+covar14+covar23+covar24+covar34)\n main_effects = np.empty((nvars, 1))\n for ii in range(nvars):\n var1 = a1[ii]**2\n var2 = a2[ii]**2*(0.5*(1-1/e**2))\n var3 = a3[ii]**2*(0.5*(1+1/e**2)-1.0/e)\n var4 = 2*M[ii, ii]**2\n cov12 = cov_xsinx*a1[ii]*a2[ii]\n cov34 = -1/np.sqrt(e)*a3[ii]*M[ii, ii]\n main_effects[ii] = var1+var2+var3+var4+2*cov12+2*cov34\n\n return mean, variance, main_effects/variance\n\n\ndef ishigami_function(samples, a=7, b=0.1):\n if samples.ndim == 1:\n samples = samples[:, np.newaxis]\n vals = np.sin(samples[0, 
:])+a*np.sin(samples[1, :])**2 +\\\n b*samples[2, :]**4*np.sin(samples[0, :])\n return vals[:, np.newaxis]\n\n\ndef ishigami_function_jacobian(samples, a=7, b=0.1):\n if samples.ndim == 1:\n samples = samples[:, np.newaxis]\n assert samples.shape[1] == 1\n nvars = 3\n assert samples.shape[0] == nvars\n jac = np.empty((1, nvars))\n jac[0, 0] = np.cos(samples[0, :]) + b * \\\n samples[2, :]**4*np.cos(samples[0, :])\n jac[0, 1] = 2*a*np.sin(samples[1, :])*np.cos(samples[1, :])\n jac[0, 2] = 4*b*samples[2, :]**3*np.sin(samples[0, :])\n return jac\n\n\ndef ishigami_function_hessian(samples, a=7, b=0.1):\n if samples.ndim == 1:\n samples = samples[:, np.newaxis]\n assert samples.shape[1] == 1\n nvars = 3\n assert samples.shape[0] == nvars\n hess = np.empty((nvars, nvars))\n hess[0, 0] = -np.sin(samples[0, :]) - b * \\\n samples[2, :]**4*np.sin(samples[0, :])\n hess[1, 1] = 2*a*(np.cos(samples[1, :])**2-np.sin(samples[1, :])**2)\n hess[2, 2] = 12*b*samples[2, :]**2*np.sin(samples[0, :])\n hess[0, 1], hess[1, 0] = 0, 0\n hess[0, 2] = 4*b*samples[2, :]**3*np.cos(samples[0, :])\n hess[2, 0] = hess[0, 2]\n hess[1, 2], hess[2, 1] = 0, 0\n return hess\n\n\ndef get_ishigami_funciton_statistics(a=7, b=0.1):\n \"\"\"\n p_i(X_i) ~ U[-pi,pi]\n \"\"\"\n mean = a/2\n variance = a**2/8+b*np.pi**4/5+b**2*np.pi**8/18+0.5\n D_1 = b*np.pi**4/5+b**2*np.pi**8/50+0.5\n D_2, D_3, D_12, D_13 = a**2/8, 0, 0, b**2*np.pi**8/18-b**2*np.pi**8/50\n D_23, D_123 = 0, 0\n main_effects = np.array([D_1, D_2, D_3])/variance\n # the following two ways of calulating the total effects are equivalent\n total_effects1 = np.array(\n [D_1+D_12+D_13+D_123, D_2+D_12+D_23+D_123, D_3+D_13+D_23+D_123])/variance\n total_effects = 1 - \\\n np.array([D_2+D_3+D_23, D_1+D_3+D_13, D_1+D_2+D_12])/variance\n assert np.allclose(total_effects1, total_effects)\n sobol_indices = np.array([D_1, D_2, D_3, D_12, D_13, D_23, D_123])/variance\n sobol_interaction_indices = [[0], [1], [\n 2], [0, 1], [0, 2], [1, 2], [0, 1, 2]]\n return mean, variance, main_effects[:, np.newaxis], total_effects[:, np.newaxis], sobol_indices[:, np.newaxis], sobol_interaction_indices\n\n\ndef sobol_g_function(coefficients, samples):\n \"\"\"\n The coefficients control the sensitivity of each variable. Specifically\n they limit the range of the outputs, i.e.\n 1-1/(1+a_i) <= (abs(4*x-2)+a_i)/(a_i+1) <= 1-1/(1+a_i)\n \"\"\"\n nvars, nsamples = samples.shape\n assert coefficients.shape[0] == nvars\n vals = np.prod((np.absolute(4*samples-2)+coefficients[:, np.newaxis]) /\n (1+coefficients[:, np.newaxis]), axis=0)[:, np.newaxis]\n assert vals.shape[0] == nsamples\n return vals\n\n\ndef get_sobol_g_function_statistics(a, interaction_terms=None):\n \"\"\"\n See article: Variance based sensitivity analysis of model output. 
\n Design and estimator for the total sensitivity index\n \"\"\"\n nvars = a.shape[0]\n mean = 1\n unnormalized_main_effects = 1/(3*(1+a)**2)\n variance = np.prod(unnormalized_main_effects+1)-1\n main_effects = unnormalized_main_effects/variance\n total_effects = np.tile(np.prod(unnormalized_main_effects+1), (nvars))\n total_effects *= unnormalized_main_effects/(unnormalized_main_effects+1)\n total_effects /= variance\n if interaction_terms is None:\n return mean, variance, main_effects, total_effects\n\n sobol_indices = np.array([\n unnormalized_main_effects[index].prod()/variance\n for index in interaction_terms])\n return mean, variance, main_effects[:, np.newaxis], total_effects[:, np.newaxis], sobol_indices[:, np.newaxis]\n\n\ndef morris_function(samples):\n assert samples.shape[0] == 20\n beta0 = np.random.randn()\n beta_first_order = np.empty(20)\n beta_first_order[:10] = 20\n beta_first_order[10:] = np.random.normal(0, 1, 10)\n beta_second_order = np.empty((20, 20))\n beta_second_order[:6, :6] = -15\n beta_second_order[6:, 6:] = np.random.normal(0, 1, (14, 14))\n #beta_third_order = np.zeros((20,20,20))\n # beta_third_order[:5,:5,:5]=-10\n beta_third_order = -10\n #beta_forth_order = np.zeros((20,20,20,20))\n # beta_forth_order[:4,:4,:4,:4]=5\n beta_forth_order = 5\n ww = 2*(samples-0.5)\n I = [3, 5, 7]\n ww[I] = 2 * (1.1 * samples[I]/(samples[I]+0.1)-0.5)\n\n values = beta0\n values += np.sum(beta_first_order[:, np.newaxis]*ww, axis=0)\n\n for jj in range(20):\n for ii in range(jj):\n values += beta_second_order[ii, jj]*ww[ii]*ww[jj]\n\n for kk in range(5):\n for jj in range(kk):\n for ii in range(jj):\n values += beta_third_order*ww[ii]*ww[jj]*ww[kk]\n\n for ll in range(4):\n for kk in range(ll):\n for jj in range(kk):\n for ii in range(jj):\n values += beta_forth_order*ww[ii]*ww[jj]*ww[kk]*ww[ll]\n return values[:, np.newaxis]\n" ]
[ [ "numpy.diag", "numpy.dot", "scipy.stats.gamma", "numpy.sqrt", "numpy.linspace", "numpy.exp", "numpy.trace", "numpy.tril", "numpy.hstack", "numpy.ix_", "numpy.allclose", "numpy.tril_indices", "numpy.arange", "numpy.empty_like", "numpy.eye", "numpy.block", "numpy.argmax", "numpy.count_nonzero", "numpy.triu", "numpy.zeros", "numpy.linalg.inv", "scipy.stats.beta", "numpy.linalg.lstsq", "scipy.stats.multivariate_normal", "numpy.linalg.cholesky", "scipy.linalg.lu_factor", "numpy.array", "numpy.sum", "numpy.absolute", "scipy.linalg.qr", "numpy.linalg.solve", "numpy.array_equal", "numpy.random.seed", "scipy.stats.beta.pdf", "numpy.linalg.norm", "numpy.ones", "scipy.stats.norm", "numpy.random.normal", "numpy.random.uniform", "numpy.vstack" ], [ "numpy.dot", "numpy.asarray", "matplotlib.pyplot.MaxNLocator", "matplotlib.pyplot.get_cmap", "numpy.all", "numpy.where", "numpy.hstack", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.finfo", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.append", "numpy.argsort", "numpy.array", "numpy.absolute", "numpy.tile", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.empty", "matplotlib.pyplot.colorbar", "numpy.vstack" ], [ "numpy.diag", "numpy.absolute", "numpy.allclose", "numpy.sqrt", "numpy.trace", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.random.normal", "numpy.random.randn", "numpy.prod", "numpy.array", "numpy.exp", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VladimirGl/CLIP
[ "539a1e5b05defc1139f0e52105d5ed29ce94960b" ]
[ "clip.py" ]
[ "import hashlib\nimport os\nimport urllib\nimport warnings\nfrom typing import Union, List\n\nimport torch\nfrom PIL import Image\nfrom torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize\nfrom tqdm import tqdm\n\nfrom .model import build_model\nfrom .simple_tokenizer import SimpleTokenizer as _Tokenizer\n\n__all__ = [\"available_models\", \"load\", \"tokenize\"]\n_tokenizer = _Tokenizer()\n\n_MODELS = {\n \"RN50\": \"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt\",\n \"ViT-B/32\": \"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt\",\n}\n\n\ndef _download(url: str, root: str = os.path.expanduser(\"~/.cache/clip\")):\n os.makedirs(root, exist_ok=True)\n filename = os.path.basename(url)\n \n expected_sha256 = url.split(\"/\")[-2]\n download_target = os.path.join(root, filename)\n\n if os.path.exists(download_target) and not os.path.isfile(download_target):\n raise RuntimeError(f\"{download_target} exists and is not a regular file\")\n\n if os.path.isfile(download_target):\n if hashlib.sha256(open(download_target, \"rb\").read()).hexdigest() == expected_sha256:\n return download_target\n else:\n warnings.warn(f\"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file\")\n\n with urllib.request.urlopen(url) as source, open(download_target, \"wb\") as output:\n with tqdm(total=int(source.info().get(\"Content-Length\")), ncols=80) as loop: \n while True:\n buffer = source.read(8192)\n if not buffer:\n break\n\n output.write(buffer)\n loop.update(len(buffer))\n\n if hashlib.sha256(open(download_target, \"rb\").read()).hexdigest() != expected_sha256:\n raise RuntimeError(f\"Model has been downloaded but the SHA256 checksum does not not match\")\n\n return download_target\n\n\ndef available_models():\n return list(_MODELS.keys())\n\n\ndef load(name: str, device: Union[str, torch.device] = \"cuda\" if torch.cuda.is_available() else \"cpu\", jit=True):\n if name not in _MODELS:\n raise RuntimeError(f\"Model {name} not found; available models = {available_models()}\")\n\n model_path = _download(_MODELS[name])\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n n_px = model.input_resolution.item()\n\n transform = Compose([\n Resize(n_px, interpolation=Image.BICUBIC),\n CenterCrop(n_px),\n lambda image: image.convert(\"RGB\"),\n ToTensor(),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n ])\n\n if not jit:\n model = build_model(model.state_dict()).to(device)\n return model, transform\n\n # patch the device names\n device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])\n device_node = [n for n in device_holder.graph.findAllNodes(\"prim::Constant\") if \"Device\" in repr(n)][-1]\n\n def patch_device(module):\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\"cuda\"):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_image)\n patch_device(model.encode_text)\n\n # patch dtype to float32 on CPU\n if device == \"cpu\":\n float_holder = torch.jit.trace(lambda: torch.ones([]).float(), 
example_inputs=[])\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [1, 2]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n model.apply(patch_float)\n patch_float(model.encode_image)\n patch_float(model.encode_text)\n\n model.float()\n\n return model, transform\n\n\ndef tokenize(texts: Union[str, List[str]], context_length: int = 77):\n if isinstance(texts, str):\n texts = [texts]\n\n sot_token = _tokenizer.encoder[\"<|startoftext|>\"]\n eot_token = _tokenizer.encoder[\"<|endoftext|>\"]\n all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]\n result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)\n\n for i, tokens in enumerate(all_tokens):\n if len(tokens) > context_length:\n raise RuntimeError(f\"Input {texts[i]} is too long for context length {context_length}\")\n result[i, :len(tokens)] = torch.tensor(tokens)\n\n return result\n" ]
[ [ "torch.jit.load", "torch.ones", "torch.tensor", "torch.cuda.is_available", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gabrielmbmb/intry4.0-backend
[ "1ac28ad333c93ed6069dc2db4d9fe3a12b8a1b24" ]
[ "backend/apps/datamodel/models.py" ]
[ "import io\nimport uuid\nimport pytz\nimport json\nimport logging\nimport pandas as pd\nfrom constance import config\nfrom django.db import models\nfrom django.contrib.postgres.fields import ArrayField, JSONField\nfrom django.core.validators import (\n int_list_validator,\n MinValueValidator,\n)\nfrom django.db.models.signals import pre_delete\nfrom datetime import datetime\nfrom backend.apps.core import clients\n\nlogger = logging.getLogger(__name__)\n\n\nNOT_ATTRIBUTES_KEYS_SUBSCRIPTION = [\"id\", \"type\", \"TimeInstant\"]\n\n\nclass DataModel(models.Model):\n \"\"\"Class which holds everything related to a Blackbox Anomaly Detection model.\"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n name = models.CharField(max_length=128, help_text=\"Model name\")\n is_training = models.BooleanField(\n help_text=\"Wether the model is being trained or not\", default=False,\n )\n trained = models.BooleanField(\n help_text=\"Wether the model is trained or not\", default=False\n )\n deployed = models.BooleanField(\n help_text=\"Wether the model is deployed or not\", default=False\n )\n date_trained = models.DateTimeField(\n help_text=\"Date the model was trained\", default=None, blank=True, null=True\n )\n date_deployed = models.DateTimeField(\n help_text=\"Date the model was deployed\", default=None, blank=True, null=True\n )\n num_predictions = models.IntegerField(\n help_text=\"Number of predictions made by this model\", default=0\n )\n task_status = models.CharField(\n help_text=\"URL to get the progress of training process\",\n null=True,\n blank=True,\n max_length=512,\n )\n\n # sensors\n plcs = JSONField()\n\n contamination = models.FloatField(\n help_text=\"Contamination fraction in the training dataset\",\n default=0.1,\n validators=[MinValueValidator(0.0)],\n null=True,\n blank=True,\n )\n\n scaler = models.CharField(\n help_text=\"The scaler used to scale the data before training and predicting\",\n default=\"minmax\",\n max_length=48,\n null=True,\n blank=True,\n )\n\n # PCA Mahalanobis\n pca_mahalanobis = models.BooleanField(null=True, blank=True, default=False)\n n_components = models.IntegerField(\n help_text=\"Numbers of components for the PCA algorithm\",\n default=2,\n validators=[MinValueValidator(1)],\n null=True,\n blank=True,\n )\n\n # Autoencoder\n autoencoder = models.BooleanField(null=True, blank=True, default=False)\n hidden_neurons = models.CharField(\n help_text=\"Neural Network layers and the number of neurons in each layer\",\n validators=[\n int_list_validator(\n sep=\",\",\n message=\"It should be a string with a list of integers separeted by a comma\",\n allow_negative=False,\n )\n ],\n default=\"32,16,16,32\",\n max_length=128,\n null=True,\n blank=True,\n )\n dropout_rate = models.FloatField(\n help_text=\"Dropout rate across all the layers of the Neural Network\",\n default=0.2,\n null=True,\n blank=True,\n )\n activation = models.CharField(\n help_text=\"Layers activation function of Neural Network\",\n choices=[\n (\"elu\", \"elu\"),\n (\"softmax\", \"softmax\"),\n (\"selu\", \"selu\"),\n (\"softplus\", \"softplus\"),\n (\"softsign\", \"softsign\"),\n (\"relu\", \"relu\"),\n (\"tanh\", \"tanh\"),\n (\"sigmoid\", \"sigmoid\"),\n (\"hard_sigmoid\", \"hard_sigmoid\"),\n (\"exponential\", \"exponential\"),\n ],\n default=\"elu\",\n max_length=24,\n null=True,\n blank=True,\n )\n kernel_initializer = models.CharField(\n help_text=\"Layers kernel initializer of Neural Network\",\n choices=[\n (\"Zeros\", \"Zeros\"),\n (\"Ones\", 
\"Ones\"),\n (\"Constant\", \"Constant\"),\n (\"RandomNormal\", \"RandomNormal\"),\n (\"RandomUniform\", \"RandomUniform\"),\n (\"TruncatedNormal\", \"TruncatedNormal\"),\n (\"VarianceScaling\", \"VarianceScaling\"),\n (\"Orthogonal\", \"Orthogonal\"),\n (\"Identity\", \"Identity\"),\n (\"lecun_uniform\", \"lecun_uniform\"),\n (\"glorot_normal\", \"glorot_normal\"),\n (\"glorot_uniform\", \"glorot_uniform\"),\n (\"he_normal\", \"he_normal\"),\n (\"lecun_normal\", \"lecun_normal\"),\n (\"he_uniform\", \"he_uniform\"),\n ],\n default=\"glorot_uniform\",\n max_length=24,\n null=True,\n blank=True,\n )\n loss_function = models.CharField(\n help_text=\"Loss function of the Neural Network\",\n default=\"mse\",\n max_length=24,\n null=True,\n blank=True,\n )\n optimizer = models.CharField(\n help_text=\"Optimizer of Neural Network\",\n choices=[\n (\"sgd\", \"sgd\"),\n (\"rmsprop\", \"rmsprop\"),\n (\"adagrad\", \"adagrad\"),\n (\"adadelta\", \"adadelta\"),\n (\"adam\", \"adam\"),\n (\"adamax\", \"adamax\"),\n (\"nadam\", \"nadam\"),\n ],\n default=\"adam\",\n max_length=24,\n null=True,\n blank=True,\n )\n epochs = models.IntegerField(\n help_text=\"Number of times that all the batches will be processed in the \"\n \" Neural Network\",\n default=100,\n null=True,\n blank=True,\n )\n batch_size = models.IntegerField(\n help_text=\"Batch size\", default=32, null=True, blank=True\n )\n validation_split = models.FloatField(\n help_text=\"Percentage of the training data that will be used for purpouses in\"\n \" the Neural Network\",\n default=0.05,\n null=True,\n blank=True,\n )\n early_stopping = models.BooleanField(\n help_text=\"Stops the training process in the Neural Network when it's not\"\n \" getting any improvement\",\n default=False,\n null=True,\n blank=True,\n )\n\n # K-Means\n kmeans = models.BooleanField(null=True, blank=True, default=False)\n n_clusters = models.IntegerField(\n help_text=\"Number of clusters for the K-Means algorithm\",\n default=None,\n null=True,\n blank=True,\n )\n max_cluster_elbow = models.IntegerField(\n help_text=\"Maximun number of cluster to test in the Elbow Method\",\n default=100,\n null=True,\n blank=True,\n )\n\n # One Class SVM\n ocsvm = models.BooleanField(null=True, blank=True, default=False)\n kernel = models.CharField(\n help_text=\"Kernel type for One Class SVM\",\n choices=[\n (\"linear\", \"linear\"),\n (\"poly\", \"poly\"),\n (\"rbf\", \"rbf\"),\n (\"sigmoid\", \"sigmoid\"),\n (\"precomputed\", \"precomputed\"),\n ],\n default=\"rbf\",\n max_length=24,\n null=True,\n blank=True,\n )\n degree = models.IntegerField(\n help_text=\"Degree of the polynomal kernel function for One Class SVM\",\n default=3,\n null=True,\n blank=True,\n )\n gamma = models.CharField(\n help_text=\"Kernel coefficient for 'rbf', 'poly' and 'sigmoid' in One Class SVM.\"\n \" It can 'scale', 'auto' or float\",\n default=\"scale\",\n max_length=24,\n null=True,\n blank=True,\n )\n coef0 = models.FloatField(\n help_text=\"Independent term in kernel function for One Class SVM. 
Only \"\n \"significant in 'poly'\",\n default=0.0,\n null=True,\n blank=True,\n )\n tol = models.FloatField(\n help_text=\"Tolerance for stopping criterion for One Class SVM\",\n default=0.001,\n null=True,\n blank=True,\n )\n shrinking = models.BooleanField(\n help_text=\"Whether to use the shrinking heuristic for One Class SVM\",\n default=True,\n null=True,\n blank=True,\n )\n cache_size = models.IntegerField(\n help_text=\"Specify the size of the kernel cache in MB for One Class SVM\",\n default=200,\n null=True,\n blank=True,\n )\n\n # Gaussian Distribution\n gaussian_distribution = models.BooleanField(null=True, blank=True, default=False)\n epsilon_candidates = models.IntegerField(\n help_text=\"Number of epsilon values that will be tested to find the best one\",\n default=100000000,\n null=True,\n blank=True,\n )\n\n # Isolation Forest\n isolation_forest = models.BooleanField(null=True, blank=True, default=False)\n n_estimators = models.IntegerField(\n help_text=\"The number of base estimators in the ensemble for Isolation \"\n \"Forest\",\n default=100,\n null=True,\n blank=True,\n )\n max_features = models.FloatField(\n help_text=\"Number of features to draw from X to train each base estimator\"\n \" for Isolation Forest\",\n default=1.0,\n null=True,\n blank=True,\n )\n bootstrap = models.BooleanField(\n help_text=\"Indicates if the Bootstrap technique is going to be applied \"\n \"for Isolation FOrest\",\n default=False,\n null=True,\n blank=True,\n )\n\n # Local Outlier Factor\n lof = models.BooleanField(null=True, blank=True, default=False)\n n_neighbors_lof = models.IntegerField(\n help_text=\"Number of neighbors to use in LOF\", default=20, null=True, blank=True\n )\n algorithm_lof = models.CharField(\n help_text=\"Algorithm used to compute the nearest neighbors in LOF\",\n choices=[\n (\"ball_tree\", \"ball_tree\"),\n (\"kd_tree\", \"kd_tree\"),\n (\"brute\", \"brute\"),\n (\"auto\", \"auto\"),\n ],\n default=\"auto\",\n max_length=24,\n null=True,\n blank=True,\n )\n leaf_size_lof = models.IntegerField(\n help_text=\"Leaf size passed to BallTree or KDTree in LOF\",\n default=30,\n null=True,\n blank=True,\n )\n metric_lof = models.CharField(\n help_text=\"The distance metric to use for the tree in LOF\",\n default=\"minkowski\",\n max_length=24,\n null=True,\n blank=True,\n )\n p_lof = models.IntegerField(\n help_text=\"Paremeter of the Minkowski metric in LOF\",\n default=2,\n null=True,\n blank=True,\n )\n\n # K-Nearest Neighbors\n knn = models.BooleanField(null=True, blank=True, default=False)\n n_neighbors_knn = models.IntegerField(\n help_text=\"Number of neighbors to use in KNN\", default=5, null=True, blank=True\n )\n radius = models.FloatField(\n help_text=\"The range of parameter space to use by default for \"\n \"radius_neighbors\",\n default=1.0,\n null=True,\n blank=True,\n )\n algorithm_knn = models.CharField(\n help_text=\"Algorithm used to compute the nearest neighbors in KNN\",\n choices=[\n (\"ball_tree\", \"ball_tree\"),\n (\"kd_tree\", \"kd_tree\"),\n (\"brute\", \"brute\"),\n (\"auto\", \"auto\"),\n ],\n default=\"auto\",\n max_length=24,\n null=True,\n blank=True,\n )\n leaf_size_knn = models.IntegerField(\n help_text=\"Leaf size passed to BallTree or KDTree in KNN\",\n default=30,\n null=True,\n blank=True,\n )\n metric_knn = models.CharField(\n help_text=\"The distance metric to use for the tree in KNN\",\n default=\"minkowski\",\n max_length=24,\n null=True,\n blank=True,\n )\n p_knn = models.IntegerField(\n help_text=\"Paremeter of the Minkowski 
metric in knn\",\n default=2,\n null=True,\n blank=True,\n )\n score_func = models.CharField(\n help_text=\"The function used to score anomalies in KNN\",\n choices=[\n (\"max_distance\", \"max_distance\"),\n (\"average\", \"average\"),\n (\"median\", \"median\"),\n ],\n default=\"max_distance\",\n max_length=24,\n null=True,\n blank=True,\n )\n\n # orion subscriptions\n subscriptions = ArrayField(models.CharField(max_length=128), default=list)\n\n # data from subscripitons\n data_from_subscriptions = JSONField(default=dict)\n dates = JSONField(default=dict)\n\n # clients\n blackbox_client = clients.BlackboxClient()\n crate_client = clients.CrateClient()\n orion_client = clients.OrionClient()\n\n def create_blackbox(self):\n \"\"\"Creates a Blackbox model in the Anomaly Detection API.\"\"\"\n self.blackbox_client.create_blackbox(self)\n\n def get_models_columns(self):\n \"\"\"Returns a dict containing two lists, one with the columns and the other\n with the models\n\n Returns:\n dict or None: containing two lists.\n \"\"\"\n data = {\"models\": [], \"columns\": []}\n if self.pca_mahalanobis:\n data[\"models\"].append(\"pca_mahalanobis\")\n\n if self.autoencoder:\n data[\"models\"].append(\"autoencoder\")\n\n if self.kmeans:\n data[\"models\"].append(\"kmeans\")\n\n if self.ocsvm:\n data[\"models\"].append(\"one_class_svm\")\n\n if self.gaussian_distribution:\n data[\"models\"].append(\"gaussian_distribution\")\n\n if self.isolation_forest:\n data[\"models\"].append(\"isolation_forest\")\n\n if self.lof:\n data[\"models\"].append(\"local_outlier_factor\")\n\n if self.knn:\n data[\"models\"].append(\"knearest_neighbors\")\n\n for sensors in self.plcs.values():\n data[\"columns\"] = data[\"columns\"] + sensors\n\n if data[\"models\"] and data[\"columns\"]:\n return data\n\n return None\n\n def train(\n self,\n with_source: str,\n n: int = None,\n from_date: str = None,\n to_date: str = None,\n train_df=None,\n ) -> bool:\n \"\"\"Trains the datamodel either with data from Crate or from a CSV\n\n Args:\n with_source (:obj:`str`): source of the training data. Valid choices are\n 'db' or 'csv'.\n n (:obj:`int`): the number of rows to take from the database. Defaults to\n None.\n from_date (:obj:`str`): date from which the rows has to be taken. Defaults\n to None.\n to_date (:obj:`str`): date until which the rows has to be taken. Defaults to\n None.\n train_df (:obj:`pandas.core.frame.DataFrame`): the dataframe to perform the\n training of the model. 
Defaults to None.\n Returns:\n bool: wether the process of training has been initiated or not.\n \"\"\"\n if not self.is_training:\n if with_source == \"db\":\n df = self.crate_client.get_data_from_plc(\n self.plcs, n=n, from_date=from_date, to_date=to_date\n )\n\n # train with data from CSV\n else:\n df = train_df\n\n if df is None:\n return False\n\n train_data_json = json.loads(df.to_json(orient=\"split\"))\n payload = self.to_json()\n payload[\"columns\"] = train_data_json[\"columns\"]\n payload[\"data\"] = train_data_json[\"data\"]\n\n self.task_status = self.blackbox_client.train(self.id, payload)\n self.is_training = True\n self.trained = False\n if self.deployed:\n self.set_deployed()\n self.save()\n\n return True\n\n return False\n\n def to_json(self):\n \"\"\"Gets the model as json format.\"\"\"\n json_ = {\n \"contamination\": self.contamination,\n \"scaler\": self.scaler,\n \"n_jobs\": -1,\n }\n\n if self.pca_mahalanobis:\n json_[\"pca_mahalanobis\"] = {\"n_components\": self.n_components}\n\n if self.autoencoder:\n json_[\"autoencoder\"] = {\n \"hidden_neurons\": list(\n map(lambda x: int(x), self.hidden_neurons.split(\",\"))\n ),\n \"dropout_rate\": self.dropout_rate,\n \"activation\": self.activation,\n \"kernel_initializer\": self.kernel_initializer,\n \"loss_function\": self.loss_function,\n \"optimizer\": self.optimizer,\n \"epochs\": self.epochs,\n \"batch_size\": self.batch_size,\n \"validation_split\": self.validation_split,\n \"early_stopping\": self.early_stopping,\n }\n\n if self.kmeans:\n json_[\"kmeans\"] = {\"max_cluster_elbow\": self.max_cluster_elbow}\n if self.n_clusters:\n json_[\"kmeans\"][\"n_clusters\"] = self.n_clusters\n\n if self.ocsvm:\n json_[\"one_class_svm\"] = {\n \"kernel\": self.kernel,\n \"degree\": self.degree,\n \"gamma\": self.gamma,\n \"coef0\": self.coef0,\n \"tol\": self.tol,\n \"shrinking\": self.shrinking,\n \"cache_size\": self.cache_size,\n }\n\n if self.gaussian_distribution:\n json_[\"gaussian_distribution\"] = {\n \"epsilon_candidates\": self.epsilon_candidates\n }\n\n if self.isolation_forest:\n json_[\"isolation_forest\"] = {\n \"n_estimators\": self.n_estimators,\n \"max_features\": self.max_features,\n \"bootstrap\": self.bootstrap,\n }\n\n if self.knn:\n json_[\"knearest_neighbors\"] = {\n \"n_neighbors\": self.n_neighbors_knn,\n \"radius\": self.radius,\n \"algorithm\": self.algorithm_knn,\n \"leaf_size\": self.leaf_size_knn,\n \"metric\": self.metric_knn,\n \"p\": self.p_knn,\n \"score_func\": self.score_func,\n }\n\n if self.lof:\n json_[\"local_outlier_factor\"] = {\n \"n_neighbors\": self.n_neighbors_lof,\n \"algorithm\": self.algorithm_lof,\n \"leaf_size\": self.leaf_size_knn,\n \"metric\": self.metric_knn,\n \"p\": self.p_knn,\n }\n\n return json_\n\n def set_trained(self):\n \"\"\"Sets the datamodel to the trained state.\"\"\"\n logger.info(f\"Setting datamodel with id {self.id} to trained!\")\n self.is_training = False\n self.trained = True\n self.date_trained = datetime.now(tz=pytz.UTC)\n self.save()\n\n def set_deployed(self):\n \"\"\"Sets the datamodel to the deployed state.\"\"\"\n self.deployed = not self.deployed\n\n if self.deployed:\n self.date_deployed = datetime.now(tz=pytz.UTC)\n\n # create subscriptions in OCB\n notification_url = (\n f\"http://{config.SERVER_IP}/api/v1/datamodels/{self.id}/predict/\"\n )\n\n subscriptions = []\n data_from_subscriptions = {}\n for (plc, sensors) in self.plcs.items():\n subscription = self.orion_client.create_subscription(\n url=notification_url, pattern=plc, 
conditions=sensors, throttling=5\n )\n subscriptions.append(subscription)\n data_from_subscriptions[plc] = {}\n\n self.subscriptions = subscriptions\n self.data_from_subscriptions = data_from_subscriptions\n\n else:\n self.date_deployed = None\n\n # remove subscriptions in OCB\n self.orion_client.delete_subscriptions(self.subscriptions)\n self.subscriptions = []\n\n self.save()\n\n def check_csv_columns(self, file, index_column: str = None) -> bool:\n \"\"\"Checks if a CSV has all the columns necessary to train this datamodel.\n\n Args:\n file (django.core.files.uploadedfile.TemporaryUploadedFile): training file.\n index_column (:obj:`str`): the name of the index column if there is one.\n Defaults to None.\n\n Returns:\n tuple: containing a bool which indicates if the CSV is valid. The second\n value is a dataframe in the case that CSV was valid or None if not.\n \"\"\"\n if index_column:\n df = pd.read_csv(\n io.StringIO(file.read().decode(\"UTF-8\")), index_col=index_column\n )\n else:\n df = pd.read_csv(io.StringIO(file.read().decode(\"UTF-8\")))\n\n # get the columns that should be in the csv\n columns_that_should_be_in_csv = []\n for columns in self.plcs.values():\n for column in columns:\n columns_that_should_be_in_csv.append(column)\n\n columns_csv = list(df.columns)\n\n if all(\n column in columns_csv for column in columns_that_should_be_in_csv\n ) and all(column in columns_that_should_be_in_csv for column in columns_csv):\n return True, df\n\n return False, None\n\n def _all_data_from_subscriptions_received(self) -> bool:\n \"\"\"Checks if data from all subscriptions has been received\n\n Returns:\n bool: weather if all data has been received.\n \"\"\"\n return all(\n [data_sub != {} for data_sub in self.data_from_subscriptions.values()]\n )\n\n def _create_prediction_df(self):\n \"\"\"Creates a dataframe which contains data from Orion subscriptions to make a\n prediction.\n\n Returns:\n pandas.core.frame.DataFrame: dataframe with data from subscriptions.\n \"\"\"\n dfs = []\n data_from_subscriptions = {}\n for (plc, data_sub) in self.data_from_subscriptions.items():\n df = pd.DataFrame(data=data_sub[\"rows\"], columns=data_sub[\"columns\"])\n dfs.append(df)\n data_from_subscriptions[plc] = {}\n self.data_from_subscriptions = data_from_subscriptions\n df = pd.concat(dfs, axis=1)\n return df\n\n def set_subscription_data_and_predict(self, data: dict):\n \"\"\"Sets subscription data and once it has received the data from all the\n subscriptions, it sends them to the Anomaly Detection API to generate a new\n prediction.\n\n Args:\n data (:obj:`str`): data from a subscription in OCB entity form.\n \"\"\"\n entity_id = data[\"id\"]\n\n # Get the attributes data of the subscription\n sub_data = {\"rows\": [[]], \"columns\": []}\n for key in data.keys():\n if key not in NOT_ATTRIBUTES_KEYS_SUBSCRIPTION:\n sub_data[\"rows\"][0].append(data[key][\"value\"])\n sub_data[\"columns\"].append(key)\n\n # save the data from this subscription\n if self.data_from_subscriptions[entity_id] == {}:\n logger.info(\n f\"Received data from {entity_id} for datamodel {self.id}. Columns: {sub_data['columns']}\"\n )\n # Save the time instant when the value of the sensors were updated\n for column in sub_data[\"columns\"]:\n self.dates[column] = data[\"TimeInstant\"][\"value\"]\n self.data_from_subscriptions[entity_id] = sub_data\n\n if self._all_data_from_subscriptions_received():\n logger.info(\n f\"All data received for datamodel {self.id}. 
Sending to Anomaly Backend...\"\n )\n df = self._create_prediction_df()\n payload = json.loads(df.to_json(orient=\"split\"))\n prediction = DataModelPrediction(\n datamodel=self, data=payload.copy(), dates=self.dates\n )\n payload[\"id\"] = str(prediction.id)\n prediction.task_status = self.blackbox_client.predict(self.id, payload)\n prediction.save()\n prediction.send_notification()\n\n self.save()\n\n def send_prediction_to_orion(self, predictions: dict):\n \"\"\"Sends the predictions received from the Anomaly Detection API to the Orion\n Context Broker.\n\n Args:\n predictions (:obj:`dict`): predictions made by the Anomaly Detection API.\n \"\"\"\n prediction = DataModelPrediction.objects.get(\n datamodel=self, id=predictions[\"id\"]\n )\n logger.debug(f\"Prediction is: {prediction}\")\n\n entity_id = f\"urn:ngsi-ld:AnomalyPrediction:{self.id}\"\n entity_type = \"AnomalyPrediction\"\n\n predictions_to_orion = {}\n\n for (key, value) in predictions.items():\n predictions_to_orion[key] = value[0]\n\n attrs = {\n \"name\": {\"type\": \"String\", \"value\": self.name},\n \"entities\": {\"type\": \"Object\", \"value\": self.plcs},\n \"date\": {\"type\": \"DateTime\", \"value\": datetime.now().isoformat()},\n \"predictions\": {\"type\": \"Object\", \"value\": predictions_to_orion},\n }\n\n self.orion_client.create_entity(entity_id, entity_type, attrs)\n self.num_predictions += 1\n self.save()\n\n def set_prediction_results(self, data: dict):\n \"\"\"Set the results of the prediction received by the Anomaly Detection API.\n\n Args:\n data (:obj:`dict`): a dictionary containing the predictions and the ID of\n the prediction.\n \"\"\"\n prediction = DataModelPrediction.objects.get(pk=data[\"id\"])\n prediction.predictions = {\n key: value[0] for (key, value) in data.items() if key != \"id\"\n }\n prediction.predictions_received_on = datetime.now(tz=pytz.UTC)\n prediction.save()\n self.num_predictions += 1\n self.save()\n prediction.send_to_orion()\n prediction.send_notification()\n\n def get_task_status(self):\n \"\"\"Gets the status of a task in the Anomaly Detection API.\"\"\"\n return self.blackbox_client.get_task_status(self.task_status)\n\n\ndef pre_delete_datamodel_handler(sender, instance, **kwargs):\n \"\"\"Handles the signal post delete of a model `DataModel` requesting Anomaly\n Detection to delete a Blackbox model\n\n Args:\n sender (backend.apps.models.DataModel): the datamodel just deleted.\n \"\"\"\n instance.blackbox_client.delete_blackbox(instance)\n\n\npre_delete.connect(pre_delete_datamodel_handler, sender=DataModel)\n\n\nclass DataModelPrediction(models.Model):\n \"\"\"Class which holds data of a prediction made by a `DataModel`.\"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)\n data = JSONField(help_text=\"The data to be predicted\")\n dates = JSONField(help_text=\"When the date to be predicted was created\")\n predictions = JSONField(help_text=\"The predictions\", default=dict)\n task_status = models.CharField(\n help_text=\"URL to get the progress of predicting process\",\n null=True,\n blank=True,\n max_length=512,\n )\n ack = models.BooleanField(\n help_text=\"Wether the prediction has been acknowledged\", default=False\n )\n user_ack = models.CharField(\n help_text=\"The name of the user who acknowledged the prediction\",\n max_length=128,\n blank=True,\n null=True,\n )\n created_on = models.DateTimeField(\n help_text=\"When the prediction was created\", 
auto_now_add=True\n )\n predictions_received_on = models.DateTimeField(\n help_text=\"When the predictions where received\",\n default=None,\n null=True,\n blank=True,\n )\n\n orion_client = clients.OrionClient()\n notification_client = clients.NotificationClient()\n\n def send_to_orion(self):\n \"\"\"Sends the prediction to the Orion Context Broker.\"\"\"\n\n entity_id = f\"urn:ngsi-ld:AnomalyPrediction:{self.id}\"\n entity_type = \"AnomalyPrediction\"\n\n attrs = {\n \"datamodel_id\": {\"type\": \"String\", \"value\": str(self.datamodel.id)},\n \"datamodel_name\": {\"type\": \"String\", \"value\": self.datamodel.name},\n \"data\": {\n \"type\": \"Object\",\n \"value\": {\n column: value\n for (column, value) in zip(\n self.data[\"columns\"], self.data[\"data\"][0]\n )\n },\n },\n \"dates\": {\"type\": \"Object\", \"value\": self.dates},\n \"predictions\": {\"type\": \"Object\", \"value\": self.predictions},\n }\n self.orion_client.create_entity(entity_id, entity_type, attrs)\n\n def send_notification(self):\n \"\"\"Sends the prediction to the Notification Backend.\"\"\"\n\n self.notification_client.send_prediction(self.to_dict([\"_state\"]))\n\n def to_dict(self, exclude: list = None):\n \"\"\"Serialize the class into a dict.\n\n Args:\n exclude(:obj:`list`): a list of str containing the keys to exclude.\n\n Returns:\n dict: the DataModelPrediction data.\n \"\"\"\n to_exclude = exclude\n if to_exclude is None:\n to_exclude = []\n\n data = {}\n for (key, value) in self.__dict__.items():\n if key not in to_exclude:\n if type(value) is uuid.UUID:\n data[key] = str(value)\n elif type(value) is datetime:\n data[key] = value.isoformat()\n else:\n data[key] = value\n\n return data\n\n def set_ack(self, user: str):\n \"\"\"Sets the ACK for the prediction.\n\n Args:\n user (:obj:`str`): the user who sent the ACK.\n \"\"\"\n self.ack = True\n self.user_ack = user\n self.save()\n logger.info(f\"DataModel Prediction with {self.id} ACKed by {user}.\")\n\n\nclass TrainFile(models.Model):\n datamodel = models.ForeignKey(DataModel, on_delete=models.CASCADE)\n file = models.FileField(\n blank=False,\n null=False,\n help_text=\"A CSV training file containing the columns of the DataModel\",\n )\n index_column = models.CharField(max_length=128, blank=True, null=True)\n uploaded_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n get_latest_by = \"uploaded_at\"\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
rosivagyok/Attention
[ "a4cd90dc5f992ae2fe5a7874628d04f531d4aebc" ]
[ "json_parser_train.py" ]
[ "import json\nimport os\nimport numpy as np, h5py\nimport scipy.io as sp\nimport pandas as pd\nfrom depth import depthlist\nfrom feature_smooth import feature_smooth\nfrom utils import angle_between, cross_validation\n\ndef parse_feats(f_in,f_out,f_in_d,depth,oversample):\n\n \"\"\" Load \"\"\"\n json_files = os.listdir(f_in)\n \n face_feats_all = np.zeros([2, len(json_files), 210], dtype=np.float64)\n pose_feats_all = np.zeros([2, len(json_files), 54], dtype=np.float64)\n pose_feats = np.zeros([len(json_files), 66], dtype=np.float64)\n\n for idx in range(0,len(json_files)):\n data = json.load(open(f_in + json_files[idx]))\n\n if len(data['people']) > 0:\n \n face_feats_all[0,idx] = data['people'][0]['face_keypoints']\n pose_feats_all[0,idx] = data['people'][0]['pose_keypoints']\n try:\n face_feats_all[1,idx] = data['people'][1]['face_keypoints']\n pose_feats_all[1,idx] = data['people'][1]['pose_keypoints']\n except IndexError:\n pass\n\n else:\n face_feats_all[0,idx] = np.zeros([210])\n face_feats_all[1,idx] = np.zeros([210])\n pose_feats_all[0,idx] = np.zeros([54])\n pose_feats_all[1,idx] = np.zeros([54])\n \n \"\"\" Similarity check for false positive detections;\n check which candidate yields more keypoints, use the one that has\n more\"\"\"\n k = np.count_nonzero([pose_feats_all[0,idx,0:2], pose_feats_all[0,idx,3:5], pose_feats_all[0,idx,42:44], pose_feats_all[0,idx,45:47], pose_feats_all[0,idx,6:8], pose_feats_all[0,idx,15:17]])\n a = np.count_nonzero([pose_feats_all[1,idx,0:2], pose_feats_all[1,idx,3:5], pose_feats_all[1,idx,42:44], pose_feats_all[1,idx,45:47], pose_feats_all[1,idx,6:8], pose_feats_all[1,idx,15:17]])\n\n if k < a:\n pose_feats_all[0,idx,:] = pose_feats_all[1,idx,:]\n face_feats_all[0,idx,:] = face_feats_all[1,idx,:]\n else:\n pass\n\n \"\"\" Nose - Neck \"\"\"\n pose_feats[idx,0:2] = np.array([pose_feats_all[0,idx,0:2]])\n pose_feats[idx,2:4] = np.array([pose_feats_all[0,idx,3:5]])\n\n \"\"\" REye - LEye \"\"\"\n pose_feats[idx,4:6] = np.array([pose_feats_all[0,idx,42:44]])\n pose_feats[idx,6:8] = np.array([pose_feats_all[0,idx,45:47]])\n\n \"\"\" RShoulder - LShoulder \"\"\"\n pose_feats[idx,8:10] = np.array([pose_feats_all[0,idx,6:8]])\n pose_feats[idx,10:12] = np.array([pose_feats_all[0,idx,15:17]])\n\n \"\"\" REye_refined \"\"\"\n pose_feats[idx,26:40] = np.ndarray.flatten(np.array([face_feats_all[0,idx,204:206], face_feats_all[0,idx,108:110], face_feats_all[0,idx,111:113],\n face_feats_all[0,idx,114:116], face_feats_all[0,idx,117:119], face_feats_all[0,idx,120:122], \n face_feats_all[0,idx,123:125]]))\n\n \"\"\" LEye_refined \"\"\"\n pose_feats[idx,40:54] = np.ndarray.flatten(np.array([face_feats_all[0,idx,207:209], face_feats_all[0,idx,126:128], face_feats_all[0,idx,129:131],\n face_feats_all[0,idx,132:134], face_feats_all[0,idx,135:137], face_feats_all[0,idx,138:140], \n face_feats_all[0,idx,141:143]]))\n\n \"\"\" facial keypoints if nose, REye or LEye is missing \"\"\"\n if not np.any(pose_feats[idx][0:2]):\n pose_feats[idx,0:2] = face_feats_all[0,idx,90:92]\n\n if not np.any(pose_feats[idx][4:5]):\n pose_feats[idx,4:6] = face_feats_all[0,idx,204:206]\n\n if not np.any(pose_feats[idx][6:7]):\n pose_feats[idx,6:8] = face_feats_all[0,idx,207:209]\n\n print(idx+1, ' / ', len(json_files), ' json frame files were processed.', end='\\r')\n\n \"\"\" Interpolate for zero feature space elements (name is a bit misleading...) 
\"\"\"\n\n pose_feats_smooth = feature_smooth(pose_feats)\n\n if depth==True:\n imagelist_d = os.listdir(f_in_d)\n d_list = depthlist(pose_feats_smooth,imagelist_d,f_in_d)\n else:\n d_list = np.load(f_in_d+'d_list.npy')\n print('\\nFound extracted depth for ', d_list.shape[0], ' / ', len(json_files), ' samples.')\n\n print('Calculating the rest of the feature space (distances, angles): \\n')\n \"\"\" Calculate the rest of the feature space (distances, angles) \"\"\"\n for i in range(0, len(pose_feats_smooth)):\n\n \"\"\" Recalculate coordinates to nose origin \"\"\"\n pose_feats_smooth[i,2:4] = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,0:2]\n pose_feats_smooth[i,4:6] = pose_feats_smooth[i,4:6] - pose_feats_smooth[i,0:2]\n pose_feats_smooth[i,6:8] = pose_feats_smooth[i,6:8] - pose_feats_smooth[i,0:2]\n pose_feats_smooth[i,8:10] = pose_feats_smooth[i,8:10] - pose_feats_smooth[i,0:2]\n pose_feats_smooth[i,10:12] = pose_feats_smooth[i,10:12] - pose_feats_smooth[i,0:2]\n pose_feats_smooth[i,26:40] = np.subtract(pose_feats_smooth[i,26:40].reshape((7,2)), pose_feats_smooth[i,0:2]).reshape((1,14))\n pose_feats_smooth[i,40:54] = np.subtract(pose_feats_smooth[i,40:54].reshape((7,2)), pose_feats_smooth[i,0:2]).reshape((1,14))\n pose_feats_smooth[i,0:2] = [0, 0]\n\n \"\"\" Recalculate depth to nose depth value \"\"\"\n d_list[i,1] = d_list[i,1] - d_list[i,0]\n d_list[i,2] = d_list[i,2] - d_list[i,0]\n d_list[i,3] = d_list[i,3] - d_list[i,0]\n d_list[i,4] = d_list[i,4] - d_list[i,0]\n d_list[i,5] = d_list[i,5] - d_list[i,0]\n d_list[i,0] = 0\n\n \"\"\" Euclidean distance between all face features. \"\"\"\n pose_feats_smooth[i,12] = np.linalg.norm(pose_feats_smooth[i,0:2] - pose_feats_smooth[i,4:6])\n pose_feats_smooth[i,13] = np.linalg.norm(pose_feats_smooth[i,0:2] - pose_feats_smooth[i,6:8])\n pose_feats_smooth[i,14] = np.linalg.norm(pose_feats_smooth[i,4:6] - pose_feats_smooth[i,6:8])\n\n \"\"\" Euclidean distance between neck and all face features. \"\"\"\n pose_feats_smooth[i,15] = np.linalg.norm(pose_feats_smooth[i,2:4] - pose_feats_smooth[i,0:2])\n pose_feats_smooth[i,16] = np.linalg.norm(pose_feats_smooth[i,2:4] - pose_feats_smooth[i,4:6])\n pose_feats_smooth[i,17] = np.linalg.norm(pose_feats_smooth[i,2:4] - pose_feats_smooth[i,6:8])\n\n \"\"\" Euclidean distance between RShoulder and all face features. \"\"\"\n pose_feats_smooth[i,18] = np.linalg.norm(pose_feats_smooth[i,8:10] - pose_feats_smooth[i,0:2])\n pose_feats_smooth[i,19] = np.linalg.norm(pose_feats_smooth[i,8:10] - pose_feats_smooth[i,4:6])\n pose_feats_smooth[i,20] = np.linalg.norm(pose_feats_smooth[i,8:10] - pose_feats_smooth[i,6:8])\n\n \"\"\" Euclidean distance between LShoulder and all face features. \"\"\"\n pose_feats_smooth[i,21] = np.linalg.norm(pose_feats_smooth[i,10:12] - pose_feats_smooth[i,0:2])\n pose_feats_smooth[i,22] = np.linalg.norm(pose_feats_smooth[i,10:12] - pose_feats_smooth[i,4:6])\n pose_feats_smooth[i,23] = np.linalg.norm(pose_feats_smooth[i,10:12] - pose_feats_smooth[i,6:8])\n\n \"\"\" Angle between vec(neck,nose) and vec(neck,LShoulder) \"\"\"\n u = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,0:2]\n v = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,8:10]\n m = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,10:12]\n\n pose_feats_smooth[i,24] = angle_between(u,m)\n pose_feats_smooth[i,25] = angle_between(u,v)\n\n \"\"\" Euclidean distance between Reye pupil and all eye conto. 
\"\"\"\n pose_feats_smooth[i,54] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,28:30])\n pose_feats_smooth[i,55] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,30:32])\n pose_feats_smooth[i,56] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,32:34])\n pose_feats_smooth[i,57] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,34:36])\n pose_feats_smooth[i,58] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,36:38])\n pose_feats_smooth[i,59] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,38:40])\n\n \"\"\" Euclidean distance between LEye pupil and all eye con. \"\"\"\n pose_feats_smooth[i,60] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,42:44])\n pose_feats_smooth[i,61] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,44:46])\n pose_feats_smooth[i,62] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,46:48])\n pose_feats_smooth[i,63] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,48:50])\n pose_feats_smooth[i,64] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,50:52])\n pose_feats_smooth[i,65] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,52:54])\n\n print(i+1, ' / ', len(json_files), ' samples were processed.', end='\\r')\n\n print('\\nCreated ', pose_feats_smooth.shape[0],' samples, with ', pose_feats_smooth.shape[1], ' features.')\n print('\\nLoading labels... ')\n pose_feats = pose_feats_smooth\n\n \"\"\" Load labels \"\"\"\n data = pd.read_excel('PANDORA_ATTENTION_LABELS.xlsx')\n labels = np.array(data)\n labels = labels[:,1]\n labels = np.append(labels,[0])\n\n print('\\nFound labels for ', labels.shape[0], ' / ', len(json_files), ' samples.')\n\n return pose_feats, d_list, labels\n" ]
[ [ "pandas.read_excel", "numpy.linalg.norm", "numpy.append", "numpy.any", "numpy.count_nonzero", "numpy.load", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
HeJinHub/text-rank
[ "0829ba6e8a05264f80144435c595c7233cfce103" ]
[ "textrank4zh/util.py" ]
[ "#-*- encoding:utf-8 -*-\n\"\"\"\n@author: letian\n@homepage: http://www.letiantian.me\n@github: https://github.com/someus/\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport math\nimport networkx as nx\nimport numpy as np\nimport sys\nfrom . import compute_yingda\n\ntry:\n reload(sys)\n sys.setdefaultencoding('utf-8')\nexcept:\n pass\n \nsentence_delimiters = ['?', '!', ';', '?', '!', '。', ';', '……', '…', '\\n']\nallow_speech_tags = ['an', 'i', 'j', 'l', 'n', 'nr', 'nrfg', 'ns', 'nt', 'nz', 't', 'v', 'vd', 'vn', 'eng']\n\nPY2 = sys.version_info[0] == 2\nif not PY2:\n # Python 3.x and up\n text_type = str\n string_types = (str,)\n xrange = range\n\n def as_text(v): ## 生成unicode字符串\n if v is None:\n return None\n elif isinstance(v, bytes):\n return v.decode('utf-8', errors='ignore')\n elif isinstance(v, str):\n return v\n else:\n raise ValueError('Unknown type %r' % type(v))\n\n def is_text(v):\n return isinstance(v, text_type)\n\nelse:\n # Python 2.x\n text_type = unicode\n string_types = (str, unicode)\n xrange = xrange\n\n def as_text(v):\n if v is None:\n return None\n elif isinstance(v, unicode):\n return v\n elif isinstance(v, str):\n return v.decode('utf-8', errors='ignore')\n else:\n raise ValueError('Invalid type %r' % type(v))\n\n def is_text(v):\n return isinstance(v, text_type)\n\n__DEBUG = None\n\ndef debug(*args):\n global __DEBUG\n if __DEBUG is None:\n try:\n if os.environ['DEBUG'] == '1':\n __DEBUG = True\n else:\n __DEBUG = False\n except:\n __DEBUG = False\n if __DEBUG:\n print( ' '.join([str(arg) for arg in args]) )\n\nclass AttrDict(dict):\n \"\"\"Dict that can get attribute by dot\"\"\"\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\ndef combine(word_list, window = 2):\n \"\"\"构造在window下的单词组合,用来构造单词之间的边。\n \n Keyword arguments:\n word_list -- list of str, 由单词组成的列表。\n windows -- int, 窗口大小。\n \"\"\"\n if window < 2: window = 2\n for x in xrange(1, window):\n if x >= len(word_list):\n break\n word_list2 = word_list[x:]\n res = zip(word_list, word_list2)\n for r in res:\n yield r\n\ndef get_similarity(word_list1, word_list2):\n \"\"\"默认的用于计算两个句子相似度的函数。\n\n Keyword arguments:\n word_list1, word_list2 -- 分别代表两个句子,都是由单词组成的列表\n \"\"\"\n words = list(set(word_list1 + word_list2)) \n vector1 = [float(word_list1.count(word)) for word in words]\n vector2 = [float(word_list2.count(word)) for word in words]\n \n vector3 = [vector1[x]*vector2[x] for x in xrange(len(vector1))]\n vector4 = [1 for num in vector3 if num > 0.]\n co_occur_num = sum(vector4)\n\n if abs(co_occur_num) <= 1e-12:\n return 0.\n \n denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2))) # 分母\n \n if abs(denominator) < 1e-12:\n return 0.\n \n return co_occur_num / denominator\n\ndef sort_words(vertex_source, window = 2, pagerank_config = {'alpha': 0.85,}):\n \"\"\"将单词按关键程度从大到小排序\n\n Keyword arguments:\n vertex_source -- 二维列表,子列表代表句子,子列表的元素是单词,这些单词用来构造pagerank中的节点\n edge_source -- 二维列表,子列表代表句子,子列表的元素是单词,根据单词位置关系构造pagerank中的边\n window -- 一个句子中相邻的window个单词,两两之间认为有边\n pagerank_config -- pagerank的设置\n \"\"\"\n sorted_words = []\n word_index = {}\n index_word = {}\n _vertex_source = vertex_source\n words_number = 0\n for word_list in _vertex_source:\n for word in word_list:\n if not word in word_index:\n word_index[word] = words_number\n index_word[words_number] = word\n words_number += 1\n\n graph = np.zeros((words_number, words_number))\n \n for word_list in 
_vertex_source:\n for w1, w2 in combine(word_list, window):\n if w1 in word_index and w2 in word_index:\n index1 = word_index[w1]\n index2 = word_index[w2]\n graph[index1][index2] = 1.0\n graph[index2][index1] = 1.0\n\n debug('graph:\\n', graph)\n \n nx_graph = nx.from_numpy_matrix(graph)\n scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict\n sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)\n for index, score in sorted_scores:\n item = AttrDict(word=index_word[index], weight=score)\n sorted_words.append(item)\n\n return sorted_words\n\ndef sort_sentences(sentences, words, sim_func = get_similarity,decay_rate = 1.0, pagerank_config = {'alpha': 0.85,}):\n \"\"\"将句子按照关键程度从大到小排序\n\n Keyword arguments:\n sentences -- 列表,元素是句子\n words -- 二维列表,子列表和sentences中的句子对应,子列表由单词组成\n sim_func -- 计算两个句子的相似性,参数是两个由单词组成的列表\n pagerank_config -- pagerank的设置\n \"\"\"\n\n while \"ou\" in sentences:\n sentences.remove(\"ou\")\n\n sorted_sentences = []\n _source = words\n\n sentences_num = len(sentences) \n graph = np.zeros((sentences_num, sentences_num))\n \n for x in xrange(sentences_num):\n for y in xrange(x, sentences_num):\n #similarity = sim_func( _source[x], _source[y] )\n similarity = compute_yingda.pipei(sentences[x],sentences[y])\n graph[x, y] = similarity\n graph[y, x] = similarity\n\n #进行咨询新闻特殊处理\n chapital = []\n duanluo_index = [x for x in xrange(sentences_num) if sentences[x].find(\"ou\") >= 0]\n chapital.append((0,duanluo_index[0]))\n for i in range(len(duanluo_index)-1):\n ft = (duanluo_index[i],duanluo_index[i+1])\n chapital.append(ft)\n chapital.append((duanluo_index[-1],sentences_num))\n\n #归一化\n for x in xrange(sentences_num):\n sum_lie = 0\n for y in xrange(sentences_num):\n if x != y :\n sum_lie += graph[y,x]\n if sum_lie >0 :\n graph [x,x] = 0\n else:\n graph [x,x] = 1.0\n sum_lie = 1.0\n for y in xrange(sentences_num):\n graph [y,x] = float(graph[y,x]) / sum_lie\n graph [x,y] = graph [y,x]\n\n\n #权重倾斜\n for i in xrange(len(chapital)):\n for j in xrange(chapital[i][0],chapital[i][1]):\n if chapital[i][1] - chapital[i][0] <= 1 and i != len(chapital) - 1 and sentences[chapital[i][0]].find(\",\") < 0:\n for k in xrange(chapital[i+1][0],chapital[i+1][1]):\n graph [chapital[i][0] , k] += (1.0/decay_rate - 1.0)\n elif j < chapital[i][1] - 1 and chapital[i][1] - chapital[i][0] >=3 :\n graph[j , j+1] += (1.0/decay_rate -1.0)\n\n for x in xrange(sentences_num):\n sum_lie = 0\n for y in xrange(sentences_num):\n sum_lie += graph[y , x]\n for y in xrange(sentences_num):\n graph[y,x] = float(graph[y,x]) / sum_lie\n\n nx_graph = nx.from_numpy_matrix(graph)\n scores = nx.pagerank(nx_graph, **pagerank_config) # this is a dict\n sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)\n\n for index, score in sorted_scores:\n item = AttrDict(index=index, sentence=sentences[index].replace(\"ou\",\"\").strip(), weight=score)\n sorted_sentences.append(item)\n\n return sorted_sentences\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Joejiong/mindspore
[ "083fd6565cab1aa1d3114feeacccf1cba0d55e80", "083fd6565cab1aa1d3114feeacccf1cba0d55e80", "083fd6565cab1aa1d3114feeacccf1cba0d55e80", "083fd6565cab1aa1d3114feeacccf1cba0d55e80", "083fd6565cab1aa1d3114feeacccf1cba0d55e80" ]
[ "model_zoo/official/cv/resnet/eval.py", "mindspore/nn/layer/lstm.py", "tests/ut/python/dataset/test_filterop.py", "model_zoo/official/nlp/mass/train.py", "tests/ut/python/pynative_mode/test_implicit_conversion.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"train resnet.\"\"\"\nimport os\nimport random\nimport argparse\nimport numpy as np\nfrom mindspore import context\nfrom mindspore import dataset as de\nfrom mindspore.nn.loss import SoftmaxCrossEntropyWithLogits\nfrom mindspore.train.model import Model\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\n\nparser = argparse.ArgumentParser(description='Image classification')\nparser.add_argument('--net', type=str, default=None, help='Resnet Model, either resnet50 or resnet101')\nparser.add_argument('--dataset', type=str, default=None, help='Dataset, either cifar10 or imagenet2012')\n\nparser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')\nparser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')\nparser.add_argument('--device_target', type=str, default='Ascend', help='Device target')\nargs_opt = parser.parse_args()\n\nrandom.seed(1)\nnp.random.seed(1)\nde.config.set_seed(1)\n\nif args_opt.net == \"resnet50\":\n from src.resnet import resnet50 as resnet\n if args_opt.dataset == \"cifar10\":\n from src.config import config1 as config\n from src.dataset import create_dataset1 as create_dataset\n else:\n from src.config import config2 as config\n from src.dataset import create_dataset2 as create_dataset\nelif args_opt.net == \"resnet101\":\n from src.resnet import resnet101 as resnet\n from src.config import config3 as config\n from src.dataset import create_dataset3 as create_dataset\nelse:\n from src.resnet import se_resnet50 as resnet\n from src.config import config4 as config\n from src.dataset import create_dataset4 as create_dataset\n\nif __name__ == '__main__':\n target = args_opt.device_target\n\n # init context\n context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)\n if target != \"GPU\":\n device_id = int(os.getenv('DEVICE_ID'))\n context.set_context(device_id=device_id)\n\n # create dataset\n dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size,\n target=target)\n step_size = dataset.get_dataset_size()\n\n # define net\n net = resnet(class_num=config.class_num)\n\n # load checkpoint\n param_dict = load_checkpoint(args_opt.checkpoint_path)\n load_param_into_net(net, param_dict)\n net.set_train(False)\n\n # define loss, model\n if args_opt.dataset == \"imagenet2012\":\n if not config.use_label_smooth:\n config.label_smooth_factor = 0.0\n loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\",\n smooth_factor=config.label_smooth_factor, num_classes=config.class_num)\n else:\n loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n\n # define model\n model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})\n\n # eval model\n res = model.eval(dataset)\n print(\"result:\", res, 
\"ckpt=\", args_opt.checkpoint_path)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"lstm\"\"\"\nimport math\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import context\nfrom mindspore._checkparam import Validator as validator\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter, ParameterTuple\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.nn.cell import Cell\nfrom mindspore.ops import operations as P\nfrom ..._checkparam import Rel\n\n__all__ = ['LSTM', 'LSTMCell']\n\n\nclass LSTM(Cell):\n r\"\"\"\n LSTM (Long Short-Term Memory) layer.\n\n Applies a LSTM to the input.\n\n There are two pipelines connecting two consecutive cells in a LSTM model; one is cell state pipeline\n and the other is hidden state pipeline. Denote two consecutive time nodes as :math:`t-1` and :math:`t`.\n Given an input :math:`x_t` at time :math:`t`, an hidden state :math:`h_{t-1}` and an cell\n state :math:`c_{t-1}` of the layer at time :math:`{t-1}`, the cell state and hidden state at\n time :math:`t` is computed using an gating mechanism. Input gate :math:`i_t` is designed to protect the cell\n from perturbation by irrelevant inputs. Forget gate :math:`f_t` affords protection of the cell by forgetting\n some information in the past, which is stored in :math:`h_{t-1}`. Output gate :math:`o_t` protects other\n units from perturbation by currently irrelevant memory contents. Candidate cell state :math:`\\tilde{c}_t` is\n calculated with the current input, on which the input gate will be applied. Finally, current cell state\n :math:`c_{t}` and hidden state :math:`h_{t}` are computed with the calculated gates and cell states. The complete\n formulation is as follows.\n\n .. math::\n \\begin{array}{ll} \\\\\n i_t = \\sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\\\\n f_t = \\sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\\\\n \\tilde{c}_t = \\tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\\\\n o_t = \\sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\\\\n c_t = f_t * c_{(t-1)} + i_t * \\tilde{c}_t \\\\\n h_t = o_t * \\tanh(c_t) \\\\\n \\end{array}\n\n Here :math:`\\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`\n are learnable weights between the output and the input in the formula. 
For instance,\n :math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.\n Details can be found in paper `LONG SHORT-TERM MEMORY\n <https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and\n `Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling\n <https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.\n\n Args:\n input_size (int): Number of features of input.\n hidden_size (int): Number of features of hidden layer.\n num_layers (int): Number of layers of stacked LSTM . Default: 1.\n has_bias (bool): Whether the cell has bias `b_ih` and `b_hh`. Default: True.\n batch_first (bool): Specifies whether the first dimension of input is batch_size. Default: False.\n dropout (float, int): If not 0, append `Dropout` layer on the outputs of each\n LSTM layer except the last layer. Default 0. The range of dropout is [0.0, 1.0].\n bidirectional (bool): Specifies whether it is a bidirectional LSTM. Default: False.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape (seq_len, batch_size, `input_size`).\n - **hx** (tuple) - A tuple of two Tensors (h_0, c_0) both of data type mindspore.float32 or\n mindspore.float16 and shape (num_directions * `num_layers`, batch_size, `hidden_size`).\n Data type of `hx` should be the same as `input`.\n\n Outputs:\n Tuple, a tuple constains (`output`, (`h_n`, `c_n`)).\n\n - **output** (Tensor) - Tensor of shape (seq_len, batch_size, num_directions * `hidden_size`).\n - **hx_n** (tuple) - A tuple of two Tensor (h_n, c_n) both of shape\n (num_directions * `num_layers`, batch_size, `hidden_size`).\n\n Examples:\n >>> class LstmNet(nn.Cell):\n >>> def __init__(self, input_size, hidden_size, num_layers, has_bias, batch_first, bidirectional):\n >>> super(LstmNet, self).__init__()\n >>> self.lstm = nn.LSTM(input_size=input_size,\n >>> hidden_size=hidden_size,\n >>> num_layers=num_layers,\n >>> has_bias=has_bias,\n >>> batch_first=batch_first,\n >>> bidirectional=bidirectional,\n >>> dropout=0.0)\n >>>\n >>> def construct(self, inp, h0, c0):\n >>> return self.lstm(inp, (h0, c0))\n >>>\n >>> net = LstmNet(10, 12, 2, has_bias=True, batch_first=True, bidirectional=False)\n >>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32))\n >>> h0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32))\n >>> c0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32))\n >>> output, (hn, cn) = net(input, h0, c0)\n \"\"\"\n\n def __init__(self,\n input_size,\n hidden_size,\n num_layers=1,\n has_bias=True,\n batch_first=False,\n dropout=0,\n bidirectional=False):\n super(LSTM, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.has_bias = has_bias\n self.batch_first = validator.check_value_type(\"batch_first\", batch_first, [bool], self.cls_name)\n self.hidden_size = validator.check_integer(\"hidden_size\", hidden_size, 0, Rel.GT, self.cls_name)\n self.num_layers = validator.check_integer(\"num_layers\", num_layers, 0, Rel.GT, self.cls_name)\n self.dropout = float(dropout)\n self.bidirectional = bidirectional\n if self.batch_first:\n self.transpose1 = P.Transpose()\n self.transpose2 = P.Transpose()\n num_directions = 2 if self.bidirectional else 1\n self.cpu_target = False\n enable_debug = context.get_context(\"enable_debug_runtime\")\n if context.get_context(\"device_target\") == \"CPU\" and not enable_debug:\n self.cpu_target = True\n if not self.cpu_target:\n self.lstm = 
P.LSTM(input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout)\n weight_size = 0\n gate_size = 4 * self.hidden_size\n for layer in range(self.num_layers):\n input_layer_size = self.input_size if layer == 0 else self.hidden_size * num_directions\n increment_size = gate_size * input_layer_size\n increment_size += gate_size * self.hidden_size\n if self.has_bias:\n increment_size += 2 * gate_size\n weight_size += increment_size * num_directions\n stdv = 1 / math.sqrt(hidden_size)\n w_np = np.random.uniform(-stdv, stdv, (weight_size, 1, 1)).astype(np.float32)\n self.weight = Parameter(initializer(Tensor(w_np), [weight_size, 1, 1]), name='weight')\n else:\n input_size_list = []\n input_size_list.append(self.input_size)\n for i in range(self.num_layers - 1):\n input_size_list.append(self.hidden_size * num_directions)\n weights = []\n layers = []\n bias_size = 0 if not self.has_bias else num_directions * self.hidden_size * 4\n stdv = 1 / math.sqrt(hidden_size)\n for i in range(num_layers):\n weight_size = (input_size_list[i] + self.hidden_size) * num_directions * self.hidden_size * 4\n if has_bias:\n weight_size = weight_size + bias_size\n w_np = np.random.uniform(-stdv, stdv, (weight_size, 1, 1)).astype(np.float32)\n weights.append(Parameter(initializer(Tensor(w_np), w_np.shape), name='weight' + str(i)))\n layers.append(nn.LSTMCell(input_size=input_size_list[i],\n hidden_size=self.hidden_size,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout))\n self.lstms = layers\n self.weight = ParameterTuple(tuple(weights))\n self.fill = P.Fill()\n self.shape = P.Shape()\n\n def construct(self, x, hx):\n if self.batch_first:\n x = self.transpose1(x, (1, 0, 2))\n if not self.cpu_target:\n h, c = hx\n output, h, c, _, _ = self.lstm(x, h, c, self.weight)\n if self.batch_first:\n output = self.transpose2(output, (1, 0, 2))\n return (output, (h, c))\n h, c = hx\n output, hn, cn, _, _ = self.lstms[0](x, h[0], c[0], self.weight[0])\n for i in range(1, self.num_layers):\n output, hn, cn, _, _ = self.lstms[i](output, h[i], c[i], self.weight[i])\n if self.batch_first:\n output = self.transpose2(output, (1, 0, 2))\n return (output, (hn, cn))\n\n\nclass LSTMCell(Cell):\n r\"\"\"\n LSTM (Long Short-Term Memory) layer.\n\n Applies a LSTM layer to the input.\n\n There are two pipelines connecting two consecutive cells in a LSTM model; one is cell state pipeline\n and the other is hidden state pipeline. Denote two consecutive time nodes as :math:`t-1` and :math:`t`.\n Given an input :math:`x_t` at time :math:`t`, an hidden state :math:`h_{t-1}` and an cell\n state :math:`c_{t-1}` of the layer at time :math:`{t-1}`, the cell state and hidden state at\n time :math:`t` is computed using an gating mechanism. Input gate :math:`i_t` is designed to protect the cell\n from perturbation by irrelevant inputs. Forget gate :math:`f_t` affords protection of the cell by forgetting\n some information in the past, which is stored in :math:`h_{t-1}`. Output gate :math:`o_t` protects other\n units from perturbation by currently irrelevant memory contents. Candidate cell state :math:`\\tilde{c}_t` is\n calculated with the current input, on which the input gate will be applied. Finally, current cell state\n :math:`c_{t}` and hidden state :math:`h_{t}` are computed with the calculated gates and cell states. The complete\n formulation is as follows.\n\n .. 
math::\n \\begin{array}{ll} \\\\\n i_t = \\sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\\\\n f_t = \\sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\\\\n \\tilde{c}_t = \\tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\\\\n o_t = \\sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\\\\n c_t = f_t * c_{(t-1)} + i_t * \\tilde{c}_t \\\\\n h_t = o_t * \\tanh(c_t) \\\\\n \\end{array}\n\n Here :math:`\\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`\n are learnable weights between the output and the input in the formula. For instance,\n :math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.\n Details can be found in paper `LONG SHORT-TERM MEMORY\n <https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and\n `Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling\n <https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.\n\n Args:\n input_size (int): Number of features of input.\n hidden_size (int): Number of features of hidden layer.\n layer_index (int): index of current layer of stacked LSTM . Default: 0.\n has_bias (bool): Whether the cell has bias `b_ih` and `b_hh`. Default: True.\n batch_first (bool): Specifies whether the first dimension of input is batch_size. Default: False.\n dropout (float, int): If not 0, append `Dropout` layer on the outputs of each\n LSTM layer except the last layer. Default 0. The range of dropout is [0.0, 1.0].\n bidirectional (bool): Specifies whether this is a bidirectional LSTM. If set True,\n number of directions will be 2 otherwise number of directions is 1. Default: False.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape (seq_len, batch_size, `input_size`).\n - **h** - data type mindspore.float32 or\n mindspore.float16 and shape (num_directions * `num_layers`, batch_size, `hidden_size`).\n - **c** - data type mindspore.float32 or\n mindspore.float16 and shape (num_directions * `num_layers`, batch_size, `hidden_size`).\n Data type of `h' and 'c' should be the same of `input`.\n\n Outputs:\n `output`, `h_n`, `c_n`, 'reserve', 'state'.\n\n - **output** (Tensor) - Tensor of shape (seq_len, batch_size, num_directions * `hidden_size`).\n - **h** - A Tensor with shape (num_directions * `num_layers`, batch_size, `hidden_size`).\n - **c** - A Tensor with shape (num_directions * `num_layers`, batch_size, `hidden_size`).\n - **reserve** - reserved\n - **state** - reserved\n\n Examples:\n >>> class LstmNet(nn.Cell):\n >>> def __init__(self, input_size, hidden_size, layer_index, has_bias, batch_first, bidirectional):\n >>> super(LstmNet, self).__init__()\n >>> self.lstm = nn.LSTMCell(input_size=input_size,\n >>> hidden_size=hidden_size,\n >>> layer_index=layer_index,\n >>> has_bias=has_bias,\n >>> batch_first=batch_first,\n >>> bidirectional=bidirectional,\n >>> dropout=0.0)\n >>>\n >>> def construct(self, inp, h0, c0):\n >>> return self.lstm(inp, (h0, c0))\n >>>\n >>> net = LstmNet(10, 12, 2, has_bias=True, batch_first=True, bidirectional=False)\n >>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32))\n >>> h0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32))\n >>> c0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32))\n >>> output, hn, cn, _, _ = net(input, h0, c0)\n \"\"\"\n\n def __init__(self,\n input_size,\n hidden_size,\n has_bias=True,\n batch_first=False,\n dropout=0,\n bidirectional=False):\n super(LSTMCell, self).__init__()\n self.input_size = 
input_size\n self.hidden_size = hidden_size\n self.has_bias = has_bias\n self.batch_first = validator.check_value_type(\"batch_first\", batch_first, [bool], self.cls_name)\n self.dropout = float(dropout)\n self.bidirectional = bidirectional\n self.num_directions = 1\n if self.bidirectional:\n self.num_directions = 2\n if self.batch_first:\n self.transpose1 = P.Transpose()\n self.transpose2 = P.Transpose()\n\n self.lstm = P.LSTM(input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=1,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout)\n\n def construct(self, x, h, c, w):\n if self.batch_first:\n x = self.transpose1(x, (1, 0, 2))\n output, hn, cn, _, _ = self.lstm(x, h, c, w)\n if self.batch_first:\n output = self.transpose2(output, (1, 0, 2))\n return output, hn, cn, _, _\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.vision.c_transforms as cde\n\nDATA_DIR = [\"../data/dataset/test_tf_file_3_images/train-0000-of-0001.data\"]\nSCHEMA_DIR = \"../data/dataset/test_tf_file_3_images/datasetSchema.json\"\n\n\n# test for predicate\ndef test_diff_predicate_func():\n def test_filter(predicate_func):\n transforms = [\n cde.Decode(),\n cde.Resize([64, 64])\n ]\n dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\", \"label\"], shuffle=False)\n dataset = dataset.map(input_columns=[\"image\"], operations=transforms, num_parallel_workers=1)\n dataset = dataset.filter(input_columns=[\"image\", \"label\"], predicate=predicate_func, num_parallel_workers=4)\n\n num_iter = 0\n label_list = []\n for data in dataset.create_dict_iterator():\n num_iter += 1\n label = data[\"label\"]\n label_list.append(label)\n assert num_iter == 1\n assert label_list[0] == 3\n\n test_filter(lambda image, label: label == 3)\n test_filter(lambda image, label: label[0] == 3)\n test_filter(lambda image, label: label == [3])\n test_filter(lambda image, label: label == np.array([3]))\n test_filter(lambda image, label: label == np.array(3))\n\n\ndef filter_func_ge(data):\n return data <= 10\n\n\ndef generator_1d():\n for i in range(64):\n yield (np.array(i),)\n\n\n# test with GeneratorDataset\ndef test_filter_by_generator_with_no():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_f = dataset.filter(predicate=lambda data: data < 11, num_parallel_workers=4)\n num_iter = 0\n expected_rs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for item in dataset_f.create_dict_iterator():\n assert item[\"data\"] == expected_rs[num_iter]\n num_iter += 1\n\n\n# test with repeatOp before\ndef test_filter_by_generator_with_repeat():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_r = dataset.repeat(4)\n dataset_f = dataset_r.filter(predicate=filter_func_ge, num_parallel_workers=4)\n num_iter = 0\n ret_data = []\n expected_rs 
= [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for item in dataset_f.create_dict_iterator():\n num_iter += 1\n ret_data.append(item[\"data\"])\n assert num_iter == 44\n for i in range(4):\n for ii, _ in enumerate(expected_rs):\n index = i * len(expected_rs) + ii\n assert ret_data[index] == expected_rs[ii]\n\n\n# test with repeatOp after\ndef test_filter_by_generator_with_repeat_after():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_f = dataset.filter(predicate=filter_func_ge, num_parallel_workers=4)\n dataset_r = dataset_f.repeat(4)\n num_iter = 0\n ret_data = []\n expected_rs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for item in dataset_r.create_dict_iterator():\n num_iter += 1\n ret_data.append(item[\"data\"])\n assert num_iter == 44\n for i in range(4):\n for ii, _ in enumerate(expected_rs):\n index = i * len(expected_rs) + ii\n assert ret_data[index] == expected_rs[ii]\n\n\ndef filter_func_batch(data):\n return data[0] <= 8\n\n\ndef filter_func_batch_after(data):\n return data <= 20\n\n\n# test with batchOp before\ndef test_filter_by_generator_with_batch():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_b = dataset.batch(4)\n dataset_f = dataset_b.filter(predicate=filter_func_batch, num_parallel_workers=4)\n num_iter = 0\n ret_data = []\n for item in dataset_f.create_dict_iterator():\n num_iter += 1\n ret_data.append(item[\"data\"])\n assert num_iter == 3\n assert ret_data[0][0] == 0\n assert ret_data[1][0] == 4\n assert ret_data[2][0] == 8\n\n\n# test with batchOp after\ndef test_filter_by_generator_with_batch_after():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_f = dataset.filter(predicate=filter_func_batch_after, num_parallel_workers=4)\n dataset_b = dataset_f.batch(4)\n num_iter = 0\n ret_data = []\n for item in dataset_b.create_dict_iterator():\n num_iter += 1\n ret_data.append(item[\"data\"])\n assert num_iter == 6\n assert ret_data[0][0] == 0\n assert ret_data[1][0] == 4\n assert ret_data[5][0] == 20\n\n\ndef filter_func_shuffle(data):\n return data <= 20\n\n\n# test with batchOp before\ndef test_filter_by_generator_with_shuffle():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_s = dataset.shuffle(4)\n dataset_f = dataset_s.filter(predicate=filter_func_shuffle, num_parallel_workers=4)\n num_iter = 0\n for _ in dataset_f.create_dict_iterator():\n num_iter += 1\n assert num_iter == 21\n\n\ndef filter_func_shuffle_after(data):\n return data <= 20\n\n\n# test with batchOp after\ndef test_filter_by_generator_with_shuffle_after():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_f = dataset.filter(predicate=filter_func_shuffle_after, num_parallel_workers=4)\n dataset_s = dataset_f.shuffle(4)\n num_iter = 0\n for _ in dataset_s.create_dict_iterator():\n num_iter += 1\n assert num_iter == 21\n\n\ndef generator_1d_zip1():\n for i in range(64):\n yield (np.array(i),)\n\n\ndef generator_1d_zip2():\n for i in range(64):\n yield (np.array(i + 100),)\n\n\ndef filter_func_zip(data1, data2):\n _ = data2\n return data1 <= 20\n\n\ndef filter_func_zip_after(data1):\n return data1 <= 20\n\n\n# test with zipOp before\ndef test_filter_by_generator_with_zip():\n dataset1 = ds.GeneratorDataset(generator_1d_zip1, [\"data1\"])\n dataset2 = ds.GeneratorDataset(generator_1d_zip2, [\"data2\"])\n dataz = ds.zip((dataset1, dataset2))\n dataset_f = dataz.filter(predicate=filter_func_zip, num_parallel_workers=1)\n num_iter = 0\n ret_data = []\n for item in dataset_f.create_dict_iterator():\n num_iter += 1\n 
ret_data.append({\"data1\": item[\"data1\"], \"data2\": item[\"data2\"]})\n assert num_iter == 21\n assert ret_data[0][\"data1\"] == 0\n assert ret_data[0][\"data2\"] == 100\n assert ret_data[5][\"data1\"] == 5\n assert ret_data[5][\"data2\"] == 105\n\n\n# test with zipOp after\ndef test_filter_by_generator_with_zip_after():\n dataset1 = ds.GeneratorDataset(generator_1d_zip1, [\"data1\"])\n dataset2 = ds.GeneratorDataset(generator_1d_zip1, [\"data2\"])\n dt1 = dataset1.filter(predicate=filter_func_zip_after, num_parallel_workers=4)\n dt2 = dataset2.filter(predicate=filter_func_zip_after, num_parallel_workers=4)\n dataz = ds.zip((dt1, dt2))\n num_iter = 0\n ret_data = []\n for item in dataz.create_dict_iterator():\n num_iter += 1\n ret_data.append({\"data1\": item[\"data1\"], \"data2\": item[\"data2\"]})\n assert num_iter == 21\n assert ret_data[0][\"data1\"] == 0\n assert ret_data[0][\"data2\"] == 0\n assert ret_data[5][\"data1\"] == 5\n assert ret_data[5][\"data2\"] == 5\n\n\ndef filter_func_map(col1, col2):\n _ = col2\n return col1[0] > 8\n\n\ndef filter_func_map_part(col1):\n return col1 < 3\n\n\ndef filter_func_map_all(col1, col2):\n _, _ = col1, col2\n return True\n\n\ndef generator_mc(maxid=20):\n for i in range(maxid):\n yield (np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]]))\n\n\ndef func_map(data_col1, data_col2):\n return (data_col1, data_col2)\n\n\ndef func_map_part(data_col1):\n return data_col1\n\n\n# test with map\ndef test_filter_by_generator_with_map_all_col():\n dataset = ds.GeneratorDataset(generator_mc(12), [\"col1\", \"col2\"])\n dataset_map = dataset.map(input_columns=[\"col1\"], output_columns=[\"col1\"], operations=func_map_part)\n # dataset_map = dataset.map( operations=func_map_part)\n dataset_f = dataset_map.filter(input_columns=[\"col1\"], predicate=filter_func_map_part, num_parallel_workers=1)\n num_iter = 0\n ret_data = []\n for item in dataset_f.create_dict_iterator():\n num_iter += 1\n ret_data.append(item[\"col1\"])\n assert num_iter == 3\n assert ret_data[0] == 0\n assert ret_data[1] == 1\n\n\n# test with map\ndef test_filter_by_generator_with_map_part_col():\n dataset = ds.GeneratorDataset(generator_mc(12), [\"col1\", \"col2\"])\n dataset_map = dataset.map(input_columns=[\"col1\"], output_columns=[\"out1\"], operations=func_map_part)\n\n dataset_f = dataset_map.filter(input_columns=[\"out1\", \"col2\"], predicate=filter_func_map, num_parallel_workers=4)\n num_iter = 0\n ret_data = []\n for item in dataset_f.create_dict_iterator():\n num_iter += 1\n print(item)\n ret_data.append(item[\"out1\"])\n assert num_iter == 3\n assert ret_data[0] == 9\n assert ret_data[2] == 11\n\n\ndef filter_func_rename(data):\n return data > 8\n\n\n# test with rename before\ndef test_filter_by_generator_with_rename():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset_b = dataset.rename(input_columns=[\"data\"], output_columns=[\"col1\"])\n dataset_f = dataset_b.filter(predicate=filter_func_rename, num_parallel_workers=4)\n num_iter = 0\n ret_data = []\n for item in dataset_f.create_dict_iterator():\n num_iter += 1\n ret_data.append(item[\"col1\"])\n assert num_iter == 55\n assert ret_data[0] == 9\n assert ret_data[54] == 63\n\n\n# test input_column\ndef filter_func_input_column1(col1, col2):\n _ = col2\n return col1[0] < 8\n\n\ndef filter_func_input_column2(col1):\n return col1[0] < 8\n\n\ndef filter_func_input_column3(col1):\n _ = col1\n return True\n\n\n# test with input_columns\ndef test_filter_by_generator_with_input_column():\n dataset = 
ds.GeneratorDataset(generator_mc(64), [\"col1\", \"col2\"])\n dataset_map = dataset.map(input_columns=[\"col1\"], output_columns=[\"out1\"], operations=func_map_part)\n dataset_f1 = dataset_map.filter(input_columns=[\"out1\", \"col2\"], predicate=filter_func_input_column1,\n num_parallel_workers=4)\n dataset_f2 = dataset_f1.filter(input_columns=[\"out1\"], predicate=filter_func_input_column2, num_parallel_workers=4)\n dataset_f3 = dataset_f2.filter(input_columns=[\"col2\"], predicate=filter_func_input_column3, num_parallel_workers=4)\n dataset_f4 = dataset_f3.filter(predicate=filter_func_input_column1, num_parallel_workers=4)\n num_iter = 0\n ret_data = []\n for item in dataset_f4.create_dict_iterator():\n num_iter += 1\n ret_data.append(item[\"out1\"])\n assert num_iter == 8\n assert ret_data[0] == 0\n assert ret_data[7] == 7\n\n\n# test kFilterPartial\ndef generator_mc_p0(maxid=20):\n for i in range(maxid):\n yield (np.array([i]), np.array([i + 100]))\n\n\ndef generator_mc_p1(maxid=20):\n for i in range(maxid):\n yield (np.array([i + 200]), np.array([i + 300]))\n\n\ndef filter_func_Partial_0(col1, col2, col3, col4):\n _, _, _ = col2, col3, col4\n filter_data = [0, 1, 2, 3, 4, 11]\n if col1[0] in filter_data:\n return False\n return True\n\n\n# test with row_data_buffer > 1\ndef test_filter_by_generator_Partial0():\n dataset1 = ds.GeneratorDataset(source=generator_mc_p0(), column_names=[\"col1\", \"col2\"])\n dataset2 = ds.GeneratorDataset(source=generator_mc_p1(), column_names=[\"col3\", \"col4\"])\n dataset_zip = ds.zip((dataset1, dataset2))\n dataset_f1 = dataset_zip.filter(predicate=filter_func_Partial_0, num_parallel_workers=2)\n ret = []\n for item in dataset_f1.create_dict_iterator():\n ret.append(item[\"col1\"])\n assert ret[0] == 5\n assert ret[6] == 12\n\n\n# test with row_data_buffer > 1\ndef test_filter_by_generator_Partial1():\n dataset1 = ds.GeneratorDataset(source=generator_mc_p0(), column_names=[\"col1\", \"col2\"])\n dataset2 = ds.GeneratorDataset(source=generator_mc_p1(), column_names=[\"col3\", \"col4\"])\n dataset_zip = ds.zip((dataset1, dataset2))\n dataset_f1 = dataset_zip.filter(predicate=filter_func_Partial_0, num_parallel_workers=2)\n dataset_map = dataset_f1.map(input_columns=[\"col1\"], output_columns=[\"out1\"], operations=lambda x1: x1 + 400)\n ret = []\n for item in dataset_map.create_dict_iterator():\n ret.append(item[\"out1\"])\n assert ret[0] == 405\n assert ret[6] == 412\n\n\n# test with row_data_buffer > 1\ndef test_filter_by_generator_Partial2():\n dataset1 = ds.GeneratorDataset(source=generator_mc_p0(), column_names=[\"col1\", \"col2\"])\n dataset2 = ds.GeneratorDataset(source=generator_mc_p1(), column_names=[\"col3\", \"col4\"])\n\n dataset1f = dataset1.filter(input_columns=[\"col1\"], predicate=lambda x: x not in [3, 7, 9], num_parallel_workers=2)\n dataset2f = dataset2.filter(input_columns=[\"col3\"], predicate=lambda x: x not in [203, 207, 209],\n num_parallel_workers=2)\n dataset_zip = ds.zip((dataset1f, dataset2f))\n dataset_map = dataset_zip.map(input_columns=[\"col1\", \"col3\"], output_columns=[\"out1\", \"out3\"],\n operations=lambda x1, x3: (x1 + 400, x3 + 500))\n ret1 = []\n ret3 = []\n for item in dataset_map.create_dict_iterator():\n ret1.append(item[\"out1\"])\n ret3.append(item[\"out3\"])\n assert ret1[0] == 400\n assert ret1[6] == 408\n assert ret3[0] == 700\n assert ret3[6] == 708\n\n\ndef filter_func_Partial(col1, col2):\n _ = col2\n return col1[0] % 3 == 0\n\n\ndef generator_big(maxid=20):\n for i in range(maxid):\n yield 
(np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]]))\n\n\n# test with row_data_buffer > 1\ndef test_filter_by_generator_Partial():\n dataset = ds.GeneratorDataset(source=generator_mc(99), column_names=[\"col1\", \"col2\"])\n dataset_s = dataset.shuffle(4)\n dataset_f1 = dataset_s.filter(input_columns=[\"col1\", \"col2\"], predicate=filter_func_Partial, num_parallel_workers=1)\n\n for item in dataset_f1.create_dict_iterator():\n assert item[\"col1\"] % 3 == 0\n\n\ndef filter_func_cifar(col1, col2):\n _ = col1\n return col2 % 3 == 0\n\n\n# test with cifar10\ndef test_filte_case_dataset_cifar10():\n DATA_DIR_10 = \"../data/dataset/testCifar10Data\"\n dataset_c = ds.Cifar10Dataset(dataset_dir=DATA_DIR_10, num_samples=100000, shuffle=False)\n dataset_f1 = dataset_c.filter(input_columns=[\"image\", \"label\"], predicate=filter_func_cifar, num_parallel_workers=1)\n for item in dataset_f1.create_dict_iterator():\n # in this example, each dictionary has keys \"image\" and \"label\"\n assert item[\"label\"] % 3 == 0\n\n\n# column id sort\n\ndef generator_sort1(maxid=20):\n for i in range(maxid):\n yield (np.array([i]), np.array([i + 100]), np.array([i + 200]))\n\n\ndef generator_sort2(maxid=20):\n for i in range(maxid):\n yield (np.array([i + 300]), np.array([i + 400]), np.array([i + 500]))\n\n\ndef filter_func_part_sort(col1, col2, col3, col4, col5, col6):\n _, _, _, _, _, _ = col1, col2, col3, col4, col5, col6\n return True\n\n\ndef filter_func_map_sort(col1, col2, col3):\n return (col1, col2, col3)\n\n\ndef test_filter_by_generator_with_map_all_sort():\n dataset1 = ds.GeneratorDataset(generator_sort1(10), [\"col1\", \"col2\", \"col3\"])\n dataset2 = ds.GeneratorDataset(generator_sort2(10), [\"col4 \", \"col5\", \"col6\"])\n\n dataz = ds.zip((dataset1, dataset2))\n dataset_f = dataz.filter(predicate=filter_func_part_sort, num_parallel_workers=1)\n num_iter = 0\n ret_data = []\n for item in dataset_f.create_dict_iterator():\n num_iter += 1\n ret_data.append(item)\n\n assert num_iter == 10\n assert ret_data[0][\"col1\"] == 0\n assert ret_data[9][\"col6\"] == 509\n\ndef test_filter_by_generator_get_dataset_size():\n dataset = ds.GeneratorDataset(generator_1d, [\"data\"])\n dataset = dataset.filter(predicate=filter_func_shuffle_after, num_parallel_workers=4)\n data_sie = dataset.get_dataset_size()\n\n num_iter = 0\n for _ in dataset.create_dict_iterator():\n num_iter += 1\n assert data_sie == num_iter\n\n\nif __name__ == '__main__':\n test_diff_predicate_func()\n test_filte_case_dataset_cifar10()\n test_filter_by_generator_Partial0()\n test_filter_by_generator_Partial1()\n test_filter_by_generator_Partial2()\n test_filter_by_generator_with_batch()\n test_filter_by_generator_with_batch_after()\n test_filter_by_generator_with_input_column()\n test_filter_by_generator_with_map_all_col()\n test_filter_by_generator_with_map_all_sort()\n test_filter_by_generator_with_map_part_col()\n test_filter_by_generator_with_no()\n test_filter_by_generator_with_rename()\n test_filter_by_generator_with_repeat()\n test_filter_by_generator_with_repeat_after()\n test_filter_by_generator_with_shuffle()\n test_filter_by_generator_with_shuffle_after()\n test_filter_by_generator_with_zip()\n test_filter_by_generator_with_zip_after()\n test_filter_by_generator_Partial()\n test_filter_by_generator_get_dataset_size()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a 
copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Train api.\"\"\"\nimport os\nimport argparse\nimport pickle\n\nimport numpy as np\n\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.nn import Momentum\nfrom mindspore.nn.optim import Adam, Lamb\nfrom mindspore.train.model import Model\nfrom mindspore.train.loss_scale_manager import DynamicLossScaleManager, FixedLossScaleManager\nfrom mindspore.train.callback import CheckpointConfig, ModelCheckpoint\nfrom mindspore import context, ParallelMode, Parameter\nfrom mindspore.communication import management as MultiAscend\nfrom mindspore.train.serialization import load_checkpoint\n\nfrom config import TransformerConfig\nfrom src.dataset import load_dataset\nfrom src.transformer import TransformerNetworkWithLoss, TransformerTrainOneStepWithLossScaleCell\nfrom src.transformer.infer_mass import infer\nfrom src.utils import LossCallBack\nfrom src.utils import one_weight, zero_weight, weight_variable\nfrom src.utils import square_root_schedule\nfrom src.utils.lr_scheduler import polynomial_decay_scheduler, BertLearningRate\n\nparser = argparse.ArgumentParser(description='MASS train entry point.')\nparser.add_argument(\"--config\", type=str, required=True, help=\"model config json file path.\")\nparser.add_argument(\"--platform\", type=str, required=True, help=\"model working platform.\")\n\ndef get_config(config):\n config = TransformerConfig.from_json_file(config)\n config.compute_type = mstype.float16\n config.dtype = mstype.float32\n return config\n\n\ndef _train(model, config: TransformerConfig,\n pre_training_dataset=None, fine_tune_dataset=None, test_dataset=None,\n callbacks: list = None):\n \"\"\"\n Train model.\n\n Args:\n model (Model): MindSpore model instance.\n config (TransformerConfig): Config of mass model.\n pre_training_dataset (Dataset): Pre-training dataset.\n fine_tune_dataset (Dataset): Fine-tune dataset.\n test_dataset (Dataset): Test dataset.\n callbacks (list): A list of callbacks.\n \"\"\"\n callbacks = callbacks if callbacks else []\n\n if pre_training_dataset is not None:\n print(\" | Start pre-training job.\")\n\n if os.getenv(\"RANK_SIZE\") is not None and int(os.getenv(\"RANK_SIZE\")) > 1:\n print(f\" | Rank {MultiAscend.get_rank()} Call model train.\")\n\n model.train(config.epochs, pre_training_dataset,\n callbacks=callbacks, dataset_sink_mode=config.dataset_sink_mode,\n sink_size=config.dataset_sink_step)\n\n # Test the accuracy of the model.\n if test_dataset is not None:\n print(\" | Start test job.\")\n result = infer(_config)\n with open(\"validation_res_after_pre_training.bin\", \"wb\") as f:\n pickle.dump(result, f, 1)\n\n if fine_tune_dataset is not None:\n print(\" | Start fine-tuning job.\")\n\n model.train(config.epochs, fine_tune_dataset,\n callbacks=callbacks, dataset_sink_mode=config.dataset_sink_mode,\n sink_size=config.dataset_sink_step)\n\n # Test the accuracy of the model.\n if test_dataset is not None:\n print(\" | Start test job.\")\n result = infer(_config)\n with open(\"validation_res_after_pre_training.bin\", \"wb\") 
as f:\n pickle.dump(result, f, 1)\n\n\ndef _build_training_pipeline(config: TransformerConfig,\n pre_training_dataset=None,\n fine_tune_dataset=None,\n test_dataset=None,\n platform=\"Ascend\"):\n \"\"\"\n Build training pipeline.\n\n Args:\n config (TransformerConfig): Config of mass model.\n pre_training_dataset (Dataset): Pre-training dataset.\n fine_tune_dataset (Dataset): Fine-tune dataset.\n test_dataset (Dataset): Test dataset.\n \"\"\"\n net_with_loss = TransformerNetworkWithLoss(config, is_training=True)\n net_with_loss.init_parameters_data()\n\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in net_with_loss.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.default_input = weights[weights_name].default_input\n elif isinstance(weights[weights_name], Tensor):\n param.default_input = Tensor(weights[weights_name].asnumpy(), config.dtype)\n elif isinstance(weights[weights_name], np.ndarray):\n param.default_input = Tensor(weights[weights_name], config.dtype)\n else:\n param.default_input = weights[weights_name]\n else:\n for param in net_with_loss.trainable_params():\n name = param.name\n value = param.default_input\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.default_input = one_weight(value.asnumpy().shape)\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n param.default_input = zero_weight(value.asnumpy().shape)\n else:\n param.default_input = weight_variable(value.asnumpy().shape)\n\n dataset = pre_training_dataset if pre_training_dataset is not None \\\n else fine_tune_dataset\n\n if dataset is None:\n raise ValueError(\"pre-training dataset or fine-tuning dataset must be provided one.\")\n\n update_steps = dataset.get_repeat_count() * dataset.get_dataset_size()\n if config.lr_scheduler == \"isr\":\n lr = Tensor(square_root_schedule(lr=config.lr,\n update_num=update_steps,\n decay_start_step=config.decay_start_step,\n warmup_steps=config.warmup_steps,\n min_lr=config.min_lr), dtype=mstype.float32)\n elif config.lr_scheduler == \"poly\":\n lr = Tensor(polynomial_decay_scheduler(lr=config.lr,\n min_lr=config.min_lr,\n decay_steps=config.decay_steps,\n total_update_num=update_steps,\n warmup_steps=config.warmup_steps,\n power=config.poly_lr_scheduler_power), dtype=mstype.float32)\n else:\n lr = config.lr\n\n if config.optimizer.lower() == \"adam\":\n optimizer = Adam(net_with_loss.trainable_params(), lr, beta1=0.9, beta2=0.98)\n elif config.optimizer.lower() == \"lamb\":\n lr = BertLearningRate(decay_steps=12000, learning_rate=config.lr, end_learning_rate=config.min_lr,\n power=10.0, warmup_steps=config.warmup_steps)\n decay_params = list(filter(lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),\n net_with_loss.trainable_params()))\n other_params = list(filter(lambda x: 'layernorm' in x.name.lower() or 'bias' in x.name.lower(),\n net_with_loss.trainable_params()))\n group_params = [{'params': decay_params, 'weight_decay': 0.01},\n {'params': other_params}]\n\n optimizer = Lamb(group_params, lr, eps=1e-6)\n elif config.optimizer.lower() == \"momentum\":\n optimizer = Momentum(net_with_loss.trainable_params(), lr, momentum=0.9)\n else:\n raise ValueError(f\"optimizer only support `adam` and `momentum` now.\")\n\n # loss 
scale.\n if platform == \"Ascend\":\n scale_manager = DynamicLossScaleManager(init_loss_scale=config.init_loss_scale,\n scale_factor=config.loss_scale_factor,\n scale_window=config.scale_window)\n else:\n scale_manager = FixedLossScaleManager(loss_scale=1.0, drop_overflow_update=True)\n net_with_grads = TransformerTrainOneStepWithLossScaleCell(network=net_with_loss, optimizer=optimizer,\n scale_update_cell=scale_manager.get_update_cell())\n net_with_grads.set_train(True)\n model = Model(net_with_grads)\n loss_monitor = LossCallBack(config)\n ckpt_config = CheckpointConfig(save_checkpoint_steps=config.save_ckpt_steps,\n keep_checkpoint_max=config.keep_ckpt_max)\n\n rank_size = os.getenv('RANK_SIZE')\n callbacks = [loss_monitor]\n if rank_size is not None and int(rank_size) > 1 and MultiAscend.get_rank() % 8 == 0:\n ckpt_callback = ModelCheckpoint(\n prefix=config.ckpt_prefix,\n directory=os.path.join(config.ckpt_path, 'ckpt_{}'.format(os.getenv('DEVICE_ID'))),\n config=ckpt_config)\n callbacks.append(ckpt_callback)\n\n if rank_size is None or int(rank_size) == 1:\n ckpt_callback = ModelCheckpoint(\n prefix=config.ckpt_prefix,\n directory=os.path.join(config.ckpt_path, 'ckpt_{}'.format(os.getenv('DEVICE_ID'))),\n config=ckpt_config)\n callbacks.append(ckpt_callback)\n\n print(f\" | ALL SET, PREPARE TO TRAIN.\")\n _train(model=model, config=config,\n pre_training_dataset=pre_training_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset,\n callbacks=callbacks)\n\n\ndef _setup_parallel_env(platform):\n context.reset_auto_parallel_context()\n if platform == \"GPU\":\n MultiAscend.init(\"nccl\")\n else:\n MultiAscend.init()\n context.set_auto_parallel_context(\n parallel_mode=ParallelMode.DATA_PARALLEL,\n device_num=MultiAscend.get_group_size(),\n parameter_broadcast=True,\n mirror_mean=True\n )\n\n\ndef train_parallel(config: TransformerConfig, platform: \"Ascend\"):\n \"\"\"\n Train model with multi ascend chips.\n\n Args:\n config (TransformerConfig): Config for MASS model.\n \"\"\"\n _setup_parallel_env(platform)\n\n print(f\" | Starting training on {os.getenv('RANK_SIZE', None)} devices.\")\n\n pre_train_dataset = load_dataset(\n data_files=config.pre_train_dataset,\n batch_size=config.batch_size, epoch_count=1,\n sink_mode=config.dataset_sink_mode,\n sink_step=config.dataset_sink_step,\n rank_size=MultiAscend.get_group_size(),\n rank_id=MultiAscend.get_rank()\n ) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(\n data_files=config.fine_tune_dataset,\n batch_size=config.batch_size, epoch_count=1,\n sink_mode=config.dataset_sink_mode,\n sink_step=config.dataset_sink_step,\n rank_size=MultiAscend.get_group_size(),\n rank_id=MultiAscend.get_rank()\n ) if config.fine_tune_dataset else None\n test_dataset = load_dataset(\n data_files=config.test_dataset,\n batch_size=config.batch_size, epoch_count=1,\n sink_mode=config.dataset_sink_mode,\n sink_step=config.dataset_sink_step,\n rank_size=MultiAscend.get_group_size(),\n rank_id=MultiAscend.get_rank()\n ) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset,\n platform=platform)\n\n\ndef train_single(config: TransformerConfig, platform: \"Ascend\"):\n \"\"\"\n Train model on single device.\n\n Args:\n config (TransformerConfig): Config for model.\n \"\"\"\n print(\" | Starting training on single device.\")\n pre_train_dataset = 
load_dataset(data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n epoch_count=1,\n sink_mode=config.dataset_sink_mode,\n sink_step=config.dataset_sink_step) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n epoch_count=1,\n sink_mode=config.dataset_sink_mode,\n sink_step=config.dataset_sink_step) if config.fine_tune_dataset else None\n test_dataset = load_dataset(data_files=config.test_dataset,\n batch_size=config.batch_size,\n epoch_count=1,\n sink_mode=config.dataset_sink_mode,\n sink_step=config.dataset_sink_step) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset,\n platform=platform)\n\n\ndef _check_args(config):\n if not os.path.exists(config):\n raise FileNotFoundError(\"`config` is not existed.\")\n if not isinstance(config, str):\n raise ValueError(\"`config` must be type of str.\")\n\n\nif __name__ == '__main__':\n args, _ = parser.parse_known_args()\n\n device_id = os.getenv('DEVICE_ID', None)\n if device_id is None:\n device_id = 0\n device_id = int(device_id)\n context.set_context(\n mode=context.GRAPH_MODE,\n device_target=args.platform,\n reserve_class_name_in_scope=False,\n device_id=device_id)\n\n _rank_size = os.getenv('RANK_SIZE')\n\n _check_args(args.config)\n _config = get_config(args.config)\n\n np.random.seed(_config.random_seed)\n context.set_context(save_graphs=_config.save_graphs)\n\n if _rank_size is not None and int(_rank_size) > 1:\n train_parallel(_config, args.platform)\n else:\n train_single(_config, args.platform)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test implicit conversion \"\"\"\nimport numpy as np\nimport pytest\n\nfrom mindspore import Tensor, nn\nfrom mindspore.ops import composite as C\n\n\ndef test_float_tensor_and_int_add():\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = 2\n ret_actual = x + y\n ret_expect = Tensor(np.array([[2.1, 2.2, 2.3], [2.4, 2.5, 2.6]], dtype=np.float32))\n assert ret_actual.dtype == ret_expect.dtype\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef test_bool_tensor_and_float_add():\n x = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))\n y = 3.3\n ret_actual = x + y\n ret_expect = Tensor(np.array([[4.3, 3.3], [3.3, 4.3]], dtype=np.float32))\n assert ret_actual.dtype == ret_expect.dtype\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef test_bool_tensor_and_int_add():\n x = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))\n y = 3\n ret_actual = x + y\n ret_expect = Tensor(np.array([[4, 3], [3, 4]], dtype=np.int32))\n assert ret_actual.dtype == ret_expect.dtype\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef 
test_bool_and_int_tensor_add():\n x = True\n y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))\n ret_actual = x + y\n ret_expect = Tensor(np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32))\n assert ret_actual.dtype == ret_expect.dtype\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef test_float_tensor_and_int_tensor_add():\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))\n ret_actual = x + y\n ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))\n assert ret_actual.dtype == ret_expect.dtype\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef test_float_tensor_and_float_tensor_add():\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float16))\n ret_actual = x + y\n ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))\n assert ret_actual.dtype == ret_expect.dtype\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef test_int_tensor_and_int_tensor_add():\n x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))\n y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))\n ret_actual = x + y\n ret_expect = Tensor(np.array([[2, 4, 6], [8, 10, 12]], dtype=np.int32))\n assert ret_actual.dtype == ret_expect.dtype\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef test_float_tensor_and_bool_tensors_add():\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = Tensor(np.array([[True, True, True], [False, False, False]], dtype=np.bool_))\n ret_actual = x + y\n ret_expect = Tensor(np.array([[1.1, 1.2, 1.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()\n\n\ndef test_float_tensor_and_str_add():\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = \"ok\"\n with pytest.raises(TypeError) as er:\n ret = x + y\n assert \"For 'TensorAdd', the 1th input is a not support implicit conversion type: str\" in str(er.value)\n\n\ndef test_float_tensor_and_tuple_add():\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = (1, 2, 3)\n with pytest.raises(TypeError) as er:\n ret = x + y\n assert \"For 'TensorAdd', the 1th input is a not support implicit conversion type: tuple\" in str(er.value)\n\n\ndef test_float_tensor_and_list_add():\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = [1, 2, 3]\n with pytest.raises(TypeError) as er:\n ret = x + y\n assert \"For 'TensorAdd', the 1th input is a not support implicit conversion type: list\" in str(er.value)\n\n\ndef test_float_tensor_and_bool_tensors_add_grad():\n class Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n\n def construct(self, x, y):\n return x + y\n\n class GradNet(nn.Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n\n def construct(self, x, y, sens):\n return C.grad_all_with_sens(self.net)(x, y, sens)\n\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = Tensor(np.array([[True, True, True], [False, False, False]], dtype=np.bool_))\n sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))\n net = Net()\n grad_net = GradNet(net)\n ret = grad_net(x, y, sens)\n assert ret[0].dtype == x.dtype\n assert ret[1].dtype == y.dtype\n assert (ret[0].asnumpy() 
== sens.asnumpy()).all()\n assert (ret[1].asnumpy() == sens.asnumpy().astype(np.bool_)).all()\n\n\ndef test_float_tensor_and_int_tensors_sub_grad():\n class Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n\n def construct(self, x, y):\n return x - y\n\n class GradNet(nn.Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n\n def construct(self, x, y, sens):\n return C.grad_all_with_sens(self.net)(x, y, sens)\n\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))\n sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))\n net = Net()\n grad_net = GradNet(net)\n ret = grad_net(x, y, sens)\n print(ret)\n assert ret[0].dtype == x.dtype\n assert ret[1].dtype == y.dtype\n assert (ret[0].asnumpy() == sens.asnumpy()).all()\n assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()\n\n\ndef test_float16_tensor_and_float32_tensors_sub_grad():\n class Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n\n def construct(self, x, y):\n return x - y\n\n class GradNet(nn.Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n\n def construct(self, x, y, sens):\n return C.grad_all_with_sens(self.net)(x, y, sens)\n\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.int32))\n y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32))\n sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))\n net = Net()\n grad_net = GradNet(net)\n ret = grad_net(x, y, sens)\n print(ret)\n assert ret[0].dtype == x.dtype\n assert ret[1].dtype == y.dtype\n assert (ret[0].asnumpy() == sens.asnumpy()).all()\n assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()\n\n\ndef test_float_tensor_and_int_add_grad():\n class Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n\n def construct(self, x):\n return x + 2\n\n class GradNet(nn.Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n\n def construct(self, x, sens):\n return C.grad_all_with_sens(self.net)(x, sens)\n\n x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))\n sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))\n net = Net()\n grad_net = GradNet(net)\n ret = grad_net(x, sens)\n assert ret[0].dtype == x.dtype\n assert (ret[0].asnumpy() == sens.asnumpy()).all()\n" ]
[ [ "numpy.random.seed" ], [ "numpy.random.uniform" ], [ "numpy.array" ], [ "numpy.load", "numpy.random.seed" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alessandrocuda/Iiwa_vision_reaching_object
[ "63c42109f8aaff56734ba2071121e97cc45d0b84" ]
[ "src/kuka_iiwa/kuka_iiwa_utilities/scripts/iiwa_joint_ee_data_collection.py" ]
[ "#!/usr/bin/python3\nimport rospy\nimport rospkg\nimport random\nimport numpy as np\nimport pickle\n\nfrom itertools import product\n\nfrom gazebo_msgs.srv import GetLinkState\nfrom gazebo_msgs.msg import LinkState\n\nfrom std_msgs.msg import Float64MultiArray\nfrom sensor_msgs.msg import JointState\n\ndef save_object(obj):\n try:\n with open(\"data2.pickle\", \"wb\") as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)\n except Exception as ex:\n print(\"Error during pickling object (Possibly unsupported):\", ex)\n \n\ndef product_dict(**kwargs):\n keys = kwargs.keys()\n vals = kwargs.values()\n for instance in product(*vals):\n yield dict(zip(keys, instance))\n\ngrid = {\n \"joint1\": np.linspace(-1, 1, 10),\n \"joint2\": np.linspace(0.1, 1.2, 9),\n #\"joint3\": np.linspace(-1.5707, 1.5707, 4),\n \"joint4\": np.linspace(-2.6, 0, 8),\n #\"joint5\": np.linspace(-1.5707, 1.5707, 4),\n \"joint6\": np.linspace(0, 1.5707, 7),\n #\"joint7\": np.linspace(-1.5707, 1.5707, 4)\n}\n\ndataset_nn = {\"joints_status\": [],\n \"ee_position\": []}\n\njoints_grid = list(product_dict(**grid)) \ncurrent_joints_i = 0\ncurrent_joints = [0,0,0,0,0,0,0,0]\ntotal_joints_combination = len(joints_grid)\npub = rospy.Publisher('/iiwa/pos_effort_controller/command', Float64MultiArray, queue_size=10)\n\nstop_collecting_data = False\n\ndef myhook():\n print(\"shutdown time!\")\n\ndef data_collection_callback(data):\n #print(data.velocity)\n global current_joints_i\n global current_joints\n global stop_collecting_data\n \n command = Float64MultiArray()\n joints = joints_grid[current_joints_i]\n current_joints = [joints[\"joint1\"], joints[\"joint2\"], 0, joints[\"joint4\"], 0, joints[\"joint6\"], 1.57] \n command.data = current_joints\n #print(command.data)\n #rospy.loginfo(command)\n pub.publish(command)\n if all(np.array(data.velocity[2:]) < 8e-3) and all(np.abs(np.cos(data.position[2:])-np.cos(current_joints)) < 5e-2) and all(np.abs(np.sin(data.position[2:])-np.sin(current_joints)) < 5e-2):\n print(\"Combination {}/{}\".format(current_joints_i+1, total_joints_combination))\n if current_joints_i < total_joints_combination-1:\n print(\"cambio\")\n current_joints_i += 1\n else:\n stop_collecting_data = True\n\n state_msg_l = LinkState()\n state_msg_r = LinkState()\n\n state_msg_l.link_name, state_msg_r.link_name = \"iiwa_gripper::left_cube\", \"iiwa_gripper::right_cube\"\n state_msg_l.reference_frame, state_msg_r.reference_frame = '', ''\n\n set_state_l = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)\n state_msg_l = set_state_l(state_msg_l.link_name, state_msg_l.reference_frame)\n\n set_state_r = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)\n state_msg_r = set_state_r(state_msg_r.link_name, state_msg_r.reference_frame)\n\n x_ee = (state_msg_r.link_state.pose.position.x + state_msg_l.link_state.pose.position.x)/2\n y_ee = (state_msg_r.link_state.pose.position.y + state_msg_l.link_state.pose.position.y)/2\n z_ee = (state_msg_r.link_state.pose.position.z + state_msg_l.link_state.pose.position.z)/2\n print(current_joints)\n print([x_ee, y_ee, z_ee])\n dataset_nn[\"joints_status\"].append(data.position[2:])\n dataset_nn[\"ee_position\"].append([x_ee, y_ee, z_ee])\n print(\"saved\")\n \n if stop_collecting_data:\n print(\"spengo\")\n save_object(dataset_nn)\n rospy.signal_shutdown(\"fine raccolta dati\")\n\n\n\ndef data_collection():\n global current_joints\n rospy.init_node('data_collection', anonymous=True)\n rospy.Subscriber('/iiwa/joint_states', JointState, data_collection_callback)\n 
# Initial movement.\n command = Float64MultiArray()\n command.data = current_joints\n pub.publish(command)\n rospy.on_shutdown(myhook)\n rospy.spin()\n\n\nif __name__ == '__main__':\n try:\n data_collection()\n except rospy.ROSInterruptException:\n pass" ]
[ [ "numpy.array", "numpy.cos", "numpy.linspace", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TaskeHAMANO/SPHERE
[ "22a6b7f18fa1a9cb2d50d9ce210b2d9a21f7541c" ]
[ "sphere/sphere_symtest.py" ]
[ "#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# Author: Shinya Suzuki\n# Created: 2018-04-26\n\nfrom collections import namedtuple\nfrom sphere.sphere_utils import load_depth_file\nfrom sphere.sphere_cstats import (mean_resultant_length,\n mean_direction,\n sin_moment,\n cos_moment)\nfrom scipy.stats import norm\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport os\n\n\ndef argument_parse(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"output_dest\",\n type=str,\n help=\"destination of output tsv file\")\n parser.add_argument(\"depth_file_path\",\n nargs=\"+\",\n type=str,\n help=\"file(s) path of coverage depth\")\n args = parser.parse_args(argv)\n return vars(args)\n\n\ndef perwey_test(theta, depth):\n \"\"\"\n Computes the Perwey test on sample theta\n\n The Perwey test is a nonparametric test of the null hypothesis\n\n References\n ----------\n .. [1] Pewsey, A. \"Testing Circular Symmetry\". The Canadian Journal of\n Statistics. Vol. 30(2002): 591-600\n \"\"\"\n PerweyResult = namedtuple(\"PerweyResult\", ('statistic', 'pvalue'))\n n_depth = np.sum(depth)\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n C = np.sum(depth * cos_theta)\n S = np.sum(depth * sin_theta)\n\n mrl = mean_resultant_length(C, S, np.sum(depth))\n md = mean_direction(S, C)\n b2 = sin_moment(depth, theta, p=2, loc=md)\n a2 = cos_moment(depth, theta, p=2, loc=md)\n a3 = cos_moment(depth, theta, p=3, loc=md)\n a4 = cos_moment(depth, theta, p=4, loc=md)\n var_b2 = ((1.0-a4)/2.0-2.0*a2+2.0*a2/mrl*(a3+(a2*(1.0-a2))/mrl))/n_depth\n\n z = b2 / np.sqrt(var_b2)\n p = 1 - norm.cdf(abs(z))\n p = 2 * np.min([p, 1-p])\n\n return PerweyResult(z, p)\n\n\ndef main(args):\n result = []\n for f in args[\"depth_file_path\"]:\n file_name = os.path.basename(f)\n df = load_depth_file(f)\n length = len(df)\n x = np.arange(1, length+1, 1)\n theta = x / float(length) * 2.0 * np.pi\n depth = df[\"depth\"].values\n z, p = perwey_test(theta, depth)\n\n tmp = {\n \"filepath\": f,\n \"filename\": file_name,\n \"z\": z,\n \"p\": p\n }\n result.append(tmp)\n result_df = pd.DataFrame(result)\n result_df = result_df.set_index(\"filename\")\n result_df = result_df[[\n \"z\",\n \"p\",\n \"filepath\"\n ]]\n result_df.to_csv(args[\"output_dest\"], sep=\"\\t\")\n\n\ndef main_wrapper():\n args = argument_parse()\n main(args)\n\n\nif __name__ == '__main__':\n main_wrapper()\n" ]
[ [ "numpy.sqrt", "numpy.min", "numpy.arange", "numpy.cos", "pandas.DataFrame", "numpy.sin", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Lpallett4/coach
[ "801cf573cc4033940dce64651ffcddf391e6ded9" ]
[ "rl_coach/architectures/tensorflow_components/heads/rainbow_q_head.py" ]
[ "#\n# Copyright (c) 2017 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport tensorflow as tf\nimport numpy as np\nfrom rl_coach.architectures.tensorflow_components.heads import QHead\nfrom rl_coach.architectures.tensorflow_components.layers import Dense\nfrom rl_coach.base_parameters import AgentParameters\nfrom rl_coach.spaces import SpacesDefinition\n\n\nclass RainbowQHead(QHead):\n def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,\n head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='relu',\n dense_layer=Dense):\n super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function,\n dense_layer=dense_layer)\n self.num_actions = len(self.spaces.action.actions)\n self.num_atoms = agent_parameters.algorithm.atoms\n self.name = 'rainbow_q_values_head'\n self.z_values = tf.cast(tf.constant(np.linspace(self.ap.algorithm.v_min, self.ap.algorithm.v_max,\n self.ap.algorithm.atoms), dtype=tf.float32), dtype=tf.float64)\n self.loss_type = []\n\n def _build_module(self, input_layer):\n # state value tower - V\n with tf.variable_scope(\"state_value\"):\n state_value = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')\n state_value = self.dense_layer(self.num_atoms)(state_value, name='fc2')\n state_value = tf.expand_dims(state_value, axis=1)\n\n # action advantage tower - A\n with tf.variable_scope(\"action_advantage\"):\n action_advantage = self.dense_layer(512)(input_layer, activation=self.activation_function, name='fc1')\n action_advantage = self.dense_layer(self.num_actions * self.num_atoms)(action_advantage, name='fc2')\n action_advantage = tf.reshape(action_advantage, (tf.shape(input_layer)[0], self.num_actions,\n self.num_atoms))\n action_mean = tf.reduce_mean(action_advantage, axis=1, keepdims=True)\n action_advantage = action_advantage - action_mean\n\n # merge to state-action value function Q\n values_distribution = tf.add(state_value, action_advantage, name='output')\n\n # softmax on atoms dimension\n self.output = tf.nn.softmax(values_distribution)\n\n # calculate cross entropy loss\n self.distributions = tf.placeholder(tf.float32, shape=(None, self.num_actions, self.num_atoms),\n name=\"distributions\")\n self.target = self.distributions\n self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.target, logits=values_distribution)\n tf.losses.add_loss(self.loss)\n\n self.q_values = tf.tensordot(tf.cast(self.output, tf.float64), self.z_values, 1)\n\n # used in batch-rl to estimate a probablity distribution over actions\n self.softmax = self.add_softmax_with_temperature()\n\n def __str__(self):\n result = [\n \"State Value Stream - V\",\n \"\\tDense (num outputs = 512)\",\n \"\\tDense (num outputs = {})\".format(self.num_atoms),\n \"Action Advantage Stream - A\",\n \"\\tDense (num outputs = 512)\",\n \"\\tDense (num outputs = {})\".format(self.num_actions * self.num_atoms),\n \"\\tReshape 
(new size = {} x {})\".format(self.num_actions, self.num_atoms),\n \"\\tSubtract(A, Mean(A))\".format(self.num_actions),\n \"Add (V, A)\",\n \"Softmax\"\n ]\n return '\\n'.join(result)\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.softmax", "numpy.linspace", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.expand_dims", "tensorflow.add", "tensorflow.variable_scope", "tensorflow.losses.add_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Swissler5/astr-119
[ "2af96862bb6560cbfab74178936e13c794a91eda" ]
[ "astr-119-hw-2/data_types.py" ]
[ "#Shea Wissler\nimport numpy as np\n\n#integers\ni = 10\t\t#integer\nprint(type(i)) #print the data type of i\n\na_i = np.zeros(i,dtype=int) #declare an array of integers\nprint(type(a_i)) \t\t#returns ndarray\nprint(type(a_i[0])) \t#returns int64\n\n#floats\n\nx = 119.0 \t\t#floating point number\nprint(type(x)) \t#print the data type of x\n\ny = 1.19e2 \t#scientific notation of float 119\nprint(type(y))\n\nz = np.zeros(i,dtype=float) #declare an array of floats\nprint(type(z))\t\t\t\t##returns ndarray\nprint(type(z[0])) \t\t\t#returns int64" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CogComNeuroSci/parallel_computing
[ "cd995f783169f4df4753cb35bac49bb805903d83" ]
[ "HPC/parameter_sweep/generate_parameters.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Pieter Huycke\nemail: [email protected]\nGitHub: phuycke\n\"\"\"\n\n#%%\n\nfrom itertools import product\n\nimport csv\nimport numpy as np\nimport os\n\n#%%\n\n# some parameter values\np1 = np.arange(.2, .7, .1).round(2)\np2 = [0, 1]\np3 = np.arange(1, 31)\n\n# all possible combinations between parameters + check of length\ncombinations = list(product(*[p1 ,p2, p3]))\nassert len(combinations) == (len(p1) * len(p2) * len(p3))\n\n# set your working directory\nos.chdir(r\"/user/gent/435/vsc43506/parameter_sweep\")\n\n# title for your csv file\nheader = [\"learning_rate\", \"syncing\", \"sub_id\"]\n\n# actual writing to file\nwith open('parameters.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow(header)\n [writer.writerow(list(c)) for c in combinations]\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PhotoLabDevelopment/tensorflow
[ "735642ee1cd8d7f21ddd94f851ee753c17c23019" ]
[ "tensorflow/python/autograph/core/converter.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converter construction support.\n\nThis module contains a base class for all converters, as well as supporting\nstructures. These structures are referred to as contexts.\n\nThe class hierarchy is as follows:\n\n <your converter>\n [extends] converter.Base\n [extends] transformer.Base\n [extends] gast.nodeTransformer\n [uses] transfomer.SourceInfo\n [uses] converter.EntityContext\n [uses] converter.ProgramContext\n [uses] transfomer.SourceInfo\n\nconverter.Base is a specialization of transformer.Base for AutoGraph. It's a\nvery lightweight subclass that adds a `ctx` attribute holding the corresponding\nEntityContext object (see below). Note that converters are not reusable, and\n`visit` will raise an error if called more than once.\n\nconverter.EntityContext contains mutable state associated with an entity that\nthe converter processes.\n\nconverter.ProgramContext contains mutable state across related entities. For\nexample, when converting several functions that call one another, the\nProgramContext should be shared across these entities.\n\nBelow is the overall flow at conversion:\n\n program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)\n while <program_ctx has more entities to convert>:\n entity, source_info = <get next entity from program_ctx>\n entity_ctx = EntityContext(program_ctx, source_info)\n for <each ConverterClass>:\n converter = ConverterClass(entity_ctx)\n\n # May update entity_ctx and program_ctx\n entity = converter.visit(entity)\n\n <add entity's dependencies to program_ctx>\n\nNote that pyct contains a small number of transformers used for static analysis.\nThese implement transformer.Base, rather than converter.Base, to avoid a\ndependency on AutoGraph.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport enum\n\nfrom tensorflow.python.autograph.pyct import anno\nfrom tensorflow.python.autograph.pyct import ast_util\nfrom tensorflow.python.autograph.pyct import cfg\nfrom tensorflow.python.autograph.pyct import compiler\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import qual_names\nfrom tensorflow.python.autograph.pyct import templates\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.autograph.pyct.static_analysis import activity\nfrom tensorflow.python.autograph.pyct.static_analysis import liveness\nfrom tensorflow.python.autograph.pyct.static_analysis import reaching_definitions\nfrom tensorflow.python.util.tf_export import tf_export\n\n# TODO(mdan): These contexts can be refactored into first class objects.\n# For example, we could define Program and Entity abstractions that hold on\n# to the actual entity and have conversion methods.\n\n# TODO(mdan): Add a test specific 
to this converter.\n\n\n@tf_export('autograph.experimental.Feature')\nclass Feature(enum.Enum):\n \"\"\"This enumeration represents optional conversion options.\n\n These conversion options are experimental. They are subject to change without\n notice and offer no guarantees.\n\n _Example Usage_\n\n ```python\n optionals= tf.autograph.experimental.Feature.EQUALITY_OPERATORS\n @tf.function(experimental_autograph_options=optionals)\n def f(i):\n if i == 0: # EQUALITY_OPERATORS allows the use of == here.\n tf.print('i is zero')\n ```\n\n Attributes:\n ALL: Enable all features.\n AUTO_CONTROL_DEPS: Insert of control dependencies in the generated code.\n ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.\n BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to\n their TF counterparts.\n EQUALITY_OPERATORS: Whether to convert the comparison operators, like\n equality. This is soon to be deprecated as support is being added to the\n Tensor class.\n LISTS: Convert list idioms, like initializers, slices, append, etc.\n NAME_SCOPES: Insert name scopes that name ops according to context, like the\n function they were defined in.\n \"\"\"\n\n ALL = 'ALL'\n\n AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'\n ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'\n BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'\n EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'\n LISTS = 'LISTS'\n NAME_SCOPES = 'NAME_SCOPES'\n\n @classmethod\n def all(cls):\n \"\"\"Returns a tuple that enables all options.\"\"\"\n return tuple(cls.__members__.values())\n\n @classmethod\n def all_but(cls, exclude):\n \"\"\"Returns a tuple that enables all but the excluded options.\"\"\"\n if not isinstance(exclude, (list, tuple, set)):\n exclude = (exclude,)\n return tuple(set(cls.all()) - set(exclude) - {cls.ALL})\n\n\nSTANDARD_OPTIONS = None # Forward definition.\n\n\nclass ConversionOptions(object):\n \"\"\"Immutable container for global conversion flags.\n\n Attributes:\n recursive: bool, whether to recursively convert any user functions or\n classes that the converted function may use.\n user_requested: bool, whether the conversion was explicitly requested by\n the user, as opposed to being performed as a result of other logic. This\n value always auto-resets resets to False in child conversions.\n optional_features: Union[Feature, Set[Feature]], controls the use of\n optional features in the conversion process. 
See Feature for available\n options.\n \"\"\"\n\n def __init__(self,\n recursive=False,\n user_requested=False,\n internal_convert_user_code=True,\n optional_features=Feature.ALL):\n self.recursive = recursive\n self.user_requested = user_requested\n # TODO(mdan): Rename to conversion_recursion_depth?\n self.internal_convert_user_code = internal_convert_user_code\n\n if optional_features is None:\n optional_features = ()\n elif isinstance(optional_features, Feature):\n optional_features = (optional_features,)\n optional_features = frozenset(optional_features)\n self.optional_features = optional_features\n\n def as_tuple(self):\n return (self.recursive, self.user_requested,\n self.internal_convert_user_code, self.optional_features)\n\n def __hash__(self):\n return hash(self.as_tuple())\n\n def __eq__(self, other):\n assert isinstance(other, ConversionOptions)\n return self.as_tuple() == other.as_tuple()\n\n def __str__(self):\n return 'ConversionOptions[{}]'\n\n def uses(self, feature):\n return (Feature.ALL in self.optional_features or\n feature in self.optional_features)\n\n def call_options(self):\n \"\"\"Returns the corresponding options to be used for recursive conversion.\"\"\"\n return ConversionOptions(\n recursive=self.recursive,\n user_requested=False,\n internal_convert_user_code=self.recursive,\n optional_features=self.optional_features)\n\n def to_ast(self):\n \"\"\"Returns a representation of this object as an AST node.\n\n The AST node encodes a constructor that would create an object with the\n same contents.\n\n Returns:\n ast.Node\n \"\"\"\n if self == STANDARD_OPTIONS:\n return parser.parse_expression('ag__.STD')\n\n template = \"\"\"\n ag__.ConversionOptions(\n recursive=recursive_val,\n user_requested=user_requested_val,\n optional_features=optional_features_val,\n internal_convert_user_code=internal_convert_user_code_val)\n \"\"\"\n\n def list_of_features(values):\n return parser.parse_expression('({})'.format(', '.join(\n 'ag__.{}'.format(str(v)) for v in values)))\n\n expr_ast = templates.replace(\n template,\n recursive_val=parser.parse_expression(str(self.recursive)),\n user_requested_val=parser.parse_expression(str(self.user_requested)),\n internal_convert_user_code_val=parser.parse_expression(\n str(self.internal_convert_user_code)),\n optional_features_val=list_of_features(self.optional_features))\n return expr_ast[0].value\n\n\nSTANDARD_OPTIONS = ConversionOptions(\n recursive=True,\n user_requested=False,\n internal_convert_user_code=True,\n optional_features=None)\n\n\nclass ProgramContext(\n collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):\n \"\"\"ProgramContext keeps track of converting function hierarchies.\n\n This object is mutable, and is updated during conversion. Not thread safe.\n\n Attributes:\n options: ConversionOptions\n autograph_module: Module, a reference to the autograph module. This needs to\n be specified by the caller to avoid circular dependencies.\n \"\"\"\n pass\n\n\nclass EntityContext(transformer.Context):\n \"\"\"Tracks the conversion of a single entity.\n\n This object is mutable, and is updated during conversion. 
Not thread safe.\n\n Attributes:\n namer: Namer\n info: transformer.EntityInfo\n program: ProgramContext\n \"\"\"\n\n def __init__(self, namer, entity_info, program_ctx):\n super(EntityContext, self).__init__(entity_info)\n self.namer = namer\n self.program = program_ctx\n\n\nclass Base(transformer.Base):\n \"\"\"All converters should inherit from this class.\n\n Attributes:\n ctx: EntityContext\n \"\"\"\n\n def __init__(self, ctx):\n super(Base, self).__init__(ctx)\n\n self._used = False\n self._ast_depth = 0\n\n def get_definition_directive(self, node, directive, arg, default):\n \"\"\"Returns the unique directive argument for a symbol.\n\n See lang/directives.py for details on directives.\n\n Example:\n # Given a directive in the code:\n ag.foo_directive(bar, baz=1)\n\n # One can write for an AST node Name(id='bar'):\n get_definition_directive(node, ag.foo_directive, 'baz')\n\n Args:\n node: ast.AST, the node representing the symbol for which the directive\n argument is needed.\n directive: Callable[..., Any], the directive to search.\n arg: str, the directive argument to return.\n default: Any\n\n Raises:\n ValueError: if conflicting annotations have been found\n \"\"\"\n defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())\n if not defs:\n return default\n\n arg_values_found = []\n for def_ in defs:\n if (directive in def_.directives and arg in def_.directives[directive]):\n arg_values_found.append(def_.directives[directive][arg])\n\n if not arg_values_found:\n return default\n\n if len(arg_values_found) == 1:\n return arg_values_found[0]\n\n # If multiple annotations reach the symbol, they must all match. If they do,\n # return any of them.\n first_value = arg_values_found[0]\n for other_value in arg_values_found[1:]:\n if not ast_util.matches(first_value, other_value):\n qn = anno.getanno(node, anno.Basic.QN)\n raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %\n (qn, directive.__name__, arg,\n compiler.ast_to_source(other_value).strip(),\n compiler.ast_to_source(first_value).strip()))\n return first_value\n\n def visit(self, node):\n if not self._ast_depth:\n if self._used:\n raise ValueError('converter objects cannot be reused')\n self._used = True\n\n self._ast_depth += 1\n try:\n return super(Base, self).visit(node)\n finally:\n self._ast_depth -= 1\n\n\nclass AnnotatedDef(reaching_definitions.Definition):\n\n def __init__(self):\n super(AnnotatedDef, self).__init__()\n self.directives = {}\n\n\nclass AgAnno(enum.Enum):\n \"\"\"Annotation labels specific to AutoGraph. 
See anno.py.\"\"\"\n\n DIRECTIVES = 'User directives associated with the annotated statement.'\n\n def __repr__(self):\n return self.name\n\n\ndef standard_analysis(node, context, is_initial=False):\n \"\"\"Performs a complete static analysis of the given code.\n\n Args:\n node: ast.AST\n context: converter.EntityContext\n is_initial: bool, whether this is the initial analysis done on the input\n source code\n\n Returns:\n ast.AST, same as node, with the static analysis annotations added\n \"\"\"\n # TODO(mdan): Clear static analysis here.\n # TODO(mdan): Consider not running all analyses every time.\n # TODO(mdan): Don't return a node because it's modified by reference.\n graphs = cfg.build(node)\n node = qual_names.resolve(node)\n node = activity.resolve(node, context, None)\n node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)\n node = liveness.resolve(node, context, graphs)\n if is_initial:\n anno.dup(\n node,\n {\n anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,\n },\n )\n return node\n\n\ndef apply_(node, context, converter_module):\n \"\"\"Applies a converter to an AST.\n\n Args:\n node: ast.AST\n context: converter.EntityContext\n converter_module: converter.Base\n\n Returns:\n ast.AST, the result of applying converter to node\n \"\"\"\n node = standard_analysis(node, context)\n node = converter_module.transform(node, context)\n return node\n" ]
[ [ "tensorflow.python.autograph.pyct.static_analysis.reaching_definitions.resolve", "tensorflow.python.autograph.pyct.qual_names.resolve", "tensorflow.python.autograph.pyct.static_analysis.liveness.resolve", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.autograph.pyct.ast_util.matches", "tensorflow.python.autograph.pyct.parser.parse_expression", "tensorflow.python.autograph.pyct.static_analysis.activity.resolve", "tensorflow.python.autograph.pyct.anno.dup", "tensorflow.python.autograph.pyct.compiler.ast_to_source", "tensorflow.python.autograph.pyct.anno.getanno", "tensorflow.python.autograph.pyct.cfg.build" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.12" ] } ]
soxofaan/openeo-processes
[ "12565abd810c4cac84c9fccaf00d0fd873b88af0" ]
[ "examples/phenology-uc2/udf/smooth_savitzky_golay.py" ]
[ "# -*- coding: utf-8 -*-\n# Uncomment the import only for coding support\n#from openeo_udf.api.base import SpatialExtent, RasterCollectionTile, FeatureCollectionTile, UdfData\n\n__license__ = \"Apache License, Version 2.0\"\n\n\ndef rct_savitzky_golay(udf_data):\n from scipy.signal import savgol_filter\n import pandas as pd\n # Iterate over each tile\n for tile in udf_data.raster_collection_tiles:\n timeseries_array = tile.data\n #TODO: savitzky golay implementation assumes regularly spaced samples!\n\n #first we ensure that there are no nodata values in our input, as this will cause everything to become nodata.\n array_2d = timeseries_array.reshape((timeseries_array.shape[0], timeseries_array.shape[1] * timeseries_array.shape[2]))\n\n df = pd.DataFrame(array_2d)\n #df.fillna(method='ffill', axis=0, inplace=True)\n df.interpolate(inplace=True,axis=0)\n filled=df.as_matrix().reshape(timeseries_array.shape)\n\n #now apply savitzky golay on filled data\n smoothed_array = savgol_filter(filled, 5, 2,axis=0)\n #print(smoothed_array)\n tile.set_data(smoothed_array)\n\n\n# This function call is the entry point for the UDF.\n# The caller will provide all required data in the **data** object.\nrct_savitzky_golay(data)\n" ]
[ [ "scipy.signal.savgol_filter", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
YiqingZhouKelly/pyqmc
[ "ec14c7b3cceb01d287cb09ebc5d2af37014c1c8e", "ec14c7b3cceb01d287cb09ebc5d2af37014c1c8e" ]
[ "pyqmc/dasktools.py", "pyqmc/slaterpbc.py" ]
[ "import os\nimport pyqmc\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport pyqmc\nimport pyqmc.optimize_orthogonal\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\"\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n\ndef distvmc(\n wf,\n coords,\n accumulators=None,\n nsteps=100,\n hdf_file=None,\n npartitions=None,\n nsteps_per=None,\n client=None,\n verbose=False,\n **kwargs\n):\n \"\"\" \n Args: \n wf: a wave function object\n\n coords: nconf x nelec x 3 \n\n nsteps: how many steps to move each walker\n\n\n \"\"\"\n if nsteps_per is None:\n nsteps_per = nsteps\n\n if hdf_file is not None:\n with h5py.File(hdf_file, \"a\") as hdf:\n if \"configs\" in hdf.keys():\n coords.configs = np.array(hdf[\"configs\"])\n if verbose:\n print(\"Restarted calculation\")\n\n if accumulators is None:\n accumulators = {}\n if npartitions is None:\n npartitions = sum([x for x in client.nthreads().values()])\n allruns = []\n niterations = int(nsteps / nsteps_per)\n coord = coords.split(npartitions)\n alldata = []\n for epoch in range(niterations):\n wfs = []\n thiscoord = []\n for i in range(npartitions):\n wfs.append(wf)\n thiscoord.append(coord[i])\n runs = client.map(\n pyqmc.vmc,\n wfs,\n thiscoord,\n **{\"nsteps\": nsteps_per, \"accumulators\": accumulators, \"stepoffset\": epoch*nsteps_per},\n **kwargs\n )\n iterdata = []\n for i, r in enumerate(runs):\n res = r.result()\n iterdata.extend(res[0])\n coord[i] = res[1]\n\n collected_data = (\n pd.DataFrame(iterdata)\n .groupby(\"step\", as_index=False)\n .apply(lambda x: x.stack().groupby(level=1).apply(np.mean, axis=0)) #Added for array returns, e.g. obdm, tbdm\n .to_dict(\"records\")\n )\n if verbose:\n print(\"epoch\", epoch, \"finished\", flush=True)\n\n coords.join(coord)\n alldata.extend(collected_data)\n for d in collected_data:\n pyqmc.mc.vmc_file(hdf_file, d, kwargs, coords)\n\n return alldata, coords\n\n\ndef dist_lm_sampler(\n wf, configs, params, pgrad_acc, npartitions=None, client=None, lm_sampler=None\n):\n \"\"\"\n Evaluates accumulator on the same set of configs for correlated sampling of different wave function parameters. 
Parallelized with parsl.\n\n Args:\n wf: wave function object\n configs: (nconf, nelec, 3) array\n params: (nsteps, nparams) array \n list of arrays of parameters (serialized) at each step\n pgrad_acc: PGradAccumulator \n\n kwargs:\n npartitions: number of tasks for parallelization\n divides configs array into npartitions chunks\n\n Returns:\n data: list of dicts, one dict for each sample\n each dict contains arrays returned from pgrad_acc, weighted by psi**2/psi0**2 \n \"\"\"\n if lm_sampler is None:\n from pyqmc.linemin import lm_sampler\n\n if npartitions is None:\n npartitions = sum([x for x in client.nthreads().values()])\n\n configspart = configs.split(npartitions)\n allruns = []\n for p in range(npartitions):\n allruns.append(client.submit(lm_sampler, wf, configspart[p], params, pgrad_acc))\n\n stepresults = []\n for r in allruns:\n stepresults.append(r.result())\n\n keys = stepresults[0][0].keys()\n # This will be a list of dictionaries\n final_results = []\n for p in range(len(params)):\n df = {}\n for k in keys:\n # print(k,flush=True)\n # print(stepresults[0][p][k])\n df[k] = np.concatenate([x[p][k] for x in stepresults], axis=0)\n final_results.append(df)\n\n return final_results\n\n\ndef line_minimization(*args, client, **kwargs):\n import pyqmc\n\n if \"vmcoptions\" not in kwargs:\n kwargs[\"vmcoptions\"] = {}\n if \"lmoptions\" not in kwargs:\n kwargs[\"lmoptions\"] = {}\n kwargs[\"vmcoptions\"][\"client\"] = client\n kwargs[\"lmoptions\"][\"client\"] = client\n return pyqmc.line_minimization(*args, vmc=distvmc, lm=dist_lm_sampler, **kwargs)\n\n\n\ndef cvmc_optimize(*args, client, **kwargs):\n import pyqmc\n from pyqmc.cvmc import lm_cvmc\n\n if \"vmcoptions\" not in kwargs:\n kwargs[\"vmcoptions\"] = {}\n if \"lmoptions\" not in kwargs:\n kwargs[\"lmoptions\"] = {}\n kwargs[\"vmcoptions\"][\"client\"] = client\n kwargs[\"lmoptions\"][\"client\"] = client\n kwargs[\"lmoptions\"][\"lm_sampler\"] = lm_cvmc\n return pyqmc.cvmc_optimize(*args, vmc=distvmc, lm=dist_lm_sampler, **kwargs)\n\n\ndef distdmc_propagate(wf, configs, weights, *args, client, npartitions=None, **kwargs):\n import pyqmc.dmc\n\n if npartitions is None:\n npartitions = sum([x for x in client.nthreads().values()])\n\n coord = configs.split(npartitions)\n weight = np.split(weights, npartitions)\n allruns = []\n for nodeconfigs, nodeweight in zip(coord, weight):\n allruns.append(\n client.submit(\n pyqmc.dmc.dmc_propagate, wf, nodeconfigs, nodeweight, *args, **kwargs\n )\n )\n\n import pandas as pd\n\n allresults = [r.result() for r in allruns]\n configs.join([x[1] for x in allresults])\n coordret = configs\n weightret = np.vstack([x[2] for x in allresults])\n df = pd.concat([pd.DataFrame(x[0]) for x in allresults])\n notavg = [\"weight\", \"weightvar\", \"weightmin\", \"weightmax\", \"acceptance\", \"step\"]\n # Here we reweight the averages since each step on each node\n # was done with a different average weight.\n\n for k in df.keys():\n if k not in notavg:\n df[k] = df[k] * df[\"weight\"]\n df = df.groupby(\"step\").aggregate(np.mean, axis=0).reset_index()\n for k in df.keys():\n if k not in notavg:\n df[k] = df[k] / df[\"weight\"]\n print(df)\n return df, coordret, weightret\n\n\n\n\ndef dist_sample_overlap(wfs, configs, *args, client, npartitions=None, **kwargs):\n if npartitions is None:\n npartitions = sum([x for x in client.nthreads().values()])\n\n coord = configs.split(npartitions)\n allruns = []\n \n for nodeconfigs in coord:\n allruns.append(\n client.submit(\n 
pyqmc.optimize_orthogonal.sample_overlap, wfs, nodeconfigs, *args, **kwargs\n )\n )\n\n allresults = [r.result() for r in allruns]\n configs.join([x[1] for x in allresults])\n coordret = configs\n # Here we reweight the averages since each step on each node\n # was done with a different average weight.\n keys = allresults[0][0].keys()\n df = {} \n for k in keys:\n df[k] = np.array([x[0][k] for x in allresults])\n for k in df.keys():\n if k != 'weight' and k!= 'overlap' and k!= 'overlap_gradient':\n if len(df[k].shape) == 2:\n df[k] = np.sum(df[k] * df[\"weight\"][:,:,-1],axis=0)/np.sum(df['weight'][:,:,-1],axis=0)\n elif len(df[k].shape) == 3:\n df[k] = np.sum(df[k] * df[\"weight\"][:,:,-1,np.newaxis],axis=0)/np.sum(df['weight'][:,:,-1, np.newaxis],axis=0)\n elif len(df[k].shape) == 4:\n df[k] = np.sum(df[k] * df[\"weight\"][:,:,-1,np.newaxis,np.newaxis],axis=0)/np.sum(df['weight'][:,:,-1, np.newaxis, np.newaxis],axis=0)\n\n else: \n raise NotImplementedError(\"too many/two few dimension in dist_sample_overlap\")\n elif k!='weight':\n df[k] = np.mean(df[k],axis=0)\n\n df['weight'] = np.mean(df['weight'], axis=0)\n\n return df, coordret\n\n\ndef dist_correlated_sample(wfs, configs, *args, client, npartitions = None, **kwargs):\n \n if npartitions is None:\n npartitions = sum([x for x in client.nthreads().values()])\n\n coord = configs.split(npartitions)\n allruns = []\n for nodeconfigs in coord:\n allruns.append(\n client.submit(\n pyqmc.optimize_orthogonal.correlated_sample, \n wfs, nodeconfigs, *args, **kwargs\n )\n )\n\n allresults = [r.result() for r in allruns]\n df = {}\n for k in allresults[0].keys():\n df[k] = np.array([x[k] for x in allresults])\n wt = df['weight']*df[\"rhoprime\"]\n df['total'] = np.sum(df['total'] * wt,axis=0)/np.sum(wt,axis=0)\n df['overlap'] = np.mean(df['overlap'], axis=0)\n df['weight'] = np.mean(df['weight']*df[\"rhoprime\"], axis=0)/np.mean(df[\"rhoprime\"], axis=0)\n #df[\"weight\"] = np.mean(df[\"weight\"], axis=0)\n df['rhoprime'] = np.mean(df['rhoprime'], axis=0)\n return df\n\n\n\ndef optimize_orthogonal(*args, client, **kwargs):\n if \"sample_options\" not in kwargs:\n kwargs[\"sample_options\"] = {}\n if \"correlated_options\" not in kwargs:\n kwargs[\"correlated_options\"] = {}\n\n kwargs[\"sample_options\"][\"client\"] = client\n kwargs[\"correlated_options\"][\"client\"] = client\n\n return pyqmc.optimize_orthogonal.optimize_orthogonal(*args, sampler=dist_sample_overlap, correlated_sampler = dist_correlated_sample,**kwargs)\n\n", "import numpy as np\r\nfrom pyqmc import pbc, slateruhf\r\n\r\n\r\ndef get_supercell_kpts(supercell):\r\n Sinv = np.linalg.inv(supercell.S).T\r\n u = [0, 1]\r\n unit_box = np.stack([x.ravel() for x in np.meshgrid(*[u] * 3, indexing=\"ij\")]).T\r\n unit_box_ = np.dot(unit_box, supercell.S.T)\r\n xyz_range = np.stack([f(unit_box_, axis=0) for f in (np.amin, np.amax)]).T\r\n kptmesh = np.meshgrid(*[np.arange(*r) for r in xyz_range], indexing=\"ij\")\r\n possible_kpts = np.dot(np.stack([x.ravel() for x in kptmesh]).T, Sinv)\r\n in_unit_box = (possible_kpts >= 0) * (possible_kpts < 1 - 1e-12)\r\n select = np.where(np.all(in_unit_box, axis=1))[0]\r\n reclatvec = np.linalg.inv(supercell.original_cell.lattice_vectors()).T * 2 * np.pi\r\n kpts = np.dot(possible_kpts[select], reclatvec)\r\n return kpts\r\n\r\n\r\ndef get_supercell(cell, S):\r\n \"\"\"\r\n Inputs:\r\n cell: pyscf Cell object\r\n S: (3, 3) supercell matrix for QMC from cell defined by cell.a. 
In other words, the QMC calculation cell is qmc_cell = np.dot(S, cell.lattice_vectors()). For a 2x2x2 supercell, S is [[2, 0, 0], [0, 2, 0], [0, 0, 2]].\r\n \"\"\"\r\n from pyscf.pbc import gto\r\n\r\n def get_supercell_copies(latvec, S):\r\n Sinv = np.linalg.inv(S).T\r\n u = [0, 1]\r\n unit_box = np.stack([x.ravel() for x in np.meshgrid(*[u] * 3, indexing=\"ij\")]).T\r\n unit_box_ = np.dot(unit_box, S)\r\n xyz_range = np.stack([f(unit_box_, axis=0) for f in (np.amin, np.amax)]).T\r\n mesh = np.meshgrid(*[np.arange(*r) for r in xyz_range], indexing=\"ij\")\r\n possible_pts = np.dot(np.stack([x.ravel() for x in mesh]).T, Sinv.T)\r\n in_unit_box = (possible_pts >= 0) * (possible_pts < 1 - 1e-12)\r\n select = np.where(np.all(in_unit_box, axis=1))[0]\r\n pts = np.linalg.multi_dot((possible_pts[select], S, latvec))\r\n return pts\r\n\r\n scale = np.abs(int(np.round(np.linalg.det(S))))\r\n superlattice = np.dot(S, cell.lattice_vectors())\r\n Rpts = get_supercell_copies(cell.lattice_vectors(), S)\r\n atom = []\r\n for (name, xyz) in cell._atom:\r\n atom.extend([(name, xyz + R) for R in Rpts])\r\n supercell = gto.Cell()\r\n supercell.a = superlattice\r\n supercell.atom = atom\r\n supercell.pseudo = cell.pseudo\r\n supercell.basis = cell.basis\r\n supercell.unit = cell.unit\r\n supercell.spin = cell.spin * scale\r\n supercell.build()\r\n supercell.original_cell = cell\r\n supercell.S = S\r\n return supercell\r\n\r\n\r\nclass PySCFSlaterPBC:\r\n \"\"\"A wave function object has a state defined by a reference configuration of electrons.\r\n The functions recompute() and updateinternals() change the state of the object, and \r\n the rest compute and return values from that state. \"\"\"\r\n\r\n def __init__(self, supercell, mf):\r\n \"\"\"\r\n Inputs:\r\n supercell:\r\n mf:\r\n \"\"\"\r\n for attribute in [\"original_cell\", \"S\"]:\r\n if not hasattr(supercell, attribute):\r\n print('Warning: supercell is missing attribute \"%s\"' % attribute)\r\n print(\"setting original_cell=supercell and S=np.eye(3)\")\r\n supercell.original_cell = supercell\r\n supercell.S = np.eye(3)\r\n\r\n self.occ = np.asarray(mf.mo_occ) > 0.9\r\n self.parameters = {}\r\n self.real_tol = 1e4\r\n\r\n self.supercell = supercell\r\n self._kpts = get_supercell_kpts(supercell)\r\n kdiffs = mf.kpts[np.newaxis] - self._kpts[:, np.newaxis]\r\n self.kinds = np.nonzero(np.linalg.norm(kdiffs, axis=-1) < 1e-12)[1]\r\n self.nk = len(self._kpts)\r\n print(\"nk\", self.nk)\r\n print(self.kinds)\r\n\r\n mo_coeff = np.asarray(mf.mo_coeff)\r\n self._cell = supercell.original_cell\r\n\r\n mcalist = []\r\n mcblist = []\r\n for kind in self.kinds:\r\n if len(mf.mo_coeff[0][0].shape) == 2:\r\n mca = mo_coeff[0][kind][:, self.occ[0][kind]]\r\n mcb = mo_coeff[1][kind][:, self.occ[1][kind]]\r\n else:\r\n mca = mf.mo_coeff[kind][:, np.asarray(mf.mo_occ[kind] > 0.9)]\r\n mcb = mf.mo_coeff[kind][:, np.asarray(mf.mo_occ[kind] > 1.1)]\r\n mca = np.real_if_close(mca, tol=self.real_tol)\r\n mcb = np.real_if_close(mcb, tol=self.real_tol)\r\n mcalist.append(mca / np.sqrt(self.nk))\r\n mcblist.append(mcb / np.sqrt(self.nk))\r\n self.parameters[\"mo_coeff_alpha\"] = np.asarray(mcalist)\r\n self.parameters[\"mo_coeff_beta\"] = np.asarray(mcblist)\r\n self._coefflookup = (\"mo_coeff_alpha\", \"mo_coeff_beta\")\r\n\r\n if len(mf.mo_coeff[0][0].shape) == 2:\r\n self._nelec = [int(np.sum(np.concatenate(o))) for o in mf.mo_occ]\r\n else:\r\n scale = np.linalg.det(self.supercell.S)\r\n self._nelec = [int(np.round(n * scale)) for n in self._cell.nelec]\r\n self._nelec 
= tuple(self._nelec)\r\n self.get_phase = lambda x: np.exp(2j * np.pi * np.angle(x))\r\n\r\n def evaluate_orbitals(self, configs, mask=None, eval_str=\"PBCGTOval_sph\"):\r\n mycoords = configs.configs\r\n if mask is not None:\r\n mycoords = mycoords[mask]\r\n mycoords = mycoords.reshape((-1, mycoords.shape[-1]))\r\n # wrap supercell positions into primitive cell\r\n prim_coords, prim_wrap = pbc.enforce_pbc(self._cell.lattice_vectors(), mycoords)\r\n configswrap = configs.wrap.reshape(prim_wrap.shape)\r\n wrap = prim_wrap + np.dot(configswrap, self.supercell.S)\r\n kdotR = np.linalg.multi_dot(\r\n (self._kpts, self._cell.lattice_vectors().T, wrap.T)\r\n )\r\n wrap_phase = np.exp(1j * kdotR)\r\n # evaluate AOs for all electron positions\r\n ao = self._cell.eval_gto(eval_str, prim_coords, kpts=self._kpts)\r\n ao = [ao[k] * wrap_phase[k][:, np.newaxis] for k in range(self.nk)]\r\n return ao\r\n\r\n def recompute(self, configs):\r\n \"\"\"This computes the value from scratch. Returns the logarithm of the wave function as\r\n (phase,logdet). If the wf is real, phase will be +/- 1.\"\"\"\r\n nconf, nelec, ndim = configs.configs.shape\r\n aos = self.evaluate_orbitals(configs)\r\n aos = np.reshape(aos, (self.nk, nconf, nelec, -1))\r\n self._aovals = aos\r\n self._dets = []\r\n self._inverse = []\r\n for s in [0, 1]:\r\n mo = []\r\n i0, i1 = s * self._nelec[0], self._nelec[0] + s * self._nelec[1]\r\n for k in range(self.nk):\r\n mo_coeff = self.parameters[self._coefflookup[s]][k]\r\n mo.append(np.dot(aos[k, :, i0:i1], mo_coeff))\r\n ne = self._nelec[s]\r\n mo = np.concatenate(mo, axis=-1).reshape(nconf, ne, ne)\r\n phase, mag = np.linalg.slogdet(mo)\r\n self._dets.append((phase, mag))\r\n self._inverse.append(np.linalg.inv(mo))\r\n\r\n return self.value()\r\n\r\n def updateinternals(self, e, epos, mask=None):\r\n s = int(e >= self._nelec[0])\r\n if mask is None:\r\n mask = [True] * epos.configs.shape[0]\r\n eeff = e - s * self._nelec[0]\r\n aos = self.evaluate_orbitals(epos)\r\n self._aovals[:, :, e, :] = np.asarray(aos)\r\n mo = []\r\n for k in range(self.nk):\r\n mo_coeff = self.parameters[self._coefflookup[s]][k]\r\n mo.append(np.dot(aos[k], mo_coeff))\r\n ne = self._nelec[s]\r\n mo = np.concatenate(mo, axis=-1).reshape(len(mask), ne)\r\n ratio, self._inverse[s][mask, :, :] = slateruhf.sherman_morrison_row(\r\n eeff, self._inverse[s][mask, :, :], mo[mask, :]\r\n )\r\n self._updateval(ratio, s, mask)\r\n\r\n # identical to slateruhf\r\n def _updateval(self, ratio, s, mask):\r\n self._dets[s][0][mask] *= self.get_phase(ratio) # will not work for complex!\r\n self._dets[s][1][mask] += np.log(np.abs(ratio))\r\n\r\n ### not state-changing functions\r\n\r\n # identical to slateruhf\r\n def value(self):\r\n \"\"\"Return logarithm of the wave function as noted in recompute()\"\"\"\r\n return self._dets[0][0] * self._dets[1][0], self._dets[0][1] + self._dets[1][1]\r\n\r\n # identical to slateruhf\r\n def _testrow(self, e, vec, mask=None):\r\n \"\"\"vec is a nconfig,nmo vector which replaces row e\"\"\"\r\n s = int(e >= self._nelec[0])\r\n if mask is None:\r\n return np.einsum(\r\n \"i...j,ij->i...\", vec, self._inverse[s][:, :, e - s * self._nelec[0]]\r\n )\r\n\r\n return np.einsum(\r\n \"i...j,ij->i...\", vec, self._inverse[s][mask, :, e - s * self._nelec[0]]\r\n )\r\n\r\n # identical to slateruhf\r\n def _testcol(self, i, s, vec):\r\n \"\"\"vec is a nconfig,nmo vector which replaces column i\"\"\"\r\n ratio = np.einsum(\"ij,ij->i\", vec, self._inverse[s][:, i, :])\r\n return ratio\r\n\r\n def 
testvalue(self, e, epos, mask=None):\r\n \"\"\" return the ratio between the current wave function and the wave function if \r\n electron e's position is replaced by epos\"\"\"\r\n s = int(e >= self._nelec[0])\r\n if mask is None:\r\n mask = [True] * epos.configs.shape[0]\r\n nmask = np.sum(mask)\r\n if nmask == 0:\r\n return np.zeros((0, epos.configs.shape[1]))\r\n aos = self.evaluate_orbitals(epos, mask)\r\n mo_coeff = self.parameters[self._coefflookup[s]]\r\n mo = [np.dot(aos[k], mo_coeff[k]) for k in range(self.nk)]\r\n mo = np.concatenate(mo, axis=-1).reshape(nmask, self._nelec[s])\r\n return self._testrow(e, mo, mask)\r\n\r\n def gradient(self, e, epos):\r\n \"\"\" Compute the gradient of the log wave function \r\n Note that this can be called even if the internals have not been updated for electron e,\r\n if epos differs from the current position of electron e.\"\"\"\r\n s = int(e >= self._nelec[0])\r\n aograd = self.evaluate_orbitals(epos, eval_str=\"PBCGTOval_sph_deriv1\")\r\n mo_coeff = self.parameters[self._coefflookup[s]]\r\n mograd = [ak.dot(mo_coeff[k]) for k, ak in enumerate(aograd)]\r\n mograd = np.concatenate(mograd, axis=-1)\r\n ratios = np.asarray([self._testrow(e, x) for x in mograd])\r\n return ratios[1:] / ratios[:1]\r\n\r\n def laplacian(self, e, epos):\r\n s = int(e >= self._nelec[0])\r\n ao = self.evaluate_orbitals(epos, eval_str=\"PBCGTOval_sph_deriv2\")\r\n mo_coeff = self.parameters[self._coefflookup[s]]\r\n mo = [\r\n np.dot([ak[0], ak[[4, 7, 9]].sum(axis=0)], mo_coeff[k])\r\n for k, ak in enumerate(ao)\r\n ]\r\n mo = np.concatenate(mo, axis=-1)\r\n ratios = self._testrow(e, mo[1])\r\n testvalue = self._testrow(e, mo[0])\r\n return ratios / testvalue\r\n\r\n def gradient_laplacian(self, e, epos):\r\n s = int(e >= self._nelec[0])\r\n ao = self.evaluate_orbitals(epos, eval_str=\"PBCGTOval_sph_deriv2\")\r\n mo = [\r\n np.dot(\r\n np.concatenate([ak[0:4], ak[[4, 7, 9]].sum(axis=0, keepdims=True)]),\r\n self.parameters[self._coefflookup[s]][k],\r\n )\r\n for k, ak in enumerate(ao)\r\n ]\r\n mo = np.concatenate(mo, axis=-1)\r\n ratios = np.asarray([self._testrow(e, x) for x in mo])\r\n return ratios[1:-1] / ratios[:1], ratios[-1] / ratios[0]\r\n\r\n def pgradient(self):\r\n d = {}\r\n # for parm in self.parameters:\r\n # s = int(\"beta\" in parm)\r\n # # Get AOs for our spin channel only\r\n # i0, i1 = s * self._nelec[0], self._nelec[0] + s * self._nelec[1]\r\n # ao = self._aovals[:, :, i0:i1] # (kpt, config, electron, ao)\r\n # pgrad_shape = (ao.shape[1],) + self.parameters[parm].shape\r\n # pgrad = np.zeros(pgrad_shape)\r\n # # Compute derivatives w.r.t. 
MO coefficients\r\n # for k in range(self.nk):\r\n # for i in range(self._nelec[s]):\r\n # for j in range(ao.shape[2]):\r\n # pgrad[:, k, j, i] = self._testcol(i, s, ao[k, :, :, j])\r\n # d[parm] = np.array(pgrad)\r\n return d\r\n\r\n def plot_orbitals(self, mf, norb, spin_channel=0, basename=\"\", nx=80, ny=80, nz=80):\r\n from pyqmc.coord import PeriodicConfigs\r\n\r\n grid = np.meshgrid(*[np.arange(n) / n for n in [nx, ny, nz]], indexing=\"ij\")\r\n grid = np.stack([g.ravel() for g in grid]).T\r\n grid = np.linalg.dot(grid, self.supercell.lattice_vectors())\r\n configs = PeriodicConfigs(\r\n grid.reshape((-1, 16, 3)), self._cell.lattice_vectors()\r\n )\r\n nconf, nelec, ndim = configs.configs.shape\r\n ao = self.evaluate_orbitals(configs)\r\n\r\n mo_coeff = np.asarray(mf.mo_coeff)\r\n coeff = []\r\n for kind in self.kinds:\r\n if len(mf.mo_coeff[0][0].shape) == 2:\r\n mca = mo_coeff[spin_channel][kind][:, :norb]\r\n else:\r\n mca = mf.mo_coeff[kind][:, :norb]\r\n mca = np.real_if_close(mca, tol=self.real_tol)\r\n coeff.append(mca)\r\n\r\n mo = []\r\n nsorb = int(np.round(np.linalg.det(self.S) * norb))\r\n for k in range(self.nk):\r\n mo.append(np.dot(ao[k], coeff[k]))\r\n mo = np.concatenate(mo, axis=-1).reshape(-1, nsorb)\r\n\r\n for i in range(nsorb):\r\n fname = basename + \"mo{0}.cube\".format(i)\r\n print(\"writing\", fname, mo[..., i].shape)\r\n self.generate_cube(fname, mo[..., i], nx, ny, nz)\r\n\r\n def generate_cube(self, fname, vals, nx, ny, nz, comment=\"HEADER LINE\\n\"):\r\n import cubetools\r\n\r\n cube = {}\r\n cube[\"comment\"] = comment\r\n cube[\"type\"] = \"\\n\"\r\n cube[\"natoms\"] = self.supercell.natm\r\n cube[\"origin\"] = np.zeros(3)\r\n cube[\"ints\"] = np.array([nx, ny, nz])\r\n cube[\"latvec\"] = self.supercell.lattice_vectors()\r\n cube[\"latvec\"] = cube[\"latvec\"] / cube[\"ints\"][:, np.newaxis]\r\n cube[\"atomname\"] = self.supercell.atom_charges()\r\n cube[\"atomxyz\"] = self.supercell.atom_coords()\r\n cube[\"data\"] = np.reshape(vals, (nx, ny, nz))\r\n with open(fname, \"w\") as f:\r\n cubetools.write_cube(cube, f)\r\n\r\n\r\ndef generate_test_inputs():\r\n import pyqmc\r\n from pyqmc.coord import PeriodicConfigs\r\n from pyscf.pbc import gto, scf\r\n from pyscf.pbc.dft.multigrid import multigrid\r\n from pyscf.pbc import tools\r\n from pyscf import lib\r\n\r\n from_chkfile = True\r\n\r\n if from_chkfile:\r\n\r\n def loadchkfile(chkfile):\r\n cell = gto.cell.loads(lib.chkfile.load(chkfile, \"mol\"))\r\n kpts = cell.make_kpts([1, 1, 1])\r\n mf = scf.KRKS(cell, kpts)\r\n mf.__dict__.update(lib.chkfile.load(chkfile, \"scf\"))\r\n return cell, mf\r\n\r\n cell1, mf1 = loadchkfile(\"mf1.chkfile\")\r\n cell2, mf2 = loadchkfile(\"mf2.chkfile\")\r\n else:\r\n L = 4\r\n cell2 = gto.M(\r\n atom=\"\"\"H {0} {0} {0} \r\n H {1} {1} {1}\"\"\".format(\r\n 0.0, L * 0.25\r\n ),\r\n basis=\"sto-3g\",\r\n a=np.eye(3) * L,\r\n spin=0,\r\n unit=\"bohr\",\r\n )\r\n\r\n print(\"Primitive cell\")\r\n kpts = cell2.make_kpts((2, 2, 2))\r\n mf2 = scf.KRKS(cell2, kpts)\r\n mf2.xc = \"pbe\"\r\n mf2.chkfile = \"mf2.chkfile\"\r\n mf2 = mf2.run()\r\n\r\n print(\"Supercell\")\r\n cell1 = tools.super_cell(cell2, [2, 2, 2])\r\n kpts = [[0, 0, 0]]\r\n mf1 = scf.KRKS(cell1, kpts)\r\n mf1.xc = \"pbe\"\r\n mf1.chkfile = \"mf1.chkfile\"\r\n mf1 = mf1.run()\r\n\r\n # wf1 = pyqmc.PySCFSlaterUHF(cell1, mf1)\r\n wf1 = PySCFSlaterPBC(cell1, mf1, supercell=1 * np.eye(3))\r\n wf2 = PySCFSlaterPBC(cell2, mf2, supercell=2 * np.eye(3))\r\n\r\n configs = pyqmc.initial_guess(cell1, 10, 0.1)\r\n\r\n 
return wf1, wf2, configs\r\n\r\n\r\ndef test_recompute(wf1, wf2, configs):\r\n p1, m1 = wf1.recompute(configs)\r\n p2, m2 = wf2.recompute(configs)\r\n\r\n print(\"phase\")\r\n print(\"p1\", p1)\r\n print(\"p2\", p2)\r\n print(\"p1/p2\", p1 / p2)\r\n print(\"log magnitude\")\r\n print(\"m1\", m1)\r\n print(\"m2\", m2)\r\n print(\"m1-m2\", m1 - m2)\r\n\r\n p_err = np.linalg.norm(p1 / p2 - p1[0] / p2[0])\r\n m_err = np.linalg.norm(m1 - m2 - m1[0] + m2[0])\r\n assert p_err < 1e-10, (p_err, m_err)\r\n assert m_err < 1e-1, (p_err, m_err)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from pyqmc.testwf import (\r\n test_updateinternals,\r\n test_wf_gradient,\r\n test_wf_laplacian,\r\n test_wf_gradient_laplacian,\r\n )\r\n\r\n wf1, wf2, configs = generate_test_inputs()\r\n test_recompute(wf1, wf2, configs)\r\n test_updateinternals(wf1, configs)\r\n test_updateinternals(wf2, configs)\r\n test_wf_gradient(wf1, configs)\r\n test_wf_gradient(wf2, configs)\r\n test_wf_laplacian(wf1, configs)\r\n test_wf_laplacian(wf2, configs)\r\n test_wf_gradient_laplacian(wf1, configs)\r\n test_wf_gradient_laplacian(wf2, configs)\r\n" ]
[ [ "numpy.split", "pandas.DataFrame", "numpy.concatenate", "numpy.mean", "numpy.array", "numpy.sum", "numpy.vstack" ], [ "numpy.dot", "numpy.real_if_close", "numpy.sqrt", "numpy.einsum", "numpy.asarray", "numpy.concatenate", "numpy.all", "numpy.round", "numpy.exp", "numpy.reshape", "numpy.linalg.slogdet", "numpy.arange", "numpy.eye", "numpy.linalg.multi_dot", "numpy.linalg.det", "numpy.zeros", "numpy.linalg.inv", "numpy.array", "numpy.meshgrid", "numpy.sum", "numpy.abs", "numpy.linalg.norm", "numpy.angle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zj-liu/PaddleHelix
[ "6a4bf9835aebf31c8c7ee0f1005ecaa132b916bb", "6a4bf9835aebf31c8c7ee0f1005ecaa132b916bb" ]
[ "apps/paddlefold/alphafold_paddle/data/templates.py", "apps/paddlefold/alphafold_paddle/relax/relax_test.py" ]
[ "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions for getting templates and calculating template features.\"\"\"\n\nimport dataclasses\nimport datetime\nimport glob\nimport os\nimport re\nimport numpy as np\nimport logging\nfrom typing import Any, Dict, Mapping, Optional, Sequence, Tuple\n\nfrom alphafold_paddle.common import residue_constants\nfrom alphafold_paddle.data import mmcif_parsing\nfrom alphafold_paddle.data import parsers\nfrom alphafold_paddle.data.tools import kalign\n\nlogger = logging.getLogger(__name__)\n\nclass Error(Exception):\n \"\"\"Base class for exceptions.\"\"\"\n\n\nclass NoChainsError(Error):\n \"\"\"An error indicating that template mmCIF didn't have any chains.\"\"\"\n\n\nclass SequenceNotInTemplateError(Error):\n \"\"\"An error indicating that template mmCIF didn't contain the sequence.\"\"\"\n\n\nclass NoAtomDataInTemplateError(Error):\n \"\"\"An error indicating that template mmCIF didn't contain atom positions.\"\"\"\n\n\nclass TemplateAtomMaskAllZerosError(Error):\n \"\"\"An error indicating that template mmCIF had all atom positions masked.\"\"\"\n\n\nclass QueryToTemplateAlignError(Error):\n \"\"\"An error indicating that the query can't be aligned to the template.\"\"\"\n\n\nclass CaDistanceError(Error):\n \"\"\"An error indicating that a CA atom distance exceeds a threshold.\"\"\"\n\n\nclass MultipleChainsError(Error):\n \"\"\"An error indicating that multiple chains were found for a given ID.\"\"\"\n\n\n# Prefilter exceptions.\nclass PrefilterError(Exception):\n \"\"\"A base class for template prefilter exceptions.\"\"\"\n\n\nclass DateError(PrefilterError):\n \"\"\"An error indicating that the hit date was after the max allowed date.\"\"\"\n\n\nclass PdbIdError(PrefilterError):\n \"\"\"An error indicating that the hit PDB ID was identical to the query.\"\"\"\n\n\nclass AlignRatioError(PrefilterError):\n \"\"\"An error indicating that the hit align ratio to the query was too small.\"\"\"\n\n\nclass DuplicateError(PrefilterError):\n \"\"\"An error indicating that the hit was an exact subsequence of the query.\"\"\"\n\n\nclass LengthError(PrefilterError):\n \"\"\"An error indicating that the hit was too short.\"\"\"\n\n\nTEMPLATE_FEATURES = {\n 'template_aatype': np.float32,\n 'template_all_atom_masks': np.float32,\n 'template_all_atom_positions': np.float32,\n 'template_domain_names': np.object,\n 'template_sequence': np.object,\n 'template_sum_probs': np.float32,\n}\n\n\ndef _get_pdb_id_and_chain(hit: parsers.TemplateHit) -> Tuple[str, str]:\n \"\"\"Returns PDB id and chain id for an HHSearch Hit.\"\"\"\n # PDB ID: 4 letters. 
Chain ID: 1+ alphanumeric letters or \".\" if unknown.\n id_match = re.match(r'[a-zA-Z\\d]{4}_[a-zA-Z0-9.]+', hit.name)\n if not id_match:\n raise ValueError(f'hit.name did not start with PDBID_chain: {hit.name}')\n pdb_id, chain_id = id_match.group(0).split('_')\n return pdb_id.lower(), chain_id\n\n\ndef _is_after_cutoff(\n pdb_id: str,\n release_dates: Mapping[str, datetime.datetime],\n release_date_cutoff: Optional[datetime.datetime]) -> bool:\n \"\"\"Checks if the template date is after the release date cutoff.\n\n Args:\n pdb_id: 4 letter pdb code.\n release_dates: Dictionary mapping PDB ids to their structure release dates.\n release_date_cutoff: Max release date that is valid for this query.\n\n Returns:\n True if the template release date is after the cutoff, False otherwise.\n \"\"\"\n if release_date_cutoff is None:\n raise ValueError('The release_date_cutoff must not be None.')\n if pdb_id in release_dates:\n return release_dates[pdb_id] > release_date_cutoff\n else:\n # Since this is just a quick prefilter to reduce the number of mmCIF files\n # we need to parse, we don't have to worry about returning True here.\n logger.warning('Template structure not in release dates dict: %s', pdb_id)\n return False\n\n\ndef _parse_obsolete(obsolete_file_path: str) -> Mapping[str, Optional[str]]:\n \"\"\"Parses the data file from PDB that lists which pdb_ids are obsolete.\"\"\"\n with open(obsolete_file_path) as f:\n result = {}\n for line in f:\n line = line.strip()\n # Format: Date From To\n # 'OBSLTE 06-NOV-19 6G9Y' - Removed, rare\n # 'OBSLTE 31-JUL-94 116L 216L' - Replaced, common\n # 'OBSLTE 26-SEP-06 2H33 2JM5 2OWI' - Replaced by multiple, rare\n if line.startswith('OBSLTE'):\n if len(line) > 30:\n # Replaced by at least one structure.\n from_id = line[20:24].lower()\n to_id = line[29:33].lower()\n result[from_id] = to_id\n elif len(line) == 24:\n # Removed.\n from_id = line[20:24].lower()\n result[from_id] = None\n return result\n\n\ndef _parse_release_dates(path: str) -> Mapping[str, datetime.datetime]:\n \"\"\"Parses release dates file, returns a mapping from PDBs to release dates.\"\"\"\n if path.endswith('txt'):\n release_dates = {}\n with open(path, 'r') as f:\n for line in f:\n pdb_id, date = line.split(':')\n date = date.strip()\n # Python 3.6 doesn't have datetime.date.fromisoformat() which is about\n # 90x faster than strptime. However, splitting the string manually is\n # about 10x faster than strptime.\n release_dates[pdb_id.strip()] = datetime.datetime(\n year=int(date[:4]), month=int(date[5:7]), day=int(date[8:10]))\n return release_dates\n else:\n raise ValueError('Invalid format of the release date file %s.' % path)\n\n\ndef _assess_hhsearch_hit(\n hit: parsers.TemplateHit,\n hit_pdb_code: str,\n query_sequence: str,\n query_pdb_code: Optional[str],\n release_dates: Mapping[str, datetime.datetime],\n release_date_cutoff: datetime.datetime,\n max_subsequence_ratio: float = 0.95,\n min_align_ratio: float = 0.1) -> bool:\n \"\"\"Determines if template is valid (without parsing the template mmcif file).\n\n Args:\n hit: HhrHit for the template.\n hit_pdb_code: The 4 letter pdb code of the template hit. 
This might be\n different from the value in the actual hit since the original pdb might\n have become obsolete.\n query_sequence: Amino acid sequence of the query.\n query_pdb_code: 4 letter pdb code of the query.\n release_dates: Dictionary mapping pdb codes to their structure release\n dates.\n release_date_cutoff: Max release date that is valid for this query.\n max_subsequence_ratio: Exclude any exact matches with this much overlap.\n min_align_ratio: Minimum overlap between the template and query.\n\n Returns:\n True if the hit passed the prefilter. Raises an exception otherwise.\n\n Raises:\n DateError: If the hit date was after the max allowed date.\n PdbIdError: If the hit PDB ID was identical to the query.\n AlignRatioError: If the hit align ratio to the query was too small.\n DuplicateError: If the hit was an exact subsequence of the query.\n LengthError: If the hit was too short.\n \"\"\"\n aligned_cols = hit.aligned_cols\n align_ratio = aligned_cols / len(query_sequence)\n\n template_sequence = hit.hit_sequence.replace('-', '')\n length_ratio = float(len(template_sequence)) / len(query_sequence)\n\n # Check whether the template is a large subsequence or duplicate of original\n # query. This can happen due to duplicate entries in the PDB database.\n duplicate = (template_sequence in query_sequence and\n length_ratio > max_subsequence_ratio)\n\n if _is_after_cutoff(hit_pdb_code, release_dates, release_date_cutoff):\n raise DateError(f'Date ({release_dates[hit_pdb_code]}) > max template date '\n f'({release_date_cutoff}).')\n\n if query_pdb_code is not None:\n if query_pdb_code.lower() == hit_pdb_code.lower():\n raise PdbIdError('PDB code identical to Query PDB code.')\n\n if align_ratio <= min_align_ratio:\n raise AlignRatioError('Proportion of residues aligned to query too small. '\n f'Align ratio: {align_ratio}.')\n\n if duplicate:\n raise DuplicateError('Template is an exact subsequence of query with large '\n f'coverage. Length ratio: {length_ratio}.')\n\n if len(template_sequence) < 10:\n raise LengthError(f'Template too short. Length: {len(template_sequence)}.')\n\n return True\n\n\ndef _find_template_in_pdb(\n template_chain_id: str,\n template_sequence: str,\n mmcif_object: mmcif_parsing.MmcifObject) -> Tuple[str, str, int]:\n \"\"\"Tries to find the template chain in the given pdb file.\n\n This method tries the three following things in order:\n 1. Tries if there is an exact match in both the chain ID and the sequence.\n If yes, the chain sequence is returned. Otherwise:\n 2. Tries if there is an exact match only in the sequence.\n If yes, the chain sequence is returned. Otherwise:\n 3. 
Tries if there is a fuzzy match (X = wildcard) in the sequence.\n If yes, the chain sequence is returned.\n If none of these succeed, a SequenceNotInTemplateError is thrown.\n\n Args:\n template_chain_id: The template chain ID.\n template_sequence: The template chain sequence.\n mmcif_object: The PDB object to search for the template in.\n\n Returns:\n A tuple with:\n * The chain sequence that was found to match the template in the PDB object.\n * The ID of the chain that is being returned.\n * The offset where the template sequence starts in the chain sequence.\n\n Raises:\n SequenceNotInTemplateError: If no match is found after the steps described\n above.\n \"\"\"\n # Try if there is an exact match in both the chain ID and the (sub)sequence.\n pdb_id = mmcif_object.file_id\n chain_sequence = mmcif_object.chain_to_seqres.get(template_chain_id)\n if chain_sequence and (template_sequence in chain_sequence):\n logger.info(\n 'Found an exact template match %s_%s.', pdb_id, template_chain_id)\n mapping_offset = chain_sequence.find(template_sequence)\n return chain_sequence, template_chain_id, mapping_offset\n\n # Try if there is an exact match in the (sub)sequence only.\n for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items():\n if chain_sequence and (template_sequence in chain_sequence):\n logger.info('Found a sequence-only match %s_%s.', pdb_id, chain_id)\n mapping_offset = chain_sequence.find(template_sequence)\n return chain_sequence, chain_id, mapping_offset\n\n # Return a chain sequence that fuzzy matches (X = wildcard) the template.\n # Make parentheses unnamed groups (?:_) to avoid the 100 named groups limit.\n regex = ['.' if aa == 'X' else '(?:%s|X)' % aa for aa in template_sequence]\n regex = re.compile(''.join(regex))\n for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items():\n match = re.search(regex, chain_sequence)\n if match:\n logger.info('Found a fuzzy sequence-only match %s_%s.', pdb_id, chain_id)\n mapping_offset = match.start()\n return chain_sequence, chain_id, mapping_offset\n\n # No hits, raise an error.\n raise SequenceNotInTemplateError(\n 'Could not find the template sequence in %s_%s. Template sequence: %s, '\n 'chain_to_seqres: %s' % (pdb_id, template_chain_id, template_sequence,\n mmcif_object.chain_to_seqres))\n\n\ndef _realign_pdb_template_to_query(\n old_template_sequence: str,\n template_chain_id: str,\n mmcif_object: mmcif_parsing.MmcifObject,\n old_mapping: Mapping[int, int],\n kalign_binary_path: str) -> Tuple[str, Mapping[int, int]]:\n \"\"\"Aligns template from the mmcif_object to the query.\n\n In case PDB70 contains a different version of the template sequence, we need\n to perform a realignment to the actual sequence that is in the mmCIF file.\n This method performs such realignment, but returns the new sequence and\n mapping only if the sequence in the mmCIF file is 90% identical to the old\n sequence.\n\n Note that the old_template_sequence comes from the hit, and contains only that\n part of the chain that matches with the query while the new_template_sequence\n is the full chain.\n\n Args:\n old_template_sequence: The template sequence that was returned by the PDB\n template search (typically done using HHSearch).\n template_chain_id: The template chain id was returned by the PDB template\n search (typically done using HHSearch). 
This is used to find the right\n chain in the mmcif_object chain_to_seqres mapping.\n mmcif_object: A mmcif_object which holds the actual template data.\n old_mapping: A mapping from the query sequence to the template sequence.\n This mapping will be used to compute the new mapping from the query\n sequence to the actual mmcif_object template sequence by aligning the\n old_template_sequence and the actual template sequence.\n kalign_binary_path: The path to a kalign executable.\n\n Returns:\n A tuple (new_template_sequence, new_query_to_template_mapping) where:\n * new_template_sequence is the actual template sequence that was found in\n the mmcif_object.\n * new_query_to_template_mapping is the new mapping from the query to the\n actual template found in the mmcif_object.\n\n Raises:\n QueryToTemplateAlignError:\n * If there was an error thrown by the alignment tool.\n * Or if the actual template sequence differs by more than 10% from the\n old_template_sequence.\n \"\"\"\n aligner = kalign.Kalign(binary_path=kalign_binary_path)\n new_template_sequence = mmcif_object.chain_to_seqres.get(\n template_chain_id, '')\n\n # Sometimes the template chain id is unknown. But if there is only a single\n # sequence within the mmcif_object, it is safe to assume it is that one.\n if not new_template_sequence:\n if len(mmcif_object.chain_to_seqres) == 1:\n logger.info('Could not find %s in %s, but there is only 1 sequence, so '\n 'using that one.',\n template_chain_id,\n mmcif_object.file_id)\n new_template_sequence = list(mmcif_object.chain_to_seqres.values())[0]\n else:\n raise QueryToTemplateAlignError(\n f'Could not find chain {template_chain_id} in {mmcif_object.file_id}. '\n 'If there are no mmCIF parsing errors, it is possible it was not a '\n 'protein chain.')\n\n try:\n (old_aligned_template, new_aligned_template), _ = parsers.parse_a3m(\n aligner.align([old_template_sequence, new_template_sequence]))\n except Exception as e:\n raise QueryToTemplateAlignError(\n 'Could not align old template %s to template %s (%s_%s). Error: %s' %\n (old_template_sequence, new_template_sequence, mmcif_object.file_id,\n template_chain_id, str(e)))\n\n logger.info('Old aligned template: %s\\nNew aligned template: %s',\n old_aligned_template, new_aligned_template)\n\n old_to_new_template_mapping = {}\n old_template_index = -1\n new_template_index = -1\n num_same = 0\n for old_template_aa, new_template_aa in zip(\n old_aligned_template, new_aligned_template):\n if old_template_aa != '-':\n old_template_index += 1\n if new_template_aa != '-':\n new_template_index += 1\n if old_template_aa != '-' and new_template_aa != '-':\n old_to_new_template_mapping[old_template_index] = new_template_index\n if old_template_aa == new_template_aa:\n num_same += 1\n\n # Require at least 90 % sequence identity wrt to the shorter of the sequences.\n if float(num_same) / min(\n len(old_template_sequence), len(new_template_sequence)) < 0.9:\n raise QueryToTemplateAlignError(\n 'Insufficient similarity of the sequence in the database: %s to the '\n 'actual sequence in the mmCIF file %s_%s: %s. We require at least '\n '90 %% similarity wrt to the shorter of the sequences. This is not a '\n 'problem unless you think this is a template that should be included.' 
%\n (old_template_sequence, mmcif_object.file_id, template_chain_id,\n new_template_sequence))\n\n new_query_to_template_mapping = {}\n for query_index, old_template_index in old_mapping.items():\n new_query_to_template_mapping[query_index] = (\n old_to_new_template_mapping.get(old_template_index, -1))\n\n new_template_sequence = new_template_sequence.replace('-', '')\n\n return new_template_sequence, new_query_to_template_mapping\n\n\ndef _check_residue_distances(all_positions: np.ndarray,\n all_positions_mask: np.ndarray,\n max_ca_ca_distance: float):\n \"\"\"Checks if the distance between unmasked neighbor residues is ok.\"\"\"\n ca_position = residue_constants.atom_order['CA']\n prev_is_unmasked = False\n prev_calpha = None\n for i, (coords, mask) in enumerate(zip(all_positions, all_positions_mask)):\n this_is_unmasked = bool(mask[ca_position])\n if this_is_unmasked:\n this_calpha = coords[ca_position]\n if prev_is_unmasked:\n distance = np.linalg.norm(this_calpha - prev_calpha)\n if distance > max_ca_ca_distance:\n raise CaDistanceError(\n 'The distance between residues %d and %d is %f > limit %f.' % (\n i, i + 1, distance, max_ca_ca_distance))\n prev_calpha = this_calpha\n prev_is_unmasked = this_is_unmasked\n\n\ndef _get_atom_positions(\n mmcif_object: mmcif_parsing.MmcifObject,\n auth_chain_id: str,\n max_ca_ca_distance: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Gets atom positions and mask from a list of Biopython Residues.\"\"\"\n num_res = len(mmcif_object.chain_to_seqres[auth_chain_id])\n\n relevant_chains = [c for c in mmcif_object.structure.get_chains()\n if c.id == auth_chain_id]\n if len(relevant_chains) != 1:\n raise MultipleChainsError(\n f'Expected exactly one chain in structure with id {auth_chain_id}.')\n chain = relevant_chains[0]\n\n all_positions = np.zeros([num_res, residue_constants.atom_type_num, 3])\n all_positions_mask = np.zeros([num_res, residue_constants.atom_type_num],\n dtype=np.int64)\n for res_index in range(num_res):\n pos = np.zeros([residue_constants.atom_type_num, 3], dtype=np.float32)\n mask = np.zeros([residue_constants.atom_type_num], dtype=np.float32)\n res_at_position = mmcif_object.seqres_to_structure[auth_chain_id][res_index]\n if not res_at_position.is_missing:\n res = chain[(res_at_position.hetflag,\n res_at_position.position.residue_number,\n res_at_position.position.insertion_code)]\n for atom in res.get_atoms():\n atom_name = atom.get_name()\n x, y, z = atom.get_coord()\n if atom_name in residue_constants.atom_order.keys():\n pos[residue_constants.atom_order[atom_name]] = [x, y, z]\n mask[residue_constants.atom_order[atom_name]] = 1.0\n elif atom_name.upper() == 'SE' and res.get_resname() == 'MSE':\n # Put the coordinates of the selenium atom in the sulphur column.\n pos[residue_constants.atom_order['SD']] = [x, y, z]\n mask[residue_constants.atom_order['SD']] = 1.0\n\n all_positions[res_index] = pos\n all_positions_mask[res_index] = mask\n _check_residue_distances(\n all_positions, all_positions_mask, max_ca_ca_distance)\n return all_positions, all_positions_mask\n\n\ndef _extract_template_features(\n mmcif_object: mmcif_parsing.MmcifObject,\n pdb_id: str,\n mapping: Mapping[int, int],\n template_sequence: str,\n query_sequence: str,\n template_chain_id: str,\n kalign_binary_path: str) -> Tuple[Dict[str, Any], Optional[str]]:\n \"\"\"Parses atom positions in the target structure and aligns with the query.\n\n Atoms for each residue in the template structure are indexed to coincide\n with their corresponding residue in the query 
sequence, according to the\n alignment mapping provided.\n\n Args:\n mmcif_object: mmcif_parsing.MmcifObject representing the template.\n pdb_id: PDB code for the template.\n mapping: Dictionary mapping indices in the query sequence to indices in\n the template sequence.\n template_sequence: String describing the amino acid sequence for the\n template protein.\n query_sequence: String describing the amino acid sequence for the query\n protein.\n template_chain_id: String ID describing which chain in the structure proto\n should be used.\n kalign_binary_path: The path to a kalign executable used for template\n realignment.\n\n Returns:\n A tuple with:\n * A dictionary containing the extra features derived from the template\n protein structure.\n * A warning message if the hit was realigned to the actual mmCIF sequence.\n Otherwise None.\n\n Raises:\n NoChainsError: If the mmcif object doesn't contain any chains.\n SequenceNotInTemplateError: If the given chain id / sequence can't\n be found in the mmcif object.\n QueryToTemplateAlignError: If the actual template in the mmCIF file\n can't be aligned to the query.\n NoAtomDataInTemplateError: If the mmcif object doesn't contain\n atom positions.\n TemplateAtomMaskAllZerosError: If the mmcif object doesn't have any\n unmasked residues.\n \"\"\"\n if mmcif_object is None or not mmcif_object.chain_to_seqres:\n raise NoChainsError('No chains in PDB: %s_%s' % (pdb_id, template_chain_id))\n\n warning = None\n try:\n seqres, chain_id, mapping_offset = _find_template_in_pdb(\n template_chain_id=template_chain_id,\n template_sequence=template_sequence,\n mmcif_object=mmcif_object)\n except SequenceNotInTemplateError:\n # If PDB70 contains a different version of the template, we use the sequence\n # from the mmcif_object.\n chain_id = template_chain_id\n warning = (\n f'The exact sequence {template_sequence} was not found in '\n f'{pdb_id}_{chain_id}. 
Realigning the template to the actual sequence.')\n logger.warning(warning)\n # This throws an exception if it fails to realign the hit.\n seqres, mapping = _realign_pdb_template_to_query(\n old_template_sequence=template_sequence,\n template_chain_id=template_chain_id,\n mmcif_object=mmcif_object,\n old_mapping=mapping,\n kalign_binary_path=kalign_binary_path)\n logger.info('Sequence in %s_%s: %s successfully realigned to %s',\n pdb_id, chain_id, template_sequence, seqres)\n # The template sequence changed.\n template_sequence = seqres\n # No mapping offset, the query is aligned to the actual sequence.\n mapping_offset = 0\n\n try:\n # Essentially set to infinity - we don't want to reject templates unless\n # they're really really bad.\n all_atom_positions, all_atom_mask = _get_atom_positions(\n mmcif_object, chain_id, max_ca_ca_distance=150.0)\n except (CaDistanceError, KeyError) as ex:\n raise NoAtomDataInTemplateError(\n 'Could not get atom data (%s_%s): %s' % (pdb_id, chain_id, str(ex))\n ) from ex\n\n all_atom_positions = np.split(all_atom_positions, all_atom_positions.shape[0])\n all_atom_masks = np.split(all_atom_mask, all_atom_mask.shape[0])\n\n output_templates_sequence = []\n templates_all_atom_positions = []\n templates_all_atom_masks = []\n\n for _ in query_sequence:\n # Residues in the query_sequence that are not in the template_sequence:\n templates_all_atom_positions.append(\n np.zeros((residue_constants.atom_type_num, 3)))\n templates_all_atom_masks.append(np.zeros(residue_constants.atom_type_num))\n output_templates_sequence.append('-')\n\n for k, v in mapping.items():\n template_index = v + mapping_offset\n templates_all_atom_positions[k] = all_atom_positions[template_index][0]\n templates_all_atom_masks[k] = all_atom_masks[template_index][0]\n output_templates_sequence[k] = template_sequence[v]\n\n # Alanine (AA with the lowest number of atoms) has 5 atoms (C, CA, CB, N, O).\n if np.sum(templates_all_atom_masks) < 5:\n raise TemplateAtomMaskAllZerosError(\n 'Template all atom mask was all zeros: %s_%s. Residue range: %d-%d' %\n (pdb_id, chain_id, min(mapping.values()) + mapping_offset,\n max(mapping.values()) + mapping_offset))\n\n output_templates_sequence = ''.join(output_templates_sequence)\n\n templates_aatype = residue_constants.sequence_to_onehot(\n output_templates_sequence, residue_constants.HHBLITS_AA_TO_ID)\n\n return (\n {\n 'template_all_atom_positions': np.array(templates_all_atom_positions),\n 'template_all_atom_masks': np.array(templates_all_atom_masks),\n 'template_sequence': output_templates_sequence.encode(),\n 'template_aatype': np.array(templates_aatype),\n 'template_domain_names': f'{pdb_id.lower()}_{chain_id}'.encode(),\n },\n warning)\n\n\ndef _build_query_to_hit_index_mapping(\n hit_query_sequence: str,\n hit_sequence: str,\n indices_hit: Sequence[int],\n indices_query: Sequence[int],\n original_query_sequence: str) -> Mapping[int, int]:\n \"\"\"Gets mapping from indices in original query sequence to indices in the hit.\n\n hit_query_sequence and hit_sequence are two aligned sequences containing gap\n characters. hit_query_sequence contains only the part of the original query\n sequence that matched the hit. 
When interpreting the indices from the .hhr, we\n need to correct for this to recover a mapping from original query sequence to\n the hit sequence.\n\n Args:\n hit_query_sequence: The portion of the query sequence that is in the .hhr\n hit\n hit_sequence: The portion of the hit sequence that is in the .hhr\n indices_hit: The indices for each aminoacid relative to the hit sequence\n indices_query: The indices for each aminoacid relative to the original query\n sequence\n original_query_sequence: String describing the original query sequence.\n\n Returns:\n Dictionary with indices in the original query sequence as keys and indices\n in the hit sequence as values.\n \"\"\"\n # If the hit is empty (no aligned residues), return empty mapping\n if not hit_query_sequence:\n return {}\n\n # Remove gaps and find the offset of hit.query relative to original query.\n hhsearch_query_sequence = hit_query_sequence.replace('-', '')\n hit_sequence = hit_sequence.replace('-', '')\n hhsearch_query_offset = original_query_sequence.find(hhsearch_query_sequence)\n\n # Index of -1 used for gap characters. Subtract the min index ignoring gaps.\n min_idx = min(x for x in indices_hit if x > -1)\n fixed_indices_hit = [\n x - min_idx if x > -1 else -1 for x in indices_hit\n ]\n\n min_idx = min(x for x in indices_query if x > -1)\n fixed_indices_query = [x - min_idx if x > -1 else -1 for x in indices_query]\n\n # Zip the corrected indices, ignore case where both seqs have gap characters.\n mapping = {}\n for q_i, q_t in zip(fixed_indices_query, fixed_indices_hit):\n if q_t != -1 and q_i != -1:\n if (q_t >= len(hit_sequence) or\n q_i + hhsearch_query_offset >= len(original_query_sequence)):\n continue\n mapping[q_i + hhsearch_query_offset] = q_t\n\n return mapping\n\n\[email protected](frozen=True)\nclass SingleHitResult:\n features: Optional[Mapping[str, Any]]\n error: Optional[str]\n warning: Optional[str]\n\n\ndef _process_single_hit(\n query_sequence: str,\n query_pdb_code: Optional[str],\n hit: parsers.TemplateHit,\n mmcif_dir: str,\n max_template_date: datetime.datetime,\n release_dates: Mapping[str, datetime.datetime],\n obsolete_pdbs: Mapping[str, Optional[str]],\n kalign_binary_path: str,\n strict_error_check: bool = False) -> SingleHitResult:\n \"\"\"Tries to extract template features from a single HHSearch hit.\"\"\"\n # Fail hard if we can't get the PDB ID and chain name from the hit.\n hit_pdb_code, hit_chain_id = _get_pdb_id_and_chain(hit)\n\n # This hit has been removed (obsoleted) from PDB, skip it.\n if hit_pdb_code in obsolete_pdbs and obsolete_pdbs[hit_pdb_code] is None:\n return SingleHitResult(\n features=None, error=None, warning=f'Hit {hit_pdb_code} is obsolete.')\n\n if hit_pdb_code not in release_dates:\n if hit_pdb_code in obsolete_pdbs:\n hit_pdb_code = obsolete_pdbs[hit_pdb_code]\n\n # Pass hit_pdb_code since it might have changed due to the pdb being obsolete.\n try:\n _assess_hhsearch_hit(\n hit=hit,\n hit_pdb_code=hit_pdb_code,\n query_sequence=query_sequence,\n query_pdb_code=query_pdb_code,\n release_dates=release_dates,\n release_date_cutoff=max_template_date)\n except PrefilterError as e:\n msg = f'hit {hit_pdb_code}_{hit_chain_id} did not pass prefilter: {str(e)}'\n logger.info('%s: %s', query_pdb_code, msg)\n if strict_error_check and isinstance(\n e, (DateError, PdbIdError, DuplicateError)):\n # In strict mode we treat some prefilter cases as errors.\n return SingleHitResult(features=None, error=msg, warning=None)\n\n return SingleHitResult(features=None, error=None, 
warning=None)\n\n mapping = _build_query_to_hit_index_mapping(\n hit.query, hit.hit_sequence, hit.indices_hit, hit.indices_query,\n query_sequence)\n\n # The mapping is from the query to the actual hit sequence, so we need to\n # remove gaps (which regardless have a missing confidence score).\n template_sequence = hit.hit_sequence.replace('-', '')\n\n cif_path = os.path.join(mmcif_dir, hit_pdb_code + '.cif')\n logger.info('Reading PDB entry from %s. Query: %s, template: %s',\n cif_path, query_sequence, template_sequence)\n # Fail if we can't find the mmCIF file.\n with open(cif_path, 'r') as cif_file:\n cif_string = cif_file.read()\n\n parsing_result = mmcif_parsing.parse(\n file_id=hit_pdb_code, mmcif_string=cif_string)\n\n if parsing_result.mmcif_object is not None:\n hit_release_date = datetime.datetime.strptime(\n parsing_result.mmcif_object.header['release_date'], '%Y-%m-%d')\n if hit_release_date > max_template_date:\n error = ('Template %s date (%s) > max template date (%s).' %\n (hit_pdb_code, hit_release_date, max_template_date))\n if strict_error_check:\n return SingleHitResult(features=None, error=error, warning=None)\n else:\n logger.warning(error)\n return SingleHitResult(features=None, error=None, warning=None)\n\n try:\n features, realign_warning = _extract_template_features(\n mmcif_object=parsing_result.mmcif_object,\n pdb_id=hit_pdb_code,\n mapping=mapping,\n template_sequence=template_sequence,\n query_sequence=query_sequence,\n template_chain_id=hit_chain_id,\n kalign_binary_path=kalign_binary_path)\n features['template_sum_probs'] = [hit.sum_probs]\n\n # It is possible there were some errors when parsing the other chains in the\n # mmCIF file, but the template features for the chain we want were still\n # computed. In such case the mmCIF parsing errors are not relevant.\n return SingleHitResult(\n features=features, error=None, warning=realign_warning)\n except (NoChainsError, NoAtomDataInTemplateError,\n TemplateAtomMaskAllZerosError) as e:\n # These 3 errors indicate missing mmCIF experimental data rather than a\n # problem with the template search, so turn them into warnings.\n warning = ('%s_%s (sum_probs: %.2f, rank: %d): feature extracting errors: '\n '%s, mmCIF parsing errors: %s'\n % (hit_pdb_code, hit_chain_id, hit.sum_probs, hit.index,\n str(e), parsing_result.errors))\n if strict_error_check:\n return SingleHitResult(features=None, error=warning, warning=None)\n else:\n return SingleHitResult(features=None, error=None, warning=warning)\n except Error as e:\n error = ('%s_%s (sum_probs: %.2f, rank: %d): feature extracting errors: '\n '%s, mmCIF parsing errors: %s'\n % (hit_pdb_code, hit_chain_id, hit.sum_probs, hit.index,\n str(e), parsing_result.errors))\n return SingleHitResult(features=None, error=error, warning=None)\n\n\[email protected](frozen=True)\nclass TemplateSearchResult:\n features: Mapping[str, Any]\n errors: Sequence[str]\n warnings: Sequence[str]\n\n\nclass TemplateHitFeaturizer:\n \"\"\"A class for turning hhr hits to template features.\"\"\"\n\n def __init__(\n self,\n mmcif_dir: str,\n max_template_date: str,\n max_hits: int,\n kalign_binary_path: str,\n release_dates_path: Optional[str],\n obsolete_pdbs_path: Optional[str],\n strict_error_check: bool = False):\n \"\"\"Initializes the Template Search.\n\n Args:\n mmcif_dir: Path to a directory with mmCIF structures. Once a template ID\n is found by HHSearch, this directory is used to retrieve the template\n data.\n max_template_date: The maximum date permitted for template structures. 
No\n template with date higher than this date will be returned. In ISO8601\n date format, YYYY-MM-DD.\n max_hits: The maximum number of templates that will be returned.\n kalign_binary_path: The path to a kalign executable used for template\n realignment.\n release_dates_path: An optional path to a file with a mapping from PDB IDs\n to their release dates. Thanks to this we don't have to redundantly\n parse mmCIF files to get that information.\n obsolete_pdbs_path: An optional path to a file containing a mapping from\n obsolete PDB IDs to the PDB IDs of their replacements.\n strict_error_check: If True, then the following will be treated as errors:\n * If any template date is after the max_template_date.\n * If any template has identical PDB ID to the query.\n * If any template is a duplicate of the query.\n * Any feature computation errors.\n \"\"\"\n self._mmcif_dir = mmcif_dir\n if not glob.glob(os.path.join(self._mmcif_dir, '*.cif')):\n logger.error('Could not find CIFs in %s', self._mmcif_dir)\n raise ValueError(f'Could not find CIFs in {self._mmcif_dir}')\n\n try:\n self._max_template_date = datetime.datetime.strptime(\n max_template_date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\n 'max_template_date must be set and have format YYYY-MM-DD.')\n self._max_hits = max_hits\n self._kalign_binary_path = kalign_binary_path\n self._strict_error_check = strict_error_check\n\n if release_dates_path:\n logger.info('Using precomputed release dates %s.', release_dates_path)\n self._release_dates = _parse_release_dates(release_dates_path)\n else:\n self._release_dates = {}\n\n if obsolete_pdbs_path:\n logger.info('Using precomputed obsolete pdbs %s.', obsolete_pdbs_path)\n self._obsolete_pdbs = _parse_obsolete(obsolete_pdbs_path)\n else:\n self._obsolete_pdbs = {}\n\n def get_templates(\n self,\n query_sequence: str,\n query_pdb_code: Optional[str],\n query_release_date: Optional[datetime.datetime],\n hits: Sequence[parsers.TemplateHit]) -> TemplateSearchResult:\n \"\"\"Computes the templates for given query sequence (more details above).\"\"\"\n logger.info('Searching for template for: %s', query_pdb_code)\n\n template_features = {}\n for template_feature_name in TEMPLATE_FEATURES:\n template_features[template_feature_name] = []\n\n # Always use a max_template_date. Set to query_release_date minus 60 days\n # if that's earlier.\n template_cutoff_date = self._max_template_date\n if query_release_date:\n delta = datetime.timedelta(days=60)\n if query_release_date - delta < template_cutoff_date:\n template_cutoff_date = query_release_date - delta\n assert template_cutoff_date < query_release_date\n assert template_cutoff_date <= self._max_template_date\n\n num_hits = 0\n errors = []\n warnings = []\n\n for hit in sorted(hits, key=lambda x: x.sum_probs, reverse=True):\n # We got all the templates we wanted, stop processing hits.\n if num_hits >= self._max_hits:\n break\n\n result = _process_single_hit(\n query_sequence=query_sequence,\n query_pdb_code=query_pdb_code,\n hit=hit,\n mmcif_dir=self._mmcif_dir,\n max_template_date=template_cutoff_date,\n release_dates=self._release_dates,\n obsolete_pdbs=self._obsolete_pdbs,\n strict_error_check=self._strict_error_check,\n kalign_binary_path=self._kalign_binary_path)\n\n if result.error:\n errors.append(result.error)\n\n # There could be an error even if there are some results, e.g. 
thrown by\n # other unparsable chains in the same mmCIF file.\n if result.warning:\n warnings.append(result.warning)\n\n if result.features is None:\n logger.info('Skipped invalid hit %s, error: %s, warning: %s',\n hit.name, result.error, result.warning)\n else:\n # Increment the hit counter, since we got features out of this hit.\n num_hits += 1\n for k in template_features:\n template_features[k].append(result.features[k])\n\n for name in template_features:\n if num_hits > 0:\n template_features[name] = np.stack(\n template_features[name], axis=0).astype(TEMPLATE_FEATURES[name])\n else:\n # Make sure the feature has correct dtype even if empty.\n template_features[name] = np.array([], dtype=TEMPLATE_FEATURES[name])\n\n return TemplateSearchResult(\n features=template_features, errors=errors, warnings=warnings)\n", "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for relax.\"\"\"\n\nimport os\n\nfrom absl.testing import absltest\nfrom alphafold_paddle.common import protein\nfrom alphafold_paddle.relax import relax\nimport numpy as np\n# Internal import (7716).\n\n\nclass RunAmberRelaxTest(absltest.TestCase):\n \"\"\"tbd.\"\"\"\n def setUp(self):\n \"\"\"tbd.\"\"\"\n super().setUp()\n self.test_dir = os.path.join(\n absltest.get_default_test_srcdir(),\n 'alphafold/relax/testdata/')\n self.test_config = {\n 'max_iterations': 1,\n 'tolerance': 2.39,\n 'stiffness': 10.0,\n 'exclude_residues': [],\n 'max_outer_iterations': 1}\n\n def test_process(self):\n \"\"\"tbd.\"\"\"\n amber_relax = relax.AmberRelaxation(**self.test_config)\n\n with open(os.path.join(self.test_dir, 'model_output.pdb')) as f:\n test_prot = protein.from_pdb_string(f.read())\n pdb_min, debug_info, num_violations = amber_relax.process(prot=test_prot)\n\n self.assertCountEqual(debug_info.keys(),\n set({'initial_energy', 'final_energy',\n 'attempts', 'rmsd'}))\n self.assertLess(debug_info['final_energy'], debug_info['initial_energy'])\n self.assertGreater(debug_info['rmsd'], 0)\n\n prot_min = protein.from_pdb_string(pdb_min)\n # Most protein properties should be unchanged.\n np.testing.assert_almost_equal(test_prot.aatype, prot_min.aatype)\n np.testing.assert_almost_equal(test_prot.residue_index,\n prot_min.residue_index)\n # Atom mask and bfactors identical except for terminal OXT of last residue.\n np.testing.assert_almost_equal(test_prot.atom_mask[:-1, :],\n prot_min.atom_mask[:-1, :])\n np.testing.assert_almost_equal(test_prot.b_factors[:-1, :],\n prot_min.b_factors[:-1, :])\n np.testing.assert_almost_equal(test_prot.atom_mask[:, :-1],\n prot_min.atom_mask[:, :-1])\n np.testing.assert_almost_equal(test_prot.b_factors[:, :-1],\n prot_min.b_factors[:, :-1])\n # There are no residues with violations.\n np.testing.assert_equal(num_violations, np.zeros_like(num_violations))\n\n def test_unresolved_violations(self):\n \"\"\"tbd.\"\"\"\n amber_relax = relax.AmberRelaxation(**self.test_config)\n with open(os.path.join(self.test_dir,\n 
'with_violations_casp14.pdb')) as f:\n test_prot = protein.from_pdb_string(f.read())\n _, _, num_violations = amber_relax.process(prot=test_prot)\n exp_num_violations = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,\n 0, 0, 0, 0])\n # Check no violations were added. Can't check exactly due to stochasticity.\n self.assertTrue(np.all(num_violations <= exp_num_violations))\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.split", "numpy.linalg.norm", "numpy.stack", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.all", "numpy.testing.assert_almost_equal", "numpy.array", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
InzamamRahaman/PoincareDiskEmbedding
[ "89261bcf544e9d6c144ace2697400e2ffb489f94" ]
[ "model.py" ]
[ "import torch\nimport torch.nn as nn\nimport util\n\nclass Model(nn.Module):\n\n def __init__(self, vocab_size, embedding_dim=10):\n super(Model, self).__init__()\n self.embedding_dim = embedding_dim\n self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.embeddings.weight.data.uniform_(-0.001, 0.001)\n\n def forward(self, u, v, negatives):\n u_emb = self.embeddings(u)\n v_emb = self.embeddings(v)\n negative_embeddings = self.embeddings(negatives)\n numer = torch.exp(-1 * util.hyperbolic_distance(u_emb, v_emb))\n denom = torch.exp(-1 * util.hyperbolic_distance(u_emb, negative_embeddings))\n denom = torch.sum(denom)\n # print('Numer: ', torch.log(numer))\n # print('Denom: ', torch.log(denom))\n # res = torch.log(numer) - torch.log(denom)\n res = numer / denom\n return res\n\n def fit(self, graph, alpha=0.10, iter=5, negative_samples=10, c=10):\n loss = 0\n for i in range(iter):\n loss = 0\n self.zero_grad()\n for u, v, negs in graph.get_examples(negative_samples):\n loss += self.forward(u, v, negs)\n print('Loss at iteration ', i, ' is ', loss.data[0])\n loss.backward()\n for theta in self.parameters():\n beta = -alpha\n if i < 10:\n beta /= c\n tensor_vals = torch.pow(util.metric_tensor(theta), -1)\n multed = tensor_vals.mul(beta).repeat(1, self.embedding_dim)\n grads = theta.grad\n scaled_gradient = multed * grads\n theta.data.add_(scaled_gradient.data)\n theta.data = util.proj(theta.data)\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.sum", "torch.nn.Embedding" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
snakeice/fklearn
[ "80497dd403c582bda0e796c62219f0636d15d074", "80497dd403c582bda0e796c62219f0636d15d074" ]
[ "src/fklearn/training/transformation.py", "tests/training/test_pipeline.py" ]
[ "from typing import Any, Callable, Dict, List, Union, Optional\n\nimport numpy as np\nimport pandas as pd\nfrom numpy import nan\nimport swifter # NOQA\nfrom sklearn.preprocessing import StandardScaler\nfrom statsmodels.distributions import empirical_distribution as ed\nfrom toolz import curry, merge, compose, mapcat\nfrom fklearn.common_docstrings import learner_return_docstring, learner_pred_fn_docstring\nfrom fklearn.training.utils import log_learner_time\nfrom fklearn.types import LearnerReturnType, LearnerLogType\n\n\n@curry\n@log_learner_time(learner_name='selector')\ndef selector(df: pd.DataFrame,\n training_columns: List[str],\n predict_columns: List[str] = None) -> LearnerReturnType:\n \"\"\"\n Filters a DataFrames by selecting only the desired columns.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns`\n\n training_columns : list of str\n A list of column names that will remain in the dataframe during training time (fit)\n\n predict_columns: list of str\n A list of column names that will remain in the dataframe during prediction time (transform)\n If None, it defaults to `training_columns`.\n \"\"\"\n\n if predict_columns is None:\n predict_columns = training_columns\n\n def p(new_data_set: pd.DataFrame) -> pd.DataFrame:\n return new_data_set[predict_columns]\n\n p.__doc__ = learner_pred_fn_docstring(\"selector\")\n\n log = {'selector': {\n 'training_columns': training_columns,\n 'predict_columns': predict_columns,\n 'transformed_column': list(set(training_columns).union(predict_columns))}}\n\n return p, df[training_columns], log\n\n\nselector.__doc__ += learner_return_docstring(\"Selector\")\n\n\n@curry\n@log_learner_time(learner_name='capper')\ndef capper(df: pd.DataFrame,\n columns_to_cap: List[str],\n precomputed_caps: Dict[str, float] = None) -> LearnerReturnType:\n \"\"\"\n Learns the maximum value for each of the `columns_to_cap`\n and used that as the cap for those columns. If precomputed caps\n are passed, the function uses that as the cap value instead of\n computing the maximum.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns_to_cap` columns.\n\n columns_to_cap : list of str\n A list os column names that should be caped.\n\n precomputed_caps : dict\n A dictionary on the format {\"column_name\" : cap_value}.\n That maps column names to pre computed cap values\n \"\"\"\n\n if not precomputed_caps:\n precomputed_caps = {}\n\n caps = {col: precomputed_caps.get(col, df[col].max()) for col in columns_to_cap}\n\n def p(new_data_set: pd.DataFrame) -> pd.DataFrame:\n capped_cols = {col: new_data_set[col].clip(upper=caps[col]) for col in caps.keys()}\n return new_data_set.assign(**capped_cols)\n\n p.__doc__ = learner_pred_fn_docstring(\"capper\")\n\n log = {'capper': {\n 'caps': caps,\n 'transformed_column': columns_to_cap,\n 'precomputed_caps': precomputed_caps}}\n\n return p, p(df), log\n\n\ncapper.__doc__ += learner_return_docstring(\"Capper\")\n\n\n@curry\n@log_learner_time(learner_name='floorer')\ndef floorer(df: pd.DataFrame,\n columns_to_floor: List[str],\n precomputed_floors: Dict[str, float] = None) -> LearnerReturnType:\n \"\"\"\n Learns the minimum value for each of the `columns_to_floor`\n and used that as the floot for those columns. 
If precomputed floors\n are passed, the function uses that as the cap value instead of\n computing the minimun.\n\n Parameters\n ----------\n\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns_to_floor` columns.\n\n columns_to_floor : list of str\n A list os column names that should be floored.\n\n precomputed_floors : dict\n A dictionary on the format {\"column_name\" : floor_value}\n that maps column names to pre computed floor values\n \"\"\"\n\n if not precomputed_floors:\n precomputed_floors = {}\n\n floors = {col: precomputed_floors.get(col, df[col].min()) for col in columns_to_floor}\n\n def p(new_data_set: pd.DataFrame) -> pd.DataFrame:\n capped_cols = {col: new_data_set[col].clip(lower=floors[col]) for col in floors.keys()}\n return new_data_set.assign(**capped_cols)\n\n p.__doc__ = learner_pred_fn_docstring(\"floorer\")\n\n log = {'floorer': {\n 'floors': floors,\n 'transformed_column': columns_to_floor,\n 'precomputed_floors': precomputed_floors}}\n\n return p, p(df), log\n\n\nfloorer.__doc__ += learner_return_docstring(\"Floorer\")\n\n\n@curry\n@log_learner_time(learner_name='ecdfer')\ndef ecdfer(df: pd.DataFrame,\n ascending: bool = True,\n prediction_column: str = \"prediction\",\n ecdf_column: str = \"prediction_ecdf\",\n max_range: int = 1000) -> LearnerReturnType:\n \"\"\"\n Learns an Empirical Cumulative Distribution Function from the specified column\n in the input DataFrame. It is usually used in the prediction column to convert\n a predicted probability into a score from 0 to 1000.\n\n Parameters\n ----------\n df : Pandas' pandas.DataFrame\n A Pandas' DataFrame that must contain a `prediction_column` columns.\n\n ascending : bool\n Whether to compute an ascending ECDF or a descending one.\n\n prediction_column : str\n The name of the column in `df` to learn the ECDF from.\n\n ecdf_column : str\n The name of the new ECDF column added by this function\n\n max_range : int\n The maximum value for the ECDF. It will go will go\n from 0 to max_range.\n \"\"\"\n\n if ascending:\n base = 0\n sign = 1\n else:\n base = max_range\n sign = -1\n\n values = df[prediction_column]\n\n ecdf = ed.ECDF(values)\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n return new_df.assign(**{ecdf_column: (base + sign * max_range * ecdf(new_df[prediction_column]))})\n\n p.__doc__ = learner_pred_fn_docstring(\"ecdefer\")\n\n log = {'ecdfer': {\n 'nobs': len(values),\n 'prediction_column': prediction_column,\n 'ascending': ascending,\n 'transformed_column': [ecdf_column]}}\n\n return p, p(df), log\n\n\necdfer.__doc__ += learner_return_docstring(\"ECDFer\")\n\n\n@curry\n@log_learner_time(learner_name='discrete_ecdfer')\ndef discrete_ecdfer(df: pd.DataFrame,\n ascending: bool = True,\n prediction_column: str = \"prediction\",\n ecdf_column: str = \"prediction_ecdf\",\n max_range: int = 1000,\n round_method: Callable = int) -> LearnerReturnType:\n \"\"\"\n Learns an Empirical Cumulative Distribution Function from the specified column\n in the input DataFrame. 
It is usually used in the prediction column to convert\n a predicted probability into a score from 0 to 1000.\n\n Parameters\n ----------\n df : Pandas' pandas.DataFrame\n A Pandas' DataFrame that must contain a `prediction_column` columns.\n\n ascending : bool\n Whether to compute an ascending ECDF or a descending one.\n\n prediction_column : str\n The name of the column in `df` to learn the ECDF from.\n\n ecdf_column : str\n The name of the new ECDF column added by this function.\n\n max_range : int\n The maximum value for the ECDF. It will go will go\n from 0 to max_range.\n\n round_method: Callable\n A function perform the round of transformed values for ex: (int, ceil, floor, round)\n \"\"\"\n\n if ascending:\n base = 0\n sign = 1\n else:\n base = max_range\n sign = -1\n\n values = df[prediction_column]\n\n ecdf = ed.ECDF(values)\n\n df_ecdf = pd.DataFrame()\n df_ecdf['x'] = ecdf.x\n df_ecdf['y'] = pd.Series(base + sign * max_range * ecdf.y).apply(round_method)\n\n boundaries = df_ecdf.groupby(\"y\").agg((min, max))[\"x\"][\"min\"].reset_index()\n\n y = boundaries[\"y\"]\n x = boundaries[\"min\"]\n side = ecdf.side\n\n log = {'discrete_ecdfer': {\n 'map': dict(zip(x, y)),\n 'round_method': round_method,\n 'nobs': len(values),\n 'prediction_column': prediction_column,\n 'ascending': ascending,\n 'transformed_column': [ecdf_column]}}\n\n del ecdf\n del values\n del df_ecdf\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n if not ascending:\n tind = np.searchsorted(-x, -new_df[prediction_column])\n else:\n tind = np.searchsorted(x, new_df[prediction_column], side) - 1\n\n return new_df.assign(**{ecdf_column: y[tind].values})\n\n return p, p(df), log\n\n\ndiscrete_ecdfer.__doc__ += learner_return_docstring(\"Discrete ECDFer\")\n\n\n@curry\ndef prediction_ranger(df: pd.DataFrame,\n prediction_min: float,\n prediction_max: float,\n prediction_column: str = \"prediction\") -> LearnerReturnType:\n \"\"\"\n Caps and floors the specified prediction column to a set range.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain a `prediction_column` columns.\n\n prediction_min : float\n The floor for the prediction.\n\n prediction_max : float\n The cap for the prediction.\n\n prediction_column : str\n The name of the column in `df` to cap and floor\n \"\"\"\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n return new_df.assign(\n **{prediction_column: new_df[prediction_column].clip(lower=prediction_min, upper=prediction_max)}\n )\n\n p.__doc__ = learner_pred_fn_docstring(\"prediction_ranger\")\n\n log = {'prediction_ranger': {\n 'prediction_min': prediction_min,\n 'prediction_max': prediction_max,\n 'transformed_column': [prediction_column]}}\n\n return p, p(df), log\n\n\nprediction_ranger.__doc__ += learner_return_docstring(\"Prediction Ranger\")\n\n\ndef apply_replacements(df: pd.DataFrame,\n columns: List[str],\n vec: Dict[str, Dict],\n replace_unseen: Any) -> pd.DataFrame:\n \"\"\"\n Base function to apply the replacements values found on the\n \"vec\" vectors into the df DataFrame.\n\n Parameters\n -----------\n\n df: pandas.DataFrame\n A Pandas DataFrame containing the data to be replaced.\n\n columns : list of str\n The df columns names to perform the replacements.\n\n vec: dict\n A dict mapping a col to dict mapping a value to its replacement. 
For example:\n vec = {\"feature1\": {1: 2, 3: 5, 6: 8}}\n\n replace_unseen: Any\n Default value to replace when original value is not present in the `vec` dict for the feature\n\n \"\"\"\n column_categorizer = lambda col: df[col].apply(lambda x: (np.nan\n if isinstance(x, float) and np.isnan(x)\n else vec[col].get(x, replace_unseen)))\n categ_columns = {col: column_categorizer(col) for col in columns}\n return df.assign(**categ_columns)\n\n\n@curry\n@log_learner_time(learner_name=\"value_mapper\")\ndef value_mapper(df: pd.DataFrame,\n value_maps: Dict[str, Dict],\n ignore_unseen: bool = True,\n replace_unseen_to: Any = np.nan) -> pd.DataFrame:\n \"\"\"\n Map values in selected columns in the DataFrame according to dictionaries of replacements.\n Learner wrapper for apply_replacements\n\n Parameters\n -----------\n\n df: pandas.DataFrame\n A Pandas DataFrame containing the data to be replaced.\n\n value_maps: dict of dicts\n A dict mapping a col to dict mapping a value to its replacement. For example:\n value_maps = {\"feature1\": {1: 2, 3: 5, 6: 8}}\n\n ignore_unseen: bool\n If True, values not explicitly declared in value_maps will be left as is.\n If False, these will be replaced by replace_unseen_to.\n\n replace_unseen_to: Any\n Default value to replace when original value is not present in the `vec` dict for the feature.\n \"\"\"\n\n def new_col_value_map(old_col_value_map: Dict[Any, Any],\n new_keys: List[Any]) -> Dict[Any, Dict]:\n old_keys = old_col_value_map.keys()\n return {key: old_col_value_map[key] if key in old_keys else key for key in new_keys}\n\n columns = list(value_maps.keys())\n if ignore_unseen:\n value_maps = {col: new_col_value_map(value_maps[col], list(df[col].unique())) for col in columns}\n\n def p(df: pd.DataFrame) -> pd.DataFrame:\n return apply_replacements(df, columns, value_maps, replace_unseen=replace_unseen_to)\n\n return p, p(df), {\"value_maps\": value_maps}\n\n\n@curry\n@log_learner_time(learner_name=\"truncate_categorical\")\ndef truncate_categorical(df: pd.DataFrame,\n columns_to_truncate: List[str],\n percentile: float,\n replacement: Union[str, float] = -9999,\n replace_unseen: Union[str, float] = -9999,\n store_mapping: bool = False) -> LearnerReturnType:\n \"\"\"\n Truncate infrequent categories and replace them by a single one.\n You can think of it like \"others\" category.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain a `prediction_column` columns.\n\n columns_to_truncate : list of str\n The df columns names to perform the truncation.\n\n percentile : float\n Categories less frequent than the percentile will be replaced by the\n same one.\n\n replacement: int, str, float or nan\n The value to use when a category is less frequent that the percentile\n variable.\n\n replace_unseen : int, str, float, or nan\n The value to impute unseen categories.\n\n store_mapping : bool (default: False)\n Whether to store the feature value -> integer dictionary in the log.\n \"\"\"\n get_categs = lambda col: (df[col].value_counts() / len(df)).to_dict()\n update = lambda d: map(lambda kv: (kv[0], replacement) if kv[1] <= percentile else (kv[0], kv[0]), d.items())\n categs_to_dict = lambda categ_dict: dict(categ_dict)\n\n vec = {column: compose(categs_to_dict, update, get_categs)(column) for column in columns_to_truncate}\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n return apply_replacements(new_df, columns_to_truncate, vec, replace_unseen)\n\n p.__doc__ = learner_pred_fn_docstring(\"truncate_categorical\")\n\n 
log: LearnerLogType = {'truncate_categorical': {\n 'transformed_column': columns_to_truncate,\n 'replace_unseen': replace_unseen}\n }\n\n if store_mapping:\n log[\"truncate_categorical\"][\"mapping\"] = vec\n\n return p, p(df), log\n\n\ntruncate_categorical.__doc__ += learner_return_docstring(\"Truncate Categorical\")\n\n\n@curry\n@log_learner_time(learner_name=\"rank_categorical\")\ndef rank_categorical(df: pd.DataFrame,\n columns_to_rank: List[str],\n replace_unseen: Union[str, float] = nan,\n store_mapping: bool = False) -> LearnerReturnType:\n \"\"\"\n Rank categorical features by their frequency in the train set.\n\n Parameters\n ----------\n df : Pandas' DataFrame\n A Pandas' DataFrame that must contain a `prediction_column` columns.\n\n columns_to_rank : list of str\n The df columns names to perform the rank.\n\n replace_unseen : int, str, float, or nan\n The value to impute unseen categories.\n\n store_mapping : bool (default: False)\n Whether to store the feature value -> integer dictionary in the log\n \"\"\"\n\n col_categ_getter = lambda col: (df[col]\n .value_counts()\n .reset_index()\n .sort_values([col, \"index\"], ascending=[False, True])\n .set_index(\"index\")[col]\n .rank(method=\"first\", ascending=False).to_dict())\n\n vec = {column: col_categ_getter(column) for column in columns_to_rank}\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n return apply_replacements(new_df, columns_to_rank, vec, replace_unseen)\n\n p.__doc__ = learner_pred_fn_docstring(\"rank_categorical\")\n\n log: LearnerLogType = {'rank_categorical': {\n 'transformed_column': columns_to_rank,\n 'replace_unseen': replace_unseen}\n }\n\n if store_mapping:\n log['rank_categorical']['mapping'] = vec\n\n return p, p(df), log\n\n\nrank_categorical.__doc__ += learner_return_docstring(\"Rank Categorical\")\n\n\n@curry\n@log_learner_time(learner_name='count_categorizer')\ndef count_categorizer(df: pd.DataFrame,\n columns_to_categorize: List[str],\n replace_unseen: int = -1,\n store_mapping: bool = False) -> LearnerReturnType:\n \"\"\"\n Replaces categorical variables by count.\n\n Parameters\n ----------\n\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns_to_categorize` columns.\n\n columns_to_categorize : list of str\n A list of categorical column names.\n\n replace_unseen : int\n The value to impute unseen categories.\n\n store_mapping : bool (default: False)\n Whether to store the feature value -> integer dictionary in the log\n \"\"\"\n\n categ_getter = lambda col: df[col].value_counts().to_dict()\n vec = {column: categ_getter(column) for column in columns_to_categorize}\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n return apply_replacements(new_df, columns_to_categorize, vec, replace_unseen)\n\n p.__doc__ = learner_pred_fn_docstring(\"count_categorizer\")\n\n log: LearnerLogType = {'count_categorizer': {\n 'transformed_column': columns_to_categorize,\n 'replace_unseen': replace_unseen}\n }\n\n if store_mapping:\n log['count_categorizer']['mapping'] = vec\n\n return p, p(df), log\n\n\ncount_categorizer.__doc__ += learner_return_docstring(\"Count Categorizer\")\n\n\n@curry\n@log_learner_time(learner_name='label_categorizer')\ndef label_categorizer(df: pd.DataFrame,\n columns_to_categorize: List[str],\n replace_unseen: Union[str, float] = nan,\n store_mapping: bool = False) -> LearnerReturnType:\n \"\"\"\n Replaces categorical variables with a numeric identifier.\n\n Parameters\n ----------\n\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns_to_categorize` 
columns.\n\n columns_to_categorize : list of str\n A list of categorical column names.\n\n replace_unseen : int, str, float, or nan\n The value to impute unseen categories.\n\n store_mapping : bool (default: False)\n Whether to store the feature value -> integer dictionary in the log\n \"\"\"\n\n def categ_dict(series: pd.Series) -> Dict:\n categs = series.dropna().unique()\n return dict(map(reversed, enumerate(categs))) # type: ignore\n\n vec = {column: categ_dict(df[column]) for column in columns_to_categorize}\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n return apply_replacements(new_df, columns_to_categorize, vec, replace_unseen)\n\n p.__doc__ = learner_pred_fn_docstring(\"label_categorizer\")\n\n log: LearnerLogType = {'label_categorizer': {\n 'transformed_column': columns_to_categorize,\n 'replace_unseen': replace_unseen}\n }\n\n if store_mapping:\n log['label_categorizer']['mapping'] = vec\n\n return p, p(df), log\n\n\nlabel_categorizer.__doc__ += learner_return_docstring(\"Label Categorizer\")\n\n\n@curry\n@log_learner_time(learner_name='quantile_biner')\ndef quantile_biner(df: pd.DataFrame,\n columns_to_bin: List[str],\n q: int = 4,\n right: bool = False) -> LearnerReturnType:\n \"\"\"\n Discretize continuous numerical columns into its quantiles. Uses pandas.qcut\n to find the bins and then numpy.digitize to fit the columns into bins.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns_to_categorize` columns.\n\n columns_to_bin : list of str\n A list of numerical column names.\n\n q : int\n Number of quantiles. 10 for deciles, 4 for quartiles, etc.\n Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.\n See https://pandas.pydata.org/pandas-docs/stable/generated/pandas.qcut.html\n\n right : bool\n Indicating whether the intervals include the right or the left bin edge.\n Default behavior is (right==False) indicating that the interval does not\n include the right edge. 
The left bin end is open in this case, i.e., bins[i-1]\n <= x < bins[i] is the default behavior for monotonically increasing bins.\n See https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.digitize.html\n \"\"\"\n\n bin_getter = lambda col: pd.qcut(df[col], q, retbins=True)[1]\n bins = {column: bin_getter(column) for column in columns_to_bin}\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n col_biner = lambda col: np.where(new_df[col].isnull(), nan, np.digitize(new_df[col], bins[col], right=right))\n bined_columns = {col: col_biner(col) for col in columns_to_bin}\n return new_df.assign(**bined_columns)\n\n p.__doc__ = learner_pred_fn_docstring(\"quantile_biner\")\n\n log = {'quantile_biner': {\n 'transformed_column': columns_to_bin,\n 'q': q}}\n\n return p, p(df), log\n\n\nquantile_biner.__doc__ += learner_return_docstring(\"Quantile Biner\")\n\n\n@curry\n@log_learner_time(learner_name='onehot_categorizer')\ndef onehot_categorizer(df: pd.DataFrame,\n columns_to_categorize: List[str],\n hardcode_nans: bool = False,\n drop_first_column: bool = False,\n store_mapping: bool = False) -> LearnerReturnType:\n \"\"\"\n Onehot encoding on categorical columns.\n Encoded columns are removed and substituted by columns named\n `fklearn_feat__col==val`, where `col` is the name of the column\n and `val` is one of the values the feature can assume.\n\n Parameters\n ----------\n df : pd.DataFrame\n A Pandas' DataFrame that must contain `columns_to_categorize` columns.\n\n columns_to_categorize : list of str\n A list of categorical column names. Must be non-empty.\n\n hardcode_nans : bool\n Hardcodes an extra column with: 1 if nan or unseen else 0.\n\n drop_first_column : bool\n Drops the first column to create (k-1)-sized one-hot arrays for k\n features per categorical column. 
Can be used to avoid colinearity.\n\n store_mapping : bool (default: False)\n Whether to store the feature value -> integer dictionary in the log\n \"\"\"\n\n categ_getter = lambda col: list(np.sort(df[col].dropna(axis=0, how='any').unique()))\n vec = {column: categ_getter(column) for column in sorted(columns_to_categorize)}\n\n def p(new_df: pd.DataFrame) -> pd.DataFrame:\n make_dummies = lambda col: dict(map(lambda categ: (\"fklearn_feat__\" + col + \"==\" + str(categ),\n (new_df[col] == categ).astype(int)),\n vec[col]))\n\n oh_cols = dict(mapcat(lambda col: merge(make_dummies(col),\n {\"fklearn_feat__\" + col + \"==\" + \"nan\":\n (~new_df[col].isin(vec[col])).astype(int)} if hardcode_nans\n else {}).items(),\n columns_to_categorize))\n\n return new_df.assign(**oh_cols).drop(columns_to_categorize, axis=1)\n\n p.__doc__ = learner_pred_fn_docstring(\"onehot_categorizer\")\n\n log = {'onehot_categorizer': {\n 'transformed_column': columns_to_categorize,\n 'hardcode_nans': hardcode_nans,\n 'drop_first_column': drop_first_column}}\n\n if store_mapping:\n log['onehot_categorizer']['mapping'] = vec\n\n return p, p(df), log\n\n\nonehot_categorizer.__doc__ += learner_return_docstring(\"Onehot Categorizer\")\n\n\n@curry\n@log_learner_time(learner_name='standard_scaler')\ndef standard_scaler(df: pd.DataFrame,\n columns_to_scale: List[str]) -> LearnerReturnType:\n \"\"\"\n Fits a standard scaler to the dataset.\n\n Parameters\n ----------\n\n df : pandas.DataFrame\n A Pandas' DataFrame with columns to scale.\n It must contain all columns listed in `columns_to_scale`.\n\n columns_to_scale : list of str\n A list of names of the columns for standard scaling.\n \"\"\"\n\n scaler = StandardScaler()\n\n scaler.fit(df[columns_to_scale].values)\n\n def p(new_data_set: pd.DataFrame) -> pd.DataFrame:\n new_data = scaler.transform(new_data_set[columns_to_scale].values)\n new_cols = pd.DataFrame(data=new_data, columns=columns_to_scale).to_dict('list')\n return new_data_set.assign(**new_cols)\n\n p.__doc__ = learner_pred_fn_docstring(\"standard_scaler\")\n\n log = {'standard_scaler': {\n 'standard_scaler': scaler.get_params(),\n 'transformed_column': columns_to_scale}}\n\n return p, p(df), log\n\n\nstandard_scaler.__doc__ += learner_return_docstring(\"Standard Scaler\")\n\n\n@curry\n@log_learner_time(learner_name='custom_transformer')\ndef custom_transformer(df: pd.DataFrame,\n columns_to_transform: List[str],\n transformation_function: Callable[[pd.DataFrame], pd.DataFrame],\n is_vectorized: bool = False) -> LearnerReturnType:\n \"\"\"\n Applies a custom function to the desired columns.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns`\n\n columns_to_transform : list of str\n A list of column names that will remain in the dataframe during training time (fit)\n\n transformation_function : function(pandas.DataFrame) -> pandas.DataFrame\n A function that receives a DataFrame as input, performs a transformation on its columns\n and returns another DataFrame.\n\n \"\"\"\n\n def p(df: pd.DataFrame) -> pd.DataFrame:\n if is_vectorized:\n return df.assign(**{col: transformation_function(df[col]) for col in columns_to_transform})\n\n return df.assign(**{col: df[col].swifter.apply(transformation_function) for col in columns_to_transform})\n\n p.__doc__ = learner_pred_fn_docstring(\"custom_transformer\")\n\n log = {'custom_transformer': {\n 'transformed_column': columns_to_transform,\n 'transformation_function': transformation_function.__name__}\n }\n\n return p, p(df), 
log\n\n\ncustom_transformer.__doc__ += learner_return_docstring(\"Custom Transformer\")\n\n\n@curry\n@log_learner_time(learner_name='null_injector')\ndef null_injector(df: pd.DataFrame,\n proportion: float,\n columns_to_inject: Optional[List[str]] = None,\n groups: Optional[List[List[str]]] = None,\n seed: int = 1) -> LearnerReturnType:\n \"\"\"\n Applies a custom function to the desired columns.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A Pandas' DataFrame that must contain `columns_to_inject` as columns\n\n columns_to_inject : list of str\n A list of features to inject nulls. If groups is not None it will be ignored.\n\n proportion : float\n Proportion of nulls to inject in the columns.\n\n groups : list of list of str (default = None)\n A list of group of features. If not None, feature in the same group will be set to NaN together.\n\n seed : int\n Random seed for consistency.\n \"\"\"\n assert (proportion > 0.0) & (proportion < 1.0), \"proportions must be between 0 and 1\"\n assert (columns_to_inject is None) ^ (groups is None), \"Either columns_to_inject or groups must be None.\"\n\n n_rows = df.shape[0]\n\n groups = [[f] for f in columns_to_inject] if columns_to_inject is not None else groups\n\n null_cols = {} # type: ignore\n for seed_i, group in enumerate(groups): # type: ignore\n np.random.seed(seed + seed_i)\n replace_mask = np.random.binomial(1, 1 - proportion, n_rows).astype(bool)\n null_cols = merge(null_cols, {feature: df[feature].where(replace_mask) for feature in group})\n\n null_data = df.assign(**null_cols)\n\n def p(new_data_set: pd.DataFrame) -> pd.DataFrame:\n return new_data_set\n\n p.__doc__ = learner_pred_fn_docstring(\"null_injector\")\n\n log = {'null_injector': {\n \"columns_to_inject\": columns_to_inject,\n \"proportion\": proportion,\n \"groups\": groups\n }}\n\n return p, null_data, log\n\n\nnull_injector.__doc__ += learner_return_docstring(\"Null Injector\")\n\n\n@curry\n@log_learner_time(learner_name='missing_warner')\ndef missing_warner(df: pd.DataFrame, cols_list: List[str],\n new_column_name: str = \"has_unexpected_missing\",\n detailed_warning: bool = False,\n detailed_column_name: Optional[str] = None) -> LearnerReturnType:\n \"\"\"\n Creates a new column to warn about rows that columns that don't have missing in the training set\n but have missing on the scoring\n\n Parameters\n ----------\n\n df : pandas.DataFrame\n A Pandas' DataFrame.\n\n cols_list : list of str\n List of columns to consider when evaluating missingness\n\n new_column_name : str\n Name of the column created to alert the existence of missing values\n \"\"\"\n\n assert ((detailed_warning and detailed_column_name) or ((not detailed_warning) and (\n not detailed_column_name))), \"Either detailed_warning and detailed_column_name \" \\\n \"should be defined or both should be False.\"\n\n df_selected = df[cols_list]\n cols_without_missing = df_selected.loc[:, df_selected.isna().sum(axis=0) == 0].columns.tolist()\n\n def p(dataset: pd.DataFrame) -> pd.DataFrame:\n def detailed_assignment(df: pd.DataFrame, cols_to_check: List[str]) -> np.array:\n cols_with_missing = np.array([np.where(df[col].isna(), col, \"\") for col in cols_to_check]).T\n missing_by_row_list = np.array([list(filter(None, x)) for x in cols_with_missing]).reshape(-1, 1)\n if missing_by_row_list.size == 0:\n return np.empty((df.shape[0], 0)).tolist()\n else:\n return missing_by_row_list\n\n new_dataset = dataset.assign(**{new_column_name: lambda df: df[cols_without_missing].isna().sum(axis=1) > 0})\n if 
detailed_warning and detailed_column_name:\n missing_by_row_list = detailed_assignment(new_dataset, cols_without_missing)\n return new_dataset.assign(**{detailed_column_name: missing_by_row_list})\n else:\n return new_dataset\n\n p.__doc__ = learner_pred_fn_docstring(\"missing_warner\")\n\n log = {\"missing_warner\": {\n \"cols_list\": cols_list,\n \"cols_without_missing\": cols_without_missing}\n }\n\n return p, df, log\n\n\nmissing_warner.__doc__ += learner_return_docstring(\"Missing Alerter\")\n", "import itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport toolz as fp\n\nfrom fklearn.training.imputation import placeholder_imputer\nfrom fklearn.training.pipeline import build_pipeline\nfrom fklearn.training.regression import xgb_regression_learner\nfrom fklearn.training.transformation import count_categorizer, onehot_categorizer\n\n\ndef test_build_pipeline():\n df_train = pd.DataFrame({\n 'id': [\"id1\", \"id2\", \"id3\", \"id4\", \"id3\", \"id4\"],\n 'x1': [10.0, 13.0, 10.0, 13.0, None, 13.0],\n \"x2\": [0, 1, 1, 0, 1, 0],\n \"cat\": [\"c1\", \"c1\", \"c2\", None, \"c2\", \"c4\"],\n 'y': [2.3, 4.0, 100.0, -3.9, 100.0, -3.9]\n })\n\n df_test = pd.DataFrame({\n 'id': [\"id4\", \"id4\", \"id5\", \"id6\", \"id5\", \"id6\"],\n 'x1': [12.0, 1000.0, -4.0, 0.0, -4.0, 0.0],\n \"x2\": [1, 1, 0, None, 0, 1],\n \"cat\": [\"c1\", \"c2\", \"c5\", None, \"c2\", \"c3\"],\n 'y': [1.3, -4.0, 0.0, 49, 0.0, 49]\n })\n\n features = [\"x1\", \"x2\", \"cat\"]\n target = \"y\"\n\n train_fn = build_pipeline(\n placeholder_imputer(columns_to_impute=features, placeholder_value=-999),\n count_categorizer(columns_to_categorize=[\"cat\"]),\n xgb_regression_learner(features=features,\n target=target,\n num_estimators=20,\n extra_params={\"seed\": 42}))\n\n predict_fn, pred_train, log = train_fn(df_train)\n\n pred_test_with_shap = predict_fn(df_test, apply_shap=True)\n assert set(pred_test_with_shap.columns) - set(pred_train.columns) == {\"shap_values\", \"shap_expected_value\"}\n\n pred_test_without_shap = predict_fn(df_test)\n assert set(pred_test_without_shap.columns) == set(pred_train.columns)\n\n pd.util.testing.assert_frame_equal(pred_test_with_shap[pred_test_without_shap.columns], pred_test_without_shap)\n\n\ndef test_build_pipeline_no_side_effects():\n test_df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [2, 4, 6, 8, 10]})\n orig_df = test_df.copy()\n\n def side_effect_learner(df):\n df['side_effect1'] = df['x'] * 2\n return lambda dataset: dataset, df, {}\n\n def kwargs_learner(df):\n df['side_effect2'] = df['y'] * 2\n\n def p(dataset, mult=2):\n return dataset.assign(x=dataset.x * mult)\n\n return p, p(df), {}\n\n side_effect_pipeline = build_pipeline(side_effect_learner, kwargs_learner)\n side_effect_pipeline(test_df)\n\n pd.util.testing.assert_frame_equal(test_df, orig_df)\n\n\ndef test_build_pipeline_idempotency():\n test_df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [2, 4, 6, 8, 10]})\n orig_df = test_df.copy()\n\n mult_constant = 2\n expected_df = pd.DataFrame({\"x\": np.array([1, 2, 3, 4, 5]) * mult_constant, \"y\": [2, 4, 6, 8, 10]})\n\n def kwargs_learner(df):\n def p(dataset, mult):\n return dataset.assign(x=dataset.x * mult)\n\n return p, p(df, mult_constant), {\"kwargs_learner\": {\"mult_constant\": mult_constant}}\n\n def dummy_learner(df):\n return lambda dataset: dataset, df, {\"dummy_learner\": {\"dummy\": {}}}\n\n for variation in itertools.permutations([dummy_learner, kwargs_learner, dummy_learner]):\n side_effect_pipeline = build_pipeline(*variation)\n 
predict_fn, result_df, log = side_effect_pipeline(test_df)\n\n pd.util.testing.assert_frame_equal(test_df, orig_df)\n pd.util.testing.assert_frame_equal(result_df, expected_df)\n pd.util.testing.assert_frame_equal(predict_fn(test_df, mult=mult_constant), expected_df)\n\n\ndef test_build_pipeline_learner_assertion():\n @fp.curry\n def learner(df, a, b, c=3):\n return lambda dataset: dataset + a + b + c, df, {}\n\n learner_fn = learner(b=2)\n\n with pytest.raises(AssertionError):\n build_pipeline(learner_fn)\n\n learner_fn = learner(a=1, b=2)\n\n build_pipeline(learner_fn)\n\n\ndef test_build_pipeline_predict_arguments_assertion():\n test_df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [2, 4, 6, 8, 10]})\n\n @fp.curry\n def invalid_learner(df):\n def p(dataset, *a, **b):\n return dataset + len(a) + len(b)\n\n return p, df, {}\n\n with pytest.raises(AssertionError):\n build_pipeline(invalid_learner)(test_df)\n\n\ndef test_build_pipeline_serialisation():\n df_train = pd.DataFrame({\n 'id': [\"id1\"],\n 'x1': [10.0],\n 'y': [2.3]\n })\n\n fn = lambda x: x\n\n @fp.curry\n def dummy_learner(df, fn, call):\n return fn, df, {f\"dummy_learner_{call}\": {}}\n\n @fp.curry\n def dummy_learner_2(df, fn, call):\n return dummy_learner(df, fn, call)\n\n @fp.curry\n def dummy_learner_3(df, fn, call):\n return fn, df, {f\"dummy_learner_{call}\": {}, \"obj\": \"a\"}\n\n train_fn = build_pipeline(\n dummy_learner(fn=fn, call=1),\n dummy_learner_2(fn=fn, call=2),\n dummy_learner_3(fn=fn, call=3))\n\n predict_fn, pred_train, log = train_fn(df_train)\n\n fkml = {\"pipeline\": [\"dummy_learner\", \"dummy_learner_2\", \"dummy_learner_3\"],\n \"output_columns\": ['id', 'x1', 'y'],\n \"features\": ['id', 'x1', 'y'],\n \"learners\": {\"dummy_learner\": {\"fn\": fn, \"log\": {\"dummy_learner_1\": {}}},\n \"dummy_learner_2\": {\"fn\": fn, \"log\": {\"dummy_learner_2\": {}}},\n \"dummy_learner_3\": {\"fn\": fn, \"log\": {\"dummy_learner_3\": {}}, \"obj\": \"a\"}}}\n\n assert log[\"__fkml__\"] == fkml\n assert \"obj\" not in log.keys()\n\n\ndef test_build_pipeline_with_onehotencoder():\n df_train = pd.DataFrame({\n 'id': [\"id1\", \"id2\", \"id3\", \"id4\", \"id3\", \"id4\"],\n 'x1': [10.0, 13.0, 10.0, 13.0, None, 13.0],\n \"x2\": [0, 1, 1, 0, 1, 0],\n \"cat\": [\"c1\", \"c1\", \"c2\", None, \"c2\", \"c4\"],\n 'y': [2.3, 4.0, 100.0, -3.9, 100.0, -3.9]\n })\n\n df_test = pd.DataFrame({\n 'id': [\"id4\", \"id4\", \"id5\", \"id6\", \"id5\", \"id6\"],\n 'x1': [12.0, 1000.0, -4.0, 0.0, -4.0, 0.0],\n \"x2\": [1, 1, 0, None, 0, 1],\n \"cat\": [\"c1\", \"c2\", \"c5\", None, \"c2\", \"c3\"],\n 'y': [1.3, -4.0, 0.0, 49, 0.0, 49]\n })\n\n features = [\"x1\", \"x2\", \"cat\"]\n target = \"y\"\n\n train_fn = build_pipeline(\n placeholder_imputer(columns_to_impute=[\"x1\", \"x2\"], placeholder_value=-999),\n onehot_categorizer(columns_to_categorize=[\"cat\"], hardcode_nans=True),\n xgb_regression_learner(features=features,\n target=target,\n num_estimators=20,\n extra_params={\"seed\": 42}))\n\n predict_fn, pred_train, log = train_fn(df_train)\n\n pred_test = predict_fn(df_test)\n\n expected_feature_columns_after_encoding = [\"x1\", \"x2\", \"fklearn_feat__cat==c1\", \"fklearn_feat__cat==c2\",\n \"fklearn_feat__cat==c4\", \"fklearn_feat__cat==nan\"]\n\n assert set(pred_test.columns) == set(expected_feature_columns_after_encoding + [\"id\", target, \"prediction\"])\n" ]
[ [ "numpy.digitize", "pandas.Series", "numpy.random.seed", "numpy.isnan", "pandas.DataFrame", "numpy.searchsorted", "numpy.random.binomial", "sklearn.preprocessing.StandardScaler", "pandas.qcut", "numpy.empty" ], [ "pandas.util.testing.assert_frame_equal", "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
shinianzhihou/change_detection.pytorch
[ "e365eb1d97ebbce400ffc4230ac0fa063638fe05" ]
[ "cd_core/runners/hooks/optimizer.py" ]
[ "from torch.nn.utils import clip_grad\n\nfrom cd_core.runners import Hook,HOOKS\n\[email protected]_module()\nclass OptimizerHook(Hook):\n def __init__(self, grad_clip=None):\n self.grad_clip = grad_clip\n\n def clip_grads(self, params):\n params = list(\n filter(lambda p: p.requires_grad and p.grad is not None, params))\n if len(params) > 0:\n return clip_grad.clip_grad_norm_(params, **self.grad_clip)\n\n def after_train_iter(self, runner):\n runner.optimizer.zero_grad()\n runner.outputs['loss'].backward()\n if self.grad_clip is not None:\n grad_norm = self.clip_grads(runner.model.parameters())\n if grad_norm is not None:\n # Add grad norm to the logger\n # TODO(snian) check log_buffer.\n runner.log_buffer.update({'grad_norm': float(grad_norm)},\n runner.outputs['num_samples'])\n runner.optimizer.step()" ]
[ [ "torch.nn.utils.clip_grad.clip_grad_norm_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hpcaitech/FastFold
[ "a65d5009279ef84c1518081344db5c02213c387a" ]
[ "fastfold/distributed/core.py" ]
[ "import os\n\nimport torch\nimport colossalai\n\n\ndef ensure_divisibility(numerator, denominator):\n \"\"\"Ensure that numerator is divisible by the denominator.\"\"\"\n assert numerator % denominator == 0, '{} is not divisible by {}'.format(numerator, denominator)\n\n\ndef set_missing_distributed_environ(key, value):\n if key not in os.environ:\n os.environ[str(key)] = str(value)\n\n\ndef init_dap(tensor_model_parallel_size_=None):\n colossalai.logging.disable_existing_loggers()\n\n if tensor_model_parallel_size_ == None:\n if 'WORLD_SIZE' in os.environ:\n tensor_model_parallel_size_ = int(os.environ['WORLD_SIZE'])\n else:\n tensor_model_parallel_size_ = 1\n\n if torch.distributed.is_initialized():\n _logger = colossalai.logging.get_dist_logger()\n _logger.error(\n \"use fastfold.distributed.init_dap instead of torch.distributed.init_process_group!\")\n exit(-1)\n\n # set distributed environ for single device launch\n set_missing_distributed_environ('WORLD_SIZE', 1)\n set_missing_distributed_environ('RANK', 0)\n set_missing_distributed_environ('LOCAL_RANK', 0)\n set_missing_distributed_environ('MASTER_ADDR', \"localhost\")\n set_missing_distributed_environ('MASTER_PORT', -1)\n\n colossalai.launch_from_torch(\n config={\"parallel\": dict(tensor=dict(size=tensor_model_parallel_size_))})\n" ]
[ [ "torch.distributed.is_initialized" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
confiwent/Comyco_linear_QoE
[ "087834ce4abfb203041de39d92f72e9adb9b976c", "087834ce4abfb203041de39d92f72e9adb9b976c" ]
[ "env_oracle.py", "train_lin.py" ]
[ "import numpy as np\n\nMILLISECONDS_IN_SECOND = 1000.0\nB_IN_MB = 1000000.0\nBITS_IN_BYTE = 8.0\nRANDOM_SEED = 42\nVIDEO_CHUNCK_LEN = 4000.0 # millisec, every time add this amount to buffer\nBITRATE_LEVELS = 6\nTOTAL_VIDEO_CHUNCK = 48\nBUFFER_THRESH = 60.0 * MILLISECONDS_IN_SECOND # millisec, max buffer limit\nDRAIN_BUFFER_SLEEP_TIME = 500.0 # millisec\nPACKET_PAYLOAD_PORTION = 0.95\nLINK_RTT = 80 # millisec\nPACKET_SIZE = 1500 # bytes\n# NOISE_LOW = 0.9\n# NOISE_HIGH = 1.1\nVIDEO_SIZE_FILE = './envivo/size/video_size_'\n\n\nclass Environment:\n def __init__(self, all_cooked_time, all_cooked_bw, random_seed=RANDOM_SEED):\n assert len(all_cooked_time) == len(all_cooked_bw)\n\n np.random.seed(random_seed)\n\n self.all_cooked_time = all_cooked_time\n self.all_cooked_bw = all_cooked_bw\n\n self.video_chunk_counter = 0\n self.buffer_size = 0\n\n # pick a random trace file\n self.trace_idx = np.random.randint(len(self.all_cooked_time))\n self.cooked_time = self.all_cooked_time[self.trace_idx]\n self.cooked_bw = self.all_cooked_bw[self.trace_idx]\n\n # randomize the start point of the trace\n # note: trace file starts with time 0\n self.mahimahi_ptr = np.random.randint(1, len(self.cooked_bw))\n self.last_mahimahi_time = self.cooked_time[self.mahimahi_ptr - 1]\n\n self.video_size = {} # in bytes\n for bitrate in range(BITRATE_LEVELS):\n self.video_size[bitrate] = []\n with open(VIDEO_SIZE_FILE + str(bitrate)) as f:\n for line in f:\n self.video_size[bitrate].append(int(line.split()[0]))\n\n def get_video_chunk(self, quality):\n\n assert quality >= 0\n assert quality < BITRATE_LEVELS\n\n video_chunk_size = self.video_size[quality][self.video_chunk_counter]\n\n # use the delivery opportunity in mahimahi\n delay = 0.0 # in ms\n video_chunk_counter_sent = 0 # in bytes\n\n while True: # download video chunk over mahimahi\n throughput = self.cooked_bw[self.mahimahi_ptr] \\\n * B_IN_MB / BITS_IN_BYTE\n duration = self.cooked_time[self.mahimahi_ptr] \\\n - self.last_mahimahi_time\n\n packet_payload = throughput * duration * PACKET_PAYLOAD_PORTION\n\n if video_chunk_counter_sent + packet_payload > video_chunk_size:\n\n fractional_time = (video_chunk_size - video_chunk_counter_sent) / \\\n throughput / PACKET_PAYLOAD_PORTION\n delay += fractional_time\n self.last_mahimahi_time += fractional_time\n break\n\n video_chunk_counter_sent += packet_payload\n delay += duration\n self.last_mahimahi_time = self.cooked_time[self.mahimahi_ptr]\n self.mahimahi_ptr += 1\n\n if self.mahimahi_ptr >= len(self.cooked_bw):\n # loop back in the beginning\n # note: trace file starts with time 0\n self.mahimahi_ptr = 1\n self.last_mahimahi_time = self.cooked_time[self.mahimahi_ptr - 1]\n\n delay *= MILLISECONDS_IN_SECOND\n delay += LINK_RTT\n # delay *= np.random.uniform(NOISE_LOW, NOISE_HIGH)\n\n # rebuffer time\n rebuf = np.maximum(delay - self.buffer_size, 0.0)\n\n # update the buffer\n self.buffer_size = np.maximum(self.buffer_size - delay, 0.0)\n\n # add in the new chunk\n self.buffer_size += VIDEO_CHUNCK_LEN\n\n # sleep if buffer gets too large\n sleep_time = 0\n if self.buffer_size > BUFFER_THRESH:\n # exceed the buffer limit\n # we need to skip some network bandwidth here\n # but do not add up the delay\n drain_buffer_time = self.buffer_size - BUFFER_THRESH\n sleep_time = np.ceil(drain_buffer_time / DRAIN_BUFFER_SLEEP_TIME) * \\\n DRAIN_BUFFER_SLEEP_TIME\n self.buffer_size -= sleep_time\n\n while True:\n duration = self.cooked_time[self.mahimahi_ptr] \\\n - self.last_mahimahi_time\n if duration > sleep_time / 
MILLISECONDS_IN_SECOND:\n self.last_mahimahi_time += sleep_time / MILLISECONDS_IN_SECOND\n break\n sleep_time -= duration * MILLISECONDS_IN_SECOND\n self.last_mahimahi_time = self.cooked_time[self.mahimahi_ptr]\n self.mahimahi_ptr += 1\n\n if self.mahimahi_ptr >= len(self.cooked_bw):\n # loop back in the beginning\n # note: trace file starts with time 0\n self.mahimahi_ptr = 1\n self.last_mahimahi_time = self.cooked_time[self.mahimahi_ptr - 1]\n\n # the \"last buffer size\" return to the controller\n # Note: in old version of dash the lowest buffer is 0.\n # In the new version the buffer always have at least\n # one chunk of video\n return_buffer_size = self.buffer_size\n\n self.video_chunk_counter += 1\n video_chunk_remain = TOTAL_VIDEO_CHUNCK - self.video_chunk_counter\n\n end_of_video = False\n if self.video_chunk_counter >= TOTAL_VIDEO_CHUNCK:\n end_of_video = True\n self.buffer_size = 0\n self.video_chunk_counter = 0\n \n self.trace_idx += 1\n if self.trace_idx >= len(self.all_cooked_time):\n self.trace_idx = 0 \n\n self.cooked_time = self.all_cooked_time[self.trace_idx]\n self.cooked_bw = self.all_cooked_bw[self.trace_idx]\n\n # randomize the start point of the video\n # note: trace file starts with time 0\n self.mahimahi_ptr = np.random.randint(1, len(self.cooked_bw))\n self.last_mahimahi_time = self.cooked_time[self.mahimahi_ptr - 1]\n\n next_video_chunk_sizes = []\n for i in range(BITRATE_LEVELS):\n next_video_chunk_sizes.append(self.video_size[i][self.video_chunk_counter])\n\n return delay, \\\n sleep_time, \\\n return_buffer_size / MILLISECONDS_IN_SECOND, \\\n rebuf / MILLISECONDS_IN_SECOND, \\\n video_chunk_size, \\\n next_video_chunk_sizes, \\\n end_of_video, \\\n video_chunk_remain\n\n def get_download_time(self,trace_idx, video_chunk_counter, mahimahi_ptr,last_mahimahi_time, chunk_quality):\n ## ---------------- compute last time ----------------------------------------------------\n if trace_idx == -1:\n trace_idx = self.trace_idx\n video_chunk_counter = self.video_chunk_counter\n mahimahi_ptr = self.mahimahi_ptr\n cooked_time = self.all_cooked_time[trace_idx]\n last_mahimahi_time = self.last_mahimahi_time\n ## ----------------- assign values ----------------------------------------------------\n\n cooked_bw = self.all_cooked_bw[trace_idx]\n cooked_time = self.all_cooked_time[trace_idx]\n\n ## ------------------- compute true bandwidth --------------------------------------------\n download_time = []\n for quality in range(chunk_quality, min(chunk_quality + 2, 6)):\n duration_all = 0\n video_chunk_counter_sent = 0 # in bytes\n video_chunk_size = self.video_size[quality][video_chunk_counter]\n mahimahi_ptr_tmp = mahimahi_ptr\n last_mahimahi_time_tmp = last_mahimahi_time\n\n while True: # download video chunk over mahimahi\n throughput = cooked_bw[mahimahi_ptr_tmp] \\\n * B_IN_MB / BITS_IN_BYTE\n duration = cooked_time[mahimahi_ptr_tmp] \\\n - last_mahimahi_time_tmp\n\n packet_payload = throughput * duration * PACKET_PAYLOAD_PORTION\n\n if video_chunk_counter_sent + packet_payload > video_chunk_size:\n fractional_time = (video_chunk_size - video_chunk_counter_sent) / \\\n throughput / PACKET_PAYLOAD_PORTION\n last_mahimahi_time_tmp += fractional_time\n duration_all += fractional_time\n break\n video_chunk_counter_sent += packet_payload\n last_mahimahi_time_tmp = cooked_time[mahimahi_ptr_tmp]\n mahimahi_ptr_tmp += 1\n\n if mahimahi_ptr_tmp >= len(cooked_bw):\n # loop back in the beginning\n # note: trace file starts with time 0\n mahimahi_ptr_tmp = 1\n last_mahimahi_time_tmp = 
cooked_time[mahimahi_ptr_tmp-1]\n duration_all += duration\n download_time.append(duration_all)\n if quality == chunk_quality:\n trace_idx_ = trace_idx\n video_chunk_counter_ = video_chunk_counter\n mahimahi_ptr_ = mahimahi_ptr_tmp\n last_mahimahi_time_ = last_mahimahi_time_tmp\n\n ## -------------------- test whether end of video ---------------------------------------------------\n video_chunk_counter_ += 1\n if video_chunk_counter_ >= TOTAL_VIDEO_CHUNCK:\n\n video_chunk_counter_ = 0\n # trace_idx_ += 1\n # if trace_idx_ >= len(self.all_cooked_time):\n # trace_idx_ = 0\n\n # cooked_time = self.all_cooked_time[trace_idx_]\n # cooked_bw = self.all_cooked_bw[trace_idx_]\n\n # randomize the start point of the video\n # note: trace file starts with time 0\n mahimahi_ptr_ = 1\n last_mahimahi_time_ = cooked_time[mahimahi_ptr_ - 1]\n\n\n if len(download_time)==1:\n return download_time[0],0, trace_idx_, video_chunk_counter_, mahimahi_ptr_, last_mahimahi_time_\n else:\n return download_time[0],download_time[1], trace_idx_, video_chunk_counter_, mahimahi_ptr_, last_mahimahi_time_", "import os\nimport numpy as np\nimport tensorflow as tf\nimport env_oracle as env\nimport time\nimport load_trace\nfrom mpc_prunning import solving_log, solving_log_true_bw\n# import pool\nimport libcomyco_lin as libcomyco\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\nS_INFO = 6\nS_LEN = 8\nA_DIM = 6\nLR_RATE = 1e-4\nDEFAULT_QUALITY = 1\nVIDEO_BIT_RATE = [300, 750, 1200, 1850, 2850, 4300] # Kbps\nMODEL_TEST_INTERVAL = 10\nQOE_METRIC = 'log'\n# REBUF_PENALTY = 2.66 #4.3\nREBUFF_PENALTY_LIN = 4.3\nREBUFF_PENALTY_LOG = 2.66\nSMOOTH_PENALTY = 1\nMPC_FUTURE_CHUNK_COUNT = 7\n\nRANDOM_SEED = 42\nBUFFER_NORM_FACTOR = 10.0\nM_IN_K = 1000.0\nRAND_RANGE = 1000\nTRAIN_TRACES = './bwsets/all/test_traces/' #_test\n# TRAIN_TRACES = './cooked_test_traces/' #_test\nLOG_FILE = './results/'\nTEST_LOG_FOLDER = './test_results/'\n\n# fixed to envivo\nVIDEO_SIZE_FILE = './envivo/size/video_size_'\n# VMAF = './envivo/vmaf/video'\nCHUNK_TIL_VIDEO_END_CAP = 48.0\n\n\ndef loopmain(sess, actor):\n video_size = {} # in bytes\n # vmaf_size = {}\n for bitrate in range(A_DIM):\n video_size[bitrate] = []\n # vmaf_size[bitrate] = []\n with open(VIDEO_SIZE_FILE + str(bitrate)) as f:\n for line in f:\n video_size[bitrate].append(int(line.split()[0]))\n # with open(VMAF + str(A_DIM - bitrate)) as f:\n # for line in f:\n # vmaf_size[bitrate].append(float(line))\n all_cooked_time, all_cooked_bw, _ = load_trace.load_trace(TRAIN_TRACES)\n net_env = env.Environment(all_cooked_time=all_cooked_time,\n all_cooked_bw=all_cooked_bw)\n with open(LOG_FILE + 'agent', 'w') as log_file, open(LOG_FILE + 'log_test', 'w') as test_log_file:\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY\n # last_chunk_vmaf = None\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n s_batch = [np.zeros((S_INFO, S_LEN))]\n a_batch = [action_vec]\n a_real_batch = [action_vec]\n r_batch = []\n\n entropy_record = []\n time_stamp = 0\n sess.run(tf.global_variables_initializer())\n \n saver = tf.train.Saver(max_to_keep=1000)\n epoch = 0\n while True:\n delay, sleep_time, buffer_size, rebuf, \\\n video_chunk_size, next_video_chunk_sizes, \\\n end_of_video, video_chunk_remain = \\\n net_env.get_video_chunk(int(bit_rate))\n\n time_stamp += delay # in ms\n time_stamp += sleep_time # in ms\n\n if QOE_METRIC == 'lin':\n # -- lin scale reward --\n REBUF_PENALTY = REBUFF_PENALTY_LIN\n reward = VIDEO_BIT_RATE[bit_rate] / M_IN_K \\\n - REBUF_PENALTY * rebuf \\\n - SMOOTH_PENALTY 
* np.abs(VIDEO_BIT_RATE[bit_rate] -\n VIDEO_BIT_RATE[last_bit_rate]) / M_IN_K\n # reward_max = 4.3\n else:\n # -- log scale reward --\n REBUF_PENALTY = REBUFF_PENALTY_LOG\n log_bit_rate = np.log(VIDEO_BIT_RATE[bit_rate] / float(VIDEO_BIT_RATE[0]))\n log_last_bit_rate = np.log(VIDEO_BIT_RATE[last_bit_rate] / float(VIDEO_BIT_RATE[0]))\n\n reward = log_bit_rate \\\n - REBUF_PENALTY * rebuf \\\n - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)\n\n r_batch.append(reward)\n\n last_bit_rate = bit_rate\n\n state = np.array(s_batch[-1], copy=True)\n\n # dequeue history record\n state = np.roll(state, -1, axis=1)\n\n # this should be S_INFO number of terms\n state[0, -1] = VIDEO_BIT_RATE[bit_rate] / float(np.max(VIDEO_BIT_RATE)) # last quality\n state[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec\n state[2, -1] = float(video_chunk_size) / \\\n float(delay) / M_IN_K # kilo byte / ms\n state[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec\n state[4, :A_DIM] = np.array(\n next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte\n state[5, -1] = np.minimum(video_chunk_remain,\n CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)\n action_prob, bit_rate = actor.predict(\n np.reshape(state, (-1, S_INFO, S_LEN)))\n\n # net_env.get_optimal(float(last_chunk_vmaf))\n\n ##----------------------------MPC having known the future bandwidth------------------------\n last_index = int(CHUNK_TIL_VIDEO_END_CAP - video_chunk_remain - 1)\n future_horizon = MPC_FUTURE_CHUNK_COUNT\n if (CHUNK_TIL_VIDEO_END_CAP - 1 - last_index < MPC_FUTURE_CHUNK_COUNT):\n future_horizon = CHUNK_TIL_VIDEO_END_CAP - 1 - last_index\n start_buffer = buffer_size\n\n action_real = solving_log_true_bw(start_buffer, int(last_bit_rate), int(future_horizon), net_env, REBUF_PENALTY, SMOOTH_PENALTY, QOE_METRIC)\n # action_real = int(net_env.optimal)\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n action_real_vec = np.zeros(A_DIM)\n action_real_vec[action_real] = 1\n \n actor.submit(state, action_real_vec)\n actor.train()\n\n entropy_record.append(actor.compute_entropy(action_prob[0]))\n # log time_stamp, bit_rate, buffer_size, reward\n log_file.write(str(time_stamp) + '\\t' +\n str(VIDEO_BIT_RATE[bit_rate]) + '\\t' +\n str(buffer_size) + '\\t' +\n str(rebuf) + '\\t' +\n str(video_chunk_size) + '\\t' +\n str(delay) + '\\t' +\n str(VIDEO_BIT_RATE[action_real]) + '\\t' +\n str(entropy_record[-1]) + '\\t' +\n str(reward) + '\\n')\n log_file.flush()\n\n # report experience to the coordinator\n if end_of_video:\n del s_batch[:]\n del a_batch[:]\n del r_batch[:]\n del a_real_batch[:]\n #del d_batch[:]\n del entropy_record[:]\n\n # so that in the log we know where video ends\n log_file.write('\\n')\n\n # store the state and action into batches\n if end_of_video:\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY # use the default action here\n # last_chunk_vmaf = None\n #chunk_index = 0\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n action_real_vec = np.zeros(A_DIM)\n action_real_vec[action_real] = 1\n\n s_batch.append(np.zeros((S_INFO, S_LEN)))\n a_batch.append(action_vec)\n a_real_batch.append(action_real_vec)\n\n epoch += 1\n if epoch % MODEL_TEST_INTERVAL == 0:\n # actor.save('models/nn_model_ep_' + \\\n # str(epoch) + '.ckpt')\n saver.save(sess, 'models/nn_model_ep_' + str(epoch) + '.ckpt')\n os.system('python rl_test_lin.py ' + 'models/nn_model_ep_' + \\\n str(epoch) + '.ckpt')\n # os.system('python plot_results.py >> results.log')\n\n ## -------------------------record the 
results--------------------------------\n rewards = []\n test_log_files_ = os.listdir(TEST_LOG_FOLDER)\n for test_log_file_ in test_log_files_:\n reward = []\n with open(TEST_LOG_FOLDER + test_log_file_, 'r') as f:\n for line in f:\n parse = line.split()\n try:\n reward.append(float(parse[-1]))\n except IndexError:\n break\n rewards.append(np.sum(reward[1:]))\n\n rewards = np.array(rewards)\n\n rewards_min = np.min(rewards)\n rewards_5per = np.percentile(rewards, 5)\n rewards_mean = np.mean(rewards)\n rewards_median = np.percentile(rewards, 50)\n rewards_95per = np.percentile(rewards, 95)\n rewards_max = np.max(rewards)\n\n test_log_file.write(str(int(epoch)) + '\\t' +\n str(rewards_min) + '\\t' +\n str(rewards_5per) + '\\t' +\n str(rewards_mean) + '\\t' +\n str(rewards_median) + '\\t' +\n str(rewards_95per) + '\\t' +\n str(rewards_max) + '\\n')\n test_log_file.flush()\n ## ------------------------------------------------------------------\n else:\n s_batch.append(state)\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n a_batch.append(action_vec)\n a_real_batch.append(action_vec)\n\ndef main():\n # create result directory\n if not os.path.exists(LOG_FILE):\n os.makedirs(LOG_FILE)\n gpu_options = tf.GPUOptions(allow_growth=True)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n actor = libcomyco.libcomyco(sess,\n S_INFO=S_INFO, S_LEN=S_LEN, A_DIM=A_DIM,\n LR_RATE=LR_RATE)\n # modify for single agent\n loopmain(sess, actor)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.ceil", "numpy.maximum", "numpy.random.seed" ], [ "numpy.minimum", "numpy.abs", "numpy.sum", "numpy.min", "numpy.reshape", "numpy.percentile", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "numpy.max", "tensorflow.GPUOptions", "numpy.mean", "tensorflow.train.Saver", "numpy.array", "numpy.zeros", "numpy.roll" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]