Columns:
  repo_name   string (length 6-130)
  hexsha      sequence of strings (git commit SHAs)
  file_path   sequence of strings (paths within the repository)
  code        sequence of strings (file contents)
  apis        sequence of string lists (APIs called in each file)
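In the sample rows below, each record's hexsha, file_path, code, and apis lists appear to be parallel, with index i of each list describing the same file. The following is a minimal illustrative sketch (not itself part of the dataset) of one parsed record; it mirrors the first row below with the code field abridged.

# Illustrative sketch of one parsed record, mirroring the first row below.
# The "code" entry is abridged here; the real value holds the full file text.
record = {
    "repo_name": "sourav-30/Manager",
    "hexsha": ["21adc814ab2c6e1cd5b375ea3ce3e01da743e357"],
    "file_path": ["emanager/hr/Worker.py"],
    "code": ["import os\nfrom emanager.constants import*\n# ... (truncated)"],
    "apis": [["pandas.read_csv"]],
}

# The per-file lists line up by index: entry i of file_path, code, and apis
# all describe the same file in the repository.
for path, source, called_apis in zip(record["file_path"], record["code"], record["apis"]):
    print(path, "->", called_apis)  # emanager/hr/Worker.py -> ['pandas.read_csv']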
sourav-30/Manager
[ "21adc814ab2c6e1cd5b375ea3ce3e01da743e357" ]
[ "emanager/hr/Worker.py" ]
[ "import os\nfrom emanager.constants import*\nimport pandas as pd\n\nhr_path = os.path.dirname(os.path.realpath(__file__))\n\n\nclass Worker:\n def __init__(self, name):\n self.name = name\n\n self.check_database()\n self.check_attendance()\n self.check_balance()\n\n def check_database(self):\n \"\"\"Check the database to find the Worker details and\n update the status of Worker object\"\"\"\n \n w_data = pd.read_csv(f\"{hr_path}/worker_data.csv\", index_col=\"NAME\")\n try:\n self.id = w_data.loc[self.name, \"ID\"]\n self.have_id = True\n self.details = w_data.loc[self.name, :]\n print(self.details)\n except:\n self.have_id = False\n\n def refresh_data(self):\n self.check_database()\n self.check_attendance()\n self.check_balance()\n \n def update_details(self, detail, new_value):\n \"\"\"Update details of a Worker\"\n \n Parameters\n ------------\n detail: str, list of str\n NAME, AGE, ADDRESS, MOBILE_NO, PAY_RATE, GROUP\n new_value: str, list of str\n new value of the detail\n \"\"\"\n \n if type(detail)!= list:\n detail=[detail]\n new_value=[new_value]\n print(\"Details Updated :\\n\", detail)\n \n w_data = pd.read_csv(f\"{hr_path}/worker_data.csv\", index_col=\"ID\")\n w_data.at[self.id, detail+[\"LAST_MODIFIED\"]] = new_value + [TIMESTAMP]\n w_data.to_csv(f\"{hr_path}/worker_data.csv\")\n self.check_database()\n \n \n def update_pay_rate(self, new_rate):\n w_data = pd.read_csv(f\"{hr_path}/worker_data.csv\", index_col=\"ID\")\n w_data.at[self.id, [\"PAY_RATE\", \"LAST_MODIFIED\"]] = [new_rate, TIMESTAMP]\n w_data.to_csv(f\"{hr_path}/worker_data.csv\")\n self.check_database()\n\n def check_attendance(self):\n pass\n\n def check_balance(self):\n pass\n\n def update_attendance(self):\n pass\n\n # with open(\"attendance_sheet.csv\") as a_data:\n\n\nclass AddWorker:\n \"\"\"Add new workers to database\n group : Permanent/ Temporary\"\"\"\n\n def __init__(\n self, name, age, address, mobile_no, join_date, pay_r, group=\"Temporary\"\n ):\n print(\"Adding new Worker....\")\n self.name = name\n\n self.id = self.generate_id(name, group, id_type=\"W\")\n self.add_entry(name, age, address, mobile_no, join_date, pay_r, group)\n \n\n def generate_id(self, name, group, id_type=\"X\"):\n initials = name.split()\n ts = TIMESTAMP.strftime(\"%y%m%S\")\n id_no = id_type + group[0] + initials[0][0] + initials[1][0] + ts\n return id_no\n\n def add_entry(self, name, age, address, mobile_no, join_date, pay_r, group):\n with open(f\"{hr_path}/worker_data.csv\", \"a\") as c_data:\n c_data.write(\n f\"\\n{self.id},{name},{age},{address},{mobile_no},{join_date},{pay_r},{group},{TIMESTAMP}\"\n )\n\n" ]
[ [ "pandas.read_csv" ] ]
neuromorphs/grill-dvs-calibration
[ "9aa7c533d0203915f8ebd27665b590de4cecec01" ]
[ "edvs.py" ]
[ "import numpy as np\nimport threading\nimport atexit\nimport time\n\nclass Serial(object):\n def __init__(self, port, baud):\n import serial\n self.conn = serial.Serial(port, baudrate=baud, rtscts=True, timeout=0)\n def send(self, message):\n self.conn.write(message.encode('utf-8'))\n def receive(self):\n return self.conn.read(1024)\n def close(self):\n self.conn.close()\n\nimport socket\nclass Socket(object):\n cache = {}\n def __init__(self, address, port=56000):\n self.socket = Socket.get_socket(address, port)\n\n @classmethod\n def get_socket(cls, address, port):\n key = (address, port)\n s = cls.cache.get(key, None)\n if s is None:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((address, port))\n s.settimeout(0)\n cls.cache[key] = s\n return s\n\n def send(self, message):\n self.socket.send(message.encode())\n def receive(self):\n try:\n return self.socket.recv(1024)\n except socket.error:\n return b''\n def close(self):\n self.socket.close()\n\n\nclass EDVS:\n def __init__(self):\n self.connection = None\n self.retina_packet_size = None\n self.image = None\n self.record_file = None\n\n def connect(self, connection):\n self.connection = connection\n self.last_time = {}\n self.connection.send('\\n')\n self.retina(False)\n atexit.register(self.disconnect)\n thread = threading.Thread(target=self.sensor_loop)\n thread.daemon = True\n thread.start()\n \n def disconnect(self):\n self.retina(False)\n if self.record_file is not None:\n self.record_file.close()\n self.connection.close()\n\n def retina(self, active, bytes_in_timestamp=4):\n if active:\n assert bytes_in_timestamp in [0, 2, 3, 4]\n cmd = '!E%d\\nE+\\n' % bytes_in_timestamp\n self.retina_packet_size = 2 + bytes_in_timestamp\n else:\n cmd = 'E-\\n'\n self.retina_packet_size = None\n self.connection.send(cmd)\n\n def show_image(self, decay=0.5, display_mode='quick'):\n if self.image is None:\n self.image = np.zeros((128, 128), dtype=float)\n thread = threading.Thread(target=self.image_loop,\n args=(decay, display_mode))\n thread.daemon = True\n thread.start()\n\n def image_loop(self, decay, display_mode):\n import pylab\n\n import matplotlib.pyplot as plt\n # using axis for updating only parts of the image that change\n fig, ax = plt.subplots()\n # so quick mode can run on ubuntu\n plt.show(block=False)\n\n pylab.ion()\n img = pylab.imshow(self.image, vmax=1, vmin=-1,\n interpolation='none', cmap='binary')\n pylab.xlim(0, 127)\n pylab.ylim(127, 0)\n\n while True:\n\n img.set_data(self.image)\n\n if display_mode == 'quick':\n # this is faster, but doesn't work on all systems\n fig.canvas.draw()\n fig.canvas.flush_events()\n \n elif display_mode == 'ubuntu_quick':\n # this is even faster, but doesn't work on all systems\n ax.draw_artist(ax.patch)\n ax.draw_artist(img)\n ax.draw_artist(scatter)\n fig.canvas.update()\n\n fig.canvas.flush_events()\n else:\n # this works on all systems, but is kinda slow\n pylab.pause(1e-8)\n\n self.image *= decay\n\n\n def sensor_loop(self):\n \"\"\"Handle all data coming from the robot.\"\"\"\n old_data = None\n buffered_ascii = b''\n while True:\n packet_size = self.retina_packet_size\n # grab the new data\n data = self.connection.receive()\n if len(data) > 0:\n print(len(data))\n\n # combine it with any leftover data from last time through the loop\n if old_data is not None:\n data = old_data + data\n old_data = None\n\n if packet_size is None:\n # no retina events, so everything should be ascii\n buffered_ascii += data\n else:\n # find the ascii events\n data_all = 
np.frombuffer(data, np.uint8)\n ascii_index = np.where(data_all[::packet_size] < 0x80)[0]\n\n offset = 0\n while len(ascii_index) > 0:\n # if there's an ascii event, remove it from the data\n index = ascii_index[0]*packet_size\n stop_index = np.where(data_all[index:] >=0x80)[0]\n if len(stop_index) > 0:\n stop_index = index + stop_index[0]\n else:\n stop_index = len(data)\n\n # and add it to the buffered_ascii list\n buffered_ascii += data[offset+index:offset+stop_index]\n data_all = np.hstack((data_all[:index],\n data_all[stop_index:]))\n offset += stop_index - index\n ascii_index = np.where(data_all[::packet_size] < 0x80)[0]\n\n # handle any partial retina packets\n extra = len(data_all) % packet_size\n if extra != 0:\n old_data = data[-extra:]\n data_all = data_all[:-extra]\n if len(data_all) > 0:\n # now process those retina events\n self.process_retina(data_all)\n\n # and process the ascii events too\n while b'\\n' in buffered_ascii:\n cmd, buffered_ascii = buffered_ascii.split(b'\\n', 1)\n self.process_ascii(cmd)\n\n def process_ascii(self, message):\n message = message.decode('utf-8')\n print(message)\n\n last_timestamp = None\n def process_retina(self, data):\n packet_size = self.retina_packet_size\n y = data[::packet_size] & 0x7f\n x = data[1::packet_size] & 0x7f\n if self.record_file is not None:\n self.record_file.write(data)\n if self.image is not None:\n value = np.where(data[1::packet_size]>=0x80, 1, -1)\n np.add.at(self.image, (y, x), value)\n\n\n def record_retina_data(self, filename):\n self.record_file = open(filename, 'wb')\n \n \nif __name__ == '__main__':\n edvs = EDVS()\n edvs.connect(Socket('99.250.220.231', port=9105))\n #edvs.connect(Serial('COM6', baud=4000000))\n time.sleep(1)\n edvs.retina(True)\n edvs.show_image(display_mode='quick', decay=0.2)\n while True:\n time.sleep(0.01)" ]
[ [ "numpy.zeros", "numpy.frombuffer", "numpy.hstack", "matplotlib.pyplot.subplots", "numpy.where", "numpy.add.at", "matplotlib.pyplot.show" ] ]
Ter-hash/holography_test
[ "372e5192cd1355cb565159f2a96fd2f7370095ce" ]
[ "propagation_partial.py" ]
[ "import torch\nimport torch.nn as nn\nimport utils.utils as utils\nimport numpy as np\nimport time\nfrom propagation_ASM import propagation_ASM\nfrom spectrum import wvl2transmission_measured\nfrom utils.pytorch_prototyping.pytorch_prototyping import Conv2dSame\nimport random\n\n\nclass PartialProp(nn.Module):\n \"\"\"Propagates a SLM phase with multiple wavelengths and sum at the target plane in field\n\n Class initialization parameters\n -------------------------------\n :param distance: propagation distance in m.\n :param feature_size: feature size of SLM.\n :param wavelength_central: principal wavelength of spectrum.\n :param num_wvls: number of wavelengths.\n :param sample_wavelength_rate: sampling rate between wavelengths\n (if evenly sampled, or not, use it for determining wavelength range of sampling)\n :param source_diameter: diameter of aperture of LED (only for LED)\n :param f_col: focal length of the first collimating lens after the source\n :param proptype: 'ASM'\n :param randomly_sampled: boolean, if True, randomly sample angles/wvs every forward pass.\n :param use_sampling_pool: boolean, if True, use pre-defined sampling pool for angle and wvs.\n :param src_type: source type, default 'LED', could be 'sLED'.\n :param batch_size: number of samples every prop\n :param num_angs: number of angles (if using sampling pool)\n :param source_amp_sigma: sigma value used for modeling source amplitude\n :param use_kernel: If True, model partially spatial coherence as a simple kernel.\n Usage\n -----\n Functions as a pytorch module:\n\n >>> multi_propagate = PartialProp(...)\n >>> output_field = multi_propagate(slm_phase)\n\n slm_phase: encoded phase-only representation at SLM plane , with dimensions\n [batch, 1, height, width]\n output_field: output_field at the target plane, with dimensions [batch,\n 1, height, width, 2]\n \"\"\"\n\n def __init__(self, distance=0.1, feature_size=6.4e-6, wavelength_central=532e-9, linear_conv=True,\n num_wvls=1, sample_wavelength_rate=1e-9, source_diameter=100e-6, f_col=200e-3, image_res=(1080, 1920),\n proptype='ASM', randomly_sampled=True, use_sampling_pool=True, src_type='LED',\n device=torch.device('cpu'), batch_size=1, num_angs=1, source_amp_sigma=10e-6,\n use_kernel=False, initial_kernel='point', initial_kernel_size=None, slm_noise=0.0, fwhm=None,\n ):\n\n super(PartialProp, self).__init__()\n\n if slm_noise > 0.0: # Simple noise model used in Fig. 
S6\n torch.manual_seed(0)\n mean_noise = torch.zeros(1, 1, *image_res).to(device)\n std_noise = slm_noise * torch.ones(1, 1, *image_res).to(device)\n self.slm_noise = torch.normal(mean_noise, std_noise)\n else:\n self.slm_noise = None\n\n self.prop_dist = distance\n self.batch_size = batch_size\n self.feature_size = (feature_size\n if hasattr(feature_size, '__len__')\n else [feature_size] * 2)\n self.image_res = image_res\n self.precomped_H = None\n self.precomped_H_exp = None\n self.linear_conv = linear_conv\n self.sample_wv_rate = sample_wavelength_rate\n self.randomly_sample = randomly_sampled # False: backpropagate through fixed wv/spatial thing\n\n self.use_sampling_pool = use_sampling_pool # True: ramdomly pick wavelengths and spatial distributions\n # at pinhole at the very first and pick over the pool\n # False: every iteration pick new wvs / tilted waves randomly\n\n # middle wavelength value\n self.wv_center = wavelength_central\n\n # maximum incident angle from the edge of the source pinhole (in angular frequency, rad)\n self.w_max = 2 * np.pi / wavelength_central * source_diameter / 2 / f_col\n self.sigma_w = 2 * np.pi / wavelength_central * source_amp_sigma / f_col\n\n # uniformly sample wvls for initial\n self.src_type = src_type\n self.fwhm = fwhm\n self.pick_wvs(wavelength_central, 0, sample_wavelength_rate, num_wvls, src_type=src_type, device=device)\n self.wv_delta = self.num_wvls / 2 * self.sample_wv_rate\n\n ##################################\n # modeling low-spatial-coherence #\n ##################################\n assert not (use_kernel and (num_angs > 1))\n if use_kernel:\n self.low_spatial_coherence = self.spatial_kernels(initial_kernel, initial_kernel_size,\n num_wvls, wavelength_central,\n distance, f_col, feature_size,\n source_diameter, image_res,\n device)\n self.source_amp_angular = [1.]\n self.source_phase = 0.\n else:\n self.low_spatial_coherence = None\n if src_type == 'sLED':\n source_amp = [1.] 
* 1\n self.source_amp_angular = source_amp\n self.source_phase = 0.\n elif src_type == 'LED':\n source_amp = []\n if not randomly_sampled:\n # 1) Amplitude: Manually sample and keep these samples during iterations\n ws = manual_angles(num_angs, self.w_max)\n for wx, wy in ws:\n source_amp.append(np.exp(-(wx ** 2 + wy ** 2) / (2.0 * self.sigma_w ** 2)))\n self.source_amp_angular = source_amp\n\n # 2) Phase: Render field from the angles, then extract phase\n source_field = self.source_field(ws, feature_size, image_res).to(device).detach()\n source_field.requires_grad = False\n _, self.source_phase = utils.rect_to_polar(source_field[..., 0], source_field[..., 1])\n else: # random sampling\n # If use sampling pool : make sampling pool and pick from there.\n # otherwise : do nothing and just pick randomly every iteration.\n if use_sampling_pool:\n # 1) Amp: Assume gaussian shape intensity over the pinhole\n r = self.w_max * np.random.random(num_angs)\n theta = 2 * np.pi * np.random.random(num_angs)\n self.ws = np.array([(wx, wy) for wx, wy in zip(r * np.cos(theta), r * np.sin(theta))])\n for wx, wy in self.ws:\n source_amp.append(np.exp(-(wx ** 2 + wy ** 2) / (2.0 * 2.0 * self.sigma_w ** 2)))\n self.source_amp_angular = np.array(source_amp)\n\n # 2) Phase\n source_field = self.source_field(self.ws, feature_size, image_res).to(device).detach()\n source_field.requires_grad = False\n _, self.source_phase = utils.rect_to_polar(source_field[..., 0], source_field[..., 1])\n\n if proptype == 'ASM':\n self.prop = propagation_ASM_broadband\n\n # set a device for initializing the precomputed objects\n try:\n self.dev = device # next(self.parameters()).device\n except StopIteration: # no parameters\n self.dev = torch.device('cpu')\n\n def forward(self, phases):\n # 1. precompute the kernels only once\n if self.precomped_H is None:\n self.calculate_Hs()\n\n # 2. 
randomly sample the coefficients\n source_amp, source_phase, precomped_H, a_ang, q_wv = self.sample_ang_wvs()\n\n # consider phases of incident beam from different angles\n processed_phase = phases + source_phase\n if self.slm_noise is not None:\n processed_phase += self.slm_noise\n\n # propagate from SLM to target plane\n real, imag = utils.polar_to_rect(torch.ones_like(processed_phase), processed_phase)\n processed_complex = torch.stack((real, imag), -1)\n processed_complex = torch.view_as_complex(processed_complex)\n output_complex = self.prop(processed_complex, self.feature_size,\n None, self.prop_dist, H=precomped_H,\n linear_conv=self.linear_conv)\n\n if self.low_spatial_coherence is not None:\n # all the amplitudes are converted into intensity, then are applied convolution\n # shift-invariant kernel for low-spatial coherence (finite size of light source)\n # sum over n wavelengths (n channels) -> 1 channel\n intensity = q_wv * (output_complex.abs() ** 2)\n intensity = self.low_spatial_coherence(intensity)\n else:\n # Stochastic sampling\n intensity = (q_wv * a_ang) * (output_complex.abs() ** 2)\n intensity = torch.mean(intensity, dim=1, keepdim=True)\n intensity = torch.sum(intensity, dim=0, keepdim=True)\n\n # convert back to amplitude (lose phase info)\n output_amp = torch.pow(intensity, 0.5)\n output_field = torch.stack(utils.polar_to_rect(output_amp, torch.zeros_like(output_amp)), -1)\n return torch.view_as_complex(output_field)\n\n def calculate_Hs(self, verbose=True):\n \"\"\"\n calculate and stack propagation kernels in channel dimensions\n \"\"\"\n if verbose:\n t0 = time.time()\n print(' - computing: propagation kernels...')\n\n self.precomped_H = None\n for wavelength in self.wvls:\n if self.precomped_H is None:\n self.precomped_H = propagation_ASM(torch.empty(1, 1, 1080, 1920),\n self.feature_size, wavelength,\n z=self.prop_dist, return_H=True,\n linear_conv=True)\n else:\n # stack wavelengths in channel dimension\n self.precomped_H = torch.cat((self.precomped_H,\n propagation_ASM(torch.empty(1, 1, 1080, 1920),\n self.feature_size, wavelength,\n z=self.prop_dist, return_H=True,\n linear_conv=True)), dim=1)\n self.precomped_H = self.precomped_H.detach().to(self.dev)\n self.precomped_H.requires_grad = False\n\n if verbose:\n print(f' - done: propagation kernels... took{time.time() - t0:.4f}s')\n\n def spatial_kernels(self, initial_kernel, initial_kernel_size, num_wvls,\n wavelength_central, distance, f_col, feature_size, led_size_source_plane,\n image_res, device):\n # consider spatial coherence as a simple convolution layer applied for intensity\n # Note that we can do this in frequency domain - see Deng et al. 2017, Park 2020.\n\n # calculate the kernel size\n if initial_kernel_size is not None:\n kernel_size = initial_kernel_size\n else:\n led_size_recon_plane = [led_size_source_plane * distance / f_col] * 2\n dx_recon_plane = [wavelength_central * distance / (N * dx)\n for N, dx in zip(image_res, feature_size)]\n kernel_size = [round(s / dx) for s, dx in zip(led_size_recon_plane, dx_recon_plane)]\n\n # use modified pytorch prototyping from Vincent\n low_spatial_coherence = Conv2dSame(num_wvls, 1, kernel_size=kernel_size, bias=False)\n\n if initial_kernel == 'point':\n # initial kernel is a central point\n initial_weight = torch.zeros(1, num_wvls, *kernel_size).to(device)\n initial_weight[..., int(kernel_size[0] / 2), int(kernel_size[1] / 2)] = 1. 
# functions as floor\n low_spatial_coherence.net[1].weight = nn.Parameter(initial_weight)\n elif initial_kernel == 'uniform':\n # initial kernel is an uniform rectangle\n initial_weight = (torch.ones(1, num_wvls, *kernel_size)\n / np.prod(kernel_size)).detach().to(device)\n low_spatial_coherence.net[1].weight = nn.Parameter(initial_weight)\n\n else:\n # designated kernel\n initial_weight = torch.tensor(initial_kernel,\n dtype=torch.float32).repeat(1, num_wvls, 1, 1).detach().to(device)\n\n low_spatial_coherence.net[1].weight = nn.Parameter(initial_weight)\n\n return low_spatial_coherence\n\n def source_field(self, angles, feature_size, image_res):\n '''\n Stack several plane waves in minibatch dimension\n :param angles: np 1d array of tuples (wy, wx)\n :param feature_size:\n :param image_res:\n :return:\n '''\n source_field = None\n for w in angles:\n # here this amplitude can be a function of wx and wy!\n wy, wx = w\n amp = 1.\n\n if source_field is None:\n source_field = tilted_plane_wave(amp=amp, w=(wy, wx),\n feature_size=feature_size,\n field_resolution=image_res)\n else:\n source_field = torch.cat((source_field, tilted_plane_wave(amp=amp, w=(wy, wx),\n feature_size=feature_size,\n field_resolution=image_res)\n ), dim=1)\n\n return source_field\n\n def pick_wvs(self, wv_center, wv_delta=0, sample_wv_rate=1e-9, num_wvls=1, rand=False,\n src_type='LED', device=torch.device('cuda:0')):\n \"\"\"\n randomly pick wv_center +- wv_delta or wv_center +- n * sample_wv_rate.\n\n :param wv_center:\n :param wv_delta:\n :param sample_wv_rate:\n :param num_wvls:\n :param rand:\n :param src_type:\n :param device:\n :return:\n \"\"\"\n if not rand:\n self.wvls = np.array([wv_center + d * sample_wv_rate\n for d in range(round((-num_wvls + 1) / 2), round((num_wvls + 1) / 2))])\n else:\n self.wvls = np.random.uniform(wv_center - wv_delta,\n wv_center + wv_delta, (num_wvls))\n\n if self.fwhm is not None:\n sigma = self.fwhm / (2 * np.sqrt(2 * np.log(2)))\n trans = [np.exp(-(wvl - wv_center) ** 2 / (2 * sigma ** 2)) for wvl in self.wvls]\n else:\n trans = [wvl2transmission_measured(wvl, src_type) for wvl in self.wvls]\n self.trans = nn.Parameter(torch.tensor([x / max(trans) for x in trans], dtype=torch.float32).\n to(device), requires_grad=False) # normalize\n\n def sample_ang_wvs(self):\n num_angs = len(self.source_amp_angular)\n num_wvs = len(self.wvls)\n if self.randomly_sample: # randomly sample, every iteration:\n # pick kernels and source_phases randomly and pair them (number of batch size)\n if self.use_sampling_pool:\n # pick wavelengths\n m = random.choices(range(len(self.wvls)), k=self.batch_size)\n precomped_H = self.precomped_H[:, m, ...]\n\n # pick tilted waves\n if self.src_type == 'LED':\n n = random.choices(range(len(self.ws)), k=self.batch_size)\n source_phase = self.source_phase[:, n, ...]\n source_amp = self.source_amp_angular[n]\n else:\n source_amp = 1.\n source_phase = 0.\n else:\n # pick wavelengths\n self.pick_wvs(self.wv_center, self.wv_delta, num_wvls=self.batch_size, rand=True, device=self.dev)\n self.calculate_Hs(verbose=False)\n precomped_H = self.precomped_H\n\n # pick tilted waves\n if self.src_type == 'LED':\n # From cartesian\n ws = []\n while True:\n num_pick = self.batch_size - len(ws)\n if num_pick < 1:\n break\n wxs, wys = self.w_max * (2 * np.random.random(num_pick) - 1), \\\n self.w_max * (2 * np.random.random(num_pick) - 1)\n for wx, wy in zip(wxs, wys):\n r = np.sqrt(wx ** 2 + wy ** 2)\n\n # restrict within circle shape aperture\n if r <= self.w_max:\n 
ws.append((wx, wy))\n\n source_amp = []\n for wx, wy in ws:\n source_amp.append(np.exp(-(wx ** 2 + wy ** 2) / (2.0 * 2.0 * self.sigma_w ** 2)))\n source_field = self.source_field(ws, self.feature_size, self.image_res).to(self.dev).detach()\n source_field.requires_grad = False\n _, source_phase = utils.rect_to_polar(source_field[..., 0], source_field[..., 1])\n\n elif self.src_type == 'sLED':\n # No spatial distribution for sLED\n source_amp = 1.\n source_phase = 0.\n else:\n # uniformly sampled\n if self.use_sampling_pool:\n if num_wvs > num_angs:\n m = random.choices(range(num_wvs), k=num_angs)\n precomped_H = self.precomped_H[:, m, ...]\n source_phase = self.source_phase\n source_amp = self.source_amp_angular\n else:\n n = random.choices(range(num_angs), k=num_wvs)\n source_phase = self.source_phase[:, n, ...]\n source_amp = self.source_amp_angular[:, n, ...]\n precomped_H = self.precomped_H\n else:\n source_phase = self.source_phase\n source_amp = self.source_amp_angular\n precomped_H = self.precomped_H\n\n # calculate coefficients\n if self.src_type == 'LED':\n # span weights along angles on channel dimension\n a_ang = torch.tensor(source_amp, dtype=torch.float32).unsqueeze(0).unsqueeze(2).unsqueeze(3).to(self.dev)\n elif self.src_type == 'sLED':\n a_ang = torch.tensor(source_amp, dtype=torch.float32).to(self.dev)\n q_wv = self.trans.reshape(1, len(self.trans), 1, 1)\n if self.use_sampling_pool and num_wvs > num_angs:\n q_wv = q_wv[:, m, ...]\n\n return source_amp, source_phase, precomped_H, a_ang, q_wv\n\n def to(self, *args, **kwargs):\n slf = super().to(*args, **kwargs)\n if slf.precomped_H is not None:\n slf.precomped_H = slf.precomped_H.to(*args, **kwargs)\n if slf.precomped_H_exp is not None:\n slf.precomped_H_exp = slf.precomped_H_exp.to(*args, **kwargs)\n # try setting dev based on some parameter, default to cpu\n try:\n slf.dev = next(slf.parameters()).device\n except StopIteration: # no parameters\n device_arg = torch._C._nn._parse_to(*args, **kwargs)[0]\n if device_arg is not None:\n slf.dev = device_arg\n return slf\n\n @property\n def num_wvls(self):\n return len(self.wvls)\n\n @property\n def training(self):\n return self._training\n\n @training.setter\n def training(self, mode):\n if mode:\n self.zernike_eval = None # reset when switching to training\n self._training = mode\n\n\ndef propagation_ASM_broadband(u_in, feature_size, wavelength, z, linear_conv=True,\n padtype='zero', H=None,\n dtype=torch.float32):\n \"\"\"Propagates the input field using the angular spectrum method\n # Assume H are always given\n\n Inputs\n ------\n u_in: complex field of size (num_images, 1, height, width, 2)\n where the last two channels are real and imaginary values\n feature_size: (height, width) of individual holographic features in m\n wavelength: wavelength in m\n z: propagation distance\n linear_conv: if True, pad the input to obtain a linear convolution\n padtype: 'zero' to pad with zeros, 'median' to pad with median of u_in's\n amplitude\n return_H[_exp]: used for precomputing H or H_exp, ends the computation early\n and returns the desired variable\n precomped_H[_exp]: the precomputed value for H or H_exp\n dtype: torch dtype for computation at different precision\n\n Output\n ------\n tensor of size (num_images, 1, height, width, 2)\n \"\"\"\n\n if linear_conv:\n # preprocess with padding for linear conv\n input_resolution = u_in.size()[-2:]\n conv_size = [i * 2 for i in input_resolution]\n if padtype == 'zero':\n padval = 0\n elif padtype == 'median':\n padval = 
torch.median(torch.pow((u_in ** 2).sum(-1), 0.5))\n u_in = utils.pad_image(u_in, conv_size, padval=padval, stacked_complex=False)\n\n U1 = torch.fft.fftn(utils.ifftshift(u_in), dim=(-2, -1), norm='ortho') \\\n \\\n # convolution of the system\n U2 = H * U1\n\n # Fourier transform of the convolution to the observation plane\n u_out = utils.fftshift(torch.fft.ifftn(U2, dim=(-2, -1), norm='ortho'))\n\n if linear_conv:\n return utils.crop_image(u_out, input_resolution, stacked_complex=False)\n else:\n return u_out\n\n\ndef tilted_plane_wave(w, amp=1.0,\n feature_size=(6.4e-6, 6.4e-6), field_resolution=(1080, 1920), dtype=torch.float32):\n '''\n return a complex wave field that comes from a shifted source position at source plane after collimating lens\n\n :param wavelength:\n :param f_col: the focal length of collimating lens\n :param amp:\n :param pos_src:\n :param feature_size:\n :param field_resolution:\n :param dtype:\n :return:\n '''\n\n dy, dx = feature_size\n y = np.linspace(-dy * field_resolution[0] / 2,\n dy * field_resolution[0] / 2,\n field_resolution[0])\n x = np.linspace(-dx * field_resolution[1] / 2,\n dx * field_resolution[1] / 2,\n field_resolution[1])\n X, Y = np.meshgrid(x, y)\n wy, wx = w\n\n phase = wx * X + wy * Y\n phase = torch.tensor(phase, dtype=dtype, requires_grad=False)\n phase = torch.reshape(phase, (1, 1, phase.size()[0], phase.size()[1]))\n\n real, img = utils.polar_to_rect(amp * torch.ones_like(phase), phase)\n field = torch.stack((real, img), 4)\n\n return field\n\n\ndef manual_angles(num_angs, w_max):\n ws = [(0., 0.)]\n if num_angs == 5:\n wx, wy = w_max * np.cos(np.pi / 4), w_max * np.sin(np.pi / 4)\n ws.append((wx, 0.))\n ws.append((-wx, 0.))\n ws.append((0., wy))\n ws.append((0., -wy))\n elif num_angs == 9:\n wx, wy = w_max * np.cos(np.pi / 4), w_max * np.sin(np.pi / 4)\n ws.append((wx, 0.))\n ws.append((-wx, 0.))\n ws.append((0., wy))\n ws.append((0., -wy))\n ws.append((wx, wy))\n ws.append((-wx, wy))\n ws.append((-wx, wy))\n ws.append((wx, -wy))\n return np.array(ws)\n" ]
[ [ "torch.stack", "numpy.exp", "torch.nn.Parameter", "torch.ones", "torch.fft.ifftn", "numpy.cos", "numpy.random.random", "torch.sum", "numpy.sin", "numpy.log", "torch.view_as_complex", "torch.normal", "torch.manual_seed", "numpy.prod", "torch.tensor", "numpy.sqrt", "torch.zeros_like", "torch.empty", "torch.zeros", "torch.device", "numpy.array", "torch.pow", "numpy.random.uniform", "torch._C._nn._parse_to", "torch.ones_like", "numpy.linspace", "numpy.meshgrid", "torch.mean" ] ]
rrtaylor/tensorflow
[ "8b639d335ec0ad6b69dddb791636adb3cd1dab68" ]
[ "tensorflow/python/eager/context.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"State management for eager execution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport copy\nimport random\nimport threading\nimport numpy as np\nimport six\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python import tf2\nfrom tensorflow.python.eager import executor\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import is_in_graph_mode\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\nGRAPH_MODE = 0\nEAGER_MODE = 1\n\ndefault_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE\n\n# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,\n# new_device_spec).\n# Note that we do not protect this with a lock and instead rely on python's GIL\n# and the idempotent nature of writes to provide thread safety.\n_device_parsing_cache = {}\n_starting_device_spec = pydev.DeviceSpec.from_string(\"\")\n\n_MAXINT32 = 2**31 - 1\n\nDEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT\nDEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN\nDEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT\nDEVICE_PLACEMENT_SILENT_FOR_INT32 = (\n pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)\n\nSYNC = 0\nASYNC = 1\n\nMIRRORING_NONE = pywrap_tensorflow.TFE_MIRRORING_NONE\nMIRRORING_ALL = pywrap_tensorflow.TFE_MIRRORING_ALL\n\n_tf2_gauge = monitoring.BoolGauge(\"/tensorflow/api/tf2_enable\",\n \"Whether tf2.enable() is called.\")\n\n_python_eager_context_create_counter = monitoring.Counter(\n \"/tensorflow/api/python/eager_context_create_counter\",\n \"Counter for number of eager contexts created in Python.\")\n\n_tf2_gauge.get_cell().set(tf2.enabled())\n\n\nclass _EagerTensorCache(object):\n \"\"\"Simple cache which evicts items based on length in a FIFO manner.\"\"\"\n\n def __init__(self, max_items=256, max_tensor_size=10000):\n self._data = collections.OrderedDict()\n self._max_items = max_items\n self._max_tensor_size = max_tensor_size\n\n def put(self, key, value):\n if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access\n return\n\n self._data[key] = value\n\n if len(self._data) > self._max_items:\n self._data.popitem(last=False)\n\n def get(self, key):\n return self._data.get(key, None)\n\n def flush(self):\n self._data = {}\n\n\nclass FunctionCallOptions(object):\n \"\"\"Options applied at call sites of eager 
functions.\n\n Eager functions are functions decorated with tf.contrib.eager.defun.\n \"\"\"\n\n def __init__(self, executor_type=None, config_proto=None):\n \"\"\"Constructor.\n\n Args:\n executor_type: (optional) name of the executor to be used to execute the\n eager function. If None or an empty string, the default Tensorflow\n executor will be used.\n config_proto: (optional) a `config_pb2.ConfigProto` proto or\n a serialized string of that proto.\n The config used by Grappler when optimizing the function graph.\n Each concrete function is optimized the first time is called. Changing\n config_proto after the first call has no effect.\n If config_proto is None, an empty RewriterConfig will be used.\n \"\"\"\n self.config_proto_serialized = config_proto\n self.executor_type = executor_type\n\n @property\n def executor_type(self):\n return self._executor_type\n\n @executor_type.setter\n def executor_type(self, executor_type):\n self._executor_type = executor_type\n\n @property\n def config_proto_serialized(self):\n return self._config_proto_serialized\n\n @config_proto_serialized.setter\n def config_proto_serialized(self, config):\n if isinstance(config, config_pb2.ConfigProto):\n self._config_proto_serialized = config.SerializeToString()\n elif isinstance(config, str):\n self._config_proto_serialized = config\n elif config is None:\n self._config_proto_serialized = (\n config_pb2.ConfigProto().SerializeToString())\n else:\n raise ValueError(\"the rewriter config must be either a \"\n \"config_pb2.ConfigProto, or a serialized string of that \"\n \"proto or None. got: {}\".format(type(config)))\n\n\n# Map from context_id (an int) to _TensorCaches.\n# Dicts are thread safe in CPython.\n# TODO(iga): Remove this once TensorCaches are moved to C++.\n_tensor_caches_map = {}\n\n\nclass _TensorCaches(threading.local):\n \"\"\"Thread local tensor caches.\"\"\"\n\n def __init__(self):\n super(_TensorCaches, self).__init__()\n self._ones_rank_cache = None\n self._zeros_cache = None\n\n @property\n def ones_rank_cache(self):\n if not self._ones_rank_cache:\n self._ones_rank_cache = _EagerTensorCache()\n return self._ones_rank_cache\n\n @property\n def zeros_cache(self):\n if not self._zeros_cache:\n self._zeros_cache = _EagerTensorCache()\n return self._zeros_cache\n\n\nclass _ThreadLocalData(threading.local):\n \"\"\"Thread local storage for the eager context.\"\"\"\n\n def __init__(self):\n super(_ThreadLocalData, self).__init__()\n self.device_spec = _starting_device_spec\n self.device_name = \"\"\n self.mode = default_execution_mode\n self.is_eager = default_execution_mode == EAGER_MODE\n self.scope_name = \"\"\n self.summary_writer = None\n self.summary_recording = None\n self.summary_recording_distribution_strategy = True\n self.summary_step = None\n self.function_call_options = None\n self.executor = None\n\n\nContextSwitch = collections.namedtuple(\n \"ContextSwitch\", [\"is_building_function\", \"enter_context_fn\",\n \"device_stack\"])\n\n\n# `_ContextSwitchStack` is a `threading.local` to match the semantics of\n# ``DefaultGraphStack`, which is also a `threading.local`.\nclass _ContextSwitchStack(threading.local):\n \"\"\"A thread-local stack of context switches.\"\"\"\n\n def __init__(self, eager):\n super(_ContextSwitchStack, self).__init__()\n self.stack = []\n if eager:\n # Initialize the stack with a pointer to enter the eager context; this\n # ensures that the fact that eager execution was enabled is propagated\n # across threads, since (1) `enable_eager_execution` modifies a\n # 
process-level flag (`default_execution_mode`) and (2) `__init__` is\n # called each time a threading.local object is used in a separate thread.\n self.push(is_building_function=False, enter_context_fn=eager_mode,\n device_stack=None)\n\n def push(self, is_building_function, enter_context_fn, device_stack):\n \"\"\"Push metadata about a context switch onto the stack.\n\n A context switch can take any one of the two forms: installing a graph as\n the default graph, or entering the eager context. For each context switch,\n we record whether or not the entered context is building a function.\n\n Args:\n is_building_function: (bool.) Whether the context is building a function.\n enter_context_fn: (function.) A callable that executes the context switch.\n For example, `graph.as_default` or `eager_mode`.\n device_stack: If applicable, the device function stack for this\n graph. When breaking out of graphs in init_scope, the innermost nonempty\n device stack is used. Eager contexts put `None` here and the value is\n never used.\n \"\"\"\n\n self.stack.append(\n ContextSwitch(is_building_function, enter_context_fn, device_stack))\n\n def pop(self):\n \"\"\"Pop the stack.\"\"\"\n\n self.stack.pop()\n\n\nclass LogicalDevice(\n collections.namedtuple(\"LogicalDevice\", [\"name\", \"device_type\"])):\n \"\"\"Abstraction for a device initialized by the runtime.\n\n A LogicalDevice corresponds to a initialized instance on a PhysicalDevice or a\n remote device available in the cluster. Tensors and operations can be placed\n on a specific LogicalDevice by calling `tf.device()` with the `name` of the\n LogicalDevice.\n\n Fields:\n name: The fully qualified name of the device. Can be used for Op or function\n placement.\n device_type: String declaring the type of device such as \"CPU\" or \"GPU\".\n \"\"\"\n pass\n\n\n@tf_export(\"config.experimental.VirtualDeviceConfiguration\")\nclass VirtualDeviceConfiguration(\n collections.namedtuple(\"VirtualDeviceConfiguration\", [\"memory_limit\"])):\n \"\"\"Configuration class for virtual devices for a PhysicalDevice.\n\n Fields:\n memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual\n device. Currently only supported for GPUs.\n \"\"\"\n\n def __new__(cls, memory_limit=None):\n return super(VirtualDeviceConfiguration, cls).__new__(cls, memory_limit)\n\n\nclass PhysicalDevice(\n collections.namedtuple(\"PhysicalDevice\", [\"name\", \"device_type\"])):\n \"\"\"Abstraction for a locally visible physical device.\n\n TensorFlow can utilize various devices such as the CPU or multiple GPUs\n for computation. Before initializing a local device for use, the user can\n customize certain properties of the device such as it's visibility or memory\n configuration.\n\n Once a PhysicalDevice is initialized one or many LogicalDevice objects are\n created. Use tf.config.set_virtual_device_configuration() to create multiple\n LogicalDevice objects for a PhysicalDevice. 
This is useful when separation\n between models is needed.\n\n Fields:\n name: Unique identifier for device.\n device_type: String declaring the type of device such as \"CPU\" or \"GPU\".\n \"\"\"\n pass\n\n\nclass _AtomicCounter(object):\n \"\"\"A simple atomic counter.\"\"\"\n\n def __init__(self):\n self._value = 0\n self._lock = threading.Lock()\n\n def increment_and_get(self):\n with self._lock:\n self._value += 1\n return self._value\n\n\n_context_id_counter = _AtomicCounter()\n\n\nclass _TensorCacheDeleter(object):\n \"\"\"Deletes tensor caches for a given context.\"\"\"\n\n def __init__(self, context_id):\n self._context_id = context_id\n\n def __del__(self):\n if _tensor_caches_map is None:\n return\n if self._context_id in _tensor_caches_map:\n del _tensor_caches_map[self._context_id]\n\n\n# TODO(agarwal): rename to EagerContext / EagerRuntime ?\n# TODO(agarwal): consider keeping the corresponding Graph here.\nclass Context(object):\n \"\"\"Environment in which eager operations execute.\"\"\"\n\n # TODO(agarwal): create and link in some documentation for `execution_mode`.\n # pylint: disable=redefined-outer-name\n def __init__(self,\n config=None,\n device_policy=None,\n execution_mode=None,\n server_def=None):\n \"\"\"Creates a new Context.\n\n Args:\n config: (Optional.) A `ConfigProto` protocol buffer with configuration\n options for the Context. Note that a lot of these options may be\n currently unimplemented or irrelevant when eager execution is enabled.\n device_policy: (Optional.) What policy to use when trying to run an\n operation on a device with inputs which are not on that device.\n When set to None, an appropriate value will be picked automatically.\n The value picked may change between TensorFlow releases.\n\n Defaults to DEVICE_PLACEMENT_SILENT.\n Valid values:\n - DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is\n not correct.\n - DEVICE_PLACEMENT_WARN: copies the tensors which are not on the\n right device but raises a warning.\n - DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might\n hide performance problems.\n - DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,\n raising errors on the other ones.\n execution_mode: (Optional.) Policy controlling how operations dispatched\n are actually executed. When set to None, an appropriate value will be\n picked automatically. The value picked may change between TensorFlow\n releases.\n Valid values:\n - SYNC: executes each operation synchronously.\n - ASYNC: executes each operation asynchronously. These\n operations may return \"non-ready\" handles.\n server_def: (Optional.) A tensorflow::ServerDef proto.\n Enables execution on remote devices. GrpcServers need to be started by\n creating an identical server_def to this, and setting the appropriate\n task_indexes, so that the servers can communicate. 
It will then be\n possible to execute operations on remote devices.\n\n Raises:\n ValueError: If execution_mode is not valid.\n \"\"\"\n # This _id is used only to index the tensor caches.\n # TODO(iga): Remove this when tensor caches are moved to C++.\n self._id = _context_id_counter.increment_and_get()\n self._tensor_cache_deleter = _TensorCacheDeleter(self._id)\n _tensor_caches_map[self._id] = _TensorCaches()\n\n self._config = config\n self._thread_local_data = _ThreadLocalData()\n self._context_switches = _ContextSwitchStack(self.executing_eagerly())\n self._context_handle = None\n self._context_devices = None\n self._post_execution_callbacks = []\n self._seed = None\n self._initialize_lock = threading.Lock()\n self._initialized = False\n if device_policy is None:\n device_policy = DEVICE_PLACEMENT_SILENT\n self._device_policy = device_policy\n self._mirroring_policy = None\n if execution_mode not in (None, SYNC, ASYNC):\n raise ValueError(\n \"execution_mode should be None/SYNC/ASYNC. Got %s\" % execution_mode)\n if execution_mode is None:\n execution_mode = SYNC\n self._default_is_async = execution_mode == ASYNC\n self._server_def = server_def\n self._collective_ops_server_def = None\n self._collective_leader = None\n self._collective_scoped_allocator_enabled_ops = None\n self._collective_use_nccl_communication = None\n self._collective_device_filters = None\n\n self._device_lock = threading.Lock()\n self._physical_devices = None\n self._visible_device_list = []\n self._memory_growth_map = None\n self._virtual_device_map = {}\n\n # Values set after construction\n self._optimizer_jit = None\n self._intra_op_parallelism_threads = None\n self._inter_op_parallelism_threads = None\n self._soft_device_placement = None\n self._log_device_placement = None\n self._optimizer_experimental_options = {}\n\n _python_eager_context_create_counter.get_cell().increase_by(1)\n # pylint: enable=redefined-outer-name\n\n def _set_global_seed(self, seed):\n \"\"\"Set a global eager mode seed for random ops.\"\"\"\n self._seed = seed\n # `random.Random(seed)` needs `seed` to be hashable, while values of type\n # e.g. `np.int64` or `np.ndarray` are not. 
We use `int(...)` to convert them\n # to int.\n try:\n hash(seed)\n except TypeError:\n seed = int(np.array(seed))\n self._rng = random.Random(seed)\n # Also clear the kernel cache, to reset any existing seeds\n if self._context_handle is not None:\n pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle)\n\n def _internal_operation_seed(self):\n \"\"\"Returns a fake operation seed.\n\n In eager mode, user shouldn't set or depend on operation seed.\n Here, we generate a random seed based on global seed to make\n operation's randomness different and depend on the global seed.\n\n Returns:\n A fake operation seed based on global seed.\n \"\"\"\n return self._rng.randint(0, _MAXINT32)\n\n def _initialize_logical_devices(self):\n \"\"\"Helper to initialize devices.\"\"\"\n # Store list of devices\n self._logical_devices = []\n self._context_devices = []\n device_list = pywrap_tensorflow.TFE_ContextListDevices(\n self._context_handle)\n try:\n self._num_gpus = 0\n for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)):\n dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i)\n self._context_devices.append(pydev.canonical_name(dev_name))\n spec = pydev.DeviceSpec.from_string(dev_name)\n self._logical_devices.append(\n LogicalDevice(name=dev_name, device_type=spec.device_type))\n dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i)\n if dev_type == \"GPU\":\n self._num_gpus += 1\n\n finally:\n pywrap_tensorflow.TF_DeleteDeviceList(device_list)\n\n def ensure_initialized(self):\n \"\"\"Initialize handle and devices if not already done so.\"\"\"\n if self._initialized:\n return\n with self._initialize_lock:\n if self._initialized:\n return\n assert self._context_devices is None\n opts = pywrap_tensorflow.TFE_NewContextOptions()\n try:\n config_str = self.config.SerializeToString()\n pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str)\n if self._device_policy is not None:\n pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy(\n opts, self._device_policy)\n if self._mirroring_policy is not None:\n pywrap_tensorflow.TFE_ContextOptionsSetMirroringPolicy(\n opts, self._mirroring_policy)\n if self._default_is_async == ASYNC:\n pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True)\n context_handle = pywrap_tensorflow.TFE_NewContext(opts)\n finally:\n pywrap_tensorflow.TFE_DeleteContextOptions(opts)\n assert not (self._server_def and self._collective_ops_server_def), (\n \"Cannot enable remote execution as well as collective ops at the \"\n \"moment. If this is important to you, please file an issue.\")\n if self._server_def is not None:\n server_def_str = self._server_def.SerializeToString()\n pywrap_tensorflow.TFE_ContextSetServerDef(context_handle, 600,\n server_def_str)\n elif self._collective_ops_server_def is not None:\n server_def_str = self._collective_ops_server_def.SerializeToString()\n pywrap_tensorflow.TFE_EnableCollectiveOps(context_handle,\n server_def_str)\n\n self._context_handle = context_handle\n self._initialize_logical_devices()\n self._initialized = True\n\n def _clear_caches(self):\n self.ones_rank_cache().flush()\n self.zeros_cache().flush()\n pywrap_tensorflow.TFE_ClearScalarCache()\n\n def set_server_def(self, server_def, keep_alive_secs=600):\n \"\"\"Allow setting a server_def on the context.\n\n When a server def is replaced, it effectively clears a bunch of caches\n within the context. 
If you attempt to use a tensor object that was pointing\n to a tensor on the remote device, it will raise an error.\n\n Args:\n server_def: A tensorflow::ServerDef proto.\n Enables execution on remote devices.\n keep_alive_secs: Num. seconds after which the remote end will hang up.\n As long as the client is still alive, the server state for the context\n will be kept alive. If the client is killed (or there is some failure),\n the server will clean up its context keep_alive_secs after the final RPC\n it receives.\n\n Raises:\n ValueError: if server_def is None.\n \"\"\"\n if not server_def:\n raise ValueError(\"server_def is None.\")\n\n self._server_def = server_def\n\n if self._context_handle:\n server_def_str = server_def.SerializeToString()\n pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle,\n keep_alive_secs, server_def_str)\n self._initialize_logical_devices()\n\n # Clear all the caches in case there are remote tensors in them.\n self._clear_caches()\n\n def enable_collective_ops(self, server_def):\n \"\"\"Enable distributed collective ops with an appropriate server_def.\n\n Args:\n server_def: A tensorflow::ServerDef proto. Enables execution on remote\n devices.\n\n Raises:\n ValueError: if server_def is None.\n RuntimeError: if this method is not called at program startup.\n \"\"\"\n if not server_def:\n raise ValueError(\"server_def is None.\")\n\n if self._context_handle is not None:\n raise RuntimeError(\"Collective ops must be enabled at program startup\")\n\n self._collective_ops_server_def = server_def\n\n def configure_collective_ops(\n self,\n collective_leader=\"\",\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n use_nccl_communication=False,\n device_filters=None):\n \"\"\"Configure collective ops.\n\n Collective group leader is necessary for collective ops to run, other\n configurations are mainly for the purpose of performance.\n\n Args:\n collective_leader: a device string for collective leader, e.g.\n \"/job:worker/replica:0/task:\"; empty string means local execution of\n collective ops.\n scoped_allocator_enabled_ops: a tuple or a list of op names for scoped\n allocator to run with.\n use_nccl_communication: whether to use nccl communication for collective\n ops.\n device_filters: a tuple or a list of device strings. 
If set, corresponding\n task can only see the devices filtered by these device filters.\n\n Raises:\n RuntimeError: if this method is not called at program startup.\n \"\"\"\n if self._collective_leader is not None:\n if (self._collective_leader != collective_leader or\n self._collective_scoped_allocator_enabled_ops !=\n scoped_allocator_enabled_ops or\n self._collective_use_nccl_communication != use_nccl_communication or\n self._collective_device_filters != device_filters):\n raise ValueError(\"Collective ops are already configured.\")\n else:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\"Collective ops must be configured at program startup\")\n\n self._collective_leader = collective_leader\n self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops\n self._collective_use_nccl_communication = use_nccl_communication\n self._collective_device_filters = device_filters\n\n @property\n def _handle(self):\n if self._context_handle is None:\n raise AssertionError(\"Context must be initialized first.\")\n\n return self._context_handle\n\n @property\n def _devices(self):\n if self._context_devices is None:\n raise AssertionError(\"Context must be initialized first.\")\n\n return self._context_devices\n\n def __str__(self):\n if self._context_handle is None:\n return \"Eager TensorFlow Context. Devices currently uninitialized.\"\n else:\n devices = self._devices\n lines = [\"Eager TensorFlow Context with %d devices\" % (len(devices))]\n for i, d in enumerate(devices):\n lines.append(\" Device %d: %s\" % (i, d))\n return \"\\n\".join(lines)\n\n @tf_contextlib.contextmanager\n def _mode(self, mode):\n \"\"\"A context manager to allow setting the mode to EAGER/GRAPH.\"\"\"\n ctx = self._thread_local_data\n old_mode = ctx.mode\n old_is_eager = ctx.is_eager\n ctx.mode = mode\n ctx.is_eager = mode == EAGER_MODE\n if mode == EAGER_MODE:\n # Entering graph mode does not provide us with sufficient information to\n # record a context switch; graph-based context switches are only logged\n # when a graph is registered as the default graph.\n self.context_switches.push(False, eager_mode, None)\n try:\n yield\n finally:\n ctx.is_eager = old_is_eager\n ctx.mode = old_mode\n if mode == EAGER_MODE:\n self.context_switches.pop()\n\n def executing_eagerly(self):\n \"\"\"Returns True if current thread has eager executing enabled.\"\"\"\n return self._thread_local_data.is_eager\n\n def ones_rank_cache(self):\n \"\"\"Per-device cache for scalars.\"\"\"\n return _tensor_caches_map[self._id].ones_rank_cache\n\n def zeros_cache(self):\n \"\"\"Per-device cache for scalars.\"\"\"\n return _tensor_caches_map[self._id].zeros_cache\n\n @property\n def scope_name(self):\n \"\"\"Returns scope name for the current thread.\"\"\"\n return self._thread_local_data.scope_name\n\n @scope_name.setter\n def scope_name(self, s):\n \"\"\"Sets scope name for the current thread.\"\"\"\n self._thread_local_data.scope_name = s\n\n @property\n def summary_writer(self):\n \"\"\"Returns default summary writer for the current thread.\"\"\"\n return self._thread_local_data.summary_writer\n\n @summary_writer.setter\n def summary_writer(self, writer):\n \"\"\"Sets default summary writer for the current thread.\"\"\"\n self._thread_local_data.summary_writer = writer\n\n @property\n def summary_recording(self):\n \"\"\"Returns summary recording condition.\"\"\"\n return self._thread_local_data.summary_recording\n\n @summary_recording.setter\n def summary_recording(self, condition):\n \"\"\"Sets summary 
recording condition.\"\"\"\n self._thread_local_data.summary_recording = condition\n\n @property\n def summary_recording_distribution_strategy(self):\n \"\"\"Returns summary recording condition for distribution strategy.\"\"\"\n return self._thread_local_data.summary_recording_distribution_strategy\n\n @summary_recording_distribution_strategy.setter\n def summary_recording_distribution_strategy(self, condition):\n \"\"\"Sets summary recording condition for distribution strategy.\"\"\"\n self._thread_local_data.summary_recording_distribution_strategy = condition\n\n @property\n def summary_step(self):\n \"\"\"Returns summary step variable.\"\"\"\n return self._thread_local_data.summary_step\n\n @summary_step.setter\n def summary_step(self, step):\n \"\"\"Sets summary step variable.\"\"\"\n self._thread_local_data.summary_step = step\n\n @property\n def device_name(self):\n \"\"\"Returns the device name for the current thread.\"\"\"\n return self._thread_local_data.device_name\n\n @property\n def device_spec(self):\n \"\"\"Returns the device spec for the current thread.\"\"\"\n return self._thread_local_data.device_spec\n\n def _set_device(self, device_name, device_spec):\n self._thread_local_data.device_name = device_name\n self._thread_local_data.device_spec = device_spec\n\n def device(self, name):\n \"\"\"Context-manager to force placement of operations and Tensors on a device.\n\n Args:\n name: Name of the device or None to get default placement.\n\n Returns:\n Context manager that forces device placement.\n\n Raises:\n ValueError: If name is not a string or is an invalid device name.\n RuntimeError: If device scopes are not properly nested.\n \"\"\"\n return _EagerDeviceContext(self, name)\n\n def devices(self):\n \"\"\"List of the names of devices available to execute operations.\"\"\"\n return self._devices\n\n # TODO(fishx): remove this property.\n @property\n def execution_mode(self):\n \"\"\"Gets execution mode for current thread.\"\"\"\n return ASYNC if self.is_async() else SYNC\n\n @execution_mode.setter\n def execution_mode(self, mode):\n \"\"\"Sets execution mode for current thread.\"\"\"\n if mode not in (None, SYNC, ASYNC):\n raise ValueError(\n \"Execution mode should be None/SYNC/ASYNC. 
Got %s\" % mode)\n\n if mode is None:\n mode = SYNC\n\n enable_async = (mode == ASYNC)\n if self.is_async() != enable_async:\n # Only set the execution mode if the context has already been initialized\n if self._context_handle is not None:\n self.executor.wait()\n executor_new = executor.new_executor(enable_async)\n self._thread_local_data.executor = executor_new\n pywrap_tensorflow.TFE_ContextSetExecutorForThread(\n self._context_handle, executor_new.handle())\n else:\n self._default_is_async = enable_async\n\n def is_async(self):\n if self._context_handle is not None:\n return self.executor.is_async()\n else:\n return self._default_is_async\n\n @property\n def executor(self):\n ensure_initialized()\n return executor.Executor(\n pywrap_tensorflow.TFE_ContextGetExecutorForThread(self._context_handle))\n\n @executor.setter\n def executor(self, e):\n ensure_initialized()\n pywrap_tensorflow.TFE_ContextSetExecutorForThread(self._context_handle,\n e.handle())\n\n @property\n def config(self):\n \"\"\"Return the ConfigProto with all runtime deltas applied.\"\"\"\n # Ensure physical devices have been discovered and config has been imported\n self._initialize_physical_devices()\n\n config = config_pb2.ConfigProto()\n if self._config is not None:\n config.CopyFrom(self._config)\n\n if self._optimizer_jit is not None:\n config.graph_options.optimizer_options.global_jit_level = (\n config_pb2.OptimizerOptions.ON_1\n if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)\n if self._intra_op_parallelism_threads is not None:\n config.intra_op_parallelism_threads = self._intra_op_parallelism_threads\n if self._inter_op_parallelism_threads is not None:\n config.inter_op_parallelism_threads = self._inter_op_parallelism_threads\n\n if self._soft_device_placement is not None:\n config.allow_soft_placement = self._soft_device_placement\n else:\n config.allow_soft_placement = self.executing_eagerly()\n\n if self._log_device_placement is not None:\n config.log_device_placement = self._log_device_placement\n\n def rewriter_toggle(option):\n toggle = self._optimizer_experimental_options.get(option, None)\n if toggle is None:\n return\n\n setattr(config.graph_options.rewrite_options,\n option,\n (rewriter_config_pb2.RewriterConfig.ON\n if toggle else rewriter_config_pb2.RewriterConfig.OFF))\n\n def rewriter_bool(option):\n toggle = self._optimizer_experimental_options.get(option, None)\n if toggle is None:\n return\n\n setattr(config.graph_options.rewrite_options,\n option,\n toggle)\n\n rewriter_toggle(\"layout_optimizer\")\n rewriter_toggle(\"constant_folding\")\n rewriter_toggle(\"shape_optimization\")\n rewriter_toggle(\"remapping\")\n rewriter_toggle(\"arithmetic_optimization\")\n rewriter_toggle(\"dependency_optimization\")\n rewriter_toggle(\"loop_optimization\")\n rewriter_toggle(\"function_optimization\")\n rewriter_toggle(\"debug_stripper\")\n rewriter_bool(\"disable_model_pruning\")\n rewriter_toggle(\"scoped_allocator_optimization\")\n rewriter_toggle(\"pin_to_host_optimization\")\n rewriter_toggle(\"implementation_selector\")\n rewriter_toggle(\"auto_mixed_precision\")\n rewriter_bool(\"disable_meta_optimizer\")\n nodes = self._optimizer_experimental_options.get(\"min_graph_nodes\", None)\n if nodes is not None:\n config.graph_options.rewrite_options.min_graph_nodes = nodes\n\n # Compute device counts\n config.device_count[\"CPU\"] = 0\n config.device_count[\"GPU\"] = 0\n for dev in self._physical_devices:\n if dev not in self._visible_device_list:\n continue\n\n virtual_devices = 
self._virtual_device_map.get(dev)\n if virtual_devices is None:\n config.device_count[dev.device_type] += 1\n else:\n config.device_count[dev.device_type] += len(virtual_devices)\n\n # Configure gpu_options\n gpu_options = self._compute_gpu_options()\n config.gpu_options.MergeFrom(gpu_options)\n\n # Configure collective ops\n if self._collective_leader:\n config.experimental.collective_group_leader = self._collective_leader\n if self._collective_scoped_allocator_enabled_ops:\n rewrite_options = config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n for op in self._collective_scoped_allocator_enabled_ops:\n rewrite_options.scoped_allocator_opts.enable_op.append(op)\n if self._collective_use_nccl_communication:\n config.experimental.collective_nccl = True\n if self._collective_device_filters:\n del config.device_filters[:]\n for f in self._collective_device_filters:\n config.device_filters.append(f)\n\n return config\n\n def _compute_gpu_options(self):\n \"\"\"Build the GPUOptions proto.\"\"\"\n visible_device_list = []\n virtual_devices = []\n gpu_index = -1\n memory_growths = set()\n for dev in self.list_physical_devices(\"GPU\"):\n gpu_index += 1\n\n if dev not in self._visible_device_list:\n continue\n\n growth = self._memory_growth_map[dev]\n memory_growths.add(growth)\n visible_device_list.append(str(gpu_index))\n\n if self._virtual_device_map:\n vdevs = self._virtual_device_map.get(dev, [])\n device_limits = []\n for virt_dev in vdevs:\n device_limits.append(virt_dev.memory_limit)\n\n virtual_devices.append(\n config_pb2.GPUOptions.Experimental.VirtualDevices(\n memory_limit_mb=device_limits))\n\n # Only compute growth if virtual devices have not been configured and we\n # have GPUs\n if not virtual_devices and memory_growths:\n if len(memory_growths) > 1:\n raise ValueError(\"Memory growth cannot differ between GPU devices\")\n allow_growth = memory_growths.pop()\n else:\n allow_growth = None\n\n return config_pb2.GPUOptions(\n allow_growth=allow_growth,\n visible_device_list=\",\".join(visible_device_list),\n experimental=config_pb2.GPUOptions.Experimental(\n virtual_devices=virtual_devices))\n\n @property\n def function_call_options(self):\n \"\"\"Returns function call options for current thread.\n\n Note that the returned object is still referenced by the eager context.\n\n Returns: the FunctionCallOptions for current thread.\n \"\"\"\n if self._thread_local_data.function_call_options is None:\n config = self.config\n\n # Default to soft placement for functions unless specified\n if self._soft_device_placement is None:\n config.allow_soft_placement = True\n self._thread_local_data.function_call_options = FunctionCallOptions(\n config_proto=config)\n\n return self._thread_local_data.function_call_options\n\n @function_call_options.setter\n def function_call_options(self, options):\n \"\"\"Returns function call options for current thread.\"\"\"\n self._thread_local_data.function_call_options = options\n\n def num_gpus(self):\n \"\"\"The number of GPUs available to execute operations.\"\"\"\n self.ensure_initialized()\n return self._num_gpus\n\n def add_function(self, fn):\n \"\"\"Add a function definition to the context.\n\n Once added, the function (identified by its name) can be executed like any\n other operation.\n\n Args:\n fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).\n \"\"\"\n self.ensure_initialized()\n 
pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn)\n\n def add_function_def(self, fdef):\n \"\"\"Add a function definition to the context.\n\n Once added, the function (identified by its name) can be executed like any\n other operation.\n\n Args:\n fdef: A FunctionDef protocol buffer message.\n \"\"\"\n self.ensure_initialized()\n fdef_string = fdef.SerializeToString()\n pywrap_tensorflow.TFE_ContextAddFunctionDef(\n self._handle, fdef_string, len(fdef_string))\n\n def remove_function(self, name):\n \"\"\"Remove a function from the context.\n\n Once removed, the function cannot be executed anymore.\n\n Args:\n name: function signature name.\n \"\"\"\n self.ensure_initialized()\n pywrap_tensorflow.TFE_ContextRemoveFunction(self._handle, name)\n\n def has_function(self, name):\n \"\"\"Check if a function `name` is registered.\"\"\"\n self.ensure_initialized()\n return bool(pywrap_tensorflow.TFE_ContextHasFunction(self._handle, name))\n\n def add_post_execution_callback(self, callback):\n \"\"\"Add a post-execution callback to the context.\n\n A post-execution callback is invoked immediately after an eager operation or\n function has finished execution, providing access to the op's type, name\n input and output tensors. Multiple execution callbacks can be added, in\n which case the callbacks will be invoked in the order in which they are\n added.\n\n Args:\n callback: a callable of the signature\n `f(op_type, op_name, attrs, inputs, outputs)`.\n `op_type` is the type of the operation that was just executed (e.g.,\n `MatMul`).\n `op_name` is the name of the operation that has was just executed. This\n name is set by the client who created the operation and can be `None` if\n it is unset.\n `attrs` contains the attributes of the operation as a `tuple` of\n alternating attribute names and attribute values.\n `inputs` is the `list` of input `Tensor`(s) to the op.\n `outputs` is the `list` of output `Tensor`(s) from the op.\n Return value(s) from the callback are ignored.\n \"\"\"\n # TODO(cais): (b/64674139) Allow access to function-internal operations.\n self._post_execution_callbacks.append(callback)\n\n def clear_post_execution_callbacks(self):\n \"\"\"Clear all post-execution callbacks added to the context.\"\"\"\n del self._post_execution_callbacks[:]\n\n @property\n def post_execution_callbacks(self):\n \"\"\"Get the list of post-execution callbacks added to the context.\"\"\"\n return self._post_execution_callbacks\n\n def _initialize_physical_devices(self):\n \"\"\"Get local devices visible to the system.\"\"\"\n # We lazy initialize self._physical_devices since we do not want to do this\n # the constructor since the backend may not be initialized yet.\n with self._device_lock:\n if self._physical_devices is not None:\n return\n\n devs = pywrap_tensorflow.TF_ListPhysicalDevices()\n self._physical_devices = [\n PhysicalDevice(name=d.decode(),\n device_type=d.decode().split(\":\")[1]) for d in devs]\n # Construct the visible device list from all physical devices but ignore\n # XLA devices\n self._visible_device_list = [\n d for d in self._physical_devices\n if not d.device_type.startswith(\"XLA\")\n ]\n self._memory_growth_map = {\n d: None for d in self._physical_devices if d.device_type == \"GPU\"\n }\n\n # Import device settings that may have been passed into the constructor\n self._import_config()\n\n def list_physical_devices(self, device_type=None):\n \"\"\"List local devices visible to the system.\n\n This API allows a client to query the devices before they have been\n 
initialized by the eager runtime. Additionally a user can filter by device\n type, to get only CPUs or GPUs.\n\n Args:\n device_type: Optional device type to limit results to\n\n Returns:\n List of PhysicalDevice objects.\n \"\"\"\n self._initialize_physical_devices()\n\n if device_type is not None:\n return [\n d for d in self._physical_devices\n if device_type is None or device_type == d.device_type\n ]\n\n return self._physical_devices\n\n def _import_config(self):\n \"\"\"Import config if passed in during construction.\n\n If Context was created with a ConfigProto such as when calling\n tf.compat.v1.enable_eager_execution(), then we need to pull out the\n various pieces we might be replacing and import then into our internal\n class representation.\n \"\"\"\n if self._config is None:\n return\n\n num_cpus = self._config.device_count.get(\"CPU\", 1)\n if num_cpus != 1:\n cpus = [d for d in self._physical_devices if d.device_type == \"CPU\"]\n if num_cpus == 0:\n self.set_visible_devices([], \"CPU\")\n elif num_cpus > 1:\n self.set_virtual_device_configuration(\n cpus[0], [VirtualDeviceConfiguration() for _ in range(num_cpus)])\n\n # Parse GPU options\n gpus = [d for d in self._physical_devices if d.device_type == \"GPU\"]\n\n # If there are no GPUs detected, simply ignore all the GPU options passed in\n # rather than doing any validation checks.\n if not gpus:\n return\n\n gpu_count = self._config.device_count.get(\"GPU\", None)\n\n visible_gpus = []\n # TODO(gjn): Handle importing existing virtual GPU configuration\n visible_indices = self._config.gpu_options.visible_device_list\n if visible_indices:\n for index in visible_indices.split(\",\"):\n if int(index) >= len(gpus):\n raise ValueError(\"Invalid visible device index: %s\" % index)\n visible_gpus.append(gpus[int(index)])\n else:\n visible_gpus = gpus\n\n if gpu_count is not None:\n visible_gpus = visible_gpus[:gpu_count]\n\n self.set_visible_devices(visible_gpus, \"GPU\")\n\n def list_logical_devices(self, device_type=None):\n \"\"\"Return logical devices.\"\"\"\n self.ensure_initialized()\n\n devices = []\n for dev in self._logical_devices:\n if device_type is not None and device_type != dev.device_type:\n continue\n\n devices.append(dev)\n\n return devices\n\n def get_visible_devices(self, device_type=None):\n \"\"\"Get the list of visible devices.\"\"\"\n self._initialize_physical_devices()\n\n if device_type is None:\n return self._visible_device_list\n else:\n return [\n d for d in self._visible_device_list if d.device_type == device_type\n ]\n\n def set_visible_devices(self, devices, device_type=None):\n \"\"\"Set the list of visible devices.\"\"\"\n self._initialize_physical_devices()\n\n if not isinstance(devices, list):\n devices = [devices]\n\n for d in devices:\n if d not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(d))\n if device_type is not None and d.device_type != device_type:\n raise ValueError(\"Unrecognized device: %s\" % repr(d))\n\n visible_device_list = []\n if device_type is not None:\n visible_device_list = [\n d for d in self._visible_device_list if d.device_type != device_type\n ]\n\n visible_device_list += devices\n\n if self._visible_device_list == visible_device_list:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Visible devices cannot be modified after being initialized\")\n\n self._visible_device_list = visible_device_list\n\n def get_memory_growth(self, dev):\n \"\"\"Get if memory growth is enabled for a PhysicalDevice.\"\"\"\n 
self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n return self._memory_growth_map[dev]\n\n def set_memory_growth(self, dev, enable):\n \"\"\"Set if memory growth should be enabled for a PhysicalDevice.\"\"\"\n self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n if dev in self._virtual_device_map:\n raise ValueError(\n \"Cannot set memory growth on device when virtual devices configured\")\n\n if dev.device_type != \"GPU\":\n raise ValueError(\"Cannot set memory growth on non-GPU devices\")\n\n if self._memory_growth_map.get(dev) == enable:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Physical devices cannot be modified after being initialized\")\n\n self._memory_growth_map[dev] = enable\n\n def get_virtual_device_configuration(self, dev):\n \"\"\"Get the virtual device configuration for a PhysicalDevice.\"\"\"\n self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n return self._virtual_device_map.get(dev)\n\n def set_virtual_device_configuration(self, dev, virtual_devices):\n \"\"\"Set the virtual device configuration for a PhysicalDevice.\"\"\"\n self._initialize_physical_devices()\n\n if dev not in self._physical_devices:\n raise ValueError(\"Unrecognized device: %s\" % repr(dev))\n\n if dev.device_type == \"CPU\":\n for vdev in virtual_devices:\n if vdev.memory_limit is not None:\n raise ValueError(\"Setting memory limit on CPU virtual devices is \"\n \"currently not supported\")\n elif dev.device_type == \"GPU\":\n for vdev in virtual_devices:\n if vdev.memory_limit is None:\n raise ValueError(\n \"Setting memory limit is required for GPU virtual devices is\")\n else:\n raise ValueError(\"Virtual devices are not supported for %s\" %\n dev.device_type())\n\n if self._virtual_device_map.get(dev) == virtual_devices:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Virtual devices cannot be modified after being initialized\")\n\n self._virtual_device_map[dev] = virtual_devices\n\n @property\n def optimizer_jit(self):\n level = self.config.graph_options.optimizer_options.global_jit_level\n return (level == config_pb2.OptimizerOptions.ON_1 or\n level == config_pb2.OptimizerOptions.ON_2)\n\n @optimizer_jit.setter\n def optimizer_jit(self, enabled):\n self._optimizer_jit = enabled\n\n self._thread_local_data.function_call_options = None\n\n def get_optimizer_experimental_options(self):\n \"\"\"Get experimental options for the optimizer.\n\n Returns:\n Dictionary of current option values\n \"\"\"\n rewrite_options = self.config.graph_options.rewrite_options\n options = {}\n\n def rewriter_toggle(option):\n attr = getattr(rewrite_options, option)\n if attr != 0:\n options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)\n\n def rewriter_bool(option):\n options[option] = getattr(rewrite_options, option)\n\n rewriter_toggle(\"layout_optimizer\")\n rewriter_toggle(\"constant_folding\")\n rewriter_toggle(\"shape_optimization\")\n rewriter_toggle(\"remapping\")\n rewriter_toggle(\"arithmetic_optimization\")\n rewriter_toggle(\"dependency_optimization\")\n rewriter_toggle(\"loop_optimization\")\n rewriter_toggle(\"function_optimization\")\n rewriter_toggle(\"debug_stripper\")\n rewriter_bool(\"disable_model_pruning\")\n rewriter_toggle(\"scoped_allocator_optimization\")\n 
rewriter_toggle(\"pin_to_host_optimization\")\n rewriter_toggle(\"implementation_selector\")\n rewriter_toggle(\"auto_mixed_precision\")\n rewriter_bool(\"disable_meta_optimizer\")\n\n if rewrite_options.min_graph_nodes != 0:\n options[\"min_graph_nodes\"] = rewrite_options.min_graph_nodes\n\n return options\n\n def set_optimizer_experimental_options(self, options):\n \"\"\"Set experimental options for the optimizer.\n\n Args:\n options: Dictionary of options to modify\n \"\"\"\n self._optimizer_experimental_options.update(options)\n\n self._thread_local_data.function_call_options = None\n\n @property\n def intra_op_parallelism_threads(self):\n return self.config.intra_op_parallelism_threads\n\n @intra_op_parallelism_threads.setter\n def intra_op_parallelism_threads(self, num_threads):\n if self._intra_op_parallelism_threads == num_threads:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Intra op parallelism cannot be modified after initialization.\")\n\n self._intra_op_parallelism_threads = num_threads\n\n @property\n def inter_op_parallelism_threads(self):\n return self.config.inter_op_parallelism_threads\n\n @inter_op_parallelism_threads.setter\n def inter_op_parallelism_threads(self, num_threads):\n if self._inter_op_parallelism_threads == num_threads:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Inter op parallelism cannot be modified after initialization.\")\n\n self._inter_op_parallelism_threads = num_threads\n\n @property\n def soft_device_placement(self):\n return self.config.allow_soft_placement\n\n @soft_device_placement.setter\n def soft_device_placement(self, enabled):\n self._soft_device_placement = enabled\n\n self._thread_local_data.function_call_options = None\n\n @property\n def log_device_placement(self):\n return self.config.log_device_placement\n\n @log_device_placement.setter\n def log_device_placement(self, enabled):\n if self._log_device_placement == enabled:\n return\n\n if self._context_handle is not None:\n raise RuntimeError(\n \"Device placement logging must be set at program startup\")\n\n self._log_device_placement = enabled\n self._thread_local_data.function_call_options = None\n\n @property\n def device_policy(self):\n # Only get the policy from the context if it has already been initialized\n if self._context_handle is not None:\n return pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(self._handle)\n\n return self._device_policy\n\n @device_policy.setter\n def device_policy(self, policy):\n if policy is None:\n policy = DEVICE_PLACEMENT_SILENT\n\n if self._device_policy != policy:\n self._device_policy = policy\n\n # Only set the policy if the context has already been initialized\n if self._context_handle is not None:\n pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy(\n self._handle, self._device_policy)\n\n @property\n def mirroring_policy(self):\n # Only get the policy from the context if it has already been initialized\n if self._context_handle is not None:\n return pywrap_tensorflow.TFE_ContextGetMirroringPolicy(self._handle)\n\n return self._mirroring_policy\n\n @mirroring_policy.setter\n def mirroring_policy(self, policy):\n if policy is None:\n policy = MIRRORING_NONE\n\n if self._mirroring_policy != policy:\n self._mirroring_policy = policy\n\n # Only set the policy if the context has already been initialized\n if self._context_handle is not None:\n pywrap_tensorflow.TFE_ContextSetThreadLocalMirroringPolicy(\n self._handle, self._mirroring_policy)\n\n def 
enable_run_metadata(self):\n \"\"\"Enables tracing of op execution via RunMetadata.\n\n To retrieve the accumulated metadata call context.export_run_metadata()\n and to stop tracing call context.disable_run_metadata().\n \"\"\"\n self.ensure_initialized()\n pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle)\n\n def disable_run_metadata(self):\n \"\"\"Disables tracing of op execution via RunMetadata.\"\"\"\n if not self._context_handle:\n return\n pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle)\n\n def enable_graph_collection(self):\n \"\"\"Enables graph collection of executed functions.\n\n To retrieve the accumulated graphs call context.export_run_metadata()\n and to stop collecting graphs call context.disable_graph_collection().\n \"\"\"\n self.ensure_initialized()\n pywrap_tensorflow.TFE_ContextEnableGraphCollection(self._handle)\n\n def disable_graph_collection(self):\n \"\"\"Disables graph collection of executed functions.\"\"\"\n if not self._context_handle:\n return\n pywrap_tensorflow.TFE_ContextDisableGraphCollection(self._context_handle)\n\n def export_run_metadata(self):\n \"\"\"Returns a RunMetadata proto with accumulated information.\n\n The returned protocol buffer contains information since the most recent call\n to either enable_run_metadata or export_run_metadata.\n\n Returns:\n A RunMetadata protocol buffer. Or None if not enabled.\n \"\"\"\n if not self._context_handle:\n return None\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tensorflow.TFE_ContextExportRunMetadata(\n self._context_handle, buffer_)\n proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)\n run_metadata = config_pb2.RunMetadata()\n run_metadata.ParseFromString(compat.as_bytes(proto_data))\n return run_metadata\n\n @property\n def context_switches(self):\n \"\"\"Returns a stack of context switches.\"\"\"\n return self._context_switches\n\n def start_step(self):\n pywrap_tensorflow.TFE_ContextStartStep(self._handle)\n\n def end_step(self):\n pywrap_tensorflow.TFE_ContextEndStep(self._handle)\n\n\nclass _EagerDeviceContext(object):\n \"\"\"Context-manager forcing placement of ops and Tensors on a device.\"\"\"\n\n def __init__(self, ctx, device_name):\n self._device_name = device_name\n self._ctx = ctx\n self._stack = []\n\n def __enter__(self):\n ctx = self._ctx\n old_device_name = ctx.device_name\n old_device_spec = ctx.device_spec\n new_device_name = self._device_name\n cache_key = (old_device_name, new_device_name)\n try:\n new_device_name, new_device_spec = _device_parsing_cache[cache_key]\n except TypeError:\n # Error while trying to compute the cache key.\n raise ValueError(\"Expecting a string device name. Got %s(%s)\" %\n (type(new_device_name), new_device_name))\n except KeyError:\n # Handle a cache miss.\n if new_device_name is not None:\n if not isinstance(new_device_name, six.string_types):\n raise ValueError(\"Expecting a string device name. 
Got %s(%s)\" %\n (type(new_device_name), new_device_name))\n device_spec = pydev.DeviceSpec.from_string(new_device_name)\n if old_device_name:\n new_device_spec = copy.copy(old_device_spec)\n else:\n ctx.ensure_initialized()\n new_device_spec = pydev.DeviceSpec.from_string(\n ctx._context_devices[0]) # pylint: disable=protected-access\n new_device_spec = new_device_spec.make_merged_spec(device_spec)\n else:\n new_device_spec = pydev.DeviceSpec.from_string(\"\")\n new_device_name = new_device_spec.to_string()\n _device_parsing_cache[cache_key] = (new_device_name, new_device_spec)\n\n ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access\n self._stack.append((old_device_name, old_device_spec, new_device_spec))\n\n def __exit__(self, *ex_info):\n ctx = self._ctx\n old_device_name, old_device_spec, new_device_spec = self._stack[-1]\n if ctx.device_spec is not new_device_spec:\n raise RuntimeError(\n \"Exiting device scope without proper scope nesting\")\n del self._stack[-1]\n ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access\n\n\n# Do not set directly. Use _set_context.\n_context = None\n_context_lock = threading.Lock()\n\n\ndef _set_context_locked(ctx):\n global _context\n pywrap_tensorflow.TFE_Py_SetEagerContext(ctx)\n _context = ctx\n\n\ndef _set_context(ctx):\n with _context_lock:\n _set_context_locked(ctx)\n\n\ndef _create_context():\n with _context_lock:\n if _context is None:\n ctx = Context()\n _set_context_locked(ctx)\n\n\ndef context():\n \"\"\"Returns a singleton context object.\"\"\"\n if _context is None:\n _create_context()\n return _context\n\n\ndef context_safe():\n \"\"\"Returns current context (or None if one hasn't been initialized).\"\"\"\n return _context\n\n\ndef ensure_initialized():\n \"\"\"Initialize the context.\"\"\"\n context().ensure_initialized()\n\n\ndef set_global_seed(seed):\n \"\"\"Sets the eager mode seed.\"\"\"\n context()._set_global_seed(seed) # pylint: disable=protected-access\n\n\ndef global_seed():\n \"\"\"Returns the eager mode seed.\"\"\"\n return context()._seed # pylint: disable=protected-access\n\n\ndef internal_operation_seed():\n \"\"\"Returns the operation seed generated based on global seed.\"\"\"\n return context()._internal_operation_seed() # pylint: disable=protected-access\n\n\n@tf_export(\"executing_eagerly\")\ndef executing_eagerly():\n \"\"\"Returns True if the current thread has eager execution enabled.\n\n Eager execution is typically enabled via\n `tf.compat.v1.enable_eager_execution`, but may also be enabled within the\n context of a Python function via tf.contrib.eager.py_func.\n \"\"\"\n if context_safe() is None:\n return default_execution_mode == EAGER_MODE\n\n return context().executing_eagerly()\n\n\ndef in_eager_mode():\n \"\"\"Use executing_eagerly() instead. This function will be removed.\"\"\"\n return executing_eagerly()\n\n\ndef shared_name(name=None):\n \"\"\"Returns the anonymous shared name GUID if no shared name is specified.\n\n In eager mode we need to use a unique shared name to avoid spurious sharing\n issues. 
The runtime generates a unique name on our behalf when the reserved\n GUID is used as a shared name.\n\n Args:\n name: Optional shared name\n\n Returns:\n Eager compatible shared name.\n \"\"\"\n if name or not executing_eagerly():\n return name\n\n # Ensure a unique name when eager execution is enabled to avoid spurious\n # sharing issues.\n return \"cd2c89b7-88b7-44c8-ad83-06c2a9158347\"\n\n\ndef graph_mode():\n \"\"\"Context-manager to disable eager execution for the current thread.\"\"\"\n return context()._mode(GRAPH_MODE) # pylint: disable=protected-access\n\n\ndef eager_mode():\n \"\"\"Context-manager to enable eager execution for the current thread.\"\"\"\n return context()._mode(EAGER_MODE) # pylint: disable=protected-access\n\n\n# TODO(agarwal): get rid of this and use ops.name_scope instead.\[email protected]\ndef namescope(name):\n \"\"\"ContextManager for creating hierarchical name scopes.\"\"\"\n ctx = context()\n old_name = ctx.scope_name\n ctx.scope_name = \"%s/%s\" % (old_name, name) if old_name else name\n try:\n yield\n finally:\n ctx.scope_name = old_name\n\n\ndef scope_name():\n \"\"\"Name of the current scope.\"\"\"\n return context().scope_name\n\n\ndef device(name):\n \"\"\"Context-manager to force placement of operations and Tensors on a device.\n\n Example:\n ```python\n with tf.device('gpu:0'):\n with tf.device('cpu:0'):\n shape = tf.constant([], dtype=tf.int32)\n x = tf.random.truncated_normal(shape, tf.float32)\n ```\n will ensure that the `shape` Tensor is on CPU but the `truncated_normal`\n operation runs on GPU 0.\n\n Args:\n name: Name of the device (see context().devices()), or None to\n perform automatic placement.\n\n Returns:\n Context manager for setting the device.\n \"\"\"\n ensure_initialized()\n return context().device(name)\n\n\n@tf_export(\"config.experimental_list_devices\")\ndef list_devices():\n \"\"\"List the names of the available devices.\n\n Returns:\n Names of the available devices, as a `list`.\n \"\"\"\n ensure_initialized()\n return context().devices()\n\n\n@tf_export(\"debugging.get_log_device_placement\")\ndef get_log_device_placement():\n \"\"\"Get if device placements are logged.\n\n Returns:\n If device placements are logged.\n \"\"\"\n return context().log_device_placement\n\n\n@tf_export(\"debugging.set_log_device_placement\")\ndef set_log_device_placement(enabled):\n \"\"\"Set if device placements should be logged.\n\n Args:\n enabled: Whether to enabled device placement logging.\n \"\"\"\n context().log_device_placement = enabled\n\n\n@tf_contextlib.contextmanager\ndef device_policy(policy):\n \"\"\"Context manager for setting device placement policy for current thread.\"\"\"\n ctx = context()\n old_policy = ctx.device_policy\n try:\n ctx.device_policy = policy\n yield\n finally:\n ctx.device_policy = old_policy\n\n\n@tf_contextlib.contextmanager\ndef mirroring_policy(policy):\n \"\"\"Context manager for setting mirroring policy for current thread.\"\"\"\n ctx = context()\n old_policy = ctx.mirroring_policy\n try:\n ctx.mirroring_policy = policy\n yield\n finally:\n ctx.mirroring_policy = old_policy\n\n\ndef set_execution_mode(mode):\n \"\"\"Sets execution mode for the current thread.\"\"\"\n context().execution_mode = mode\n\n\n# TODO(fishx): remove this method.\n@tf_contextlib.contextmanager\ndef execution_mode(mode):\n \"\"\"Context manager for setting execution mode for current thread.\"\"\"\n ctx = context()\n executor_new = executor.new_executor(mode == ASYNC)\n executor_old = ctx.executor\n try:\n executor_old.wait()\n 
ctx.executor = executor_new\n yield\n finally:\n ctx.executor = executor_old\n executor_new.wait()\n\n\n@tf_contextlib.contextmanager\ndef executor_scope(e):\n \"\"\"Context manager for changing executor for current thread.\n\n Args:\n e: A Executor to execute eager ops under this scope. Setting it to None will\n switch back to use the default executor for the context.\n\n Yields:\n Context manager for setting the executor for current thread.\n \"\"\"\n ctx = context()\n executor_old = ctx.executor\n try:\n ctx.executor = e\n yield\n finally:\n ctx.executor = executor_old\n\n\n@tf_export(\"experimental.function_executor_type\")\n@tf_contextlib.contextmanager\ndef function_executor_type(executor_type):\n \"\"\"Context manager for setting the executor of eager defined functions.\n\n Eager defined functions are functions decorated by tf.contrib.eager.defun.\n\n Args:\n executor_type: a string for the name of the executor to be used to execute\n functions defined by tf.contrib.eager.defun.\n\n Yields:\n Context manager for setting the executor of eager defined functions.\n \"\"\"\n current_options = context().function_call_options\n old_options = copy.copy(current_options)\n try:\n current_options.executor_type = executor_type\n yield\n finally:\n context().function_call_options = old_options\n\n\ndef is_async():\n \"\"\"Returns true if current thread is in async mode.\"\"\"\n return context().is_async()\n\n\ndef async_wait():\n \"\"\"Waits for ops dispatched in ASYNC mode to finish.\"\"\"\n return context().executor.wait()\n\n\ndef async_clear_error():\n \"\"\"Clears errors raised during ASYNC execution mode.\"\"\"\n return context().executor.clear_error()\n\n\ndef num_gpus():\n \"\"\"Get the number of available GPU devices.\n\n Returns:\n The number of available GPU devices.\n \"\"\"\n return context().num_gpus()\n\n\ndef enable_run_metadata():\n \"\"\"Enables tracing of op execution via RunMetadata.\n\n To retrieve the accumulated metadata call context.export_run_metadata()\n and to stop tracing call context.disable_run_metadata().\n \"\"\"\n context().enable_run_metadata()\n\n\ndef disable_run_metadata():\n \"\"\"Disables tracing of op execution via RunMetadata.\"\"\"\n context().disable_run_metadata()\n\n\ndef enable_graph_collection():\n \"\"\"Enables graph collection of executed functions.\n\n To retrieve the accumulated graphs call context.export_run_metadata()\n and to stop collecting graphs call context.disable_graph_collection().\n \"\"\"\n context().enable_graph_collection()\n\n\ndef disable_graph_collection():\n \"\"\"Disables graph collection of executed functions.\"\"\"\n context().disable_graph_collection()\n\n\ndef export_run_metadata():\n \"\"\"Returns a RunMetadata proto with accumulated information.\n\n The returned protocol buffer contains information since the most recent call\n to either enable_run_metadata or export_run_metadata.\n\n Returns:\n A RunMetadata protocol buffer.\n \"\"\"\n return context().export_run_metadata()\n\n\ndef set_server_def(server_def):\n context().set_server_def(server_def)\n\n\ndef add_function(fdef):\n \"\"\"Add a function definition to the context.\"\"\"\n context().add_function(fdef)\n\n\ndef remove_function(name):\n \"\"\"Remove a function from the context.\"\"\"\n context().remove_function(name)\n\n\n# Not every user creates a Context via context.context()\n# (for example, enable_eager_execution in python/framework/ops.py),\n# but they do all import this file. 
Note that IS_IN_GRAPH_MODE and\n# in_graph_mode are both parameterless functions.\ndef _tmp_in_graph_mode():\n if context_safe() is None:\n # Context not yet initialized. Assume graph mode following the\n # default implementation in `is_in_graph_mode`.\n return True\n return not executing_eagerly()\n\n\nis_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode\n" ]
[ [ "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.pywrap_tensorflow.TFE_ContextExportRunMetadata", "tensorflow.python.eager.executor.new_executor", "tensorflow.python.eager.monitoring.Counter", "tensorflow.python.pywrap_tensorflow.TFE_NewContext", "tensorflow.python.tf2.enabled", "tensorflow.python.pywrap_tensorflow.TF_GetBuffer", "tensorflow.python.pywrap_tensorflow.TFE_NewContextOptions", "tensorflow.python.pywrap_tensorflow.TF_ListPhysicalDevices", "tensorflow.python.pywrap_tensorflow.TF_DeleteDeviceList", "tensorflow.python.pywrap_tensorflow.TFE_ContextEnableGraphCollection", "tensorflow.python.pywrap_tensorflow.TFE_ContextEndStep", "tensorflow.python.framework.device.DeviceSpec.from_string", "tensorflow.python.pywrap_tensorflow.TFE_ContextOptionsSetConfig", "tensorflow.python.pywrap_tensorflow.TFE_ContextClearCaches", "tensorflow.python.framework.c_api_util.tf_buffer", "tensorflow.python.eager.monitoring.BoolGauge", "tensorflow.python.pywrap_tensorflow.TFE_ContextSetServerDef", "tensorflow.python.pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy", "tensorflow.python.pywrap_tensorflow.TFE_ContextDisableGraphCollection", "numpy.array", "tensorflow.python.pywrap_tensorflow.TFE_ContextStartStep", "tensorflow.python.pywrap_tensorflow.TFE_ContextHasFunction", "tensorflow.python.pywrap_tensorflow.TFE_ContextDisableRunMetadata", "tensorflow.python.pywrap_tensorflow.TFE_ContextGetMirroringPolicy", "tensorflow.python.pywrap_tensorflow.TFE_ClearScalarCache", "tensorflow.python.pywrap_tensorflow.TFE_ContextSetThreadLocalMirroringPolicy", "tensorflow.python.pywrap_tensorflow.TFE_ContextOptionsSetAsync", "tensorflow.python.pywrap_tensorflow.TFE_ContextEnableRunMetadata", "tensorflow.python.pywrap_tensorflow.TFE_ContextAddFunction", "tensorflow.python.pywrap_tensorflow.TF_DeviceListName", "tensorflow.python.pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy", "tensorflow.python.pywrap_tensorflow.TFE_ContextListDevices", "tensorflow.core.protobuf.config_pb2.GPUOptions.Experimental", "tensorflow.python.pywrap_tensorflow.TFE_Py_SetEagerContext", "tensorflow.python.framework.device.canonical_name", "tensorflow.python.pywrap_tensorflow.TFE_ContextOptionsSetMirroringPolicy", "tensorflow.core.protobuf.config_pb2.GPUOptions.Experimental.VirtualDevices", "tensorflow.python.pywrap_tensorflow.TFE_ContextRemoveFunction", "tensorflow.python.pywrap_tensorflow.TF_DeviceListCount", "tensorflow.python.pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy", "tensorflow.python.pywrap_tensorflow.TFE_EnableCollectiveOps", "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.pywrap_tensorflow.TF_DeviceListType", "tensorflow.python.pywrap_tensorflow.TFE_DeleteContextOptions", "tensorflow.python.pywrap_tensorflow.TFE_ContextGetExecutorForThread", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.util.compat.as_bytes" ] ]
saverymax/mvts_transformer
[ "22796d6977b78d5636f6aad3f7efeb49f2991808" ]
[ "src/optimizers.py" ]
[ "import math\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\ndef get_optimizer(name):\n\n if name == \"Adam\":\n return torch.optim.Adam\n elif name == \"RAdam\":\n return RAdam\n\n\n# from https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam/radam.py\nclass RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n\n self.degenerated_to_sgd = degenerated_to_sgd\n if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):\n for param in params:\n if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):\n param['buffer'] = [[None, None, None] for _ in range(10)]\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\n buffer=[[None, None, None] for _ in range(10)])\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = group['buffer'][int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n elif self.degenerated_to_sgd:\n step_size = 1.0 / (1 - beta1 ** state['step'])\n else:\n step_size = -1\n buffered[2] = step_size\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n p.data.copy_(p_data_fp32)\n elif step_size > 0:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n p_data_fp32.add_(-step_size * group['lr'], exp_avg)\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass PlainRAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, 
betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n\n self.degenerated_to_sgd = degenerated_to_sgd\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n\n super(PlainRAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(PlainRAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n p.data.copy_(p_data_fp32)\n elif self.degenerated_to_sgd:\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n p_data_fp32.add_(-step_size, exp_avg)\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass AdamW(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, warmup=warmup)\n super(AdamW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdamW, self).__setstate__(state)\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n 
p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n if group['warmup'] > state['step']:\n scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']\n else:\n scheduled_lr = group['lr']\n\n step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)\n\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n" ]
[ [ "torch.zeros_like" ] ]
hpphappy/XRF_tomography_Theta
[ "5db1f9e8fc477449561927816106d5e55a5917af" ]
[ "data_generation_fns_updating.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 7 23:34:50 2020\n\n@author: panpanhuang\n\"\"\"\n\nimport numpy as np\nfrom numpy.random import default_rng\nimport xraylib as xlib\nimport xraylib_np as xlib_np\nimport torch as tc\nimport torch.nn.functional as F\nimport os\nfrom tqdm import tqdm\nimport pickle\n\n# Note: xraylib uses keV \n\n# an array of sub-lines of K line with the required format by xraylib.\nfl_K = np.array([xlib.KA1_LINE, xlib.KA2_LINE, xlib.KA3_LINE, xlib.KB1_LINE, xlib.KB2_LINE,\n xlib.KB3_LINE, xlib.KB4_LINE, xlib.KB5_LINE])\n\n# an array of sub-lines of L line with the required format by xraylib.\nfl_L = np.array([xlib.LA1_LINE, xlib.LA2_LINE, xlib.LB1_LINE, xlib.LB2_LINE, xlib.LB3_LINE,\n xlib.LB4_LINE, xlib.LB5_LINE, xlib.LB6_LINE, xlib.LB7_LINE, xlib.LB9_LINE,\n xlib.LB10_LINE, xlib.LB15_LINE, xlib.LB17_LINE])\n\n# an array of sub-lines of M line with the required format by xraylib.\nfl_M = np.array([xlib.MA1_LINE, xlib.MA2_LINE, xlib.MB_LINE])\n\n\n\nfl_line_groups = np.array([\"K\", \"L\", \"M\"])\n\n\ndef rotate(arr, theta, dev):\n \"\"\"\n This function rotates the grid concentration with dimension: (n_element, sample_height_n, sample_size_n, sample_size_n)\n The rotational axis is along dim 1 of the grid\n \n Parameters\n ----------\n arr : torch tensor\n grid concentration\n \n theta : float\n rotation angle in radians (clockwise)\n \n dev : string\n specify \"cpu\" or the cuda diveice (ex: cuda:0)\n\n\n Returns\n -------\n q : torch tensor\n the rotated grid concentration\n\n \"\"\"\n \n m0 = tc.tensor([tc.cos(theta), -tc.sin(theta), 0.0], device=dev)\n m1 = tc.tensor([tc.sin(theta), tc.cos(theta), 0.0], device=dev)\n m = tc.stack([m0, m1]).view(1, 2, 3)\n m = m.repeat([arr.shape[0], 1, 1])\n \n g = F.affine_grid(m, arr.shape)\n q = F.grid_sample(arr, g, padding_mode='border')\n \n return q\n\n\n\ndef attenuation_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n,\n sample_size_cm, this_aN_dic, probe_energy, dev):\n \"\"\" \n Parameters\n ----------\n src_path : string\n the path of the elemental concentration grid\n \n theta_st: float\n The initial angle of the sample\n \n theta_end: float\n The final angle of the sample\n \n n_theta: integer\n The number of sample angles\n \n sample_height_n : integer\n The height of the sample along the rotational axis (in number of pixels)\n \n sample_size_n: int scalar\n sample size in number of pixles on the side along the probe propagation axis\n\n sample_size_cm: scalar\n sample size in cm on the side along the probe propagation axis\n \n this_aN_dic: dictionary\n a dictionary of items with key = element symbol (string), and value = atomic number\n e.g. this_aN_dic = {\"C\":6, \"O\": 8}\n \n probe_energy : ndarray\n This array is an array with only 1 element. 
The element is the keV energy of the incident beam.\n \n dev : string\n specify \"cpu\" or the cuda diveice (ex: cuda:0)\n\n Returns\n -------\n attenuation_map_flat : torch tensor\n an array of attenuation ratio before the probe enters each voxel.\n dim 0: all angles of the sample\n dim 1: all voxels (flattened 3D array)\n \n transmission : TYPE\n DESCRIPTION.\n \"\"\" \n \n n_element = len(this_aN_dic)\n theta_ls = - tc.linspace(theta_st, theta_end, n_theta + 1)[:-1]\n grid_concentration = tc.tensor(np.load(src_path)).float().to(dev)\n aN_ls = np.array(list(this_aN_dic.values()))\n probe_attCS_ls = tc.tensor(xlib_np.CS_Total(aN_ls, probe_energy).flatten()).float().to(dev)\n \n att_exponent_acc_map = tc.zeros((len(theta_ls), sample_height_n, sample_size_n, sample_size_n+1), device=dev)\n for i , theta in enumerate(theta_ls):\n theta = tc.tensor(theta, device=dev)\n concentration_map_rot = rotate(grid_concentration, theta, dev)\n for j in range(n_element):\n lac_single = concentration_map_rot[j] * probe_attCS_ls[j]\n lac_acc = tc.cumsum(lac_single, axis=2)\n lac_acc = tc.cat((tc.zeros((sample_height_n, sample_size_n, 1), device=dev), lac_acc), dim = 2)\n att_exponent_acc = lac_acc * (sample_size_cm / sample_size_n) \n att_exponent_acc_map[i,:,:,:] += att_exponent_acc\n\n attenuation_map_flat = tc.exp(-(att_exponent_acc_map[:,:,:,:-1])).view(n_theta, sample_height_n * sample_size_n * sample_size_n).float().to(dev)\n transmission = tc.exp(-att_exponent_acc_map[:,:,:,-1]).view(n_theta, sample_height_n * sample_size_n).float().to(dev)\n \n return attenuation_map_flat, transmission\n\n\ndef create_XRT_data_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n,\n sample_size_cm, this_aN_dic, probe_energy, probe_cts, save_path, save_fname, theta_sep, Poisson_noise, dev):\n \"\"\"\n Parameters\n ----------\n src_path: string\n the path of the elemental concentration grid\n \n theta_st: float\n The initial angle of the sample\n \n theta_end: float\n The final angle of the sample\n \n n_theta: integer\n The number of sample angles\n \n sample_height_n : integer\n The height of the sample along the rotational axis (in number of pixels)\n \n sample_size_n: int scalar\n sample size in number of pixles on the side along the probe propagation axis\n\n sample_size_cm: scalar\n sample size in cm on the side along the probe propagation axis\n \n this_aN_dic: dictionary\n a dictionary of items with key = element symbol (string), and value = atomic number\n e.g. this_aN_dic = {\"C\":6, \"O\": 8}\n \n probe_energy : ndarray\n This array is an array with only 1 element. 
The element is the keV energy of the incident beam.\n \n probe_cts : float\n The incident photon counts/s\n \n save_path : string\n The directory of saving the XRT_data\n\n Returns\n -------\n XRT_data : ndarray\n The dimension of the array is (n_theta, sample_height_n * sample_size_n)\n [note: sample_size may not be the same as the input argument because of padding]\n \"\"\" \n XRT_data = probe_cts * attenuation_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n,\n sample_size_cm, this_aN_dic, probe_energy, dev)[1]\n \n if Poisson_noise == True:\n random_noise_generator = default_rng()\n XRT_data = random_noise_generator.poisson(XRT_data)\n \n if not os.path.exists(save_path):\n os.mkdir(save_path)\n else:\n pass \n \n if theta_sep == True: \n for this_theta_idx in tqdm(range(n_theta)):\n np.save(os.path.join(save_path, save_fname +'_{}'.format(this_theta_idx)), XRT_data[this_theta_idx])\n \n else:\n np.save(os.path.join(save_path, save_fname), XRT_data.cpu())\n \n return XRT_data\n\n\ndef MakeFLlinesDictionary(this_aN_dic, probe_energy,\n sample_size_n, sample_size_cm,\n fl_line_groups = np.array([\"K\", \"L\", \"M\"]), fl_K = fl_K, fl_L = fl_L, fl_M = fl_M,\n group_lines = True):\n \"\"\" \n\n Parameters\n ----------\n this_aN_dic: dictionary\n a dictionary of items with key = element symbol (string), and value = atomic number\n e.g. this_aN_dic = {\"C\":6, \"O\": 8}\n\n probe_energy : ndarray\n This array is an array with only 1 element. The element is the keV energy of the incident beam.\n\n sample_size_n: int scalar\n sample size in number of pixles on the side along the probe propagation axis\n\n sample_size_cm: scalar\n sample size in cm on the side along the probe propagation axis\n\n fl_line_groups : ndarray of string, optional\n representing XRF line group. The default is np.array([\"K\", \"L\", \"M\"]).\n\n fl_K : ndarray, optional\n The default is fl_K, an array of sub-lines of K line with the required format by xraylib.\n\n fl_L : ndarray, optional\n The default is fl_L, an array of sub-lines of L line with the required format by xraylib.\n\n fl_M : ndarray, optional\n The default is fl_M, an array of sub-lines of M line with the required format by xraylib.\n\n group_lines : boolean, optional\n Whether treating all K (or L, M) sub-lines as a single line. The default is True.\n\n Returns\n -------\n FL_all_elements_dic : dictionary\n The dictionary has 3 items. \n 1st item \n key: \"(element_name, Line)\"\n value: an ndarray of ndarrays of 2 elements(type: string), [element symbol, line group]\n e.g. 
[['C', 'K'], ['O', 'K'], ['Si', 'K'], ['Si', 'L']]\n\n 2nd item\n key: \"fl_energy\"\n value: float, Fluorescence energy in keV for each line of all element\n\n 3rd item: \n key: \"detected_fl_unit_concentration\"\n value: a 1D array of the fluorescence ratio generated assuming unit concentration [1 g/cm^3 ] for all element in this_aN_dic\n \n 4th item: \n key: \"n_line_group_each_element\"\n value: an array indicating the number of fluorescence line groups for each element specified in this_aN_dictionary\n \n 5th item:\n key: \"n_lines\"\n total number of fluorescence lines (grouped) in this system\n \"\"\"\n\n element_ls = np.array(list(this_aN_dic.keys()))\n aN_ls = np.array(list(this_aN_dic.values()))\n\n n_line_group = len(fl_line_groups)\n FL_all_elements_dic = {\"element_Line\": [], \"fl_energy\": np.array([]), \"detected_fl_unit_concentration\": np.array([])}\n voxel_size = sample_size_cm/sample_size_n \n\n fl_cs_K = xlib_np.CS_FluorLine_Kissel_Cascade(aN_ls, fl_K, probe_energy)\n fl_cs_L = xlib_np.CS_FluorLine_Kissel_Cascade(aN_ls, fl_L, probe_energy)\n fl_cs_M = xlib_np.CS_FluorLine_Kissel_Cascade(aN_ls, fl_M, probe_energy)\n\n # Remove the extra dimension with only 1 element\n fl_cs_K = np.reshape(fl_cs_K, (fl_cs_K.shape[:-1]))\n fl_cs_L = np.reshape(fl_cs_L, (fl_cs_L.shape[:-1]))\n fl_cs_M = np.reshape(fl_cs_M, (fl_cs_M.shape[:-1]))\n\n fl_energy_K = xlib_np.LineEnergy(aN_ls, fl_K)\n fl_energy_L = xlib_np.LineEnergy(aN_ls, fl_L)\n fl_energy_M = xlib_np.LineEnergy(aN_ls, fl_M)\n\n FL_all_elements_dic = {\"(element_name, Line)\": [], \"fl_energy\": np.array([]), \"detected_fl_unit_concentration\": np.array([]),\n \"n_line_group_each_element\": np.array([]), \"n_lines\": None}\n if group_lines == True:\n fl_energy_group = np.zeros((len(element_ls),n_line_group))\n fl_cs_group = np.zeros((len(element_ls),n_line_group))\n \n for i, element_name in enumerate(element_ls): \n\n if np.sum(fl_cs_K[i] != 0):\n fl_energy_group[i,0] = np.average(fl_energy_K[i], weights=fl_cs_K[i]) \n fl_cs_group[i,0] = np.sum(fl_cs_K[i])\n else:\n fl_energy_group[i,0] = 0\n fl_cs_group[i,0] = 0\n\n if np.sum(fl_cs_L[i] != 0):\n fl_energy_group[i,1] = np.average(fl_energy_L[i], weights=fl_cs_L[i]) \n fl_cs_group[i,1] = np.sum(fl_cs_L[i])\n else:\n fl_energy_group[i,1] = 0\n fl_cs_group[i,1] = 0\n\n if np.sum(fl_cs_M[i] != 0):\n fl_energy_group[i,2] = np.average(fl_energy_M[i], weights=fl_cs_M[i]) \n fl_cs_group[i,2] = np.sum(fl_cs_M[i])\n else:\n fl_energy_group[i,2] = 0\n fl_cs_group[i,2] = 0\n\n element_Line = fl_line_groups[fl_energy_group[i]!= 0]\n element_Line = [[element_name, element_Line[j]] for j in range(len(element_Line))]\n for k in range(len(element_Line)):\n FL_all_elements_dic[\"(element_name, Line)\"].append(element_Line[k]) \n\n Line_energy = fl_energy_group[i][fl_energy_group[i]!=0]\n FL_all_elements_dic[\"fl_energy\"] = np.append(FL_all_elements_dic[\"fl_energy\"], Line_energy)\n fl_unit_con = fl_cs_group[i][fl_energy_group[i]!=0] * voxel_size\n FL_all_elements_dic[\"detected_fl_unit_concentration\"] = np.append(FL_all_elements_dic[\"detected_fl_unit_concentration\"], fl_unit_con)\n FL_all_elements_dic[\"n_line_group_each_element\"] = np.append(FL_all_elements_dic[\"n_line_group_each_element\"], len(fl_unit_con))\n \n FL_all_elements_dic[\"(element_name, Line)\"] = np.array(FL_all_elements_dic[\"(element_name, Line)\"])\n \n FL_all_elements_dic[\"n_lines\"] = len(FL_all_elements_dic[\"(element_name, Line)\"])\n return FL_all_elements_dic\n\n\ndef 
generate_fl_signal_from_each_voxel_3d(src_path, theta_st, theta_end, n_theta, sample_size_n, sample_height_n, sample_size_cm, this_aN_dic, probe_energy, dev):\n \"\"\"\n This function calculates the ratio of fluoresence signal gen\n The rotational axis is along dim 1 of the grid\n\n Parameters\n ----------\n src_path: string\n the path of the elemental concentration grid\n \n theta_st: float\n The initial angle of the sample\n \n theta_end: float\n The final angle of the sample\n \n n_theta: integer\n The number of sample angles\n\n sample_size_n: int scalar\n sample size in number of pixles on the side along the probe propagation axis\n \n sample_height_n : integer\n The height of the sample along the rotational axis (in number of pixels)\n\n sample_size_cm: scalar\n sample size in cm on the side along the probe propagation axis\n \n this_aN_dic: dictionary\n a dictionary of items with key = element symbol (string), and value = atomic number\n e.g. this_aN_dic = {\"C\":6, \"O\": 8}\n \n probe_energy : ndarray\n This array is an array with only 1 element. The element is the keV energy of the incident beam.\n \n dev : string\n specify \"cpu\" or the cuda diveice (ex: cuda:0)\n\n Returns\n -------\n fl_map_tot : TYPE\n DESCRIPTION.\n\n \"\"\"\n element_ls = np.array(list(this_aN_dic.keys()))\n n_element = tc.tensor(len(element_ls)).to(dev)\n theta_ls = - tc.linspace(theta_st, theta_end, n_theta+1)[:-1].to(dev)\n\n grid_concentration = tc.tensor(np.load(src_path)).float().to(dev)\n\n fl_all_lines_dic = MakeFLlinesDictionary(this_aN_dic, probe_energy,\n sample_size_n.cpu().numpy(), sample_size_cm.cpu().numpy(),\n fl_line_groups = np.array([\"K\", \"L\", \"M\"]), fl_K = fl_K, fl_L = fl_L, fl_M = fl_M,\n group_lines = True)\n\n fl_map_tot = tc.zeros((n_theta, fl_all_lines_dic[\"n_lines\"], sample_height_n * sample_size_n * sample_size_n), device=dev)\n for i, theta in enumerate(theta_ls):\n concentration_map_rot = rotate(grid_concentration, tc.tensor(theta, dtype=tc.float32), dev)\n concentration_map_rot_flat = concentration_map_rot.view(len(element_ls), sample_height_n * sample_size_n * sample_size_n)\n line_idx = 0\n for j in range(n_element):\n ## fetch the generated fl signal at unit concentration for the calculated voxel size\n fl_unit = fl_all_lines_dic[\"detected_fl_unit_concentration\"][line_idx:line_idx + int(fl_all_lines_dic[\"n_line_group_each_element\"][j])] \n ## FL signal over the current elemental lines for each voxel\n fl_map = [concentration_map_rot_flat[j] * fl_unit_single_line for fl_unit_single_line in fl_unit]\n fl_map = tc.stack(fl_map).float()\n fl_map_tot[i, line_idx:line_idx + fl_map.shape[0],:] = fl_map \n line_idx = line_idx + len(fl_unit)\n \n return fl_map_tot\n\n\n### The following trace_beam functions solves the intersection of a ray with planes \n### There're 3 types of plane could be specified: x = some constant (d_x), y = some constant (d_y) and z = some constant (d_z)\n### The correspoinding intersecting points can be solved using trace_beam_x, trace_beam_y, trace_beam_z respectively\n\n# The ray uses a parametric form with a parameter, t: R(t) = (1-t) * S + t * D, S and D are the coordinates which spefify the points of sample voxels and the detector points\n# The intersecting coordinates: (x, y, z) = (Ix, Iy, Iz) at t=t'\n# 4 equations are used to solve the intersecting point:\n# From the parametric function of the ray\n# Iz = (1-t') * z_s + t' * z_d\n# Ix = (1-t') * x_s + t' * x_d\n# Iy = (1-t') * y_s + t' * y_d\n# From the function fo the plane: \n# Ix = 
some constant (d_x), Iy = some constant (d_y) or Iz = some constant (d_z)\n\n# Rearrange the equations above to solve (Iz, Ix, Iy, t')\n# Define the system of equation AX = b to solve the intersecting point, A is with the dimension: (n_batch, 4, 4), b is with the dimension: (n_batch, 4, 1)\n# n_batch is the number of planes we put into the equation that we want to solve the intersecting point with the the ray\n\ndef trace_beam_z(z_s, x_s, y_s, z_d, x_d, y_d, d_z_ls):\n if len(d_z_ls) == 0 or z_s == z_d:\n Z = np.stack((np.array([]), np.array([]), np.array([])), axis=-1)\n else:\n A = tc.tensor([[1, 0, 0, z_s - z_d],[0, 1, 0, x_s - x_d],[0, 0, 1, y_s - y_d],[1, 0, 0, 0]])\n A = A.repeat([len(d_z_ls), 1, 1])\n\n b1 = tc.tensor([[[z_s], [x_s], [y_s]]]).repeat([len(d_z_ls), 1, 1])\n b2 = tc.tensor([[[d_z]] for d_z in d_z_ls])\n b = tc.cat((b1, b2), dim=1)\n\n Z, LU = tc.solve(b, A)\n Z = np.array(Z[:,:-1].view(len(d_z_ls), 3))\n# t = X[:,-1] \n \n return Z\n\ndef trace_beam_x(z_s, x_s, y_s, z_d, x_d, y_d, d_x_ls):\n if len(d_x_ls) == 0:\n X = np.stack((np.array([]), np.array([]), np.array([])), axis=-1)\n else: \n A = tc.tensor([[1, 0, 0, z_s - z_d],[0, 1, 0, x_s - x_d],[0, 0, 1, y_s - y_d],[0, 1, 0, 0]])\n A = A.repeat([len(d_x_ls), 1, 1])\n\n b1 = tc.tensor([[[z_s], [x_s], [y_s]]]).repeat([len(d_x_ls), 1, 1])\n b2 = tc.tensor([[[d_x]] for d_x in d_x_ls])\n b = tc.cat((b1, b2), dim=1)\n\n X, LU = tc.solve(b, A)\n X = np.array(X[:,:-1].view(len(d_x_ls), 3))\n# t = Y[:,-1]\n \n return X\n\ndef trace_beam_y(z_s, x_s, y_s, z_d, x_d, y_d, d_y_ls):\n if len(d_y_ls) == 0 or y_s == y_d:\n Y = np.stack((np.array([]), np.array([]), np.array([])), axis=-1)\n else:\n A = tc.tensor([[1, 0, 0, z_s - z_d],[0, 1, 0, x_s - x_d],[0, 0, 1, y_s - y_d],[0, 0, 1, 0]])\n A = A.repeat([len(d_y_ls), 1, 1])\n\n b1 = tc.tensor([[[z_s], [x_s], [y_s]]]).repeat([len(d_y_ls), 1, 1])\n b2 = tc.tensor([[[d_y]] for d_y in d_y_ls])\n b = tc.cat((b1, b2), dim=1)\n\n Y, LU = tc.solve(b, A)\n Y = np.array(Y[:,:-1].view(len(d_y_ls), 3))\n# t = Z[:,-1]\n \n return Y\n\n\ndef intersecting_length_fl_detectorlet_3d(det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n, sample_size_cm, sample_height_n, P_save_path):\n \"\"\"\n Parameters\n ----------\n det_size_cm : float\n The diameter of the circle to distribute the detector points\n \n det_from_sample_cm : float\n The distance between the detector plane and the sample boundary plane\n \n det_ds_spacing_cm : float\n The spacing between detector points\n \n sample_size_n: int scalar\n sample size in number of pixles on the side along the probe propagation axis\n \n sample_size_cm: scalar\n sample size in cm on the side along the probe propagation axis\n \n sample_height_n : integer\n The height of the sample along the rotational axis (in number of pixels)\n \n P_save_path : string\n The path that saves the tensor P\n\n Returns\n -------\n n_det : integer\n The number of the detector points within the circle with the diatmeter, det_size_cm.\n \n P : torch tensor\n a tensor with the dimension (n_det, 3, n_voxels * diagnal_length_n)\n n_voxels: the number of voxels of the sample.\n diagnal_length_n: the number of voxels along the diagnol direction of the sample\n \n P tensor contains the information of intersecting voxels of the emitted XRF rays (along the connection between each FL emitting source voxel and each detector point)\n For each detector point (total: n_det), 3 rows of values representing the following values:\n 1st row, the index of the FL emitting soruce voxel. 
The index is the index of the flattened grid of the sample.\n 2nd row, the index of the intersecting voxels.\n 3rd row, the intersecting length in cm.\n \n \n For example:\n [[0, 0, 0, 0, 0, 0, ..., 0, 1, 1, 1, 1, 0, ..., 0, 2, 2, 2, 0, ..., 0, ......, 0, ...,0]\n |_________| \\________|\n \\ \\The remain (diagnal_length_n - 4) spaces are then set to 0\n \\4 intersecting voxels from the emitting source at index 1 \n \n [5,10,15,20,25, 0, ..., 0, 6,11,16,21, 0, ..., 0, 7,12,17, 0, ..., 0, ......, 0, ...,0]\n |_________| \\________|\n \\ \\The remain (diagnal_length_n - 4) spaces are then set to 0\n \\4 intersecting voxels at index 6, 11, 16, 21 from the emitting source at index 1 \n \n \n [0.1, 0.1, 0.1, 0.1, 0, 0, ..., 0, 0.2, 0.2, 0.2 ,0.2, 0, ..., 0, 0.3, 0.3, 0.3, 0, ..., 0, ......, 0, ...,0]]\n |_________________| \\________|\n \\ \\The remain (diagnal_length_n - 4) spaces are then set to 0\n \\4 intersecting lengths corresponging to the intersecting voxels in the 2nd row of this tensor\n \n The intersecting number of voxels from each source is not always the same. The maximal possible intersecting number of voxels\n is the number of voxels along the diagnol direction of the sample.\n Therefore, diagnal_length_n spaces are used to store the intersecting voxels for each emitting source.\n In most cases, the number of intersecting voxels for each source voxel is less than diagnal_length_n, The remaining spaces are filled with zeros.\n \n \"\"\"\n if os.path.isfile(P_save_path + \".npy\"):\n P = np.load(P_save_path + \".npy\")\n n_det = P.shape[0]\n longest_int_length = P.shape[2]//(sample_height_n * sample_size_n**2)\n print(f\"numbder of detecting points: {n_det}\")\n \n \n else: \n ### Calculating voxel size in cm\n voxel_size_cm = sample_size_cm/sample_size_n\n\n ### Calculating the diameter of the XRF detector with \n det_size_n = int(np.ceil(det_size_cm/voxel_size_cm)) \n\n ### Set the desired spacing between detectorlets, and then convert the unit of spacing to the number of the sample voxels\n det_ds_spacing_n = int(det_ds_spacing_cm/voxel_size_cm)\n\n # Define position of center of the source voxel (z_s, x_s, y_s), note that it's shifted by 0.5 from the voxel idx to represent the loc of center\n z_s, x_s, y_s = np.indices((int(sample_height_n), int(sample_size_n), int(sample_size_n))) + 0.5\n voxel_pos_ls_flat = np.stack((z_s.flatten(), x_s.flatten(), y_s.flatten()), axis=-1)\n\n\n ### Define the location of the detectorlets, the detector is parallel to the yz-plane\n ### The x-posision depends on the distance between the sample and the detecor\n ## x index of the location of the XRF detector\n det_axis_1_idx = sample_size_n + np.ceil(det_from_sample_cm/voxel_size_cm) + 0.5\n\n ### y, z index of the location of the XRF detector\n ## Define the center of the detector on yz-plane\n det_center_yz = (int(sample_size_n)/2., int(sample_size_n)/2.)\n\n ## Define the y and z loc(namely the loc along axis 2 and axis 0) of the detectorlets. 
The y and z loc are confined to be within a circle on the yz plane\n end_det_axis_2_idx_ls = np.array([int((sample_size_n - det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.),\n int((sample_size_n + det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.)])\n \n det_axis_2_idx_ls = np.linspace(end_det_axis_2_idx_ls[0], end_det_axis_2_idx_ls[1], np.int(det_size_n/det_ds_spacing_n + 1))\n\n end_det_axis_0_idx_ls = np.array([int((sample_height_n - det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.),\n int((sample_height_n + det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.)])\n\n det_axis_0_idx_ls = np.linspace(end_det_axis_0_idx_ls[0], end_det_axis_0_idx_ls[1], np.int(det_size_n/det_ds_spacing_n + 1))\n ## Create the meshgrid of y and z coordinates and keep only the coordinates within the detector circle\n y_d, z_d = np.meshgrid(det_axis_2_idx_ls, det_axis_0_idx_ls)\n\n yz_mask = ((y_d - det_center_yz[0])**2 + (z_d - det_center_yz[1])**2 <= (det_size_n/2)**2).flatten()\n y_d_flat, z_d_flat = y_d.flatten()[yz_mask], z_d.flatten()[yz_mask]\n\n\n ## The number of x posision needed to fill into the coodinates depends on the number of the y(or z) coodinates within the circle of detector\n x_d_flat = np.full((y_d_flat.shape), det_axis_1_idx)\n\n ##\n det_pos_ls_flat = np.stack((z_d_flat, x_d_flat, y_d_flat), axis=-1)\n n_det = len(det_pos_ls_flat)\n print(f\"numbder of detecting points: {n_det}\")\n ## define sample edges: \n ## sample_x_edge is the edge that is closer to the XRF detector\n ## sample_y_edge has two components representing the left and the right edge\n sample_x_edge = np.array([sample_size_n])\n sample_y_edge = np.array([0, sample_size_n]) \n sample_z_edge = np.array([0, sample_height_n]) \n\n dia_len_n = int((sample_height_n**2 + sample_size_n**2 + sample_size_n**2)**0.5)\n P = tc.zeros(n_det, 3, dia_len_n * sample_height_n * sample_size_n**2)\n longest_int_length = 0\n \n for i, det_pos in enumerate(det_pos_ls_flat):\n for j, v in enumerate(tqdm(voxel_pos_ls_flat)): \n\n # Solving the intersection of the ray with the sample boundary along axis-0\n bdx_int = trace_beam_x(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], sample_x_edge) # pick the 0th component just because the coordinate is doubly braced\n\n # Solving the intersection of the ray with the sample boundaries along axis-1 and axis-2, we will get 2 solutions for each axis since there're 2 bdry plane on each axis\n # The desired intersecting point is within the segment(voxel - detectorlet) which is always the one with the larger x coordinate\n bdy_int = trace_beam_y(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], sample_y_edge)\n if len(bdy_int) != 0:\n bdy_int = np.array([bdy_int[np.argmax(bdy_int[:,1])]])\n else:\n pass\n\n\n bdz_int = trace_beam_z(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], sample_z_edge)\n if len(bdz_int) != 0:\n bdz_int = np.array([bdz_int[np.argmax(bdz_int[:,1])]])\n else:\n pass\n\n # Pick the intersecting point that first hit the boundary plan. 
This point is with the least x value among the 3 intersections.\n bd_int_ls = np.concatenate((bdz_int, bdx_int, bdy_int))\n bd_int = np.clip(np.abs((bd_int_ls[np.argmin(bd_int_ls[:,1])])), 0, sample_size_n)\n\n\n # when the beam intersects with a voxel, it either intersects with the x or y or z boundary plane of the voxel\n # find the x,y,z-value of the voxel boundary except the ones on the sample edge\n\n z_edge_ls = np.where(bd_int[0] > v[0], np.linspace(np.ceil(bd_int[0])-1, np.ceil(v[0]), int(np.abs(np.ceil(bd_int[0]) - np.ceil(v[0])))),\n np.linspace(np.ceil(v[0])-1, np.ceil(bd_int[0]), int(np.abs(np.ceil(bd_int[0]) - np.ceil(v[0])))))\n\n x_edge_ls = np.where(bd_int[1] > v[1], np.linspace(np.ceil(bd_int[1])-1, np.ceil(v[1]), int(np.abs(np.ceil(bd_int[1]) - np.ceil(v[1])))),\n np.linspace(np.ceil(v[1])-1, np.ceil(bd_int[1]), int(np.abs(np.ceil(bd_int[1]) - np.ceil(v[1])))))\n\n y_edge_ls = np.where(bd_int[2] > v[2], np.linspace(np.ceil(bd_int[2])-1, np.ceil(v[2]), int(np.abs(np.ceil(bd_int[2]) - np.ceil(v[2])))),\n np.linspace(np.ceil(v[2])-1, np.ceil(bd_int[2]), int(np.abs(np.ceil(bd_int[2]) - np.ceil(v[2])))))\n\n\n z_edge_int_ls = trace_beam_z(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], z_edge_ls)\n x_edge_int_ls = trace_beam_x(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], x_edge_ls)\n y_edge_int_ls = trace_beam_y(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], y_edge_ls)\n\n # Collect all intersecting points and sort all intersections using the x coordinate\n int_ls = np.concatenate((x_edge_int_ls, y_edge_int_ls, z_edge_int_ls, np.array(bd_int)[np.newaxis,:])) \n int_ls = int_ls[np.argsort(int_ls[:,1])]\n\n # calculate the intersecting length in the intersecting voxels\n int_length = np.sqrt(np.diff(int_ls[:,0])**2 + np.diff(int_ls[:,1])**2 + np.diff(int_ls[:,2])**2)\n # just in case that we count some intersections twice, delete the duplicates\n idx_duplicate = np.array(np.where(int_length==0)).flatten()\n int_ls = np.delete(int_ls, idx_duplicate, 0)\n int_length = np.delete(int_length, idx_duplicate) \n\n # determine the indices of the intersecting voxels according to the intersecting x,y,z-coordinates\n int_ls_shift = np.zeros((int_ls.shape))\n int_ls_shift[1:] = int_ls[:-1]\n int_idx = np.floor((int_ls + int_ls_shift)/2)[1:]\n# int_idx = (int_idx[:,0].astype('int'), int_idx[:,1].astype('int'), int_idx[:,2].astype('int'))\n int_idx_flat = int_idx[:,0] * (sample_height_n.item() * sample_size_n.item()) + int_idx[:,1] * sample_size_n.item() + int_idx[:,2]\n \n if len(int_idx_flat) > longest_int_length:\n longest_int_length = len(int_idx_flat)\n \n P[i, 0, j * dia_len_n: j * dia_len_n + len(int_idx_flat)] = j\n P[i, 1, j * dia_len_n: j * dia_len_n + len(int_idx_flat)] = tc.tensor(int_idx_flat)\n P[i, 2, j * dia_len_n: j * dia_len_n + len(int_idx_flat)] = tc.tensor(int_length * voxel_size_cm.item()) \n \n tqdm._instances.clear()\n \n P_short = tc.zeros(n_det, 3, longest_int_length * sample_height_n * sample_size_n**2)\n \n for j, v in enumerate(tqdm(voxel_pos_ls_flat)):\n P_short[:,:,j * longest_int_length: (j+1) * longest_int_length] = P[:,:, j * dia_len_n: j * dia_len_n + longest_int_length]\n \n P = P.numpy()\n P_short = P_short.numpy()\n \n np.save(P_save_path + '_short.npy', P_short)\n np.save(P_save_path + \".npy\", P)\n \n return longest_int_length, n_det, P\n\n\n\ndef self_absorption_att_ratio_single_theta_3d(src_path, n_det, P, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n, sample_size_cm, sample_height_n, \n this_aN_dic, 
probe_energy, dev, theta):\n \n fl_all_lines_dic = MakeFLlinesDictionary(this_aN_dic, probe_energy, sample_size_n.cpu().numpy(), sample_size_cm.cpu().numpy(),\n fl_line_groups = np.array([\"K\", \"L\", \"M\"]), fl_K = fl_K, fl_L = fl_L, fl_M = fl_M, group_lines = True)\n \n n_voxel = sample_height_n * sample_size_n * sample_size_n\n dia_len_n = int((sample_height_n**2 + sample_size_n**2 + sample_size_n**2)**0.5)\n n_lines = tc.as_tensor(fl_all_lines_dic[\"n_lines\"]).to(dev)\n aN_ls = np.array(list(this_aN_dic.values())) \n grid_concentration = tc.from_numpy(np.load(src_path)).float().to(dev)\n n_element = len(this_aN_dic)\n \n # generate an arrary of total attenuation cross section with the dimension: (n_element, n_elemental_lines)\n # The component in the array represents the total attenuation cross section at some line energy in some element (with unitary concentration)\n FL_line_attCS_ls = tc.as_tensor(xlib_np.CS_Total(aN_ls, fl_all_lines_dic[\"fl_energy\"])).float().to(dev)\n\n concentration_map_rot = rotate(grid_concentration, theta, dev).float()\n concentration_map_rot_flat = concentration_map_rot.view(n_element, n_voxel).float()\n\n\n # lac: linear attenuation coefficient = concentration * attenuation_cross_section, \n # dimension: n_element, n_lines, n_voxel(FL source), n_voxel)\n lac = concentration_map_rot_flat.view(n_element, 1, 1, n_voxel) * FL_line_attCS_ls.view(n_element, n_lines, 1, 1)\n lac = lac.expand(-1, -1, n_voxel, -1).float()\n \n att_exponent = tc.stack([lac[:,:, P[m][0].to(dtype=tc.long), P[m][1].to(dtype=tc.long)] * P[m][2].view(1, 1, -1).repeat(n_element, n_lines, 1) for m in range(n_det)])\n \n ## summing over the attenation exponent contributed by all intersecting voxels, dim = (n_det, n_element, n_lines, n_voxel (FL source))\n att_exponent_voxel_sum = tc.sum(att_exponent.view(n_det, n_element, n_lines, n_voxel, dia_len_n), axis=-1)\n \n ## calculate the attenuation caused by all elements and get an array of dim = (n_det, n_lines, n_voxel (FL source)), and then take the average over n_det FL ray paths\n ## Final dim = (n_lines, n_voxel (FL source)) representing the attenuation ratio of each fluorescence line emitting from each source voxel.\n SA_att = tc.mean(tc.exp(-tc.sum(att_exponent_voxel_sum, axis=1)), axis=0)\n \n return SA_att\n\n\ndef create_XRF_data_single_theta_3d(n_det, P, theta_st, theta_end, n_theta, src_path, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n,\n sample_size_cm, sample_height_n, this_aN_dic, probe_cts, probe_energy, save_path, save_fname, Poisson_noise, dev, this_theta_idx):\n # (n_theta, sample_size_n * sample_size_n)\n theta_ls = - tc.linspace(theta_st, theta_end, n_theta + 1)[:-1]\n theta = theta_ls[this_theta_idx]\n probe_before_attenuation_flat = probe_cts * tc.ones((sample_height_n * sample_size_n * sample_size_n), device=dev)\n att_ratio_map_flat = attenuation_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n, sample_size_cm, this_aN_dic, probe_energy, dev)[0][this_theta_idx]\n SA_att_ratio = self_absorption_att_ratio_single_theta_3d(src_path, n_det, P, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n, sample_size_cm, sample_height_n, \n this_aN_dic, probe_energy, dev, theta)\n \n\n \n # probe_after_attenuation_flat: dimension (sample_height_n * sample_size_n * sample_size_n)\n probe_after_attenuation_flat = probe_before_attenuation_flat * att_ratio_map_flat\n \n #(n_elemental_line, sample_height * sample_size * sample_size)\n fl_ratio_map_tot = 
generate_fl_signal_from_each_voxel_3d(src_path, theta_st, theta_end, n_theta, sample_size_n, sample_height_n, sample_size_cm, this_aN_dic, probe_energy, dev)[this_theta_idx]\n\n #calculate fluorescence after self-absorption. dimension: (n_line, n_voxel (FL source))\n fl_signal_SA = tc.unsqueeze(probe_after_attenuation_flat, dim=0) * fl_ratio_map_tot * SA_att_ratio \n fl_signal_SA = fl_signal_SA.view(-1, sample_height_n * sample_size_n, sample_size_n)\n \n ## summing over the XRF signal collected from strip of voxels along the probe propagation direction\n fl_signal_SA = tc.sum(fl_signal_SA, axis=-1)\n \n ## Calculate the signal collected within the solid angle covered by the detector\n fl_signal_SA = fl_signal_SA * ((np.pi * (det_size_cm/2)**2) / det_from_sample_cm**2)/(4*np.pi)\n \n if Poisson_noise == True:\n random_noise_generator = default_rng()\n fl_signal_SA = random_noise_generator.poisson(fl_signal_SA)\n \n \n np.save(os.path.join(save_path, save_fname +'_{}'.format(this_theta_idx)), fl_signal_SA)\n \n return fl_signal_SA \n\n\ndef create_XRF_data_3d(P_save_path, theta_st, theta_end, n_theta, src_path, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n,\n sample_size_cm, sample_height_n, this_aN_dic, probe_cts, probe_energy, save_path, save_fname, Poisson_noise, dev):\n \n longest_int_length, n_det, P = intersecting_length_fl_detectorlet_3d(det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n.cpu(), sample_size_cm.cpu(), sample_height_n.cpu(), P_save_path)\n P = tc.from_numpy(P).to(tc.float)\n theta_ls = - tc.linspace(theta_st, theta_end, n_theta + 1)[:-1]\n \n for this_theta_idx, theta in enumerate(tqdm(theta_ls)):\n create_XRF_data_single_theta_3d(n_det, P, theta_st, theta_end, n_theta, src_path, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n,\n sample_size_cm, sample_height_n, this_aN_dic, probe_cts, probe_energy, save_path, save_fname, Poisson_noise, dev, this_theta_idx)\n \n \n \n \n \n" ]
[ [ "torch.cat", "torch.stack", "numpy.argmin", "numpy.load", "torch.ones", "numpy.where", "torch.nn.functional.affine_grid", "torch.exp", "torch.sum", "numpy.concatenate", "numpy.full", "numpy.save", "torch.unsqueeze", "torch.tensor", "numpy.argmax", "numpy.append", "torch.as_tensor", "torch.zeros", "torch.cos", "numpy.array", "numpy.int", "numpy.delete", "numpy.reshape", "numpy.zeros", "torch.linspace", "numpy.diff", "numpy.stack", "numpy.argsort", "torch.cumsum", "numpy.floor", "numpy.ceil", "torch.sin", "numpy.sum", "numpy.random.default_rng", "torch.from_numpy", "torch.nn.functional.grid_sample", "torch.solve", "numpy.average", "numpy.meshgrid" ] ]
konami86/DeepLab-v3-plus-cityscapes-Res50
[ "3b4d6b4b5d373e8f1206485d4867866eb4ffac7b" ]
[ "models/deeplabv3plus.py" ]
[ "#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as modelzoo\nimport torch.nn.functional as F\nimport torchvision\n\nfrom .resnet import Resnet50\nfrom modules import InPlaceABNSync as BatchNorm2d\n\n\n\nclass ConvBNReLU(nn.Module):\n def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, dilation=1, *args, **kwargs):\n super(ConvBNReLU, self).__init__()\n self.conv = nn.Conv2d(in_chan,\n out_chan,\n kernel_size = ks,\n stride = stride,\n padding = padding,\n dilation = dilation,\n bias = True)\n self.bn = BatchNorm2d(out_chan)\n self.init_weight()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if not ly.bias is None: nn.init.constant_(ly.bias, 0)\n\n\nclass ASPP(nn.Module):\n def __init__(self, in_chan=2048, out_chan=256, with_gp=True, *args, **kwargs):\n super(ASPP, self).__init__()\n self.with_gp = with_gp\n self.conv1 = ConvBNReLU(in_chan, out_chan, ks=1, dilation=1, padding=0)\n self.conv2 = ConvBNReLU(in_chan, out_chan, ks=3, dilation=6, padding=6)\n self.conv3 = ConvBNReLU(in_chan, out_chan, ks=3, dilation=12, padding=12)\n self.conv4 = ConvBNReLU(in_chan, out_chan, ks=3, dilation=18, padding=18)\n if self.with_gp:\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n self.conv1x1 = ConvBNReLU(in_chan, out_chan, ks=1)\n self.conv_out = ConvBNReLU(out_chan*5, out_chan, ks=1)\n else:\n self.conv_out = ConvBNReLU(out_chan*4, out_chan, ks=1)\n\n self.init_weight()\n\n def forward(self, x):\n H, W = x.size()[2:]\n feat1 = self.conv1(x)\n feat2 = self.conv2(x)\n feat3 = self.conv3(x)\n feat4 = self.conv4(x)\n if self.with_gp:\n avg = self.avg(x)\n feat5 = self.conv1x1(avg)\n feat5 = F.interpolate(feat5, (H, W), mode='bilinear', align_corners=True)\n feat = torch.cat([feat1, feat2, feat3, feat4, feat5], 1)\n else:\n feat = torch.cat([feat1, feat2, feat3, feat4], 1)\n feat = self.conv_out(feat)\n return feat\n\n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if not ly.bias is None: nn.init.constant_(ly.bias, 0)\n\n\nclass Decoder(nn.Module):\n def __init__(self, n_classes, low_chan=256, *args, **kwargs):\n super(Decoder, self).__init__()\n self.conv_low = ConvBNReLU(low_chan, 48, ks=1, padding=0)\n self.conv_cat = nn.Sequential(\n ConvBNReLU(304, 256, ks=3, padding=1),\n ConvBNReLU(256, 256, ks=3, padding=1),\n )\n self.conv_out = nn.Conv2d(256, n_classes, kernel_size=1, bias=False)\n\n self.init_weight()\n\n def forward(self, feat_low, feat_aspp):\n H, W = feat_low.size()[2:]\n feat_low = self.conv_low(feat_low)\n feat_aspp_up = F.interpolate(feat_aspp, (H, W), mode='bilinear',\n align_corners=True)\n feat_cat = torch.cat([feat_low, feat_aspp_up], dim=1)\n feat_out = self.conv_cat(feat_cat)\n logits = self.conv_out(feat_out)\n return logits\n\n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if not ly.bias is None: nn.init.constant_(ly.bias, 0)\n\n\nclass Deeplab_v3plus(nn.Module):\n def __init__(self, cfg, *args, **kwargs):\n super(Deeplab_v3plus, self).__init__()\n self.backbone = Resnet50(stride=16)\n self.aspp = ASPP(in_chan=2048, out_chan=256, with_gp=cfg.aspp_global_feature)\n self.decoder = Decoder(cfg.n_classes, low_chan=256)\n # self.backbone = Darknet53(stride=16)\n # self.aspp = ASPP(in_chan=1024, 
out_chan=256, with_gp=False)\n # self.decoder = Decoder(cfg.n_classes, low_chan=128)\n\n self.init_weight()\n\n def forward(self, x):\n H, W = x.size()[2:]\n feat4, _, _, feat32 = self.backbone(x)\n feat_aspp = self.aspp(feat32)\n logits = self.decoder(feat4, feat_aspp)\n logits = F.interpolate(logits, (H, W), mode='bilinear', align_corners=True)\n\n return logits\n\n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if not ly.bias is None: nn.init.constant_(ly.bias, 0)\n\n def get_params(self):\n back_bn_params, back_no_bn_params = self.backbone.get_params()\n tune_wd_params = list(self.aspp.parameters()) \\\n + list(self.decoder.parameters()) \\\n + back_no_bn_params\n no_tune_wd_params = back_bn_params\n return tune_wd_params, no_tune_wd_params\n\n\n\n\nif __name__ == \"__main__\":\n net = Deeplab_v3plus(19)\n net.cuda()\n net.train()\n net = nn.DataParallel(net)\n for i in range(100):\n # with torch.no_grad():\n in_ten = torch.randn((1, 3, 768, 768)).cuda()\n logits = net(in_ten)\n print(i)\n print(logits.size())\n" ]
[ [ "torch.cat", "torch.nn.init.constant_", "torch.nn.functional.interpolate", "torch.nn.init.kaiming_normal_", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d", "torch.randn", "torch.nn.DataParallel" ] ]
j-c-cook/RadiationHeatTransfer
[ "23f65c15b7118aab02b272905700fe1c12a16e35" ]
[ "RadiationHeatTransfer/examples/blackbody_radiation.py" ]
[ "# Jack C. Cook\n# Sunday, January 31, 2021\n\n\"\"\"\nPlancks Law:\nImplement Plancks law using Eb,lambda function\n\"\"\"\n\nimport RadiationHeatTransfer as RHT\nfrom scipy.optimize import fminbound\nfrom itertools import count\nfrom itertools import takewhile\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import cumtrapz\nfrom scipy.interpolate import interp1d\n\n\ndef any_range(start, stop, step):\n start = type(start + step)(start)\n return takewhile(lambda n: n < stop, count(start, step))\n\n\ndef main():\n # Recreate Figure 12-9\n # Figure 12-9: The variation of the blackbody emissive power with wavelength for\n # several temperatures\n fig, ax = plt.subplots()\n\n several_temperatures: list = [100, 300, 500, 1000, 2000, 4000, 5800] # Kelvin\n\n wavelengths: list = list(any_range(.01, 1000, .01))\n\n for _, T in enumerate(several_temperatures):\n # T = several_temperatures[-1] # the current temperature\n\n Eblambdas: list = [RHT.blackbody.Eblambda(wavelength, T) for _, wavelength in enumerate(wavelengths)]\n\n # Find the maximum value over the range of 0.01-1000 micrometers\n # Source: https://stackoverflow.com/a/16781456/11637415\n max_lmbda = fminbound(lambda lmbda: -RHT.blackbody.Eblambda(lmbda, T), 0.01, 1000)\n max_Eblmbda = RHT.blackbody.Eblambda(max_lmbda, T)\n\n ax.plot(wavelengths, Eblambdas, color='k')\n ax.scatter(max_lmbda, max_Eblmbda, color='r')\n ax.annotate('{0} K ($\\lambda={1:.2f}$)'.format(int(T), max_lmbda), xy=(max_lmbda, max_Eblmbda), xytext=(max_lmbda*10, max_Eblmbda),\n arrowprops=dict(arrowstyle='->'))\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_ylim([10**-6, 10**9])\n\n ax.grid(which='both')\n ax.set_axisbelow(True)\n\n ax.set_xlabel('Wavelength $\\lambda$, $\\mu$m')\n ax.set_ylabel('E$_{b \\lambda}$, W/m$^2 \\cdot \\mu$m')\n\n fig.savefig('blackbody_emissive_power.png')\n plt.close(fig)\n\n # use the Eb function to integrate over the whole wavelength to determine the total black body emissive power\n T = 5800\n integral, _ = RHT.blackbody.Eb(T)\n sigma = integral / T**4\n print('Stephan Boltzman constant, sigma={0:.2E}'.format(sigma))\n\n # Pick a few points to make sure the 0-lambda is working right\n T = 5000\n lmbda = 0.2\n print('lambda * T = {}'.format(T * lmbda))\n print('f_lambda = {0:.6}'.format(RHT.blackbody.f_lambda(lmbda, T)))\n lmbda = 0.4\n print('lambda * T = {}'.format(T * lmbda))\n print('f_lambda = {0:.6}'.format(RHT.blackbody.f_lambda(lmbda, T)))\n lmbda = 1.\n print('lambda * T = {}'.format(T * lmbda))\n print('f_lambda = {0:.6}'.format(RHT.blackbody.f_lambda(lmbda, T)))\n\n # Use the effective spectral function to find an effective value\n lmbdas_nm: list = [0.1, 250, 625, 1200, 2200, 2250, 2500, 1000000] # lambda values in nanometers\n alphas: list = [0.92, 0.92, 0.92, 0.39, 0.39, 0.47, 0.47, 0.47]\n lmbdas_mm: list = [x / 10**3 for _, x in enumerate(lmbdas_nm)] # 1000 nm = 1micrometer\n # create a linear 1D interpolation function for the absorptivity\n f = interp1d(lmbdas_mm, alphas, kind='linear')\n\n T_sun: float = 5800\n eff, wavelengths, eff_Eblambdas, Eblambdas = RHT.blackbody.effective_spectral(lmbdas_mm[0], lmbdas_mm[-1], T_sun, f)\n print('The effective absorptivity: {0:.5f}'.format(eff))\n\n # Plot the absorptivity function\n fig, ax = plt.subplots()\n\n ax.plot(lmbdas_mm, alphas, color='blue')\n\n ax.set_ylabel(r'$\\alpha$')\n ax.set_xlabel('$\\lambda$ ($\\mu$m)')\n\n ax.set_xlim([-0.2, 3])\n ax.set_ylim([0, 1])\n\n fig.savefig('absorptivity.png')\n plt.close(fig)\n\n # Plot the black body emissive 
power and the effective black body emissive power\n fig, ax = plt.subplots()\n\n ax.plot(wavelengths, Eblambdas, label='E$_{b \\lambda}$')\n ax.plot(wavelengths, eff_Eblambdas, '--', label=r'$\\alpha \\cdot$ E$_{b \\lambda}$')\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_ylim([10 ** -6, 10 ** 9])\n ax.set_xlim([10**-2, 10**3])\n\n fig.legend()\n\n ax.grid(which='both')\n ax.set_axisbelow(True)\n\n ax.set_xlabel('Wavelength $\\lambda$, $\\mu$m')\n ax.set_ylabel('E$_{b \\lambda}$, W/m$^2 \\cdot \\mu$m')\n\n fig.savefig('alpha_lambda.png')\n plt.close(fig)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "scipy.interpolate.interp1d", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots" ] ]
k0kod/pylas
[ "e1637f3de4e13c4037f0177da661cb67d3c3dfe9" ]
[ "pylastests/test_chunk_read_write.py" ]
[ "\"\"\"\nTests related to the 'chunked' reading and writing\n\"\"\"\nimport io\nimport math\n\nimport numpy as np\nimport pytest\n\nimport pylas\n\n\ndef test_chunked_las_reading_gives_expected_points(las_file_path):\n \"\"\"\n Test chunked LAS reading\n \"\"\"\n with pylas.open(las_file_path) as las_reader:\n with pylas.open(las_file_path) as reader:\n las = las_reader.read()\n check_chunked_reading_is_gives_expected_points(\n las, reader, iter_size=50)\n\n\ndef test_chunked_laz_reading_gives_expected_points(laz_file_path, laz_backend):\n \"\"\"\n Test LAZ reading in chunked mode with different backends\n \"\"\"\n with pylas.open(laz_file_path) as las_reader:\n with pylas.open(laz_file_path, laz_backend=laz_backend) as laz_reader:\n expected_las = las_reader.read()\n check_chunked_reading_is_gives_expected_points(\n expected_las, laz_reader, iter_size=50\n )\n\n\[email protected](\"backend\", pylas.LazBackend.detect_available() + (None,))\ndef test_chunked_writing_gives_expected_points(file_path, backend):\n \"\"\"\n Write in chunked mode then test that the points are correct\n \"\"\"\n original_las = pylas.read(file_path)\n iter_size = 51\n\n do_compress = True if backend is not None else False\n\n with io.BytesIO() as tmp_output:\n with pylas.open(\n tmp_output,\n mode=\"w\",\n closefd=False,\n header=original_las.header,\n do_compress=do_compress,\n laz_backend=backend\n ) as las:\n for i in range(int(math.ceil(len(original_las.points) / iter_size))):\n original_points = original_las.points[\n i * iter_size: (i + 1) * iter_size\n ]\n las.write_points(original_points)\n\n tmp_output.seek(0)\n with pylas.open(tmp_output, closefd=False) as reader:\n check_chunked_reading_is_gives_expected_points(\n original_las, reader, iter_size\n )\n\n\ndef check_chunked_reading_is_gives_expected_points(groundtruth_las, reader, iter_size):\n \"\"\"Checks that the points read by the reader are the same as groundtruth points.\"\"\"\n assert groundtruth_las.point_format == reader.header.point_format\n for i, points in enumerate(reader.chunk_iterator(iter_size)):\n expected_points = groundtruth_las.points[i * iter_size: (i + 1) * iter_size]\n for dim_name in points.array.dtype.names:\n assert np.allclose(expected_points[dim_name], points[dim_name]), f\"{dim_name} not equal\"\n" ]
[ [ "numpy.allclose" ] ]
lagrassa/ray
[ "02bdaf221d04ed0bcadbf649674d936cfdd46761" ]
[ "test/failure_test.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport json\nimport os\nimport ray\nimport sys\nimport tempfile\nimport threading\nimport time\n\nimport ray.ray_constants as ray_constants\nfrom ray.utils import _random_string\nimport pytest\n\nfrom ray.test.cluster_utils import Cluster\n\n\ndef relevant_errors(error_type):\n return [info for info in ray.error_info() if info[\"type\"] == error_type]\n\n\ndef wait_for_errors(error_type, num_errors, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(relevant_errors(error_type)) >= num_errors:\n return\n time.sleep(0.1)\n raise Exception(\"Timing out of wait.\")\n\n\[email protected]\ndef ray_start_regular():\n # Start the Ray processes.\n ray.init(num_cpus=2)\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\[email protected]\ndef shutdown_only():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\ndef test_failed_task(ray_start_regular):\n @ray.remote\n def throw_exception_fct1():\n raise Exception(\"Test function 1 intentionally failed.\")\n\n @ray.remote\n def throw_exception_fct2():\n raise Exception(\"Test function 2 intentionally failed.\")\n\n @ray.remote(num_return_vals=3)\n def throw_exception_fct3(x):\n raise Exception(\"Test function 3 intentionally failed.\")\n\n throw_exception_fct1.remote()\n throw_exception_fct1.remote()\n wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)\n assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2\n for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):\n msg = task.get(\"message\")\n assert \"Test function 1 intentionally failed.\" in msg\n\n x = throw_exception_fct2.remote()\n try:\n ray.get(x)\n except Exception as e:\n assert \"Test function 2 intentionally failed.\" in str(e)\n else:\n # ray.get should throw an exception.\n assert False\n\n x, y, z = throw_exception_fct3.remote(1.0)\n for ref in [x, y, z]:\n try:\n ray.get(ref)\n except Exception as e:\n assert \"Test function 3 intentionally failed.\" in str(e)\n else:\n # ray.get should throw an exception.\n assert False\n\n @ray.remote\n def f():\n raise Exception(\"This function failed.\")\n\n try:\n ray.get(f.remote())\n except Exception as e:\n assert \"This function failed.\" in str(e)\n else:\n # ray.get should throw an exception.\n assert False\n\n\ndef test_fail_importing_remote_function(ray_start_regular):\n # Create the contents of a temporary Python file.\n temporary_python_file = \"\"\"\ndef temporary_helper_function():\n return 1\n\"\"\"\n\n f = tempfile.NamedTemporaryFile(suffix=\".py\")\n f.write(temporary_python_file.encode(\"ascii\"))\n f.flush()\n directory = os.path.dirname(f.name)\n # Get the module name and strip \".py\" from the end.\n module_name = os.path.basename(f.name)[:-3]\n sys.path.append(directory)\n module = __import__(module_name)\n\n # Define a function that closes over this temporary module. 
This should\n # fail when it is unpickled.\n @ray.remote\n def g():\n return module.temporary_python_file()\n\n wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)\n errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)\n assert len(errors) == 2\n assert \"No module named\" in errors[0][\"message\"]\n assert \"No module named\" in errors[1][\"message\"]\n\n # Check that if we try to call the function it throws an exception and\n # does not hang.\n for _ in range(10):\n with pytest.raises(Exception):\n ray.get(g.remote())\n\n f.close()\n\n # Clean up the junk we added to sys.path.\n sys.path.pop(-1)\n\n\ndef test_failed_function_to_run(ray_start_regular):\n def f(worker):\n if ray.worker.global_worker.mode == ray.WORKER_MODE:\n raise Exception(\"Function to run failed.\")\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)\n # Check that the error message is in the task info.\n errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)\n assert len(errors) == 2\n assert \"Function to run failed.\" in errors[0][\"message\"]\n assert \"Function to run failed.\" in errors[1][\"message\"]\n\n\ndef test_fail_importing_actor(ray_start_regular):\n # Create the contents of a temporary Python file.\n temporary_python_file = \"\"\"\ndef temporary_helper_function():\n return 1\n\"\"\"\n\n f = tempfile.NamedTemporaryFile(suffix=\".py\")\n f.write(temporary_python_file.encode(\"ascii\"))\n f.flush()\n directory = os.path.dirname(f.name)\n # Get the module name and strip \".py\" from the end.\n module_name = os.path.basename(f.name)[:-3]\n sys.path.append(directory)\n module = __import__(module_name)\n\n # Define an actor that closes over this temporary module. 
This should\n # fail when it is unpickled.\n @ray.remote\n class Foo(object):\n def __init__(self):\n self.x = module.temporary_python_file()\n\n def get_val(self):\n return 1\n\n # There should be no errors yet.\n assert len(ray.error_info()) == 0\n\n # Create an actor.\n foo = Foo.remote()\n\n # Wait for the error to arrive.\n wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)\n errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)\n assert \"No module named\" in errors[0][\"message\"]\n\n # Wait for the error from when the __init__ tries to run.\n wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)\n errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)\n assert (\"failed to be imported, and so cannot execute this method\" in\n errors[0][\"message\"])\n\n # Check that if we try to get the function it throws an exception and\n # does not hang.\n with pytest.raises(Exception):\n ray.get(foo.get_val.remote())\n\n # Wait for the error from when the call to get_val.\n wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)\n errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)\n assert (\"failed to be imported, and so cannot execute this method\" in\n errors[1][\"message\"])\n\n f.close()\n\n # Clean up the junk we added to sys.path.\n sys.path.pop(-1)\n\n\ndef test_failed_actor_init(ray_start_regular):\n error_message1 = \"actor constructor failed\"\n error_message2 = \"actor method failed\"\n\n @ray.remote\n class FailedActor(object):\n def __init__(self):\n raise Exception(error_message1)\n\n def fail_method(self):\n raise Exception(error_message2)\n\n a = FailedActor.remote()\n\n # Make sure that we get errors from a failed constructor.\n wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)\n errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)\n assert len(errors) == 1\n assert error_message1 in errors[0][\"message\"]\n\n # Make sure that we get errors from a failed method.\n a.fail_method.remote()\n wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)\n errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)\n assert len(errors) == 2\n assert error_message1 in errors[1][\"message\"]\n\n\ndef test_failed_actor_method(ray_start_regular):\n error_message2 = \"actor method failed\"\n\n @ray.remote\n class FailedActor(object):\n def __init__(self):\n pass\n\n def fail_method(self):\n raise Exception(error_message2)\n\n a = FailedActor.remote()\n\n # Make sure that we get errors from a failed method.\n a.fail_method.remote()\n wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)\n errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)\n assert len(errors) == 1\n assert error_message2 in errors[0][\"message\"]\n\n\ndef test_incorrect_method_calls(ray_start_regular):\n @ray.remote\n class Actor(object):\n def __init__(self, missing_variable_name):\n pass\n\n def get_val(self, x):\n pass\n\n # Make sure that we get errors if we call the constructor incorrectly.\n\n # Create an actor with too few arguments.\n with pytest.raises(Exception):\n a = Actor.remote()\n\n # Create an actor with too many arguments.\n with pytest.raises(Exception):\n a = Actor.remote(1, 2)\n\n # Create an actor the correct number of arguments.\n a = Actor.remote(1)\n\n # Call a method with too few arguments.\n with pytest.raises(Exception):\n a.get_val.remote()\n\n # Call a method with too many arguments.\n with pytest.raises(Exception):\n a.get_val.remote(1, 2)\n # Call a method that doesn't exist.\n with pytest.raises(AttributeError):\n a.nonexistent_method()\n with pytest.raises(AttributeError):\n 
a.nonexistent_method.remote()\n\n\ndef test_worker_raising_exception(ray_start_regular):\n @ray.remote\n def f():\n ray.worker.global_worker._get_next_task_from_local_scheduler = None\n\n # Running this task should cause the worker to raise an exception after\n # the task has successfully completed.\n f.remote()\n\n wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)\n wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)\n\n\ndef test_worker_dying(ray_start_regular):\n # Define a remote function that will kill the worker that runs it.\n @ray.remote\n def f():\n eval(\"exit()\")\n\n f.remote()\n\n wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)\n\n errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)\n assert len(errors) == 1\n assert \"died or was killed while executing\" in errors[0][\"message\"]\n\n\ndef test_actor_worker_dying(ray_start_regular):\n @ray.remote\n class Actor(object):\n def kill(self):\n eval(\"exit()\")\n\n @ray.remote\n def consume(x):\n pass\n\n a = Actor.remote()\n [obj], _ = ray.wait([a.kill.remote()], timeout=5.0)\n with pytest.raises(Exception):\n ray.get(obj)\n with pytest.raises(Exception):\n ray.get(consume.remote(obj))\n wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)\n\n\ndef test_actor_worker_dying_future_tasks(ray_start_regular):\n @ray.remote\n class Actor(object):\n def getpid(self):\n return os.getpid()\n\n def sleep(self):\n time.sleep(1)\n\n a = Actor.remote()\n pid = ray.get(a.getpid.remote())\n tasks1 = [a.sleep.remote() for _ in range(10)]\n os.kill(pid, 9)\n time.sleep(0.1)\n tasks2 = [a.sleep.remote() for _ in range(10)]\n for obj in tasks1 + tasks2:\n with pytest.raises(Exception):\n ray.get(obj)\n\n wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)\n\n\ndef test_actor_worker_dying_nothing_in_progress(ray_start_regular):\n @ray.remote\n class Actor(object):\n def getpid(self):\n return os.getpid()\n\n a = Actor.remote()\n pid = ray.get(a.getpid.remote())\n os.kill(pid, 9)\n time.sleep(0.1)\n task2 = a.getpid.remote()\n with pytest.raises(Exception):\n ray.get(task2)\n\n\ndef test_actor_scope_or_intentionally_killed_message(ray_start_regular):\n @ray.remote\n class Actor(object):\n pass\n\n a = Actor.remote()\n a = Actor.remote()\n a.__ray_terminate__.remote()\n time.sleep(1)\n assert len(ray.error_info()) == 0, (\n \"Should not have propogated an error - {}\".format(ray.error_info()))\n\n\[email protected]\ndef ray_start_object_store_memory():\n # Start the Ray processes.\n store_size = 10**6\n ray.init(num_cpus=1, object_store_memory=store_size)\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\[email protected](\"This test does not work yet.\")\ndef test_put_error1(ray_start_object_store_memory):\n num_objects = 3\n object_size = 4 * 10**5\n\n # Define a task with a single dependency, a numpy array, that returns\n # another array.\n @ray.remote\n def single_dependency(i, arg):\n arg = np.copy(arg)\n arg[0] = i\n return arg\n\n @ray.remote\n def put_arg_task():\n # Launch num_objects instances of the remote task, each dependent\n # on the one before it. 
The result of the first task should get\n # evicted.\n args = []\n arg = single_dependency.remote(0, np.zeros(\n object_size, dtype=np.uint8))\n for i in range(num_objects):\n arg = single_dependency.remote(i, arg)\n args.append(arg)\n\n # Get the last value to force all tasks to finish.\n value = ray.get(args[-1])\n assert value[0] == i\n\n # Get the first value (which should have been evicted) to force\n # reconstruction. Currently, since we're not able to reconstruct\n # `ray.put` objects that were evicted and whose originating tasks\n # are still running, this for-loop should hang and push an error to\n # the driver.\n ray.get(args[0])\n\n put_arg_task.remote()\n\n # Make sure we receive the correct error message.\n wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)\n\n\[email protected](\"This test does not work yet.\")\ndef test_put_error2(ray_start_object_store_memory):\n # This is the same as the previous test, but it calls ray.put directly.\n num_objects = 3\n object_size = 4 * 10**5\n\n # Define a task with a single dependency, a numpy array, that returns\n # another array.\n @ray.remote\n def single_dependency(i, arg):\n arg = np.copy(arg)\n arg[0] = i\n return arg\n\n @ray.remote\n def put_task():\n # Launch num_objects instances of the remote task, each dependent\n # on the one before it. The result of the first task should get\n # evicted.\n args = []\n arg = ray.put(np.zeros(object_size, dtype=np.uint8))\n for i in range(num_objects):\n arg = single_dependency.remote(i, arg)\n args.append(arg)\n\n # Get the last value to force all tasks to finish.\n value = ray.get(args[-1])\n assert value[0] == i\n\n # Get the first value (which should have been evicted) to force\n # reconstruction. Currently, since we're not able to reconstruct\n # `ray.put` objects that were evicted and whose originating tasks\n # are still running, this for-loop should hang and push an error to\n # the driver.\n ray.get(args[0])\n\n put_task.remote()\n\n # Make sure we receive the correct error message.\n wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)\n\n\ndef test_version_mismatch(shutdown_only):\n ray_version = ray.__version__\n ray.__version__ = \"fake ray version\"\n\n ray.init(num_cpus=1)\n\n wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)\n\n # Reset the version.\n ray.__version__ = ray_version\n\n\ndef test_warning_monitor_died(shutdown_only):\n ray.init(num_cpus=0)\n\n time.sleep(1) # Make sure the monitor has started.\n\n # Cause the monitor to raise an exception by pushing a malformed message to\n # Redis. 
This will probably kill the raylets and the raylet_monitor in\n # addition to the monitor.\n fake_id = 20 * b\"\\x00\"\n malformed_message = \"asdf\"\n redis_client = ray.worker.global_worker.redis_client\n redis_client.execute_command(\n \"RAY.TABLE_ADD\", ray.gcs_utils.TablePrefix.HEARTBEAT_BATCH,\n ray.gcs_utils.TablePubsub.HEARTBEAT_BATCH, fake_id, malformed_message)\n\n wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)\n\n\ndef test_export_large_objects(ray_start_regular):\n import ray.ray_constants as ray_constants\n\n large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)\n\n @ray.remote\n def f():\n large_object\n\n # Make sure that a warning is generated.\n wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)\n\n @ray.remote\n class Foo(object):\n def __init__(self):\n large_object\n\n Foo.remote()\n\n # Make sure that a warning is generated.\n wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)\n\n\ndef test_warning_for_infeasible_tasks(ray_start_regular):\n # Check that we get warning messages for infeasible tasks.\n\n @ray.remote(num_gpus=1)\n def f():\n pass\n\n @ray.remote(resources={\"Custom\": 1})\n class Foo(object):\n pass\n\n # This task is infeasible.\n f.remote()\n wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)\n\n # This actor placement task is infeasible.\n Foo.remote()\n wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)\n\n\ndef test_warning_for_infeasible_zero_cpu_actor(shutdown_only):\n # Check that we cannot place an actor on a 0 CPU machine and that we get an\n # infeasibility warning (even though the actor creation task itself\n # requires no CPUs).\n\n ray.init(num_cpus=0)\n\n @ray.remote\n class Foo(object):\n pass\n\n # The actor creation should be infeasible.\n Foo.remote()\n wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)\n\n\ndef test_warning_for_too_many_actors(shutdown_only):\n # Check that if we run a workload which requires too many workers to be\n # started that we will receive a warning.\n num_cpus = 2\n ray.init(num_cpus=num_cpus)\n\n @ray.remote\n class Foo(object):\n def __init__(self):\n time.sleep(1000)\n\n [Foo.remote() for _ in range(num_cpus * 3)]\n wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)\n [Foo.remote() for _ in range(num_cpus)]\n wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)\n\n\ndef test_warning_for_too_many_nested_tasks(shutdown_only):\n # Check that if we run a workload which requires too many workers to be\n # started that we will receive a warning.\n num_cpus = 2\n ray.init(num_cpus=num_cpus)\n\n @ray.remote\n def f():\n time.sleep(1000)\n return 1\n\n @ray.remote\n def g():\n # Sleep so that the f tasks all get submitted to the scheduler after\n # the g tasks.\n time.sleep(1)\n ray.get(f.remote())\n\n [g.remote() for _ in range(num_cpus * 4)]\n wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)\n\n\[email protected]\ndef ray_start_two_nodes():\n # Start the Ray processes.\n cluster = Cluster()\n for _ in range(2):\n cluster.add_node(\n num_cpus=0,\n _internal_config=json.dumps({\n \"num_heartbeats_timeout\": 40\n }))\n ray.init(redis_address=cluster.redis_address)\n\n yield cluster\n # The code after the yield will run as teardown code.\n ray.shutdown()\n cluster.shutdown()\n\n\n# Note that this test will take at least 10 seconds because it must wait for\n# the monitor to detect enough missed heartbeats.\ndef test_warning_for_dead_node(ray_start_two_nodes):\n cluster = ray_start_two_nodes\n cluster.wait_for_nodes()\n\n 
client_ids = {item[\"ClientID\"] for item in ray.global_state.client_table()}\n\n # Try to make sure that the monitor has received at least one heartbeat\n # from the node.\n time.sleep(0.5)\n\n # Kill both raylets.\n cluster.list_all_nodes()[1].kill_raylet()\n cluster.list_all_nodes()[0].kill_raylet()\n\n # Check that we get warning messages for both raylets.\n wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)\n\n # Extract the client IDs from the error messages. This will need to be\n # changed if the error message changes.\n warning_client_ids = {\n item[\"message\"].split(\" \")[5]\n for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)\n }\n\n assert client_ids == warning_client_ids\n\n\ndef test_raylet_crash_when_get(ray_start_regular):\n nonexistent_id = ray.ObjectID(_random_string())\n\n def sleep_to_kill_raylet():\n # Don't kill raylet before default workers get connected.\n time.sleep(2)\n ray.worker._global_node.kill_raylet()\n\n thread = threading.Thread(target=sleep_to_kill_raylet)\n thread.start()\n with pytest.raises(\n ray.raylet.RayCommonError,\n match=r\".*raylet client may be closed.*\"):\n ray.get(nonexistent_id)\n thread.join()\n" ]
[ [ "numpy.copy", "numpy.zeros" ] ]
nbip/IWAE
[ "3a5e38b4d6eafceb5ec47dbe59aee3b42ad576f6" ]
[ "tasks/task01.py" ]
[ "import tensorflow as tf\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow import keras\nimport numpy as np\nimport os\nimport argparse\nimport datetime\nimport time\nimport sys\nsys.path.insert(0, './src')\nsys.path.insert(0, './tasks')\nimport utils\nimport iwae1\nimport iwae2\nimport plot_task01\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--stochastic_layers\", type=int, default=1, choices=[1, 2], help=\"number of stochastic layers in the model\")\nparser.add_argument(\"--n_samples\", type=int, default=5, help=\"number of importance samples\")\nparser.add_argument(\"--batch_size\", type=int, default=20, help=\"batch size\")\nparser.add_argument(\"--epochs\", type=int, default=-1,\n help=\"numper of epochs, if set to -1 number of epochs \"\n \"will be set based on the learning rate scheme from the paper\")\nparser.add_argument(\"--objective\", type=str, default=\"iwae_elbo\", choices=[\"vae_elbo\", \"iwae_elbo\", \"iwae_eq14\", \"vae_elbo_kl\"])\nparser.add_argument(\"--gpu\", type=str, default='0', help=\"Choose GPU\")\nargs = parser.parse_args()\nprint(args)\n\n# ---- string describing the experiment, to use in tensorboard and plots\nstring = \"task01_{0}_{1}_{2}\".format(args.objective, args.stochastic_layers, args.n_samples)\n\n# ---- set the visible GPU devices\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n# ---- dynamic GPU memory allocation\ngpus = tf.config.list_physical_devices('GPU')\nif gpus:\n tf.config.experimental.set_memory_growth(gpus[0], True)\n\n# ---- set random seeds\nnp.random.seed(123)\ntf.random.set_seed(123)\n\n# ---- number of passes over the data, see bottom of page 6 in [1]\nif args.epochs == -1:\n epochs = 0\n learning_rate_dict = {}\n\n for i in range(8):\n learning_rate = 0.001 * 10**(-i/7)\n learning_rate_dict[epochs] = learning_rate\n epochs += 3 ** i\n\nelse:\n epochs = args.epochs\n learning_rate_dict = {}\n learning_rate_dict[0] = 0.0001\n\n# ---- load data\n(Xtrain, ytrain), (Xtest, ytest) = keras.datasets.mnist.load_data()\nNtrain = Xtrain.shape[0]\nNtest = Xtest.shape[0]\n\n# ---- reshape to vectors\nXtrain = Xtrain.reshape(Ntrain, -1) / 255\nXtest = Xtest.reshape(Ntest, -1) / 255\n\n# ---- experiment settings\nobjective = args.objective\nn_samples = args.n_samples\nbatch_size = args.batch_size\nsteps_pr_epoch = Ntrain // batch_size\ntotal_steps = steps_pr_epoch * epochs\n\n# ---- prepare tensorboard\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntrain_log_dir = \"/tmp/iwae/{0}/\".format(string) + current_time + \"/train\"\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\ntest_log_dir = \"/tmp/iwae/{0}/\".format(string) + current_time + \"/test\"\ntest_summary_writer = tf.summary.create_file_writer(test_log_dir)\n\n# ---- instantiate the model, optimizer and metrics\nif args.stochastic_layers == 1:\n n_latent = [2]\n n_hidden = [200]\n model = iwae1.IWAE(n_hidden[0], n_latent[0])\nelse:\n n_latent = [2, 2]\n n_hidden = [200, 100]\n model = iwae2.IWAE(n_hidden, n_latent)\n\noptimizer = keras.optimizers.Adam(learning_rate_dict[0], epsilon=1e-4)\nprint(\"Initial learning rate: \", optimizer.learning_rate.numpy())\n\n# ---- prepare plotting of samples during training\n# use the same samples from the prior throughout training\npz = tfd.Normal(0, 1)\nz = pz.sample([100, n_latent[-1]])\n\nplt_epochs = list(2**np.arange(12))\nplt_epochs.insert(0, 0)\nplt_epochs.append(epochs-1)\n\n# ---- binarize the test data\n# we'll only do this once, while the training data is 
binarized at the\n# start of each epoch\nXtest = utils.bernoullisample(Xtest)\n\n# ---- do the training\nstart = time.time()\nbest = float(-np.inf)\n\nfor epoch in range(epochs):\n\n # ---- binarize the training data at the start of each epoch\n Xtrain_binarized = utils.bernoullisample(Xtrain)\n\n train_dataset = (tf.data.Dataset.from_tensor_slices(Xtrain_binarized)\n .shuffle(Ntrain).batch(batch_size))\n\n # ---- plot samples from the prior at this epoch\n if epoch in plt_epochs:\n model.generate_and_save_images(z, epoch, string)\n model.generate_and_save_posteriors(Xtest, ytest, 10, epoch, string)\n\n # ---- check if the learning rate needs to be updated\n if args.epochs == -1 and epoch in learning_rate_dict:\n new_learning_rate = learning_rate_dict[epoch]\n old_learning_rate = optimizer.learning_rate.numpy()\n\n print(\"Changing learning rate from {0} to {1}\".format(old_learning_rate, new_learning_rate))\n optimizer.learning_rate.assign(new_learning_rate)\n\n for _step, x_batch in enumerate(train_dataset):\n step = _step + steps_pr_epoch * epoch\n\n # ---- warm-up\n beta = 1.0\n # beta = np.min([step / 200000, 1.0]).astype(np.float32)\n\n # ---- one training step\n res = model.train_step(x_batch, n_samples, beta, optimizer, objective=objective)\n\n if step % 200 == 0:\n\n # ---- write training stats to tensorboard\n with train_summary_writer.as_default():\n model.write_to_tensorboard(res, step)\n\n # ---- monitor the test-set\n test_res = model.val_step(Xtest, n_samples, beta)\n\n # ---- write test stats to tensorboard\n with test_summary_writer.as_default():\n model.write_to_tensorboard(test_res, step)\n\n took = time.time() - start\n start = time.time()\n\n print(\"epoch {0}/{1}, step {2}/{3}, train ELBO: {4:.2f}, val ELBO: {5:.2f}, time: {6:.2f}\"\n .format(epoch, epochs, step, total_steps, res[objective].numpy(), test_res[objective], took))\n\n# ---- save final weights\nmodel.save_weights('/tmp/iwae/{0}/final_weights'.format(string))\n\n# ---- load the final weights?\n# model.load_weights('/tmp/iwae/{0}/final_weights'.format(string))\n\n# ---- test-set llh estimate using 5000 samples\ntest_elbo_metric = utils.MyMetric()\nL = 5000\n\n# ---- since we are using 5000 importance samples we have to loop over each element of the test-set\nfor i, x in enumerate(Xtest):\n res = model(x[None, :].astype(np.float32), L)\n test_elbo_metric.update_state(res['iwae_elbo'][None, None])\n if i % 200 == 0:\n print(\"{0}/{1}\".format(i, Ntest))\n\ntest_set_llh = test_elbo_metric.result()\ntest_elbo_metric.reset_states()\n\nprint(\"Test-set {0} sample log likelihood estimate: {1:.4f}\".format(L, test_set_llh))\n\n# ---- plot variational and true posteriors\nplot_task01.plot(model, Xtest, string)\n" ]
[ [ "tensorflow.keras.datasets.mnist.load_data", "tensorflow.data.Dataset.from_tensor_slices", "numpy.random.seed", "tensorflow.random.set_seed", "tensorflow.summary.create_file_writer", "tensorflow.config.experimental.set_memory_growth", "numpy.arange", "tensorflow.config.list_physical_devices", "tensorflow.keras.optimizers.Adam" ] ]
josephhardinee/pyart
[ "909cd4a36bb4cae34349294d2013bc7ad71d0969" ]
[ "pyart/setup.py" ]
[ "\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('pyart', parent_package, top_path)\n config.add_subpackage('io') # io first to detect if RSL is missing.\n config.add_subpackage('__check_build')\n config.add_subpackage('core')\n config.add_subpackage('correct')\n config.add_subpackage('graph')\n config.add_subpackage('map')\n config.add_subpackage('retrieve')\n config.add_subpackage('filters')\n config.add_subpackage('testing')\n config.add_subpackage('util')\n config.add_subpackage('aux_io')\n config.add_subpackage('bridge')\n\n config.add_data_dir('tests')\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n" ]
[ [ "numpy.distutils.misc_util.Configuration" ] ]
Midnighter/component-contribution
[ "e580480a1979fa7b57b378c9a02a99f2f0b5bde6" ]
[ "examples/mdf.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 14 18:32:46 2014\n\n@author: noore\n\"\"\"\nfrom scripts.max_min_driving_force import KeggFile2ModelList, MaxMinDrivingForce\nfrom python.component_contribution import ComponentContribution\nfrom scripts.html_writer import HtmlWriter\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nREACTION_FNAME = 'examples/glycolysis.txt'\nHTML_FNAME = 'res/mdf_glycolysis.html'\n\nhtml_writer = HtmlWriter(HTML_FNAME)\npathways = KeggFile2ModelList(REACTION_FNAME)\np = pathways[0]\ncc = ComponentContribution.init()\n\np['model'].add_thermo(cc)\n\nmdf = MaxMinDrivingForce(p['model'], p['fluxes'], p['bounds'],\n pH=p['pH'], I=p['I'], T=p['T'],\n html_writer=html_writer)\n\nmdf_solution, dG_r_prime = mdf.Solve(uncertainty_factor=3.0)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
gjbang/graduation-design
[ "deaaa11ed652ac0b8843a039f04665da67181566" ]
[ "singlenet-function/estimation/renderers.py" ]
[ "import cv2\nimport math\nimport numpy as np\n\n\ndef draw(config, input_image, coords, subset, resize_fac = 1):\n\n stickwidth = 1\n\n canvas = input_image.copy()\n\n for body_part_type, body_part_meta in config.body_parts.items():\n color = body_part_meta.color\n body_part_peaks = coords[body_part_type.name]\n\n for peak in body_part_peaks:\n a = (int)(peak[0] * resize_fac)\n b = (int)(peak[1] * resize_fac)\n cv2.circle(canvas, (a, b), stickwidth, color, thickness=-1)\n\n # dict(id: [y,x]) Note, coord are reversed\n xy_by_id = dict([(item[3], np.array([item[1], item[0]])) for sublist in coords.values() for item in sublist])\n\n xy = np.zeros((2,2))\n for i, conn_type in enumerate(config.connection_types):\n index1 = config.body_parts[conn_type.from_body_part].slot_idx\n index2 = config.body_parts[conn_type.to_body_part].slot_idx\n indexes = np.array([index1, index2]) \n for s in subset:\n\n ids = s[indexes] \n if -1 in ids:\n continue\n\n cur_canvas = canvas.copy()\n xy[0, :] = xy_by_id[ids[0]]\n xy[1, :] = xy_by_id[ids[1]]\n \n m_x = np.mean(xy[:, 0])\n m_y = np.mean(xy[:, 1])\n sss = xy[1, 1]\n length = ((xy[0, 0] - xy[1, 0]) ** 2 + (xy[0, 1] - xy[1, 1]) ** 2) ** 0.5\n\n angle = math.degrees(math.atan2(xy[0, 0] - xy[1, 0], xy[0, 1] - xy[1, 1]))\n\n polygon = cv2.ellipse2Poly((int(m_y * resize_fac), int(m_x * resize_fac)),\n (int(length * resize_fac / 2), stickwidth), int(angle), 0, 360, 1)\n cv2.fillConvexPoly(cur_canvas, polygon, conn_type.color)\n canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)\n\n return canvas\n" ]
[ [ "numpy.array", "numpy.mean", "numpy.zeros" ] ]
aod321/new_train
[ "23bf0a64ac274433cbc372898d97ae9d1aa5f6cd" ]
[ "preprocess.py" ]
[ "import torch\nimport torch.nn\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as TF\nimport cv2\nimport numpy as np\nfrom skimage.util import random_noise\nfrom PIL import Image\nimport torch.nn.functional as F\nimport imgaug.augmenters as iaa\nimport imgaug as ia\nsometimes = lambda aug: iaa.Sometimes(0.5, aug)\n\n\nseq = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n # crop images by -5% to 10% of their height/width\n sometimes(iaa.CropAndPad(\n percent=(-0.05, 0.1),\n pad_mode=ia.ALL,\n pad_cval=(0, 255)\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n translate_percent={\"x\": (-0.5, 0.5), \"y\": (-0.5, 0.5)}, # translate by -20 to +20 percent (per axis)\n rotate=(-45, 45), # rotate by -45 to +45 degrees\n shear=(-16, 16), # shear by -16 to +16 degrees\n order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\n cval=(0, 255), # if mode is constant, use a cval between 0 and 255\n mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n iaa.SomeOf((0, 5),\n [\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images\n iaa.OneOf([\n iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels\n iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),\n ]),\n iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)\n iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation\n iaa.LinearContrast((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast\n iaa.Grayscale(alpha=(0.0, 1.0)),\n iaa.GammaContrast((0.5, 2.0), per_channel=True),\n # iaa.PerspectiveTransform(scale=(0.01, 0.15)),\n # iaa.RandAugment(n=2, m=9)\n ],\n random_order=True\n )\n])\n \nclass Stage1Aug(transforms.ToTensor):\n \n def __call__(self, sample):\n img = sample['image']\n labels = sample['labels']\n H, W = labels[0].shape\n labels = [TF.to_tensor(labels[r])\n for r in range(len(labels))\n ]\n labels = torch.cat(labels, dim=0).float()\n segmaps = labels.argmax(dim=0, keepdim=False).numpy().astype(np.int32).reshape(1, H, W, 1)\n images_aug, segmaps_aug = seq(image=img, segmentation_maps=segmaps)\n segmaps_aug = torch.from_numpy(segmaps_aug.reshape(H, W))\n label_onehot = torch.zeros(9, H, W)\n for i in range(9):\n label_onehot[i] = (segmaps_aug == i).float()\n sample.update({'image': images_aug, 'labels': label_onehot})\n return sample\n\n \nclass Stage2Aug(transforms.ToTensor):\n \n def __call__(self, sample):\n parts, parts_mask = sample['image'], sample['labels']\n parts_aug = []\n mask_aug = []\n for r in range(len(parts)):\n H, W = parts_mask[r].shape\n segmap = parts_mask[r].astype(np.int32).reshape(1, H, W, 1)\n images_aug, segmaps_aug = seq(image=parts[r], segmentation_maps=segmap)\n segmaps_aug = segmaps_aug.reshape(H, W)\n parts_aug.append(images_aug)\n mask_aug.append(segmaps_aug)\n sample.update({\"image\": parts_aug, \"labels\": mask_aug})\n \n return sample\n\n\n\nclass Resize(transforms.Resize):\n \"\"\"Resize the input PIL Image to the given size.\n Override the __call__ of transforms.Resize\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n sample:{'image':PIL Image to be resized,'labels':labels to be resized}\n\n Returns:\n sample:{'image':resized PIL Image,'labels': resized PIL label list}\n\n \"\"\"\n image, labels = 
sample['image'], sample['labels']\n if type(image) is Image.Image:\n image = TF.to_tensor(image)\n resized_image = TF.to_pil_image(F.interpolate(image.unsqueeze(0),\n self.size, mode='bilinear', align_corners=True).squeeze(0)\n )\n\n resized_labels = [TF.resize(labels[r], self.size, Image.NEAREST)\n for r in range(len(labels))\n ]\n\n # assert resized_labels.shape == (9, 128, 128)\n\n sample = {'image': resized_image, 'labels': resized_labels,\n 'orig': sample['orig'], 'orig_label': sample['orig_label'],\n 'orig_size': sample['orig_size'], 'name': sample['name'],\n 'parts_gt': sample['parts_gt'], 'parts_mask_gt': sample['parts_mask_gt']}\n\n return sample\n\n\nclass ToTensor(transforms.ToTensor):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Override the __call__ of transforms.ToTensor\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n dict of pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:y\n Tensor: Converted image.\n \"\"\"\n image, labels = sample['image'], sample['labels']\n\n labels = [TF.to_tensor(labels[r])\n for r in range(len(labels))\n ]\n labels = torch.cat(labels, dim=0).float()\n try:\n parts, parts_mask = sample['parts_gt'], sample['parts_mask_gt']\n parts = torch.stack([TF.to_tensor(parts[r])\n for r in range(len(parts))])\n\n parts_mask = torch.cat([TF.to_tensor(parts_mask[r])\n for r in range(len(parts_mask))])\n\n assert parts.shape == (6, 3, 81, 81)\n assert parts_mask.shape == (6, 81, 81)\n sample.update({'image': TF.to_tensor(image), 'labels': labels, 'parts_gt': parts, 'parts_mask_gt': parts_mask})\n except:\n sample.update({'image': TF.to_tensor(image), 'labels': labels})\n return sample\n\n\nclass Stage2_ToTensor(transforms.ToTensor):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Override the __call__ of transforms.ToTensor\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n dict of pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:y\n Tensor: Converted image.\n \"\"\"\n parts, parts_mask = sample['image'], sample['labels']\n\n parts = torch.stack([TF.to_tensor(parts[r])\n for r in range(len(parts))])\n\n parts_mask = torch.cat([TF.to_tensor(parts_mask[r])\n for r in range(len(parts_mask))])\n\n sample = {'image': parts, 'labels': parts_mask}\n\n return sample\n\n\nclass OrigPad(object):\n def __init__(self):\n super(OrigPad, self).__init__()\n\n def __call__(self, sample):\n \"\"\"\n Args:\n sample:{'image':PIL Image to be resized,'labels':labels to be resized}\n\n Returns:\n sample:{'image':resized PIL Image,'labels': resized PIL label list}\n\n \"\"\"\n image, labels = sample['image'], sample['labels']\n parts, parts_mask = sample['parts_gt'], sample['parts_mask_gt']\n orig_label = sample['orig_label']\n orig = sample['orig']\n if type(orig) is not Image.Image:\n orig = TF.to_pil_image(sample['orig'])\n\n if type(orig_label[0]) is not Image.Image:\n orig_label = [TF.to_pil_image(orig_label[r])\n for r in range(len(orig_label))]\n\n desired_size = 1024\n delta_width = desired_size - orig.size[0]\n delta_height = desired_size - orig.size[1]\n pad_width = delta_width // 2\n pad_height = delta_height // 2\n orig_size = np.array([orig.size[0], orig.size[1]])\n padding = np.array([pad_width, pad_height, delta_width - pad_width, delta_height - pad_height])\n\n pad_orig = TF.to_tensor(TF.pad(orig, tuple(padding)))\n\n orig_label = [TF.to_tensor(TF.pad(orig_label[r], tuple(padding)))\n for r in range(len(orig_label))\n ]\n orig_label = 
torch.cat(orig_label, dim=0).float()\n orig_label[0] = torch.tensor(1.) - torch.sum(orig_label[1:], dim=0, keepdim=True)\n\n assert pad_orig.shape == (3, 1024, 1024)\n assert orig_label.shape == (9, 1024, 1024)\n\n sample = {'image': image, 'labels': labels, 'orig': pad_orig, 'orig_label': orig_label,\n 'orig_size': orig_size, 'padding': padding,\n 'name': sample['name'],\n 'parts_gt': parts, 'parts_mask_gt': parts_mask,\n }\n\n return sample\n\n\nclass RandomAffine(transforms.RandomAffine):\n\n def __call__(self, sample):\n \"\"\"\n img (PIL Image): Image to be transformed.\n\n Returns:\n PIL Image: Affine transformed image.\n \"\"\"\n img, labels = sample['image'], sample['labels']\n\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)\n img = TF.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)\n labels = [TF.affine(labels[r], *ret, resample=self.resample, fillcolor=self.fillcolor)\n for r in range(len(labels))]\n\n sample = {'image': img, 'labels': labels, 'orig': img, 'orig_label': labels,\n 'orig_size': sample['orig_size'],'name': sample['name'],\n 'parts_gt': sample['parts_gt'], 'parts_mask_gt': sample['parts_mask_gt']}\n return sample\n\n\nclass ToPILImage(object):\n \"\"\"Convert a ``numpy.ndarray`` to ``PIL Image``\n\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n dict of sample (numpy.ndarray): Image and Labels to be converted.\n\n Returns:\n dict of sample(PIL,List of PIL): Converted image and Labels.\n \"\"\"\n image, labels = sample['image'], sample['labels']\n\n image = TF.to_pil_image(image)\n if type(labels) is not torch.Tensor:\n labels = np.uint8(labels)\n labels = [TF.to_pil_image(labels[i])\n for i in range(labels.shape[0])]\n\n sample = {'image': image, 'labels': labels, 'orig': image, 'orig_label': labels,\n 'orig_size': sample['orig_size'],'name': sample['name'],\n 'parts_gt': sample['parts_gt'], 'parts_mask_gt': sample['parts_mask_gt']}\n return sample\n\n\nclass Stage2ToPILImage(object):\n \"\"\"Convert a ``numpy.ndarray`` to ``PIL Image``\n\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n dict of sample (numpy.ndarray): Image and Labels to be converted.\n\n Returns:\n dict of sample(PIL,List of PIL): Converted image and Labels.\n \"\"\"\n parts, parts_mask = sample['image'], sample['labels']\n\n parts = [TF.to_pil_image(parts[r])\n for r in range(len(parts))]\n\n parts_mask = [TF.to_pil_image(parts_mask[r])\n for r in range(len(parts_mask))]\n\n sample = {'image': parts, 'labels': parts_mask}\n\n return sample\n\n\nclass GaussianNoise(object):\n def __call__(self, sample):\n img = sample['image']\n img = np.array(img).astype(np.uint8)\n img = np.where(img != 0, random_noise(img), img)\n img = TF.to_pil_image(np.uint8(255 * img))\n\n sample = {'image': img, 'labels': sample['labels'], 'orig': img,\n 'orig_label': sample['orig_label'], 'parts_gt': sample['parts_gt'],\n 'orig_size': sample['orig_size'],'name': sample['name'],\n 'parts_mask_gt': sample['parts_mask_gt']\n }\n return sample\n\n\nclass Stage2_RandomAffine(transforms.RandomAffine):\n\n def __call__(self, sample):\n \"\"\"\n img (PIL Image): Image to be transformed.\n\n Returns:\n PIL Image: Affine transformed image.\n \"\"\"\n img, labels = sample['image'], sample['labels']\n ret = [self.get_params(self.degrees, self.translate, self.scale, self.shear, img[r].size)\n for r in range(4)]\n new_img = [TF.affine(img[r], *ret[r], resample=self.resample, fillcolor=self.fillcolor)\n for r in range(4)]\n new_labels = [TF.affine(labels[r], 
*ret[r], resample=self.resample, fillcolor=self.fillcolor)\n for r in range(4)]\n for r in range(4):\n img[r] = new_img[r]\n labels[r] = new_labels[r]\n\n sample = {'image': img, 'labels': labels}\n return sample\n\n\nclass Stage2_nose_mouth_RandomAffine(transforms.RandomAffine):\n\n def __call__(self, sample):\n \"\"\"\n img (PIL Image): Image to be transformed.\n\n Returns:\n PIL Image: Affine transformed image.\n \"\"\"\n img, labels = sample['image'], sample['labels']\n ret = {r: self.get_params(self.degrees, self.translate, self.scale, self.shear, img[r].size)\n for r in range(4, 6)}\n new_part = [TF.affine(img[r], *ret[r], resample=self.resample, fillcolor=self.fillcolor)\n for r in range(4, 6)]\n new_labels = [TF.affine(labels[r], *ret[r], resample=self.resample, fillcolor=self.fillcolor)\n for r in range(4, 6)]\n for r in range(4, 6):\n img[r] = new_part[r - 4]\n labels[r] = new_labels[r - 4]\n sample = {'image': img, 'labels': labels}\n return sample\n\n\nclass Stage2_GaussianNoise(object):\n def __call__(self, sample):\n parts = sample['image']\n parts = [np.array(parts[r], np.uint8)\n for r in range(len(parts))]\n for r in range(len(parts)):\n parts[r] = np.where(parts[r] != 0, random_noise(parts[r]), parts[r])\n\n parts = [TF.to_pil_image(np.uint8(255 * parts[r]))\n for r in range(len(parts))\n ]\n sample = {'image': parts, 'labels': sample['labels']}\n return sample\n\n\nname_list = ['eyebrow1', 'eyebrow2', 'eye1', 'eye2', 'nose', 'mouth']\n\n\nclass OldStage2Resize(transforms.Resize):\n \"\"\"Resize the input PIL Image to the given size.\n Override the __call__ of transforms.Resize\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n sample:{'image':PIL Image to be resized,'labels':labels to be resized}\n\n Returns:\n sample:{'image':resized PIL Image,'labels': resized PIL label list}\n\n \"\"\"\n image, labels = sample['image'], sample['labels']\n resized_image = np.array([cv2.resize(image[i], self.size, interpolation=cv2.INTER_AREA)\n for i in range(len(image))])\n labels = {x: np.array([np.array(TF.resize(TF.to_pil_image(labels[x][r]), self.size, Image.ANTIALIAS))\n for r in range(len(labels[x]))])\n for x in name_list\n }\n\n sample = {'image': resized_image,\n 'labels': labels\n }\n\n return sample\n\n\nclass OldStage2ToTensor(transforms.ToTensor):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Override the __call__ of transforms.ToTensor\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n dict of pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:y\n Tensor: Converted image.\n \"\"\"\n image = sample['image']\n labels = sample['labels']\n image = torch.stack([TF.to_tensor(image[i])\n for i in range(len(image))])\n\n labels = {x: torch.cat([TF.to_tensor(labels[x][r])\n for r in range(len(labels[x]))\n ])\n for x in name_list\n }\n\n return {'image': image,\n 'labels': labels\n }\n\n\nclass OldStage2_ToPILImage(object):\n \"\"\"Convert a ``numpy.ndarray`` to ``PIL Image``\n\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"\n Args:\n dict of sample (numpy.ndarray): Image and Labels to be converted.\n\n Returns:\n dict of sample(PIL,List of PIL): Converted image and Labels.\n \"\"\"\n image, labels = sample['image'], sample['labels']\n image = [TF.to_pil_image(image[i])\n for i in range(len(image))]\n labels = {x: [TF.to_pil_image(labels[x][i])\n for i in range(len(labels[x]))]\n for x in name_list\n }\n\n return {'image': image,\n 'labels': labels\n }\n" ]
[ [ "torch.zeros", "numpy.array", "numpy.uint8", "torch.cat", "torch.tensor", "torch.sum" ] ]
SabrinaKall/credential-digger
[ "372b25aa19c7c04a2c2a8f385f4875f66de73af1" ]
[ "credentialdigger/generator/generator.py" ]
[ "import json\nimport random\nimport re\nimport pkg_resources\nimport shutil\nimport tempfile\nfrom collections import Counter\nfrom pathlib import Path\n\nimport pandas as pd\nimport string_utils\nfrom git import Repo as GitRepo\nfrom tqdm import tqdm\n\nfrom .qlearning import compute_dataset\nfrom .training import create_snippet_model\n\nBLACKLISTED_NAMES = set(['changelog', 'contribute', 'docker-compose',\n 'dockerfile', 'license', 'makefile'])\nBLACKLISTED_EXTS = set(['bin', 'csv', 'jpg', 'md', 'pdf', 'png', 'rst', 'svg',\n 'txt', 'yml'])\n\n\nclass ExtractorGenerator:\n\n def generate_leak_snippets(self, repo_url, num_extracts=30):\n \"\"\" Generate the extractor model adapted to a repository.\n\n Parameters\n ----------\n repo_url: str\n The url of the repository\n num_extracts: int, optional\n The maximum number of extracts needed (default `30`)\n\n Returns\n -------\n str\n The name of the model folder\n str\n The name of the binary for the extractor model\n \"\"\"\n # Generate the corpus for the repo\n corpus = self.build_corpus(repo_url, num_extracts)\n try:\n return self.train_model(corpus, repo_url)\n except FileExistsError:\n print('Model for this developer already created.',\n 'Do not generate a new one.')\n # Return the existing one\n return self._search_model_extractor(repo_url)\n\n def _clone_git_repo(self, git_url):\n \"\"\" Clone git repository. \"\"\"\n project_path = tempfile.mkdtemp()\n GitRepo.clone_from(git_url, project_path)\n return project_path\n\n def _get_relevant_files(self, local_repo_path):\n \"\"\" Sort the files of this repository according to their relevance. The\n relevance of a file is calculated as the number of commits which\n changed it.\n\n Parameters\n ----------\n local_repo_path: str\n The local path of the repo (cloned from github)\n\n Returns\n -------\n list\n A list of file names, sorted by relevance\n \"\"\"\n r = GitRepo(local_repo_path)\n all_commits = r.git.log('--name-only', '--pretty=format:').split()\n counted_commits = Counter(all_commits)\n # Sort the files according to the number of commits they appear in\n sorted_commits = sorted(counted_commits.items(),\n key=lambda x: x[1],\n reverse=True)\n # Return the file names sorted per commits number\n return list(zip(*sorted_commits))[0]\n\n def _search_model_extractor(self, repo_url):\n \"\"\" Find the existing extractor binary.\n\n If the model for this developer has already been generated, then we\n should find it in the `models_data` folder (i.e., the default folder\n for the ML models).\n\n Parameters\n ----------\n repo_url: str\n The url of the repository\n\n Returns\n -------\n str\n The name of the model folder\n str\n The name of the binary for the extractor model\n \"\"\"\n # Find model folder\n # The model name is the name of the author of the repository\n model_name = 'snippet_model_%s' % repo_url.split('/')[-2]\n # It is stored in the models_data folder\n models_data = Path(pkg_resources.resource_filename('credentialdigger',\n 'models_data'))\n dev_model = models_data / model_name\n\n # Find extractor binary\n # Get name and version from the metafile\n with open(dev_model / 'meta.json', 'r') as f:\n meta = json.loads(f.read())\n inner_folder = dev_model / ('%s-%s' % (meta['name'], meta['version']))\n # There should be only one binary in the inner folder\n extractor_file = list(inner_folder.glob('**/*.bin'))[0]\n\n return dev_model.name, extractor_file.name\n\n def build_corpus(self, repo_url, num_extracts):\n \"\"\" Build the corpus for this repo.\n\n Parameters\n 
----------\n repo_url: str\n The url of the repository\n num_extracts: int\n The maximum number of extracts needed\n\n Returns\n -------\n list\n A list of strings (i.e., the extracts)\n \"\"\"\n # Clone the repo from Github (the scanner deletes it when it finishes\n # its tasks)\n repo_path = self._clone_git_repo(repo_url)\n # Get the ranking of the files of this repo\n ranking = self._get_relevant_files(repo_path)\n\n # Build the corpus\n repo_local_path = Path(repo_path)\n corpus = []\n fi = 0\n while fi < len(ranking) and len(corpus) < num_extracts:\n current = repo_local_path / ranking[fi]\n # Some files cannot be used to produce extracts\n pp = Path(current).name\n if pp[0] == '.' or pp.split('.')[-1] in BLACKLISTED_EXTS or \\\n pp.split('.')[0].lower() in BLACKLISTED_NAMES:\n fi += 1\n continue\n try:\n with open(current, 'r') as f:\n # Extend the corpus with the extracts found in this file\n corpus.extend(self._get_extracts(f.read()))\n except UnicodeDecodeError:\n # If the read raises this exception, then either the language\n # uses a different charset or the file may be a csv (or a\n # binary). In both cases, skip it.\n # print('Skip file %s (encoding error)' % current)\n pass\n except FileNotFoundError:\n # If the read raises this exception, then the file has been\n # deleted from the repository. In this case, ignore it (since\n # for the generator we only need the stylometry of the\n # developer, the content is not important).\n # print('Skip file %s (deleted)' % current)\n pass\n\n fi += 1\n\n # Delete local repo folder\n shutil.rmtree(repo_path)\n\n return corpus\n\n def _get_extracts(self, code):\n \"\"\" Use the code to produce extracts.\n Parameters\n ----------\n code: str\n The content of a file\n\n Returns\n -------\n list\n A list of extracts (i.e., a list of strings)\n \"\"\"\n rows = code.split('\\n')\n extracts = []\n # If the code is shorter than 10 lines, we ignore this file\n if 10 <= len(rows) < 15:\n # If the code is 10 to 15 lines, we use the whole file as corpus\n extracts.append(code)\n elif len(rows) >= 15:\n # If the code is longer than 15 lines, we split it into multiple\n # extracts of lenght generated randomly (10 to 15 lines each)\n while len(rows) > 10:\n # Generate an extract using the first r rows, with r a random\n # number between 10 and 20\n r = random.randint(10, 20)\n extracts.append('\\n'.join(rows[:r]))\n # Remove the first r rows\n rows = rows[r + 1:]\n return extracts\n\n def train_model(self, corpus, repo_url, training_data_size=75000,\n actions_n=12, states_n=13, alpha=0.5, gamma=0.85,\n epochs_basis=50, extract_max_length=150):\n \"\"\" Train the snippet model according to the user stylometry.\n\n Parameters\n ----------\n corpus: list\n A corpus of code, i.e., a list of excerpts of a repository\n repo_url: str\n The url of the repository\n training_data_size: int, optional\n The size of the training dataset (default `75000`)\n actions_n: int, optional\n The number of actions in the Q-table (default `12`)\n states_n: int, optional\n The number of states in the Q-table (default `13`)\n alpha: float, optional\n The alpha parameter in the reward function (default `0.5`)\n gamma: float, optional\n The gamma parameter in the reward function (default `0.85`)\n epochs_basis: int, optional\n The base number of epochs (default `50`)\n extract_max_length: int, optional\n The maximum length of extracts for being processed (default `150`)\n\n Returns\n -------\n str\n The name of the model folder\n str\n The name of the binary for the extractor 
model\n \"\"\"\n # Compute dataset with qlearning algorithm\n raw_df = compute_dataset(corpus, actions_n, states_n, alpha, gamma,\n epochs_basis, extract_max_length)\n\n # Load dataframe\n df = pd.DataFrame(data=raw_df).sample(n=training_data_size,\n replace=False)\n\n # Preprocess data before training\n df = self._preprocess_training_model(df)\n\n # Create the model\n return create_snippet_model(df, repo_url)\n\n def _preprocess_training_model(self, data):\n \"\"\" Pre-process the data for the model.\n\n Parameters\n ----------\n data: `pandas.DataFrame`\n The training dataset\n\n Returns\n -------\n `pandas.DataFrame`\n Pre-processed dataframe\n \"\"\"\n def _pre_process(raw_data):\n \"\"\" Pre-process raw data. \"\"\"\n pattern = re.compile(\n r\"((?<=')\\w\\d.*?(?=')|(?<=\\\")\\w\\d.*?(?=\\\")|[\\w\\d]+)\")\n words = re.findall(pattern, raw_data)\n return ' '.join(list(map(string_utils.snake_case_to_camel, words)))\n\n data_list = []\n # Preprocess the dataset with naming convention, etc.\n for idx, row in tqdm(data.iterrows(), total=data.shape[0]):\n row_data = {}\n for column in ['text', 'key', 'value']:\n row_data[column] = _pre_process(row[column])\n data_list.append(row_data)\n\n return pd.DataFrame(data=data_list)\n" ]
[ [ "pandas.DataFrame" ] ]
yingxinac/DSGRN
[ "b5bc64e5a99e6d266f6ac5ba7ac9d04954f12d32" ]
[ "software/HillSimulations/hillmodel.py" ]
[ "# The MIT License (MIT)\n\n# Copyright (c) 2016 Breschine Cummins\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n# -----\n# This file has been modified from its original version.\n# It contains changes to the parsing routines for use with the DSGRN project.\n# Michael Lan, Shaun Harker, 2016\n# -----\n\nimport re\nimport sqlite3\nimport numpy as np\nfrom scipy.integrate import ode\nimport matplotlib.pyplot as plt\n\"\"\"\nimport matplotlib\nfont = {'family' : 'normal',\n 'size' : 22}\nmatplotlib.rc('font', **font)\n\"\"\"\n\n\nclass hillmodel(object):\n '''\n This class takes a network file, a parameter, and a Hill\n exponent and builds a Hill function model. The class has two public\n methods:\n 1) time,timeseries = hillmodel.simulateHillModel(initialconditions,initialtime,finaltime,timestep)\n 2) hillmodel.plotResults(times,timeseries)\n The first method generates a time series for a given set of initial conditions,\n and the second method plots the results. \n '''\n def __init__(self,network_spec_file_or_string,parameter_spec_file_or_dict,hillexp):\n '''\n Construct the Hill model for a given network and parameter sample.\n Inputs:\n network_file_or_string -- either (a) the filename of a network specification file (.txt file) or DSGRN database (.db file)\n or (b) the network spec string (identified as such if it contains a newline character)\n parameter_file_or_dict -- either (a) parameter specification filename\n or (b) the dictionary object describing the parameter choice\n hillexp -- Hill function exponent to use in the model\n Note:\n The format of the parameter specification file is that it contains in JSON format \n for a dictionary object giving a key-value mapping from parameters to numbers, e.g.\n { \"L[X, Y]\" : 2.34848, \"U[X, Y]\" : 1.23888, ... 
}\n '''\n eqnstr, self.varnames, self.varindex = self._parseEqns(network_spec_file_or_string)\n if isinstance(parameter_spec_file_or_dict, dict):\n parameter = self._parseParameter(parameter_spec_file_or_dict)\n else:\n parameter = self._parseSamples(parameter_spec_file_or_dict)\n self.eqns=self._makeHillEqns(eqnstr,parameter,hillexp)\n self.d=len(eqnstr)\n\n def dim(self):\n \"\"\"\n Return number of variables in model\n \"\"\"\n return self.d\n\n def network(self):\n \"\"\"\n Return the associated network specification string\n \"\"\"\n return self.network_spec_string\n\n def simulateHillModel(self,initialconditions,initialtime,finaltime,timestep):\n '''\n Simulate the constructed Hill model for a given set of initial conditions \n and time period. The given time step only specifies which output timeseries\n is returned. The time step for the backwards difference ODE solver is \n determined by the algorithm.\n\n '''\n def RHS(t,x,eqns):\n return eqns(x) #np.array(eqns(x))\n def integrate(r,y0,t0,t1,dt):\n times=[t0]\n timeseries=[y0]\n while r.successful() and r.t < t1:\n r.integrate(r.t+dt)\n times.append(r.t)\n timeseries.append(r.y)\n return times,timeseries\n r = ode(RHS).set_integrator('vode', method='bdf')\n r.set_initial_value(initialconditions,initialtime).set_f_params(self.eqns)\n times,timeseries = integrate(r,initialconditions,initialtime,finaltime,timestep)\n return times,timeseries,self.varnames\n\n def plotResults(self,times,timeseries,plotoptions={},legendoptions={},figuresize=()):\n '''\n Plot a time series.\n\n plotoptions and legendoptions are optional dictionaries with keys corresponding \n to the options for matplotlib.pyplot.plot and matplotlib.pyplot.legend.\n\n Examples: \n plotoptions={'linewidth':2}\n legendoptions={'fontsize':24,'loc':'upper left', 'bbox_to_anchor':(1, 1)}\n figuresize = (20,10)\n\n '''\n if figuresize:\n plt.figure(figsize=figuresize)\n timeseries=np.array(timeseries)\n for k in range(timeseries.shape[1]):\n plt.plot(times,timeseries[:,k],label=self.varnames[k],**plotoptions)\n plt.legend(**legendoptions)\n plt.axis('tight')\n plt.show()\n\n# The remainder of the file consists of private methods implementing various parsing voodoo.\n\n def _parseEqns(self,network_spec_file_or_string):\n \"\"\"\n Parse a network specification file to obtain data structures representing ODEs\n Input: \"network_file_or_string\" is either (a) the filename of a network specification file or DSGRN database\n or (b) the network spec string (i.e. contents)\n Output: The function outputs \n eqnstr, varnames, varindex \n where\n eqnstr is a list of strings representing the ODE in a p-n formatting (see below)\n varnames is a list of variable names, the order of which gives an internal indexing\n varindex is a dictionary with keys being the variable names and values being an internal indexing\n\n Note: \"p-n formatting\" of a network node's input formula replaces the variables occuring in \n the string instead with the variable indices and suffixes them with either \"n\" or \"p\" \n depending on whether they are negated. Multiplication in this formatting is always explicit \n (never mere juxtaposition). 
It is easiest to describe by example: \n (~X + Y)(Z) becomes ((0n)+(1p))*((2p)) when \n varindex[\"X\"] == 0, varindex[\"Y\"] == 1, and varindex[\"Z\"] == 2\n Notes on Network specification file:\n A network spec file contains on each line \n <varname> : <input-formula> [: E]\n (The optional last colon and what follows we may ignore.)\n An input-formula is an algebraic combination of variable names which allows the\n usage of the variables and the symbols (, ), +, and * in the usual ways. We may also\n prepend any variable name with the symbol \"~\". \n We note the following:\n (a) Some variable names are contained inside of other variable names\n (b) There may be redundant whitespace (even between ~ and variable name)\n (c) We may write \"X*Y\" \"X(Y)\" \"(X)Y\" \"X Y\" which are equivalent and refer to the product, \n but \"XY\" can only refer to a single variable \"XY\", and not the product of \"X\" and \"Y\".\n \"\"\"\n if '\\n' in network_spec_file_or_string:\n self.network_spec_string = network_spec_file_or_string\n elif network_spec_file_or_string.lower().endswith('.db'):\n conn = sqlite3.connect(network_spec_file_or_string)\n c = conn.cursor()\n c . execute ( \"select Specification from Network;\" )\n self.network_spec_string = c.fetchone()[0]\n else:\n with open(network_spec_file_or_string) as f:\n self.network_spec_string = f.read()\n eqns=[]\n varnames = []\n varindex = {}\n for line in self.network_spec_string.splitlines():\n parsed = line.split(':')\n if len(parsed) < 2: continue # Ignore blank lines\n varname = parsed[0].strip() # e.g. \"X\"\n formula = parsed[1].strip() # e.g. \"(~X + Y)U Z\"\n if varname[0] == '.' or varname[0] == '@': continue # Ignore comment lines\n varnames.append(varname)\n varindex[varname]=str(len(varindex))\n eqns.append(formula)\n eqnstr=[]\n for e in eqns:\n # Replace occurences of variables with variable indices followed by p if occurring with ~ prefix and followed by n otherwise\n # Example: \"(~X + Y)U Z\" --> \"((0n) + (1p))(2p) (3p)\"\n e = re.sub('([ ()+*]*)(~?) *([^ ~()+*]+)([ ()+*]*)', lambda x: x.group(1) + \"(\" + varindex[x.group(3)] + (\"n\" if (x.group(2) == '~') else \"p\") + ')' + x.group(4), e)\n # Remove spaces and make multiplications explicit\n # Example: \"((0n) + (1p))(2p) (3p)\" --> \"((0n)+(1p))*(2p)*(3p)\"\n e = e.replace(' ','').replace(')(',')*(')\n # Add parsed equation to eqnstr output list\n eqnstr.append(e)\n return eqnstr,varnames,varindex\n \n def _parseParameter(self,parameter):\n \"\"\"\n Converts a parameter for internal use by replacing variable names with indices in the lookup table\n Inputs: \"parameter\" is a key-value table from parameter names to numeric values, e.g.\n e.g. { \"L[X, Y]\" : 2.34848, \"U[X, Y]\" : 1.23888, ... }\n Outputs: the return value is obtained from rewriting parameter names using internal variable indexing\n e.g. { \"L[0,1]\" : 2.34848, \"U[0,1]\" : 1.23888, ... }\n \"\"\"\n replace = lambda key : re.sub(' *([LUT])\\[ *([^ ]*) *, *([^ ]*) *\\] *', \n lambda match : match.group(1) + '[' + self.varindex[match.group(2)] + ',' + self.varindex[match.group(3)] + ']', key)\n return { replace(key) : value for key, value in parameter.items() }\n \n def _parseSamples(self,fname='samples.txt'):\n \"\"\"\n Read the samples and return the names of parameters along with their values.\n Inputs: fname is the name of a file containing parameter data\n Output: parameter is a key-value table from parameter names (using variable indexing) to numeric values, e.g.\n e.g. 
{ \"L[0,1]\" : 2.34848, \"U[0,1]\" : 1.23888, ... }\n \"\"\"\n with open(fname) as parameter_file: \n named_parameter = json.load(parameter_file)\n return self._parseParameter(named_parameter)\n\n def _makeHillStrs(self,U,L,T,n,J):\n \"\"\"\n Create the Hill function expressions\n neghill = \"(U-L)*(T**n)/(X[J]**n+T**n) + L\"\n poshill = \"(U-L)*(X[J]**n)/(X[J]**n+T**n) + L\"\n with the appropriate values for U, L, T, n, and J substituted\n \"\"\" \n scalar = \"(\"+U+\"-\"+L+\")\"\n Xn = \"X[\"+J+\"]**\"+n\n Tn = T+\"**\"+n\n denom = \"(\"+Xn+\"+\"+Tn+\")\"\n neghill=scalar+\"*\"+Tn+\"/\"+denom+\" + \"+ L\n poshill=scalar+\"*\"+Xn+\"/\"+denom+\" + \"+ L\n return neghill,poshill\n\n def _makeHillEqns(self,eqnstr,parameter,n):\n \"\"\"\n Construct a lambda expression evaluation the right hand side of the Hill Model ODE\n Inputs: eqnstr -- a list of p-n format specification of the network node inputs\n parameternames -- a list of parameter names, e.g. [\"L[0,1]\",\"U[0,1]\",...]\n samples -- a list of parameter values corresponding to the parameter names in 1-1 fashion\n n -- Hill function exponent\n Output: eqns -- A list of lambda functions representing the right-hand-side of an ODE\n The length of the list is the dimension of the ODE (which is also the length of eqnstr).\n The ODE which is represented is d/dt x[i] = eqns[i]\n Implementation:\n This task is accomplished by reading the network specification file and replacing each variable\n in the input formulas with a suitable Hill function. The parsing is facilitated by having \"eqnstr\"\n in the form outputted by _parseEqns, e.g. \n eqnstr == [\"((0n)+(1p)*((2p))\", ... ]\n and the algebraic syntax already present in the input formula already being suitable.\n We also add a decay term for each input formula.\n - X[0] + algebraic-combination of Hill functions\n \"\"\"\n expression = \"[\";\n for k,e in enumerate(eqnstr):\n K = str(k)\n def replaceWithHillFunction(match):\n J = match.group(1) # integer which indexes input variable\n regulation = match.group(2) # either \"n\" or \"p\"\n pair = \"[\"+J+\",\"+K+\"]\"\n U = str(parameter[\"U\" + pair])\n L = str(parameter[\"L\" + pair])\n T = str(parameter[\"T\" + pair])\n neghill, poshill = self._makeHillStrs(U,L,T,str(n),J)\n return (poshill if regulation == 'p' else neghill)\n # Include the formula into the expression\n expression += (',' if len(expression) > 1 else '') + \"-X[\"+K+\"]+\" + re.sub('([0-9]*)([np])', replaceWithHillFunction, e)\n expression += ']'\n return eval('lambda X :' + expression)\n" ]
[ [ "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "scipy.integrate.ode", "matplotlib.pyplot.axis" ] ]
jacobtomlinson/ucx-py
[ "7ac246f521d936b8f1fe9026c593d01ac50efbf7" ]
[ "benchmarks/old_tests/send-recv-py-obj.py" ]
[ "# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n# See file LICENSE for terms.\n#\n# Description: 2-process test that tests the functionality of sending\n# and receiving contiguous python objects\n#\n# Server Steps:\n# 1. activate listener\n# 2. Obtains the coroutine that accepts incoming connection -> coro\n# 3. When connection is established, first send a `str` object and\n# then receive `str` object to and from client respectively\n#\n# Client Steps:\n# 1. Obtains the coroutine that connects to server -> coro\n# 2. When connection is established, first recv a `str` object and\n# then send `str` object to and from server respectively\n#\n# Options include sending python object as is (contig), or string\n# wrapped in strings (to workaround contiguous memory requirement) or\n# strings wrapped in bytes object\n\nimport argparse\nimport asyncio\nimport reprlib\n\nimport ucp\n\n\ndef get_msg(base, obj_type):\n \"\"\"\n Construct the message from bytes or a BufferRegion.\n \"\"\"\n if obj_type == \"bytes\":\n return bytes(base)\n elif obj_type == \"memoryview\":\n return memoryview(base)\n elif obj_type == \"numpy\":\n import numpy as np\n\n return np.frombuffer(base, dtype=\"u1\")\n elif obj_type == \"cupy\":\n import cupy\n\n if isinstance(base, bytes):\n return cupy.asarray(memoryview(base), dtype=\"u1\")\n else:\n return cupy.asarray(base)\n elif obj_type == \"numba\":\n import numba\n import numba.cuda\n import numpy as np\n\n np_arr = np.frombuffer(base, dtype=\"u1\")\n numba_arr = numba.cuda.to_device(np_arr)\n return numba_arr\n else:\n raise ValueError(obj_type)\n\n\ndef check(a, b, obj_type):\n \"\"\"\n Check that the sent and recv'd data matches.\n \"\"\"\n if obj_type in (\"bytes\", \"memoryview\"):\n assert a == b\n elif obj_type == \"numpy\":\n import numpy as np\n\n np.testing.assert_array_equal(a, b)\n elif obj_type == \"cupy\":\n import cupy\n\n cupy.testing.assert_array_equal(a, b)\n elif obj_type == \"numba\":\n import numba\n import numba.cuda\n import numpy as np\n\n np_a = a.copy_to_host()\n np_b = b.copy_to_host()\n np.testing.assert_array_equal(np_a, np_b)\n else:\n raise ValueError(obj_type)\n\n\nasync def talk_to_client(ep, listener):\n print(\"about to send\")\n base = b\"0\" * args.n_bytes\n send_msg = get_msg(base, args.object_type)\n await ep.send_obj(send_msg)\n\n print(\"about to recv\")\n\n if not args.blind_recv:\n recv_req = await ep.recv_obj(args.n_bytes)\n recv_msg = get_msg(recv_req.get_obj(), args.object_type)\n else:\n recv_req = await ep.recv_future()\n recv_msg = ucp.get_obj_from_msg(recv_req)\n\n if not args.validate:\n print(\"server sent: \", reprlib.repr(send_msg), type(send_msg))\n print(\"server recv: \", reprlib.repr(recv_msg), type(recv_msg))\n else:\n check(send_msg, recv_msg, args.object_type)\n\n ucp.destroy_ep(ep)\n print(\"talk_to_client done\")\n ucp.stop_listener(listener)\n\n\nasync def talk_to_server(ip, port):\n # recv, send\n ep = await ucp.get_endpoint(ip, port, timeout=10)\n\n if not args.blind_recv:\n recv_req = await ep.recv_obj(args.n_bytes)\n else:\n recv_req = await ep.recv_future()\n\n br = recv_req.get_buffer_region()\n\n print(\"about to reply\")\n await ep.send_obj(br)\n ucp.destroy_ep(ep)\n print(\"talk_to_server done\")\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\", \"--server\", help=\"enter server ip\")\nparser.add_argument(\"-p\", \"--port\", help=\"enter server port number\")\nparser.add_argument(\n \"-o\",\n \"--object_type\",\n help=\"Send object type. 
Default = bytes\",\n choices=[\"bytes\", \"memoryview\", \"numpy\", \"cupy\", \"numba\"],\n default=\"bytes\",\n)\nparser.add_argument(\n \"-b\", \"--blind_recv\", help=\"Use blind receive. Default = false\", action=\"store_true\"\n)\nparser.add_argument(\n \"-v\", \"--validate\", help=\"Validate data. Default = false\", action=\"store_true\"\n)\nparser.add_argument(\n \"-n\", \"--n-bytes\", help=\"Size of the messages (in bytes)\", type=int, default=1024\n)\nargs = parser.parse_args()\n\n# initiate ucp\ninit_str = \"\"\nserver = False\n\nif args.server is None:\n server = True\nelse:\n server = False\n init_str = args.server\n\n\nucp.init()\nloop = asyncio.get_event_loop()\n# coro points to either client or server-side coroutine\nif server:\n listener = ucp.start_listener(talk_to_client, is_coroutine=True)\n coro = listener.coroutine\n print(f\"listening at port {listener.port}\")\nelse:\n coro = talk_to_server(init_str.encode(), int(args.port))\n\nloop.run_until_complete(coro)\n\nloop.close()\nucp.fin()\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.frombuffer" ] ]
adshidtadka/server-allocation
[ "ce533ce31cc2ce12f0c6a01bff97be3875e35b30" ]
[ "graph/Plot.py" ]
[ "\n# %%\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# %%\n\n\nclass Graph:\n def initialize_rcparams():\n plt.clf()\n plt.style.use('default')\n plt.rcParams['xtick.direction'] = 'in'\n plt.rcParams['ytick.direction'] = 'in'\n plt.gca().yaxis.set_ticks_position('left')\n plt.gca().xaxis.set_ticks_position('bottom')\n plt.rcParams['font.family'] = 'Times New Roman'\n plt.rcParams['font.weight'] = 'light'\n plt.rcParams['font.size'] = 18\n plt.rcParams['axes.linewidth'] = 0.8\n plt.rcParams['lines.linewidth'] = 3\n plt.rcParams['lines.marker'] = '.'\n plt.rcParams['lines.markersize'] = 8\n plt.rcParams['legend.borderaxespad'] = 0\n plt.rcParams['legend.frameon'] = False\n plt.rcParams['legend.numpoints'] = 1\n plt.rcParams['legend.labelspacing'] = 0.1\n plt.rcParams['savefig.bbox'] = 'tight'\n plt.rc('text', usetex=True)\n\n\n# %%\nct = 'Computation time'\ncts = 'Computation time with SUM'\nctes = 'Computation time with ESUM'\nctc = 'Computation time with CPLEX'\nctg = 'Computation time with GLPK'\nctsc = 'Computation time with SCIP'\ntd = 'Total delay'\ntds_max = 'Total maximum delay with SUM'\ntds_min = 'Total minimum delay with SUM'\ntdes = 'Total delay with SUM'\nun = 'Number of users'\nsn = 'Number of servers'\ncp = 'Capacity'\n\n# %%\n\ndf_user_50_cap_10 = pd.read_csv(\"../result/user_50_cap_10.csv\", names=(un, tds_min, tds_max, cts, tdes, ctes, ctc)).replace(0.0, np.nan)\ndf_user_100_cap_15 = pd.read_csv(\"../result/user_100_cap_15.csv\", names=(un, tds_min, tds_max, cts, tdes, ctes, ctc)).replace(0.0, np.nan)\ndf_cap_20_user_20 = pd.read_csv(\"../result/cap_20_user_20.csv\", names=(cp, tds_min, tds_max, cts, tdes, ctes, ctc)).replace(0.0, np.nan)\ndf_cap_40_user_40 = pd.read_csv(\"../result/cap_40_user_40.csv\", names=(cp, tds_min, tds_max, cts, tdes, ctes, ctc)).replace(0.0, np.nan)\ndf_user_10_cap_5 = pd.read_csv(\"../result/user_10_cap_5.csv\", names=(un, tds_min, tds_max, cts, tdes, ctes, ctc)).replace(0.0, np.nan)\n\n# %%\nrate_seriese = pd.concat([df_user_50_cap_10[ctes] / df_user_50_cap_10[ctc], df_user_100_cap_15[ctes] / df_user_100_cap_15[ctc]])\nrate_seriese.mean()\n\n# %%\nGraph.initialize_rcparams()\n\n# %%\n\nplt.plot(df_user_10_cap_5[un], df_user_10_cap_5[ctes], label=('ESUM'), color='k', marker='x', linestyle='-')\nplt.plot(df_user_10_cap_5[un], df_user_10_cap_5[cts], label=('SUM'), color='k', marker='o', linestyle='--')\n\nplt.xlabel(un + ', ' + r'$|{V_{\\rm U}}|$')\nplt.ylabel(ct + ' [s]')\n\nplt.legend(loc=\"upper left\")\n\nplt.ylim([-0.02, 0.2])\n\nplt.savefig('/Users/takaaki/Dropbox/oki_lab/m2/paper/ieice_server/workspace/fig/user_10_cap_5.pdf')\nplt.show()\nplt.close()\n\n# %%\n\nplt.plot(df_user_10_cap_5[un], df_user_10_cap_5[tdes], label=('ESUM'), color='k', marker='x', linestyle='-')\nplt.plot(df_user_10_cap_5[un], df_user_10_cap_5[tds_max], label=('SUM (upper-bound)'), color='k', marker='o', linestyle='--')\nplt.plot(df_user_10_cap_5[un], df_user_10_cap_5[tds_min], label=('SUM (lower-bound)'), color='k', marker='^', linestyle='-.')\n\n\nplt.xlabel(un + ', ' + r'$|{V_{\\rm U}}|$')\nplt.ylabel(td)\n\nplt.ylim([50, 250])\n\nplt.legend(loc=\"lower right\")\n\nplt.savefig('/Users/takaaki/Dropbox/oki_lab/m2/paper/ieice_server/workspace/fig/delay_user_10_cap_5.pdf')\nplt.show()\nplt.close()\n\n# %%\n\nplt.plot(df_user_50_cap_10[un], df_user_50_cap_10[ctes], label=('ESUM'), color='k', marker='x', linestyle='-')\nplt.plot(df_user_50_cap_10[un], df_user_50_cap_10[ctc], label=('ILP'), color='k', marker='o', 
linestyle=':')\n\nplt.xlabel(un + ', ' + r'$|{V_{\\rm U}}|$')\nplt.ylabel(ct + ' [s]')\n\nplt.ylim([0, 0.12])\n\nplt.legend(loc=\"upper left\")\n\nplt.savefig('/Users/takaaki/Dropbox/oki_lab/m2/paper/ieice_server/workspace/fig/user_50_cap_10.pdf')\nplt.show()\nplt.close()\n\n\n# %%\n\nplt.plot(df_user_100_cap_15[un], df_user_100_cap_15[ctes], label=('ESUM'), color='k', marker='x', linestyle='-')\nplt.plot(df_user_100_cap_15[un], df_user_100_cap_15[ctc], label=('ILP'), color='k', marker='o', linestyle=':')\n\nplt.xlabel(un + ', ' + r'$|{V_{\\rm U}}|$')\nplt.ylabel(ct + ' [s]')\n\nplt.ylim([0, 0.2])\n\nplt.legend(loc=\"upper left\")\n\nplt.savefig('/Users/takaaki/Dropbox/oki_lab/m2/paper/ieice_server/workspace/fig/user_100_cap_15.pdf')\nplt.show()\nplt.close()\n\n# %%\n\nplt.plot(df_cap_20_user_20[cp], df_cap_20_user_20[ctes], label=('ESUM'), color='k', marker='x', linestyle='-')\nplt.plot(df_cap_20_user_20[cp], df_cap_20_user_20[ctc], label=('ILP'), color='k', marker='o', linestyle=':')\n\nplt.xlabel(cp + ', ' + r'$M_s$')\nplt.ylabel(ct + ' [s]')\n\nplt.ylim([0, 0.1])\n\nplt.legend(loc=\"upper left\")\n\nplt.savefig('/Users/takaaki/Dropbox/oki_lab/m2/paper/ieice_server/workspace/fig/cap_20_user_20.pdf')\nplt.show()\nplt.close()\n\n\n# %%\n\nplt.plot(df_cap_40_user_40[cp], df_cap_40_user_40[ctes], label=('ESUM'), color='k', marker='x', linestyle='-')\nplt.plot(df_cap_40_user_40[cp], df_cap_40_user_40[ctc], label=('ILP'), color='k', marker='o', linestyle=':')\n\nplt.xlabel(cp + ', ' + r'$M_s$')\nplt.ylabel(ct + ' [s]')\n\nplt.ylim([0, 0.175])\n\nplt.legend(loc=\"upper left\")\n\nplt.savefig('/Users/takaaki/Dropbox/oki_lab/m2/paper/ieice_server/workspace/fig/cap_40_user_40.pdf')\nplt.show()\nplt.close()\n\n\n# %%\n" ]
[ [ "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.rc", "matplotlib.pyplot.gca", "matplotlib.pyplot.ylabel", "pandas.concat", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
jhamman/ndpyramid
[ "34e6bfbc7d217917f90816e520450269eae3934b" ]
[ "ndpyramid/regrid.py" ]
[ "import pathlib\n\nimport datatree as dt\nimport numpy as np\nimport xarray as xr\n\nfrom .utils import get_version, multiscales_template\n\n\ndef make_grid_ds(level: int, pixels_per_tile: int = 128) -> xr.Dataset:\n \"\"\"Make a dataset representing a target grid\n\n Parameters\n ----------\n level : int\n The zoom level to compute the grid for. Level zero is the furthest out zoom level\n pixels_per_tile : int, optional\n Number of pixels to include along each axis in individual tiles, by default 128\n\n Returns\n -------\n xr.Dataset\n Target grid dataset with the following variables:\n - \"x\": X coordinate in Web Mercator projection (grid cell center)\n - \"y\": Y coordinate in Web Mercator projection (grid cell center)\n - \"lat\": latitude coordinate (grid cell center)\n - \"lon\": longitude coordinate (grid cell center)\n - \"lat_b\": latitude bounds for grid cell\n - \"lon_b\": longitude bounds for grid cell\n \"\"\"\n from pyproj import Proj\n from rasterio.transform import Affine\n\n p = Proj('EPSG:3857')\n dim = (2 ** level) * pixels_per_tile\n\n transform = Affine.translation(-20026376.39, 20048966.10) * Affine.scale(\n (20026376.39 * 2) / dim, -(20048966.10 * 2) / dim\n )\n\n grid_shape = (dim, dim)\n bounds_shape = (dim + 1, dim + 1)\n\n xs = np.empty(grid_shape)\n ys = np.empty(grid_shape)\n lat = np.empty(grid_shape)\n lon = np.empty(grid_shape)\n lat_b = np.zeros(bounds_shape)\n lon_b = np.zeros(bounds_shape)\n\n # calc grid cell center coordinates\n ii, jj = np.meshgrid(np.arange(dim) + 0.5, np.arange(dim) + 0.5)\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n locs = [ii[i, j], jj[i, j]]\n xs[i, j], ys[i, j] = transform * locs\n lon[i, j], lat[i, j] = p(xs[i, j], ys[i, j], inverse=True)\n\n # calc grid cell bounds\n iib, jjb = np.meshgrid(np.arange(dim + 1), np.arange(dim + 1))\n for i in range(bounds_shape[0]):\n for j in range(bounds_shape[1]):\n locs = [iib[i, j], jjb[i, j]]\n x, y = transform * locs\n lon_b[i, j], lat_b[i, j] = p(x, y, inverse=True)\n\n # pack data into xarray.Dataset\n ds = xr.Dataset(\n {\n 'x': xr.DataArray(xs[0, :], dims=['x']),\n 'y': xr.DataArray(ys[:, 0], dims=['y']),\n 'lat': xr.DataArray(lat, dims=['y', 'x']),\n 'lon': xr.DataArray(lon, dims=['y', 'x']),\n 'lat_b': xr.DataArray(lat_b, dims=['y_b', 'x_b']),\n 'lon_b': xr.DataArray(lon_b, dims=['y_b', 'x_b']),\n },\n attrs=dict(title='Web Mercator Grid', Convensions='CF-1.8'),\n )\n\n return ds\n\n\ndef make_grid_pyramid(levels: int = 6) -> dt.DataTree:\n \"\"\"helper function to create a grid pyramid for use with xesmf\n\n Parameters\n ----------\n levels : int, optional\n Number of levels in pyramid, by default 6\n\n Returns\n -------\n pyramid : dt.DataTree\n Multiscale grid definition\n \"\"\"\n data = dt.DataTree()\n for level in range(levels):\n data[str(level)] = make_grid_ds(level).chunk(-1)\n return data\n\n # data.to_zarr('gs://carbonplan-scratch/grids/epsg:3857/', consolidated=True)\n\n\ndef pyramid_regrid(\n ds: xr.Dataset,\n target_pyramid: dt.DataTree = None,\n levels: int = None,\n weights_template: str = None,\n method: str = 'bilinear',\n) -> dt.DataTree:\n \"\"\"Make a pyramid using xesmf's regridders\n\n Parameters\n ----------\n ds : xr.Dataset\n Input dataset\n target_pyramid : dt.DataTree, optional\n Target grids, if not provided, they will be generated, by default None\n levels : int, optional\n Number of levels in pyramid, by default None\n weights_template : str, optional\n Filepath to write generated weights to, e.g. 
`'weights_{level}'`, by default None\n method : str, optional\n Regridding method. See ``xesmf.Regridder`` for valid options, by default 'bilinear'\n\n Returns\n -------\n pyramid : dt.DataTree\n Multiscale data pyramid\n \"\"\"\n import xesmf as xe\n\n if target_pyramid is None:\n if levels is not None:\n target_pyramid = make_grid_pyramid(levels)\n else:\n raise ValueError('must either provide a target_pyramid or number of levels')\n if levels is None:\n levels = len(target_pyramid.keys()) # TODO: get levels from the pyramid metadata\n\n # multiscales spec\n save_kwargs = locals()\n del save_kwargs['ds']\n del save_kwargs['target_pyramid']\n del save_kwargs['xe']\n\n attrs = {\n 'multiscales': multiscales_template(\n datasets=[{'path': str(i)} for i in range(levels)],\n type='reduce',\n method='pyramid_regrid',\n version=get_version(),\n kwargs=save_kwargs,\n )\n }\n\n # set up pyramid\n root = xr.Dataset(attrs=attrs)\n pyramid = dt.DataTree(data=root, name='root')\n\n # pyramid data\n for level in range(levels):\n grid = target_pyramid[str(level)].ds.load()\n\n # get the regridder object\n if not weights_template:\n regridder = xe.Regridder(ds, grid, method)\n else:\n fn = pathlib.PosixPath(weights_template.format(level=level))\n if not fn.exists():\n regridder = xe.Regridder(ds, grid, method)\n regridder.to_netcdf(filename=fn)\n else:\n regridder = xe.Regridder(ds, grid, method, weights=fn)\n\n # regrid\n pyramid[str(level)] = regridder(ds)\n\n return pyramid\n" ]
[ [ "numpy.arange", "numpy.empty", "numpy.zeros" ] ]
jdapoorv/lung-cancer-detector
[ "bd98e99ef4b6c7c1f4dec8458c6c655ea6c8b1ba" ]
[ "dataloader/stage1.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nimport pickle as p\r\nimport os\r\nimport math\r\nfrom dataloader.base_dataloader import BaseDataLoader\r\n\r\nimport utils.dicom_processor as dp\r\n\r\nclass Stage1Kaggle(BaseDataLoader):\r\n\tdef __init__(self, config):\r\n\t\tsuper(Stage1Kaggle, self).__init__(config)\r\n\t\tself._load()\r\n\r\n\tdef _load_sets(self):\r\n\t\tprint(\"Loading datasets\")\r\n\r\n\t\ttrain_patients = pd.read_csv(os.path.join(self._directory, \"stage1_labels.csv\"))\r\n\t\ttest_patients = pd.read_csv(os.path.join(self._directory, \"stage1_sample_submission.csv\"))\r\n\r\n\t\tfor idx, row in test_patients.iterrows():\r\n\t\t\tself._test_set.append(row['id'])\r\n\r\n\t\tfor idx, row in train_patients.iterrows():\r\n\t\t\tself._train_set.append([row['id'], row['cancer']])\r\n\r\n\t\t#Create permutation for random loading\r\n\t\tself.shuffle()\r\n\r\n\t\tprint(\"Loading datasets: Done!\")\r\n\r\n\tdef shuffle(self):\r\n\t\tself._train_set = [self._train_set[i] for i in np.random.permutation(len(self._train_set))]\r\n\r\n\tdef _pre_processed_exists(self):\r\n\t\tif not(os.path.exists(self._target_directory) \r\n\t\t\tand os.path.isdir(self._target_directory)):\r\n\t\t\treturn False\r\n\r\n\t\t#Check if all patients exists\r\n\t\tfor patient in self._train_set:\r\n\t\t\tif not os.path.exists(os.path.join(self._target_directory, patient[0] + \".pick\")):\r\n\t\t\t\treturn False\r\n\r\n\t\tfor patient in self._test_set:\r\n\t\t\tif not os.path.exists(os.path.join(self._target_directory, patient + \".pick\")):\r\n\t\t\t\treturn False\r\n\r\n\t\tprint(\"Found pre-processed datasets\")\r\n\t\treturn True\r\n\r\n\tdef _pre_process(self):\r\n\t\tif self._pre_processed_exists():\r\n\t\t\treturn\r\n\r\n\t\tprint(\"No pre-processed dataset found, pre-processing\")\r\n\t\tif not(os.path.exists(self._target_directory)):\r\n\t\t\tos.makedirs(self._target_directory)\r\n\r\n\t\tsize = len(self._train_set)\r\n\t\tfor idx, patient in enumerate(self._train_set):\r\n\t\t\tprint(\"Pre-processing patient: \", patient[0], str(idx+1) + \"/\" + str(size))\r\n\t\t\tif self._original_size:\r\n\t\t\t\timage = dp.get_image_HU(os.path.join(self._directory, patient[0]))\r\n\t\t\telse:\r\n\t\t\t\timage = dp.get_resized(os.path.join(self._directory, patient[0]), self._size)\r\n\t\t\tp.dump(image, open(os.path.join(self._target_directory, patient[0] + \".pick\"), \"wb\"), protocol=2)\r\n\r\n\t\tsize = len(self._test_set)\r\n\t\tfor idx, patient in enumerate(self._test_set):\r\n\t\t\tprint(\"Pre-processing patient: \", patient, str(idx+1) + \"/\" + str(size))\r\n\t\t\tif self._original_size:\r\n\t\t\t\timage = dp.get_image_HU(os.path.join(self._directory, patient))\r\n\t\t\telse:\r\n\t\t\t\timage = dp.get_resized(os.path.join(self._directory, patient), self._size)\r\n\t\t\tp.dump(image, open(os.path.join(self._target_directory, patient + \".pick\"), \"wb\"), protocol=2)\r\n\r\n\t\tprint(\"Pre-processing: Done!\")\r\n\r\n\tdef train(self, do_shuffle=True):\r\n\t\tif do_shuffle:\r\n\t\t\tself.shuffle()\r\n\r\n\t\ttrain_size = int(math.ceil((1.0 - self._val) * len(self._train_set)))\r\n\t\tself._current_set_x = [s[0] for s in self._train_set[:train_size]]\r\n\t\tself._current_set_y = [s[1] for s in self._train_set[:train_size]]\r\n\r\n\t\tself._current_set_size = train_size\r\n\r\n\tdef validate(self):\r\n\t\ttrain_size = int(math.ceil((1.0 - self._val) * len(self._train_set)))\r\n\t\tself._current_set_x = [s[0] for s in self._train_set[train_size:]]\r\n\t\tself._current_set_y = [s[1] for s in 
self._train_set[train_size:]]\r\n\r\n\t\tself._current_set_size = len(self._current_set_x)\r\n\r\n\tdef test(self):\r\n\t\tself._current_set_x = self._test_set[:]\r\n\t\tself._current_set_size = len(self._current_set_x)\r\n\r\n\t\tself._current_set_y = [0] * self._current_set_size\r\n\r\n\tdef _load_patient(self, patient):\r\n\t\timg = p.load(open(os.path.join(self._target_directory, patient + \".pick\"), \"rb\"))\r\n\t\timg = dp.normalize_planes(img)\r\n\t\treturn img\r\n\r\n\tdef data_iter(self):\r\n\t\tself._current_pointer = 0\r\n\r\n\t\twhile self._current_pointer < self._current_set_size:\r\n\t\t\tbatch_x = self._current_set_x[self._current_pointer: self._current_pointer+self._batch_size]\r\n\t\t\tbatch_y = self._current_set_y[self._current_pointer: self._current_pointer+self._batch_size]\r\n\r\n\t\t\tself._current_pointer += self._batch_size\r\n\r\n\t\t\tyield np.stack([self._load_patient(s) for s in batch_x]), np.array(batch_y), batch_x\r\n\r\n\tdef _set_directories(self):\r\n\t\tself._directory = \"data/\" + self._get_directory()\r\n\t\tif self._original_size:\r\n\t\t\tself._target_directory = \"data/preprocessed/\" + self._get_directory() + \"/original\"\r\n\t\telse:\r\n\t\t\tself._target_directory = \"data/preprocessed/\" + self._get_directory() + \"/\" \\\r\n\t\t\t\t\t+ str(self._size[0]) + \"_\" + str(self._size[1]) + \"_\" + str(self._size[2])\r\n\t\r\n\tdef _get_directory(self):\r\n\t\treturn \"stage1\"\r\n\r\n\tdef _load(self):\r\n\t\tself._directory_root = self._get_directory()\r\n\t\tself._size = self._config.size\r\n\t\tself._original_size = self._config.original\r\n\t\tself._padded = self._config.padded_images\r\n\t\tself._batch_size = self._config.batch\r\n\t\tself._no_val = self._config.no_validation\r\n\t\tif self._no_val:\r\n\t\t\tself._val = 0\r\n\t\telse:\r\n\t\t\tself._val = self._config.validation_ratio\r\n\r\n\t\tself._train_set = []\r\n\t\tself._test_set = []\r\n\r\n\t\tself._current_set_x = None\r\n\t\tself._current_set_y = None\r\n\t\tself._current_pointer = 0\r\n\t\tself._current_set_size = 0\r\n\r\n\t\tself._set_directories()\r\n\t\tself._load_sets()\r\n\t\tself._pre_process()\r\n\r\n\t\tself.train()\r\n\r\ndef get_data_loader(config):\r\n\treturn Stage1Kaggle(config)\r\n" ]
[ [ "numpy.array" ] ]
mspkvp/MiningOpinionTweets
[ "23f05b4cea22254748675e03a51844da1dff70ac" ]
[ "src/lda_topics.py" ]
[ "from __future__ import print_function\r\nfrom time import time\r\nimport csv\r\nimport sys\r\n\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nimport numpy as np\r\nimport lda\r\n\r\nimport logging\r\n\r\n#start_time = str(time())\r\nlogging.basicConfig(filename='lda_analyser.log', level=logging.DEBUG)\r\ncorpus = []\r\ntopics_write_file = csv.writer(open(\"lda_topics.csv\", \"wb\"), delimiter=\"\\t\", quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\nwrite_file = csv.writer(open(\"lda_topics_mapping.csv\", \"wb\"), delimiter=\"\\t\", quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n\r\n\r\ndef print_top_words(model, doc_topic, feature_names, n_top_words, dictionary):\r\n for i, topic_dist in enumerate(model):\r\n\r\n topic_words = np.array(feature_names)[np.argsort(topic_dist)][:-n_top_words:-1]\r\n #write_file.write('Topic {}: {}\\n'.format(i, ' '.join(topic_words)))\r\n topic_row = [str(i)]\r\n topic_row.extend(topic_words)\r\n topics_write_file.writerow(topic_row)\r\n\r\n for i in range(len(corpus)):\r\n document_row = [dictionary[i][0], dictionary[i][1]]\r\n document_row.append(doc_topic[i].argmax())\r\n document_row.append(corpus[i])\r\n write_file.writerow(document_row)\r\n\r\n\r\nentity_day_dict = dict()\r\n\r\n\r\ntfidif_top_topics = csv.reader(open(\"tfidf_scores.csv\", 'rb'), delimiter=\"\\t\", quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n\r\ni = 0\r\nfor row in tfidif_top_topics:\r\n document = ''\r\n split_row = row[0].split(\",\")\r\n entity_day_dict[i] = split_row[:2]\r\n for item in split_row[2:]:\r\n document += item + ' '\r\n corpus.append(document)\r\n i += 1\r\n\r\n#for row in corpus:\r\n# print (row)\r\n\r\n#raise SystemExit(0)\r\n\r\n\r\nn_features = 10000\r\nn_topics = int(sys.argv[1])\r\nn_top_words = int(sys.argv[2]) + 1\r\n\r\n# Use tf (raw term count) features for LDA.\r\nlogging.info(\"Extracting tf features for LDA...\")\r\ntf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,\r\n stop_words='english')\r\nt0 = time()\r\ntf = tf_vectorizer.fit_transform(corpus)\r\n\r\nlogging.info(\"done in %0.3fs.\" % (time() - t0))\r\n\r\nlogging.info(\"Fitting LDA models with tf\")\r\nmodel = lda.LDA(n_topics=n_topics, n_iter=1500, random_state=1)\r\n #LatentDirichletAllocation(n_topics=n_topics, max_iter=5, learning_method='online', #learning_offset=50., random_state=0)\r\nt0 = time()\r\nmodel.fit(tf)\r\nlogging.info(\"done in %0.3fs.\" % (time() - t0))\r\n\r\n\r\ntopic_word = model.topic_word_\r\ndoc_topic = model.doc_topic_\r\nlogging.info(\"\\nTopics in LDA model:\")\r\ntf_feature_names = tf_vectorizer.get_feature_names()\r\nprint_top_words(topic_word, doc_topic, tf_feature_names, n_top_words, entity_day_dict)" ]
[ [ "numpy.array", "sklearn.feature_extraction.text.CountVectorizer", "numpy.argsort" ] ]
skmkedar/FewShotLearn
[ "3f27581ef93bda0fb1d6661027f5d19e7418c4f4" ]
[ "model/baselines/fce-embedding.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Albert Berenguel\n## Computer Vision Center (CVC). Universitat Autonoma de Barcelona\n## Email: [email protected]\n## Copyright (c) 2017\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree\n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nimport os\nimport torch\nimport torch.nn as nn\nimport importlib\nimport pickle\nimport numpy as np\n#from model.lstm.bnlstm import RecurrentLSTMNetwork\n\nclass FceEmbedding():\n def __init__(self, opt):\n self.opt = opt # Store the parameters\n self.maxGradNorm = opt['maxGradNorm'] if ['maxGradNorm'] in opt.keys() else 0.25\n self.numLayersAttLstm = opt['numLayersAttLstm'] if ['numLayersAttLstm'] in opt.keys() else 1\n self.numLayersBiLstm = opt['numLayersBiLstm'] if ['numLayersBiLstm'] in opt.keys() else 1\n self.buildModels(self.opt)\n self.setCuda()\n\n # Build F and G models\n def buildModels(self,opt):\n # F function\n modelF = importlib.import_module(opt['learner']).build(opt)\n self.embedNetF = modelF.net\n # G function\n modelG = importlib.import_module(opt['learner']).build(opt)\n self.embedNetG = modelG.net\n\n '''\n # Build LSTM for attention model.\n self.attLSTM = RecurrentLSTMNetwork({\n 'inputFeatures': self.embedNetF.outSize + self.embedNetG.outSize,\n 'hiddenFeatures': self.embedNetF.outSize,\n 'outputType': 'all'\n })\n\n self.biLSTMForward = RecurrentLSTMNetwork({\n 'inputFeatures': self.embedNetG.outSize,\n 'hiddenFeatures': self.embedNetG.outSize,\n 'outputType': 'all'\n })\n\n self.biLSTMBackward = RecurrentLSTMNetwork({\n 'inputFeatures': self.embedNetG.outSize,\n 'hiddenFeatures': self.embedNetG.outSize,\n 'outputType': 'all'\n })\n '''\n\n self.attLSTM = nn.LSTM(input_size=self.embedNetF.outSize + self.embedNetG.outSize,\n hidden_size=self.embedNetF.outSize,\n num_layers = self.numLayersAttLstm)\n # Build bidirectional LSTM\n self.biLSTM = nn.LSTM(input_size=self.embedNetG.outSize,\n hidden_size=self.embedNetG.outSize,\n num_layers=self.numLayersBiLstm,\n bidirectional=True)\n\n self.softmax = nn.Softmax()\n\n # Build list of parameters for optim\n def parameters(self):\n # TODO: why in the original code creates a dictionary with the same\n # parameters. 
model.params = {f=paramsG, g=paramsG, attLST, biLSTM}\n return list(self.embedNetG.parameters()) + \\\n list(self.embedNetG.parameters()) + \\\n list(self.attLSTM.parameters()) + \\\n list(self.biLSTM.parameters())\n\n # Set training or evaluation mode\n def set(self,mode):\n if mode == 'training':\n self.embedNetF.train()\n self.embedNetG.train()\n elif mode == 'evaluate':\n self.embedNetF.eval()\n self.embedNetG.eval()\n else:\n print('model.set: undefined mode - %s' % (mode))\n\n def isTraining(self):\n return self.embedNetF.training\n\n def attLSTM_forward(self,gS,fX, K):\n\n r = gS.mean(0).expand_as(fX)\n for i in np.arange(K):\n x = torch.cat((fX, r), 1)\n x = x.unsqueeze(0)\n if i == 0:\n #dim: [sequence = 1, batch_size, num_features * 2]\n output, (h, c) = self.attLSTM(x)\n else:\n output, (h, c) = self.attLSTM(x,(h,c))\n h = fX.squeeze(0) + output\n\n embed = None\n # Iterate over batch size\n for j in np.arange(h.size(1)):\n hInd = h[0,i, :].expand_as(gS)\n weight = (gS*hInd).sum(1).unsqueeze(1)\n embed_tmp = (self.softmax(weight).expand_as(gS) * gS).sum(0).unsqueeze(0)\n if embed is None:\n embed = embed_tmp\n else:\n embed = torch.cat([embed,embed_tmp],0)\n # output dim: [batch, num_features]\n return h.squeeze(0)\n\n def biLSTM_forward(self, input):\n gX = input\n # Expected input dimension of the form [sequence_length, batch_size, num_features]\n gX = gX.unsqueeze(1)\n output, (hn, cn) = self.biLSTM(gX)\n # output dim: [sequence, batch_size, num_features * 2]\n output = output[:, :, :self.embedNetG.outSize] + output[:, :, self.embedNetG.outSize:]\n output = output.squeeze(1)\n # output dim: [sequence, num_features]\n return output\n\n def embedG(self, input):\n g = self.embedNetG(input)\n return self.biLSTM_forward(g)\n\n def embedF(self, input, g, K):\n f = self.embedNetF(input)\n return self.attLSTM_forward(g,f,K)\n\n def save(self, path = './data'):\n # Save the opt parameters\n optParametersFile = open(os.path.join(path,'SimpleEmbedding_opt.pkl'), 'wb')\n pickle.dump(self.opt, optParametersFile)\n optParametersFile.close()\n # Clean not needed data of the models\n self.embedNetF.clearState()\n self.embedNetG.clearState()\n torch.save(self.embedNetF.state_dict(), os.path.join(path,'embedNetF.pth.tar'))\n torch.save(self.embedNetG.state_dict(), os.path.join(path, 'embedNetG.pth.tar'))\n\n def load(self, pathParams, pathModelF, pathModelG):\n # Load opt parameters 'SimpleEmbedding_opt.pkl'\n optParametersFile = open(pathParams, 'rb')\n self.opt = pickle.load(optParametersFile)\n optParametersFile.close()\n # build the models\n self.buildModels(self.opt)\n # Load the weights and biases of F and G\n checkpoint = torch.load(pathModelF)\n self.embedNetF.load_state_dict(checkpoint['state_dict'])\n checkpoint = torch.load(pathModelG)\n self.embedNetG.load_state_dict(checkpoint['state_dict'])\n # Set cuda\n self.setCuda()\n\n def setCuda(self, value = 'default'):\n # If value is a string then use self.opt\n # If it is not a string then it should be True or False\n if type(value) == str:\n value = self.opt['useCUDA']\n else:\n assert(type(value)==bool)\n\n if value == True:\n print('Check CUDA')\n self.embedNetF.cuda()\n self.embedNetG.cuda()\n self.attLSTM.cuda()\n self.biLSTM.cuda()\n else:\n self.embedNetF.cpu()\n self.embedNetG.cpu()\n self.attLSTM.cpu()\n self.biLSTM.cpu()\n\ndef build(opt):\n model = FceEmbedding(opt)\n return model\n\n\n" ]
[ [ "torch.cat", "torch.nn.LSTM", "torch.nn.Softmax", "numpy.arange", "torch.load" ] ]
xemio/ANTsPy
[ "ef610318e217bb04d3850d480c2e51df695d56c0" ]
[ "ants/utils/label_image_centroids.py" ]
[ "\n\n__all__ = ['label_image_centroids']\n\nimport numpy as np\n\nfrom ..core import ants_transform as tio\n\n\ndef label_image_centroids(image, physical=False, convex=True, verbose=False):\n \"\"\"\n Converts a label image to coordinates summarizing their positions\n\n ANTsR function: `labelImageCentroids`\n\n Arguments\n ---------\n image : ANTsImage\n image of integer labels\n \n physical : boolean\n whether you want physical space coordinates or not\n \n convex : boolean\n if True, return centroid\n if False return point with min average distance to other points with same label\n \n Returns\n -------\n dictionary w/ following key-value pairs:\n `labels` : 1D-ndarray\n array of label values\n\n `vertices` : pd.DataFrame\n coordinates of label centroids\n\n Example\n -------\n >>> import ants\n >>> import numpy as np\n >>> image = ants.from_numpy(np.asarray([[[0,2],[1,3]],[[4,6],[5,7]]]).astype('float32'))\n >>> labels = ants.label_image_centroids(image)\n \"\"\"\n d = image.shape\n if len(d) != 3:\n raise ValueError('image must be 3 dimensions')\n\n xcoords = np.asarray(np.arange(d[0]).tolist()*(d[1]*d[2]))\n ycoords = np.asarray(np.repeat(np.arange(d[1]),d[0]).tolist()*d[2])\n zcoords = np.asarray(np.repeat(np.arange(d[1]), d[0]*d[2]))\n\n labels = image.numpy()\n mylabels = np.sort(np.unique(labels[labels > 0])).astype('int')\n n_labels = len(mylabels)\n xc = np.zeros(n_labels)\n yc = np.zeros(n_labels)\n zc = np.zeros(n_labels)\n\n if convex:\n for i in mylabels:\n idx = (labels == i).flatten()\n xc[i-1] = np.mean(xcoords[idx])\n yc[i-1] = np.mean(ycoords[idx])\n zc[i-1] = np.mean(zcoords[idx])\n else:\n for i in mylabels:\n idx = (labels == i).flatten()\n xci = xcoords[idx]\n yci = ycoords[idx]\n zci = zcoords[idx]\n dist = np.zeros(len(xci))\n\n for j in range(len(xci)):\n dist[j] = np.mean(np.sqrt((xci[j] - xci)**2 + (yci[j] - yci)**2 + (zci[j] - zci)**2))\n\n mid = np.where(dist==np.min(dist))\n xc[i-1] = xci[mid]\n yc[i-1] = yci[mid]\n zc[i-1] = zci[mid]\n\n centroids = np.vstack([xc,yc,zc]).T\n\n #if physical:\n # centroids = tio.transform_index_to_physical_point(image, centroids)\n\n return {\n 'labels': mylabels,\n 'vertices': centroids\n }\n\n\n\n\n" ]
[ [ "numpy.zeros", "numpy.min", "numpy.mean", "numpy.arange", "numpy.unique", "numpy.sqrt", "numpy.vstack" ] ]
Oscarlight/PiNN_Caffe2
[ "fd5127c88960a863049f4bda658a4c26a8b5d376" ]
[ "ac_qv_api.py" ]
[ "import caffe2_paths\r\nimport os\r\nimport pickle\r\nfrom caffe2.python import (\r\n\tworkspace, layer_model_helper, schema, optimizer, net_drawer\r\n)\r\nimport caffe2.python.layer_model_instantiator as instantiator\r\nimport numpy as np\r\nfrom pinn.adjoint_mlp_lib import build_adjoint_mlp, init_model_with_schemas\r\nimport pinn.data_reader as data_reader\r\nimport pinn.preproc as preproc\r\nimport pinn.parser as parser\r\nimport pinn.visualizer as visualizer\r\nimport pinn.exporter as exporter\r\nfrom shutil import copyfile\r\n# import logging\r\nimport matplotlib.pyplot as plt\r\n\r\nclass ACQVModel:\r\n\tdef __init__(\r\n\t\tself, \r\n\t\tmodel_name,\r\n\t\tinput_dim=1,\r\n\t\toutput_dim=1,\r\n\t):\t\r\n\t\tself.model_name = model_name\r\n\t\tself.input_dim = input_dim\r\n\t\tself.output_dim = output_dim\r\n\t\tself.model = init_model_with_schemas(\r\n\t\t\tmodel_name, self.input_dim, self.output_dim)\r\n\t\tself.input_data_store = {}\r\n\t\tself.preproc_param = {}\r\n\t\tself.net_store = {}\r\n\t\tself.reports = {'epoch':[],'train_loss':[], 'eval_loss':[]}\r\n\r\n\r\n\tdef add_data(\r\n\t\tself,\r\n\t\tdata_tag,\r\n\t\tdata_arrays, \r\n\t\tpreproc_param,\r\n\t\toverride=True,\r\n\t):\r\n\t\t'''\r\n\t\tdata_arrays are in the order of origin_input, adjoint_label\r\n\t\torigin_input and adjoint_label must be numpy arrays\r\n\t\t'''\r\n\t\t#check length and dimensions of origin input and adjoint label \r\n\t\tassert len(data_arrays) == 2, 'Incorrect number of input data'\r\n\t\tvoltages = data_arrays[0]\r\n\t\tcapas = data_arrays[1]\r\n\t\tassert voltages.shape == capas.shape, 'Mismatch dimensions'\r\n\t\t\r\n\t\t#Set preprocess parameters and database name\r\n\t\tself.preproc_param = preproc_param\r\n\t\tself.pickle_file_name = self.model_name + '_preproc_param' + '.p'\r\n\t\tdb_name = self.model_name + '_' + data_tag + '.minidb'\r\n\r\n\t\tif os.path.isfile(db_name):\r\n\t\t\tif override:\r\n\t\t\t\tprint(\"XXX Delete the old database...\")\r\n\t\t\t\tos.remove(db_name)\r\n\t\t\t\tos.remove(self.pickle_file_name)\r\n\t\t\telse:\r\n\t\t\t\traise Exception('Encounter database with the same name. 
' +\r\n\t\t\t\t\t'Choose the other model name or set override to True.')\r\n\t\tprint(\"+++ Create a new database...\")\t\r\n\t\t\r\n\t\tself.preproc_param.setdefault('max_loss_scale', 1.)\r\n\t\t\r\n\t\tpickle.dump(\r\n\t\t\tself.preproc_param, \r\n\t\t\topen(self.pickle_file_name, 'wb')\r\n\t\t)\r\n\r\n\t\t#Preprocess the data\r\n\t\tvoltages, capas = preproc.ac_qv_preproc(\r\n\t\t\tvoltages, capas,\r\n\t\t\tself.preproc_param['scale'], \r\n\t\t\tself.preproc_param['vg_shift']\r\n\t\t)\r\n\t\t\r\n\t\t# Only expand the dim if the number of dimension is 1\r\n\t\torigin_input = np.expand_dims(\r\n\t\t\tvoltages, axis=1) if voltages.ndim == 1 else voltages\r\n\t\tadjoint_label = np.expand_dims(\r\n\t\t\tcapas, axis=1) if capas.ndim == 1 else capas\t\t\r\n\r\n\t\t# Create adjoint_input data\r\n\t\tadjoint_input = np.ones((origin_input.shape[0], 1))\r\n\r\n\t\t# Set the data type to np float for origin input, adjoint input, adjoint label\r\n\t\torigin_input = origin_input.astype(np.float32)\r\n\t\tadjoint_input = adjoint_input.astype(np.float32)\r\n\t\tadjoint_label = adjoint_label.astype(np.float32)\r\n\t\t\r\n\t\t# Write to database\r\n\t\tdata_reader.write_db(\r\n\t\t\t'minidb', db_name, \r\n\t\t\t[origin_input, adjoint_input, adjoint_label]\r\n\t\t)\r\n\t\tself.input_data_store[data_tag] = [db_name, origin_input.shape[0]]\r\n\r\n\t# add_data_base: add the database file directly\r\n\tdef add_database(\r\n\t\tself,\r\n\t\tdata_tag,\r\n\t\tdb_name,\r\n\t\tnum_example,\r\n\t\tpreproc_param_pickle_name,\r\n\t\t):\r\n\t\tself.input_data_store[data_tag] = [db_name, num_example]\r\n\t\t# Save the preproc_param with the model\r\n\t\tself.pickle_file_name = self.model_name + '_' + preproc_param_pickle_name\r\n\t\tcopyfile(preproc_param_pickle_name, self.pickle_file_name)\r\n\r\n\tdef build_nets(\r\n\t\tself,\r\n\t\thidden_dims, \r\n\t\tbatch_size=1,\r\n\t\toptim_method = 'AdaGrad',\r\n\t\toptim_param = {'alpha':0.01, 'epsilon':1e-4},\r\n\t):\r\n\t\tassert len(self.input_data_store) > 0, 'Input data store is empty.'\r\n\t\tassert 'train' in self.input_data_store, 'Missing training data.'\r\n\t\tself.batch_size = batch_size\r\n\t\t# Build the date reader net for train net\r\n\t\tinput_data_train = data_reader.build_input_reader(\r\n\t\t\tself.model, \r\n\t\t\tself.input_data_store['train'][0], \r\n\t\t\t'minidb', \r\n\t\t\t['origin_input', 'adjoint_input', 'label'], \r\n\t\t\tbatch_size=batch_size,\r\n\t\t\tdata_type='train',\r\n\t\t)\r\n\r\n\t\tif 'eval' in self.input_data_store:\r\n\t\t\t# Build the data reader net for eval net\r\n\t\t\tinput_data_eval = data_reader.build_input_reader(\r\n\t\t\t\tself.model, \r\n\t\t\t\tself.input_data_store['eval'][0], \r\n\t\t\t\t'minidb', \r\n\t\t\t\t['origin_input', 'adjoint_input', 'label'], \r\n\t\t\t\tbatch_size=batch_size,\r\n\t\t\t\tdata_type='eval',\r\n\t\t\t)\r\n\r\n\t\t# Build the computational nets\r\n\t\t# Create train net\r\n\t\tself.model.input_feature_schema.origin_input.set_value(\r\n\t\t\tinput_data_train[0].get(), unsafe=True)\r\n\t\tself.model.input_feature_schema.adjoint_input.set_value(\r\n\t\t\tinput_data_train[1].get(), unsafe=True)\r\n\t\tself.model.trainer_extra_schema.label.set_value(\r\n\t\t\tinput_data_train[2].get(), unsafe=True)\r\n\r\n\t\tself.origin_pred, self.adjoint_pred, self.loss = build_adjoint_mlp(\r\n\t\t\tself.model,\r\n\t\t\tinput_dim = self.input_dim,\r\n\t\t\thidden_dims = hidden_dims,\r\n\t\t\toutput_dim = self.output_dim,\r\n\t\t\toptim=_build_optimizer(\r\n\t\t\t\toptim_method, 
optim_param),\r\n\t\t)\r\n\r\n\t\ttrain_init_net, train_net = instantiator.generate_training_nets(self.model)\r\n\t\tworkspace.RunNetOnce(train_init_net)\r\n\t\tworkspace.CreateNet(train_net)\r\n\t\tself.net_store['train_net'] = train_net\r\n\r\n\t\tpred_net = instantiator.generate_predict_net(self.model)\r\n\t\tworkspace.CreateNet(pred_net)\r\n\t\tself.net_store['pred_net'] = pred_net\r\n\t\t\r\n\t\tif 'eval' in self.input_data_store:\r\n\t\t\t# Create eval net\r\n\t\t\tself.model.input_feature_schema.origin_input.set_value(\r\n\t\t\t\tinput_data_eval[0].get(), unsafe=True)\r\n\t\t\tself.model.input_feature_schema.adjoint_input.set_value(\r\n\t\t\t\tinput_data_eval[1].get(), unsafe=True)\r\n\t\t\tself.model.trainer_extra_schema.label.set_value(\r\n\t\t\t\tinput_data_eval[2].get(), unsafe=True)\r\n\t\t\teval_net = instantiator.generate_eval_net(self.model)\r\n\t\t\tworkspace.CreateNet(eval_net)\r\n\t\t\tself.net_store['eval_net'] = eval_net\r\n\r\n\r\n\tdef train_with_eval(\r\n\t\tself, \r\n\t\tnum_epoch=1,\r\n\t\treport_interval=0,\r\n\t\teval_during_training=False,\r\n\r\n\t):\r\n\t\t''' Fastest mode: report_interval = 0\r\n\t\t\tMedium mode: report_interval > 0, eval_during_training=False\r\n\t\t\tSlowest mode: report_interval > 0, eval_during_training=True\r\n\t\t'''\r\n\t\tnum_batch_per_epoch = int(\r\n\t\t\tself.input_data_store['train'][1] / \r\n\t\t\tself.batch_size\r\n\t\t)\r\n\t\tif not self.input_data_store['train'][1] % self.batch_size == 0:\r\n\t\t\tnum_batch_per_epoch += 1\r\n\t\t\tprint('[Warning]: batch_size cannot be divided. ' + \r\n\t\t\t\t'Run on {} example instead of {}'.format(\r\n\t\t\t\t\t\tnum_batch_per_epoch * self.batch_size,\r\n\t\t\t\t\t\tself.input_data_store['train'][1]\r\n\t\t\t\t\t)\r\n\t\t\t\t)\r\n\t\tprint('<<< Run {} iteration'.format(num_epoch * num_batch_per_epoch))\r\n\r\n\t\ttrain_net = self.net_store['train_net']\r\n\t\tif report_interval > 0:\r\n\t\t\tprint('>>> Training with Reports')\r\n\t\t\tnum_eval = int(num_epoch / report_interval)\r\n\t\t\tnum_unit_iter = int((num_batch_per_epoch * num_epoch)/num_eval)\r\n\t\t\tif eval_during_training and 'eval_net' in self.net_store:\r\n\t\t\t\tprint('>>> Training with Eval Reports (Slowest mode)')\r\n\t\t\t\teval_net = self.net_store['eval_net']\r\n\t\t\tfor i in range(num_eval):\r\n\t\t\t\tworkspace.RunNet(\r\n\t\t\t\t\ttrain_net.Proto().name, \r\n\t\t\t\t\tnum_iter=num_unit_iter\r\n\t\t\t\t)\r\n\t\t\t\tself.reports['epoch'].append((i + 1) * report_interval)\r\n\t\t\t\ttrain_loss = np.asscalar(schema.FetchRecord(self.loss).get())\r\n\t\t\t\tself.reports['train_loss'].append(train_loss)\r\n\t\t\t\tif eval_during_training and 'eval_net' in self.net_store:\r\n\t\t\t\t\tworkspace.RunNet(\r\n\t\t\t\t\t\teval_net.Proto().name,\r\n\t\t\t\t\t\tnum_iter=num_unit_iter)\r\n\t\t\t\t\teval_loss = np.asscalar(schema.FetchRecord(self.loss).get())\r\n\t\t\t\t\tself.reports['eval_loss'].append(eval_loss)\r\n\t\telse:\r\n\t\t\tprint('>>> Training without Reports (Fastest mode)')\r\n\t\t\tnum_iter = num_epoch*num_batch_per_epoch\r\n\t\t\tworkspace.RunNet(\r\n\t\t\t\ttrain_net, \r\n\t\t\t\tnum_iter=num_iter\r\n\t\t\t)\r\n\t\t\t\r\n\t\tprint('>>> Saving test model')\r\n\r\n\t\texporter.save_net(\r\n\t\t\tself.net_store['pred_net'], \r\n\t\t\tself.model, \r\n\t\t\tself.model_name+'_init', self.model_name+'_predict'\r\n\t\t)\r\n\r\n\r\n\tdef draw_nets(self):\r\n\t\tfor net_name in self.net_store:\r\n\t\t\tnet = self.net_store[net_name]\r\n\t\t\tgraph = net_drawer.GetPydotGraph(net.Proto().op, rankdir='TB')\r\n\t\t\twith 
open(net.Name() + \".png\",'wb') as f:\r\n\t\t\t\tf.write(graph.create_png())\r\n\r\n\tdef plot_loss_trend(self):\r\n\t\tplt.plot(self.reports['epoch'], self.reports['train_loss'])\r\n\t\tif len(self.reports['eval_loss']) > 0:\r\n\t\t\tplt.plot(self.reports['epoch'], self.reports['eval_loss'], 'r--')\r\n\t\tplt.show()\r\n\r\n\tdef save_loss_trend(self,save_name):\r\n\t\tif len(self.reports['eval_loss'])>0:\r\n\t\t\tf = open(save_name+'_loss_trend.csv', \"w\")\r\n\t\t\tf.write(\r\n\t\t\t\t\"{},{},{}\\n\".format(\r\n\t\t\t\t\t\"epoch\", \"train_loss\",\"eval_loss\"))\r\n\t\t\tfor x in zip(\r\n\t\t\t\tself.reports['epoch'],\r\n\t\t\t\tself.reports['train_loss'],\r\n\t\t\t\tself.reports['eval_loss']):\r\n\t\t\t\tf.write(\"{},{},{}\\n\".format(\r\n\t\t\t\t\tx[0], x[1], x[2]))\r\n\t\t\tf.close()\r\n\t\telse:\r\n\t\t\tf = open(save_name+'_loss_trend.csv', \"w\")\r\n\t\t\tf.write(\"{},{}\\n\".format(\"epoch\", \"train_loss\"))\r\n\t\t\tfor x in zip(\r\n\t\t\t\tself.reports['epoch'],\r\n\t\t\t\tself.reports['train_loss']):\r\n\t\t\t\tf.write(\"{},{}\\n\".format(x[0], x[1]))\r\n\t\t\tf.close()\r\n\t\r\n\r\n\r\n\t\r\n# --------------------------------------------------------\r\n# ---------------- Global functions -------------------\r\n# --------------------------------------------------------\r\n\r\ndef predict_qs(model_name, terminal, voltages):\r\n\tworkspace.ResetWorkspace()\r\n\r\n\t# requires voltages is an numpy array of size \r\n\t# (batch size, input_dimension)\r\n\t# the first dimension is Vg and the second dimenstion is Vd\r\n\r\n\t# preprocess the origin input and create adjoint input\r\n\tpreproc_param = pickle.load(\r\n\t\t\topen(model_name+'_' + terminal + '_preproc_param.p', \"rb\" )\r\n\t\t)\r\n\tdummy_qs = np.zeros(voltages[0].shape[0])\r\n\tvoltages, dummy_qs = preproc.ac_qv_preproc(\r\n\t\tvoltages, dummy_qs, \r\n\t\tpreproc_param['scale'], \r\n\t\tpreproc_param['vg_shift']\r\n\t)\r\n\tadjoint_input = np.ones((voltages.shape[0], 1))\r\n\t# Expand dimensions of input and set data type of inputs\r\n\torigin_input = np.expand_dims(\r\n\t\tvoltages, axis=1)\r\n\torigin_input = origin_input.astype(np.float32)\r\n\tadjoint_input = adjoint_input.astype(np.float32)\r\n\r\n\tworkspace.FeedBlob('DBInput_train/origin_input', voltages)\r\n\tworkspace.FeedBlob('DBInput_train/adjoint_input', adjoint_input)\r\n\tpred_net = exporter.load_net(model_name+'_init', model_name+'_predict')\r\n\tworkspace.RunNet(pred_net)\r\n\r\n\tqs = np.squeeze(workspace.FetchBlob('origin/NanCheck/origin_pred'))\r\n\tgradients = np.squeeze(workspace.FetchBlob('adjoint/fc0/output'))\r\n\trestore_integral_func, restore_gradient_func = preproc.get_restore_q_func( \r\n\t\tpreproc_param['scale'], \r\n\t\tpreproc_param['vg_shift']\r\n\t)\r\n\toriginal_qs = restore_integral_func(qs)\r\n\toriginal_gradients = restore_gradient_func(gradients)\r\n\treturn original_qs, original_gradients\r\n\r\n\r\ndef plot_iv( \r\n\tvg, vd, ids, \r\n\tvg_comp = None, vd_comp = None, ids_comp = None,\r\n\tstyles = ['vg_major_linear', 'vd_major_linear', 'vg_major_log', 'vd_major_log']\r\n):\r\n\tif 'vg_major_linear' in styles:\r\n\t\tvisualizer.plot_linear_Id_vs_Vd_at_Vg(\r\n\t\t\tvg, vd, ids, \r\n\t\t\tvg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,\r\n\t\t)\r\n\tif 'vd_major_linear' in styles:\r\n\t\tvisualizer.plot_linear_Id_vs_Vg_at_Vd(\r\n\t\t\tvg, vd, ids, \r\n\t\t\tvg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,\r\n\t\t)\r\n\tif 'vg_major_log' in styles:\r\n\t\tvisualizer.plot_log_Id_vs_Vd_at_Vg(\r\n\t\t\tvg, vd, ids, 
\r\n\t\t\tvg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,\r\n\t\t)\r\n\tif 'vd_major_log' in styles:\r\n\t\tvisualizer.plot_log_Id_vs_Vg_at_Vd(\r\n\t\t\tvg, vd, ids, \r\n\t\t\tvg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,\r\n\t\t)\r\n\r\ndef _build_optimizer(optim_method, optim_param):\r\n\tif optim_method == 'AdaGrad':\r\n\t\toptim = optimizer.AdagradOptimizer(**optim_param)\r\n\telif optim_method == 'SgdOptimizer':\r\n\t\toptim = optimizer.SgdOptimizer(**optim_param)\r\n\telif optim_method == 'Adam':\r\n\t\toptim = optimizer.AdamOptimizer(**optim_param)\r\n\telse:\r\n\t\traise Exception(\r\n\t\t\t'Did you foget to implement {}?'.format(optim_method))\r\n\treturn optim\r\n" ]
[ [ "numpy.zeros", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "numpy.expand_dims" ] ]
jfyao90/SDN_DDoS_Simulation
[ "f148bf6b11e039a77ccb7a9dc015083941e428ce" ]
[ "networks/critic.py" ]
[ "from keras.layers import Dense, Input, merge\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nimport keras.backend as keras_backend\nimport tensorflow\n\n\nclass CriticNetwork(object):\n def __init__(self, tensorflow_session, state_size, action_size,\n hidden_units=(300, 600), learning_rate=0.0001, batch_size=64,\n tau=0.001):\n \"\"\"\n Constructor for the Actor network\n\n :param tensorflow_session: The tensorflow session.\n See https://www.tensorflow.org for more information on tensorflow\n sessions.\n :param state_size: An integer denoting the dimensionality of the states\n in the current problem\n :param action_size: An integer denoting the dimensionality of the\n actions in the current problem\n :param hidden_units: An iterable defining the number of hidden units in\n each layer. Soon to be depreciated. default: (300, 600)\n :param learning_rate: A fload denoting the speed at which the network\n will learn. default: 0.0001\n :param batch_size: An integer denoting the batch size. default: 64\n :param tau: A flot denoting the rate at which the target model will\n track the main model. Formally, the tracking function is defined as:\n\n target_weights = tau * main_weights + (1 - tau) * target_weights\n\n for more explanation on how and why this happens,\n please refer to the DDPG paper:\n\n Lillicrap, Hunt, Pritzel, Heess, Erez, Tassa, Silver, & Wiestra.\n Continuous Control with Deep Reinforcement Learning. arXiv preprint\n arXiv:1509.02971, 2015.\n\n default: 0.001\n \"\"\"\n # Store parameters\n self._tensorflow_session = tensorflow_session\n self._batch_size = batch_size\n self._tau = tau\n self._learning_rate = learning_rate\n self._hidden = hidden_units\n\n # Let tensorflow and keras work together\n keras_backend.set_session(tensorflow_session)\n\n # Generate the main model\n self._model, self._state_input, self._action_input = \\\n self._generate_model()\n # Generate carbon copy of the model so that we avoid divergence\n self._target_model, self._target_weights, self._target_state = \\\n self._generate_model()\n # gradients for policy update\n self._action_gradients = tensorflow.gradients(self._model.output,\n self._action_input)\n self._tensorflow_session.run(tensorflow.initialize_all_variables())\n\n def get_gradients(self, states, actions):\n \"\"\"\n Returns the gradients.\n :param states:\n :param actions:\n :return:\n \"\"\"\n return self._tensorflow_session.run(self._action_gradients, feed_dict={\n self._state_inputs: states,\n self._action_input: actions\n })[0]\n\n def train_target_model(self):\n \"\"\"\n Updates the weights of the target network to slowly track the main\n network.\n\n The speed at which the target network tracks the main network is\n defined by tau, given in the constructor to this class. 
Formally,\n the tracking function is defined as:\n\n target_weights = tau * main_weights + (1 - tau) * target_weights\n\n :return: None\n \"\"\"\n main_weights = self._model.get_weights()\n target_weights = self._target_model.get_weights()\n target_weights = [self._tau * main_weight + (1 - self._tau) *\n target_weight for main_weight, target_weight in\n zip(actor_weights, actor_target_weights)]\n self._target_model.set_weights(target_weights)\n\n def _generate_model(self):\n \"\"\"\n Generates the model based on the hyperparameters defined in the\n constructor.\n\n :return: at tuple containing references to the model, state input layer,\n and action input later\n \"\"\"\n state_input_layer = Input(shape=[self._state_size])\n action_input_layer = Input(shape=[self._action_size])\n s_layer = Dense(self._hidden[0], activation='relu')(state_input_layer)\n a_layer = Dense(self._hidden[0], activation='linear')(action_input_layer)\n hidden = Dense(self._hidden[1], activation='linear')(s_layer)\n hidden = merge([hidden, a_layer], mode='sum')\n hidden = Dense(self._hidden[1], activation='relu')(hidden)\n output_layer = Dense(1, activation='linear')(hidden)\n model = Model(input=[state_input_layer, action_input_layer],\n output=output_layer)\n model.compile(loss='mse', optimizer=Adam(lr=self._learning_rate))\n return model, state_input_layer, action_input_layer\n" ]
[ [ "tensorflow.initialize_all_variables", "tensorflow.gradients" ] ]
lfchener/Parakeet
[ "a84b6d3383b2a8a5fb45d0c233bee1ed80d0b389" ]
[ "parakeet/modules/audio.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nfrom scipy import signal\nimport numpy as np\n\n__all__ = [\"quantize\", \"dequantize\", \"STFT\"]\n\n\ndef quantize(values, n_bands):\n \"\"\"Linearlly quantize a float Tensor in [-1, 1) to an interger Tensor in \n [0, n_bands).\n\n Parameters\n -----------\n values : Tensor [dtype: flaot32 or float64]\n The floating point value.\n \n n_bands : int\n The number of bands. The output integer Tensor's value is in the range \n [0, n_bans).\n\n Returns\n ----------\n Tensor [dtype: int 64]\n The quantized tensor.\n \"\"\"\n quantized = paddle.cast((values + 1.0) / 2.0 * n_bands, \"int64\")\n return quantized\n\n\ndef dequantize(quantized, n_bands, dtype=None):\n \"\"\"Linearlly dequantize an integer Tensor into a float Tensor in the range \n [-1, 1).\n\n Parameters\n -----------\n quantized : Tensor [dtype: int]\n The quantized value in the range [0, n_bands).\n \n n_bands : int\n Number of bands. The input integer Tensor's value is in the range \n [0, n_bans).\n \n dtype : str, optional\n Data type of the output.\n \n Returns\n -----------\n Tensor\n The dequantized tensor, dtype is specified by `dtype`. If `dtype` is \n not specified, the default float data type is used.\n \"\"\"\n dtype = dtype or paddle.get_default_dtype()\n value = (paddle.cast(quantized, dtype) + 0.5) * (2.0 / n_bands) - 1.0\n return value\n\n\nclass STFT(nn.Layer):\n \"\"\"A module for computing stft transformation in a differentiable way. \n \n Parameters\n ------------\n n_fft : int\n Number of samples in a frame.\n \n hop_length : int\n Number of samples shifted between adjacent frames.\n \n win_length : int\n Length of the window.\n \n window : str, optional\n Name of window function, see `scipy.signal.get_window` for more \n details. Defaults to \"hanning\".\n \n Notes\n -----------\n It behaves like ``librosa.core.stft``. 
See ``librosa.core.stft`` for more \n details.\n \n Given a audio which ``T`` samples, it the STFT transformation outputs a \n spectrum with (C, frames) and complex dtype, where ``C = 1 + n_fft / 2`` \n and ``frames = 1 + T // hop_lenghth``.\n \n Ony ``center`` and ``reflect`` padding is supported now.\n \n \"\"\"\n\n def __init__(self, n_fft, hop_length, win_length, window=\"hanning\"):\n super(STFT, self).__init__()\n self.hop_length = hop_length\n self.n_bin = 1 + n_fft // 2\n self.n_fft = n_fft\n\n # calculate window\n window = signal.get_window(window, win_length)\n if n_fft != win_length:\n pad = (n_fft - win_length) // 2\n window = np.pad(window, ((pad, pad), ), 'constant')\n\n # calculate weights\n r = np.arange(0, n_fft)\n M = np.expand_dims(r, -1) * np.expand_dims(r, 0)\n w_real = np.reshape(window *\n np.cos(2 * np.pi * M / n_fft)[:self.n_bin],\n (self.n_bin, 1, 1, self.n_fft))\n w_imag = np.reshape(window *\n np.sin(-2 * np.pi * M / n_fft)[:self.n_bin],\n (self.n_bin, 1, 1, self.n_fft))\n\n w = np.concatenate([w_real, w_imag], axis=0)\n self.weight = paddle.cast(\n paddle.to_tensor(w), paddle.get_default_dtype())\n\n def forward(self, x):\n \"\"\"Compute the stft transform.\n\n Parameters\n ------------\n x : Tensor [shape=(B, T)]\n The input waveform.\n\n Returns\n ------------\n real : Tensor [shape=(B, C, 1, frames)] \n The real part of the spectrogram.\n \n imag : Tensor [shape=(B, C, 1, frames)] \n The image part of the spectrogram.\n \"\"\"\n # x(batch_size, time_steps)\n # pad it first with reflect mode\n # TODO(chenfeiyu): report an issue on paddle.flip\n pad_start = paddle.reverse(x[:, 1:1 + self.n_fft // 2], axis=[1])\n pad_stop = paddle.reverse(x[:, -(1 + self.n_fft // 2):-1], axis=[1])\n x = paddle.concat([pad_start, x, pad_stop], axis=-1)\n\n # to BC1T, C=1\n x = paddle.unsqueeze(x, axis=[1, 2])\n out = F.conv2d(x, self.weight, stride=(1, self.hop_length))\n real, imag = paddle.chunk(out, 2, axis=1) # BC1T\n return real, imag\n\n def power(self, x):\n \"\"\"Compute the power spectrum.\n\n Parameters\n ------------\n x : Tensor [shape=(B, T)]\n The input waveform.\n\n Returns\n ------------\n Tensor [shape=(B, C, 1, T)] \n The power spectrum.\n \"\"\"\n real, imag = self(x)\n power = real**2 + imag**2\n return power\n\n def magnitude(self, x):\n \"\"\"Compute the magnitude of the spectrum.\n\n Parameters\n ------------\n x : Tensor [shape=(B, T)]\n The input waveform.\n\n Returns\n ------------\n Tensor [shape=(B, C, 1, T)] \n The magnitude of the spectrum.\n \"\"\"\n power = self.power(x)\n magnitude = paddle.sqrt(power)\n return magnitude\n" ]
[ [ "numpy.concatenate", "numpy.pad", "numpy.sin", "numpy.arange", "scipy.signal.get_window", "numpy.cos", "numpy.expand_dims" ] ]
franpena-kth/learning-deep-learning
[ "9cd287b602dee1358672c4189445721a9c24f107" ]
[ "unif/unif_playground.py" ]
[ "import random\nimport time\n\nimport torch\n\nimport utils\nfrom unif.unif_data import CodeDescDataset\nfrom unif.unif_model import UNIFAttention\nfrom unif.unif_tokenizer import tokenize_data\n\n\ndef load_unif_model():\n load_path = './unif_model.ckpt'\n code_snippets_file = './data/parallel_bodies'\n descriptions_file = './data/parallel_desc'\n train_size = 11\n embedding_size = 128\n dataset = CodeDescDataset(code_snippets_file, descriptions_file, train_size)\n model = UNIFAttention(dataset.code_vocab_size, dataset.desc_vocab_size, embedding_size)\n model.load_state_dict(torch.load(load_path))\n\n code_snippet = dataset.code_snippets[3]\n description = dataset.descriptions[3]\n\n # code_snippet_10 = dataset.code_snippets[10]\n # description_10 = dataset.descriptions[10]\n\n print(code_snippet)\n print(description)\n # print()\n # print(code_snippet_10)\n # print(description_10)\n # print()\n\n tokenized_code_data, code_mask, tokenized_desc_data, desc_mask =\\\n tokenize_data(dataset)\n code_embedding, desc_embedding = model(\n tokenized_code_data, code_mask, tokenized_desc_data, desc_mask)\n\n print(code_embedding[10])\n print(desc_embedding[10])\n\n\ndef main():\n # load_unif_model()\n print(utils.get_best_device())\n\n\nstart = time.time()\nmain()\nend = time.time()\ntotal_time = end - start\nprint(\"%s: Total time = %f seconds\" % (time.strftime(\"%Y/%m/%d-%H:%M:%S\"), total_time))\n" ]
[ [ "torch.load" ] ]
matt-peters/text-to-text-transfer-transformer
[ "614af25d4379c74ea829f4bbfcfcaa13f0a463cf" ]
[ "t5/data/test_utils_test.py" ]
[ "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for asserts.\"\"\"\n\nfrom absl.testing import absltest\nfrom t5.data.test_utils import assert_dataset\nimport tensorflow.compat.v2 as tf\n\ntf.compat.v1.enable_eager_execution()\n\n\n# Note that the b'string' values are for PY3 to interpret as bytes literals,\n# which match the tf.data.Dataset from tensor slices.\nclass TestUtilsTest(absltest.TestCase):\n\n def test_assert_dataset(self):\n first_dataset = tf.data.Dataset.from_tensor_slices(\n {'key1': ['val1'], 'key2': ['val2']})\n\n # Equal\n assert_dataset(first_dataset, {'key1': [b'val1'], 'key2': [b'val2']})\n assert_dataset(first_dataset, {'key1': [b'val1'], 'key2': [b'val2']},\n expected_dtypes={'key1': tf.string})\n\n # Unequal value\n with self.assertRaises(AssertionError):\n assert_dataset(first_dataset, {'key1': [b'val1'], 'key2': [b'val2x']})\n\n # Wrong dtype\n with self.assertRaises(AssertionError):\n assert_dataset(first_dataset, {'key1': [b'val1'], 'key2': [b'val2']},\n expected_dtypes={'key1': tf.int32})\n\n # Additional key, value\n with self.assertRaises(AssertionError):\n assert_dataset(first_dataset,\n {'key1': [b'val1'], 'key2': [b'val2'], 'key3': [b'val3']})\n\n # Additional key, value\n with self.assertRaises(AssertionError):\n assert_dataset(first_dataset,\n {'key1': [b'val1'], 'key2': [b'val2'], 'key3': [b'val3']})\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "tensorflow.compat.v2.compat.v1.enable_eager_execution", "tensorflow.compat.v2.data.Dataset.from_tensor_slices" ] ]
dl4184/alpha-zero-general
[ "7236f62f334cb0609971e3b9a48cb23da39447f3" ]
[ "connect4/Connect4Heuristics.py" ]
[ "import numpy as np\nfrom numba import njit\n\n\ndef heuristic1lookahead(board):\n board = np.copy(board)\n player = 1\n # if np.sum(board) == 0:\n # player = 1\n\n valid_moves = board[0] == 0\n l_len = 4\n for j in range(valid_moves.size):\n if valid_moves[j]:\n\n available_idx, = np.where(board[:, j] == 0)\n board[available_idx[-1]][j] = player\n\n # we look if we can win in one move\n player_pieces = board * player\n\n f1l = straight_lines(player_pieces, l_len, 4)\n f1c = straight_lines(player_pieces, l_len, 4, transpose=True)\n f1d = diagonal_lines(player_pieces, l_len, 4)\n board[available_idx[-1]][j] = 0\n if f1l or f1c or f1d:\n return j\n\n return None\n\n\ndef heuristic1(board):\n player = 1\n # if np.sum(board) == 0:\n # player = 1\n res = np.zeros(7)\n res[heuristic1player(board, player)] = 1\n return res\n\n\ndef heuristic1player(board, player):\n valid_moves = board[0] == 0\n scores = np.zeros(valid_moves.size)\n valid_moves_scores = []\n\n # feature 1\n win_move = -1\n prevent_win_move = -1\n for j in range(valid_moves.size):\n if valid_moves[j]:\n available_idx, = np.where(board[:, j] == 0)\n board[available_idx[-1]][j] = player\n player_score = board_score(board, player, [1])\n if player_score == np.inf:\n win_move = j\n\n board[available_idx[-1]][j] = -player\n enemy_score = board_score(board, -player, [1])\n if enemy_score == np.inf:\n prevent_win_move = j\n board[available_idx[-1]][j] = 0\n\n if win_move != -1:\n return win_move\n\n if prevent_win_move != -1:\n return prevent_win_move\n\n for j in range(valid_moves.size):\n if valid_moves[j]:\n available_idx, = np.where(board[:, j] == 0)\n board[available_idx[-1]][j] = player\n\n scores[j] = board_score(board, player, [2, 3, 4])\n if scores[j] != np.inf:\n board[available_idx[-1]][j] = -player\n scores[j] -= board_score(board, -player, [2, 3, 4])\n\n board[available_idx[-1]][j] = 0 # we undo the move\n valid_moves_scores.append(scores[j])\n else:\n scores[j] = -np.inf\n\n if np.max(scores) == -np.inf:\n return np.argmax(valid_moves)\n\n return np.argmax(scores)\n\n\ndef board_score(board, player, features):\n board = np.copy(board)\n score = 0\n player_pieces = board * player\n shape = board.shape\n l_len = 4\n\n \"\"\"FEATURE 1\"\"\"\n if 1 in features:\n f1l = straight_lines(player_pieces, l_len, 4)\n f1c = straight_lines(player_pieces, l_len, 4, transpose=True)\n f1d = diagonal_lines(player_pieces, l_len, 4)\n\n if f1l or f1c or f1d:\n return np.inf\n\n \"\"\"FEATURE 2\"\"\"\n if 2 in features:\n # inf cases\n f2l = straight_lines(player_pieces, l_len, 3)\n for i, j in f2l:\n if j < shape[1] - 4:\n if playable(board, (i, j)) and playable(board, (i, j + 4)): # left field and right field are playable\n return np.inf\n\n f2d = diagonal_lines(player_pieces, l_len, 3)\n for (i, j), up in f2d:\n if playable(board, (i, j)): # left field is playable\n if j + 4 < shape[1] and shape[0] > i + up * 4 >= 0:\n if playable(board, (i + up * 4, j + 4)):\n return np.inf\n\n # preventable win cases - line\n for i, j in f2l:\n for y in range(j, j + 4):\n if playable(board, (i, y)):\n score += 900000\n break\n # perhaps there should be some reward even if it is not immediately playable\n\n # preventable win cases - column\n\n f2c = [(i, j) for i, j in straight_lines(player_pieces, l_len, 3, transpose=True) if i < shape[0] - 3]\n\n score += 900000 * len(f2c)\n\n # preventable win cases diagonal\n for (i, j), up in f2d:\n for y in range(4):\n if playable(board, (i + up * y, j + y)):\n score += 900000\n break\n\n \"\"\"FEATURE 3\"\"\"\n\n 
if 3 in features:\n # case 1 - line\n f3l = straight_lines(player_pieces, l_len, 2)\n for i, j in f3l:\n if playable(board, (i, j)): # left field is empty\n if playable(board, (i, j + 3)): # right field is empty\n score += 50000\n\n # case 1 - diag\n f3d = diagonal_lines(player_pieces, l_len, 2)\n for (i, j), up in f3d:\n if playable(board, (i, j)): # left field is empty\n if playable(board, (i + up * 3, j + 3)): # right field is empty\n score += 50000\n\n # case 2 - line\n for i, j in f3l:\n if playable(board, (i, j)) and playable(board, (i, j + 1)):\n score += 10000\n for y in range(j - 1, -1, -1):\n if playable(board, (i, y)):\n score += 10000\n else:\n break\n elif playable(board, (i, j + 2)) and playable(board, (i, j + 3)):\n score += 10000\n for y in range(j + 4, shape[1]):\n if playable(board, (i, y)):\n score += 10000\n else:\n break\n\n # case 2 - diag\n for (i, j), up in f3d:\n if playable(board, (i, j)) and playable(board, (i + up, j + 1)):\n score += 10000\n x = i - up\n for y in range(j - 1, -1, -1):\n if 0 <= x < shape[0] and playable(board, (x, y)):\n score += 10000\n x -= up\n else:\n break\n elif playable(board, (i + 2 * up, j + 2)) and playable(board, (i + 3 * up, j + 3)):\n score += 10000\n x = i + 4 * up\n for y in range(j + 4, shape[1]):\n if 0 <= x < shape[0] and playable(board, (x, y)):\n score += 10000\n x += up\n else:\n break\n\n # case 2 - column\n f3c = [(i, j) for i, j in straight_lines(player_pieces, l_len, 2, transpose=True) if i < shape[0] - 3]\n score += len(f3c) * 10000\n\n if 4 in features:\n \"\"\"FEATURE 4 \"\"\"\n for i in range(shape[0]):\n for j in range(shape[1]):\n if player_pieces[i][j] == 1 and is_isolated(player_pieces, (i, j)):\n if j == 0 or j == 6:\n score += 40\n elif j == 1 or j == 5:\n score += 70\n elif j == 2 or j == 4:\n score += 120\n elif j == 3:\n score += 200\n return score\n\n\ndef playable(board, field):\n if board[field[0]][field[1]] != 0:\n return False\n if field[0] == board.shape[0] - 1:\n return True\n if board[field[0] + 1][field[1]] != 0:\n return True\n return False\n\n\ndef straight_lines(player_pieces, l_len, no_pieces, transpose=False):\n if transpose:\n player_pieces = player_pieces.transpose()\n run_lengths = [player_pieces[:, i:i + l_len].sum(axis=1) for i in range(len(player_pieces) - l_len + 2)]\n positions = np.where(np.array(run_lengths) == no_pieces)\n if positions[0].size == 0:\n return []\n\n if not transpose:\n return list(zip(positions[1], positions[0]))\n else:\n return list(zip(positions[0], positions[1]))\n\n\ndef diagonal_lines(player_pieces, l_len, no_pieces):\n results = []\n for i in range(len(player_pieces) - l_len + 1):\n for j in range(len(player_pieces[0]) - l_len + 1):\n if sum(player_pieces[i + x][j + x] for x in range(l_len)) == no_pieces:\n results.append(((i, j), 1))\n for j in range(l_len - 1, len(player_pieces[0])):\n if sum(player_pieces[i + x][j - x] for x in range(l_len)) == no_pieces:\n results.append(((i + 3, j - 3), -1))\n return results\n\n\ndef is_isolated(board, pos):\n i, j = pos\n el = board[i][j]\n b = np.pad(board, pad_width=1, mode='constant', constant_values=2)\n b[i + 1][j + 1] = 3\n\n return np.all(b[i:i + 3, j:j + 3] != el)\n\n\nh = [[3, 4, 5, 3, 5, 4, 3],\n [4, 6, 8, 10, 8, 6, 4],\n [5, 8, 11, 13, 11, 8, 5],\n [5, 8, 11, 13, 11, 8, 5],\n [4, 6, 8, 10, 8, 6, 4],\n [3, 4, 5, 7, 5, 4, 3]]\n\"\"\"\nh = [[1, 1, 1, 3, 1, 1, 1],\n [1, 1, 1, 10, 1, 1, 1],\n [1, 1, 1, 13, 1, 1, 1],\n [1, 1, 1, 13, 1, 1, 1],\n [1, 1, 1, 10, 1, 1, 1],\n [1, 1, 1, 7, 1, 1, 1]]\n\"\"\"\n\n\ndef 
heuristic2(board):\n probs = heuristic2_prob(board)\n res = probs == np.max(probs)\n return res / res.sum()\n\n\n@njit\ndef heuristic2_prob(board):\n h = [[3, 4, 5, 1, 5, 4, 3],\n [4, 6, 8, 13, 8, 6, 4],\n [5, 8, 11, 25, 11, 8, 5],\n [5, 8, 11, 25, 11, 8, 5],\n [4, 6, 8, 25, 8, 6, 4],\n [3, 4, 5, 25, 5, 4, 3]]\n\n fields = last_nonzero(board)\n prob = np.zeros(fields.size)\n for i, j in enumerate(fields):\n if j != -1:\n prob[i] = h[j][i]\n\n return prob / np.linalg.norm(prob, 1)\n\n\n@njit\ndef last_nonzero(arr):\n res = np.ones(arr.shape[1], dtype=np.int_) * (arr.shape[0] - 1)\n for j in range(arr.shape[1]):\n for i in range(arr.shape[0]):\n if arr[i][j] != 0:\n res[j] = i - 1\n break\n return res\n\n\n\"\"\"\ndef last_nonzero(arr):\n mask = arr == 0\n val = arr.shape[0] - np.flip(mask, axis=0).argmax(axis=0) - 1\n return np.where(mask.any(axis=0), val, None)\n\"\"\"\n\n\ndef heuristic3(cannonical_board):\n # we can win the game so we should\n\n res = winnable_move(cannonical_board)\n if res is not None:\n return res\n\n # we can prevent opponent from winning the game so we should play a move to prevent it\n cannonical_board_opponent = cannonical_board * -1\n res = winnable_move(cannonical_board_opponent)\n if res is not None:\n return res\n\n # we mask away the moves that would make us lose\n mask = np.ones(7)\n valid_moves = cannonical_board_opponent[1] == 0\n for j in range(valid_moves.size):\n if valid_moves[j]:\n available_idx, = np.where(cannonical_board_opponent[:, j] == 0)\n played_row = available_idx[-1]\n played_row -= 1\n cannonical_board_opponent[played_row][j] = 1\n if connected_four(cannonical_board_opponent, j, played_row):\n mask[j] = 0\n cannonical_board_opponent[played_row][j] = 0\n\n res = heuristic2_prob(cannonical_board)\n if (res * mask).sum() == 0:\n return res\n\n return res * mask\n\n\ndef winnable_move(cannonical_board):\n valid_moves = cannonical_board[0] == 0\n for j in range(valid_moves.size):\n if valid_moves[j]:\n available_idx, = np.where(cannonical_board[:, j] == 0)\n played_row = available_idx[-1]\n cannonical_board[played_row][j] = 1\n\n if connected_four(cannonical_board, j, played_row):\n res = np.zeros(7)\n res[j] = 1\n cannonical_board[played_row][j] = 0\n return res\n cannonical_board[played_row][j] = 0\n return None\n\n\n@njit\ndef connected_four(cannonical_board, played_col, played_row):\n column = cannonical_board[:, played_col]\n for i in range(3):\n if column[i:i + 4].sum() == 4:\n return True\n\n row = cannonical_board[played_row, :]\n for i in range(4):\n if row[i:i + 4].sum() == 4:\n return True\n\n start_diag_1 = (played_row - min([played_row, played_col]), played_col - min([played_row, played_col]))\n length_diag_1 = min([6 - start_diag_1[0], 7 - start_diag_1[1]])\n\n if length_diag_1 >= 4:\n\n diag_1 = np.array([cannonical_board[start_diag_1[0] + i, start_diag_1[1] + i] for i in range(length_diag_1)])\n for i in range(length_diag_1 - 3):\n if np.all(diag_1[i:i + 4]):\n return True\n\n start_diag_2 = (played_row + min([5 - played_row, played_col]), played_col - min([5 - played_row, played_col]))\n length_diag_2 = min([start_diag_2[0] + 1, 7 - start_diag_2[1]])\n\n if length_diag_2 >= 4:\n diag_2 = np.array([cannonical_board[start_diag_2[0] - i, start_diag_2[1] + i] for i in range(length_diag_2)])\n for i in range(length_diag_2 - 3):\n if np.all(diag_2[i:i + 4]):\n return True\n\n return False\n\n\nif __name__ == \"__main__\":\n\n import time\n\n board = np.array([[-1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [-1, 0, 0, 0, 0, 0, 0],\n 
[1, 0, 0, 0, 0, 0, 0],\n [-1, 0, 0, -1, 0, 0, 0],\n [1, 0, 0, 1, 0, 0, 0]]\n )\n\n \"\"\"\n t = time.time()\n for i in range(repeat):\n last_nonzero2(board)\n print(time.time() - t)\n \n print(last_nonzero(board))\n print(last_nonzero2(board))\n \"\"\"\n\n \"\"\"\n board = np.array([[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n )\n \n \"\"\"\n\n # print(diagonal_lines(board, 4, 3))\n # print(straight_lines(board, 4, 3))\n # print(board_score(board, 1))\n" ]
[ [ "numpy.max", "numpy.pad", "numpy.array", "numpy.linalg.norm", "numpy.zeros", "numpy.copy", "numpy.ones", "numpy.where", "numpy.argmax", "numpy.all" ] ]
anna-guinet/dpa-chi-function
[ "70990b8c7ac715f3bb34540fdf7ce639493e30cf" ]
[ "num_sim_1x5bits_pr_success_to_csv_sq.py" ]
[ "#!/usr/bin/env python\n\n\"\"\" DPA ON KECCAK 5-BIT CHI ROW & 1-BIT K\n\nSave the figures for a simulation which display the outcome of a test\naccording to the SNR, in order to recover kappa bits. \n\nWithin one simulation, we guess 2 bits of kappa, with the scalar product,\nand a fixed noise power consumption vector. \n\"\"\"\n\n__author__ = \"Anna Guinet\"\n__email__ = \"[email protected]\"\n__version__ = \"1.2\"\n\nimport numpy as np\nimport itertools\nimport pandas as pd\nfrom decimal import *\nimport math\nimport random\n\nimport matplotlib.pylab as plt\n\nimport argparse\nimport sys\nimport time\n\ndef noise(mu):\n\t\"\"\"\n\tCompute a noise power consumption vector R for each message mu. \n\n\tParameter:\n\tmu -- list of Bool\n\n\tReturn:\n\tR -- list of integers, length 2^n\n\t\"\"\"\n\tR = []\n\n\t# For each message mu\n\tfor j in range(len(mu)):\n\n\t\t# Activity of noise part\n\t\td = random.gauss(0, 2)\n\n\t\t# R = SUM_i (-1)^d_i\n\t\tR.append(d)\n\n\treturn R\n\ndef gen_key(i, n):\n\t\"\"\"\n\tGenerate all key values for key_(i+2 mod n) and key_(i+1 mod n), \n\tregardless the i-th bit:\n\t- key_(i mod n) = kappa_i XOR K = 0\n\t- key_(i+1 mod n) = kappa_(i+1 mod n)\n\t- key_(i+2 mod n) = kappa_(i+2 mod n)\n\t- key_(i+3 mod n) = 0 if n = 5 \n\t- key_(i+4 mod n) = 0 if n = 5\n\n\tEg: for i = 1, n = 5, key = [[ 0, 0, 0, 0, 0 ], [ 0, 0, 0, 1, 0 ], [ 0, 0, 1, 0, 0 ], [ 0, 0, 1, 1, 0 ]]\n\t\n\tParameters:\n\ti -- integer in [0,n-1]\n\tn -- integer\n\t\n\tReturn:\n\tkey -- list of tuples of length 16\n\t\"\"\"\n\t# All possible values for 2 bits\n\tkey = list(itertools.product([bool(0), bool(1)], repeat=2))\n\n\t# Transform nested tuples into nested lists\n\tkey = [list(j) for j in key]\n\n\t# Insert 'False' as the i-th, (i+3)-th and (i+4)-th positions\n\tif n == 5:\n\t\tkey_extend = [j.insert((i+3) % n, bool(0)) for j in key]\n\t\tkey_extend = [j.insert((i+4) % n, bool(0)) for j in key]\n\n\t# Insert 'False' as the i-th position\n\tkey_extend = [j.insert(i % n, bool(0)) for j in key]\n\t\n\treturn key\n\t\ndef signal_2D(n, i, key, mu):\n\t\"\"\"\n\tGenerate signal power consumption S for all mu.\n\t\n\t'key' is a value such as key = kappa, except for bit i: \n\tkey_i = kappa_i XOR K\n\t\n\tParameters:\n\tn -- integer\n\ti -- integer in [0,n-1]\n\tkey -- list of bool\n\tmu -- 2D list of bool, size n x 2^n\n\t\n\tReturn:\n\tS -- 2D list of integers, size 2^n x 4\n\t\"\"\"\n\tS = []\n\n\t# For each key value\n\tfor l in range(len(key)):\n\n\t\tS_l = []\n\n\t\t# For each message mu\n\t\tfor j in range(len(mu)):\n\t\t\t\n\t\t\t# Activity of the first storage cell of the register\n\t\t\td = key[l][i] ^ mu[j][i] ^ (key[l][(i+1) % n] ^ mu[j][(i+1) % n] ^ bool(1)) & (key[l][(i+2) % n] ^ mu[j][(i+2) % n])\n\n\t\t\t# Power consumption model\n\t\t\tS_lj = (-1)**(d)\n\n\t\t\tS_l.append(S_lj)\n\n\t\tS += [S_l]\n\n\treturn S\n\t\ndef gen_S_ref(mu, n):\n\t\"\"\"\n\tGenerate signal power consumption for every bit i of first chi's row. \n\n\tParameters:\n\tmu -- 2D list of bool, size n x 2^n\n\tn -- integer (default 3)\n\n\tReturn:\n\tS_ref -- 3D list of integers, size 2^n x 2^(n-1) x n\n\t\"\"\"\n\tS_ref = []\n\t\n\tfor i in range(n):\n\t\tkey = gen_key(i, n)\n\t\tS_ref_i = signal_2D(n, i, key, mu)\n\t\tS_ref += [S_ref_i]\n\n\treturn S_ref\n\ndef gen_scalar_init(n, R, S_ref):\n\t\"\"\"\n\tGenerate initial scalar products for the chi row. 
\n\n\tParameters:\n\tn \t -- integer\n\tR \t -- list\n\tS_ref -- list of integers\n\n\tReturn:\n\tscalar_init -- list\n\t\"\"\"\n\tscalar_init = [] \n\n\t# Global power consumption P_init = R\n\tgetcontext().prec = 8\n\tP_init = [Decimal(r) for r in R]\n\n\t# i-th bit of register state studied\n\tfor i in range(n):\n\n\t\t# Scalar product <S-ref, P>\n\t\tscalar_init_i = np.dot(S_ref[i], P_init)\n\t\tscalar_init += [scalar_init_i]\n\n\treturn scalar_init\n\ndef kappa_idx():\n\t\"\"\"\n\tProvide scalar products indexes for kappa values.\n\n\tReturn:\n\tlist_kappa_idx -- list of lists\n\t\"\"\"\n\tlist_kappa_idx00 = [[0, 0, 0, 0, 0, '00000'],\n\t\t\t\t\t\t[0, 0, 1, 1, 0, '00001'],\n\t\t\t\t\t\t[0, 1, 2, 0, 0, '00010'],\n\t\t\t\t\t\t[0, 1, 3, 1, 0, '00011'],\n\t\t\t\t\t\t[1, 2, 0, 0, 0, '00100'],\n\t\t\t\t\t\t[1, 2, 1, 1, 0, '00101'],\n\t\t\t\t\t\t[1, 3, 2, 0, 0, '00110'],\n\t\t\t\t\t\t[1, 3, 3, 1, 0, '00111']]\n\n\tlist_kappa_idx01 = [[2, 0, 0, 0, 1, '01000'],\n\t\t\t\t\t\t[2, 0, 1, 1, 1, '01001'],\n\t\t\t\t\t\t[2, 1, 2, 0, 1, '01010'],\n\t\t\t\t\t\t[2, 1, 3, 1, 1, '01011'],\n\t\t\t\t\t\t[3, 2, 0, 0, 1, '01100'],\n\t\t\t\t\t\t[3, 2, 1, 1, 1, '01101'],\n\t\t\t\t\t\t[3, 3, 2, 0, 1, '01110'],\n\t\t\t\t\t\t[3, 3, 3, 1, 1, '01111']]\n\n\tlist_kappa_idx10 = [[0, 0, 0, 2, 2, '10000'],\n\t\t\t\t\t\t[0, 0, 1, 3, 2, '10001'],\n\t\t\t\t\t\t[0, 1, 2, 2, 2, '10010'],\n\t\t\t\t\t\t[0, 1, 3, 3, 2, '10011'],\n\t\t\t\t\t\t[1, 2, 0, 2, 2, '10100'],\n\t\t\t\t\t\t[1, 2, 1, 3, 2, '10101'],\n\t\t\t\t\t\t[1, 3, 2, 2, 2, '10110'],\n\t\t\t\t\t\t[1, 3, 3, 3, 2, '10111']]\n\t\t\n\tlist_kappa_idx11 = [[2, 0, 0, 2, 3, '11000'],\n\t\t\t\t\t\t[2, 0, 1, 3, 3, '11001'],\n\t\t\t\t\t\t[2, 1, 2, 2, 3, '11010'],\n\t\t\t\t\t\t[2, 1, 3, 3, 3, '11011'],\n\t\t\t\t\t\t[3, 2, 0, 2, 3, '11100'],\n\t\t\t\t\t\t[3, 2, 1, 3, 3, '11101'],\n\t\t\t\t\t\t[3, 3, 2, 2, 3, '11110'],\n\t\t\t\t\t\t[3, 3, 3, 3, 3, '11111']]\n\n\tlist_kappa_idx = list_kappa_idx00 + list_kappa_idx01 + list_kappa_idx10 + list_kappa_idx11\n\n\treturn list_kappa_idx\n\ndef xor_secret_i(K, kappa, i):\n\t\"\"\"\n\tCompute power consumption at bit i = kappa_i XOR K_i\n\n\tParameters:\n\tkappa -- string\n\tK \t -- string\n\ti \t -- integer\n\n\tReturn:\n\tp_i -- float\n\t\"\"\"\n\t# Transform string into list of booleans\n\tkappa = list(kappa)\n\tkappa = [bool(int(j)) for j in kappa]\n\t\n\tK = list(K)\n\tK = [bool(int(j)) for j in K]\n\t\n\t# Initialize new kappa values\n\tkappa_copy = kappa.copy()\n\tK_copy = K.copy()\n\t\n\t# XOR at indice i\n\td = K_copy[i] ^ kappa_copy[i]\n\t\n\t# Power consumption at i-th bit\n\tp_i = (-1)**d\n\n\treturn p_i\n\ndef xor_solution(n, K, kappa, solution_init):\n\t\"\"\"\n\tSign scalar products for solution function.\n\n\tParameters:\n\tn \t\t\t -- integer\n\tK \t\t\t -- string\n\tkappa \t\t -- string\n\tsolution_init -- list of Decimal\n\n\tReturn:\n\tsolution_init -- list of Decimal\n\t\"\"\"\t\n\tfor i in range(n):\n\t\tp_i = xor_secret_i(K, kappa, i)\n\n\t\tsolution_init[i] = solution_init[i] * p_i\n\ndef xor_common_i(i, b, n, K, kappa, common_init):\n\t\"\"\"\n\tSign scalar products for functions with b common scalar products.\n\n\tParameters:\n\ti \t\t\t-- integer\n\tb \t\t\t-- integer\n\tn \t\t\t-- integer\n\tK \t\t\t-- string\n\tkappa \t\t-- string\n\tcommon_init -- list of Decimal\n\n\tReturn:\n\tcommon_init -- list of Decimal\n\t\"\"\"\t\n\tfor j in range(i, i + b):\n\t\tp_j = xor_secret_i(K, kappa, (j % n))\n\n\t\tcommon_init[(j % n)] = common_init[(j % n)] * p_j\n\n\treturn common_init\n\ndef xor_common(b, n, K, kappa, 
common_init):\n\t\"\"\"\n\tSign iteratively initial scalar products \n\tfor functions with b common scalar products.\n\n\tParameters:\n\tb \t\t\t-- integer\n\tn \t\t\t-- integer\n\tK \t\t\t-- string\n\tkappa \t\t-- string\n\tcommon_init -- list of Decimal\n\n\tReturn:\n\tNone\n\t\"\"\"\t\n\t# Common4b or common3b\n\tif len(common_init) == 5:\n\n\t\tfor j in range(len(common_init)):\n\t\t\t\tcommon_init[j] = xor_common_i(j, b, n, K, kappa, common_init[j])\n\n\t# Common2b\n\tif len(common_init) == 10:\n\t\tfor j in range(5):\n\t\t\tfor l in range(2):\n\t\t\t\tcommon_init[2*j + l] = xor_common_i(j, b, n, K, kappa, common_init[2*j + l])\n\ndef find_w0(solution_init, norm):\n\t\"\"\"\n\tFind the fiber points for each DoM scalar product function.\n\n\tParameters:\n\tnorm \t\t -- integer\n\tsolution_init -- list of Decimal\n\n\tReturn:\n\tlist_tup_w0 -- list of Decimal\n\t\"\"\"\t\n\tlist_tup_w0 = []\n\n\tfor a in solution_init:\n\t\tif (a != norm):\n\t\t\tw0 = a / (a - Decimal(norm))\n\n\t\t\tif (w0 > 0) & (w0 < 1):\n\t\t\t\tlist_tup_w0.append((w0, a))\n\t\t\telse:\n\t\t\t\tlist_tup_w0.append((0, a)) # by default 0 for the loop afterwards\n\t\telse:\n\t\t\tlist_tup_w0.append((0, a)) # by default 0 for the loop afterwards\n\t\n\treturn list_tup_w0 #(w0, scalar_product)\n\ndef find_idx_common4b(i, kappa, list_kappa_idx, solution_idx):\n\t\"\"\"\n\tFind the scalar products for common function with 4 solution consecutive kappas. \n\n\tParameters:\n\ti \t\t\t -- integer\n\tkappa \t\t -- string\n\tlist_kappa_idx -- list\n\tsolution_idx -- list of Decimal\n\n\tReturn:\n\tcommon4b_idx -- list\n\t\"\"\"\t\n\tcommon4b_idx = [sublist for sublist in list_kappa_idx if (sublist[i % 5] == solution_idx[i % 5]) and (sublist[(i + 1) % 5] == solution_idx[(i + 1) % 5]) and (sublist[(i + 2) % 5] == solution_idx[(i + 2) % 5]) and (sublist[5] != kappa)]\n\t\n\t# Un-nest the previous list\n\tcommon4b_idx = list(itertools.chain.from_iterable(common4b_idx)) \n\n\treturn common4b_idx\n\ndef find_idx_common3b(i, list_kappa_idx, solution_idx):\n\t\"\"\"\n\tFind the scalar products for common function with 3 solution consecutive kappas. \n\n\tParameters:\n\ti \t\t\t -- integer\n\tlist_kappa_idx -- list\n\tsolution_idx -- list of Decimal\n\n\tReturn:\n\tcommon3b_idx -- list\n\t\"\"\"\t\n\tcommon3b_idx = [sublist for sublist in list_kappa_idx if (sublist[(i - 1) % 5] != solution_idx[(i - 1) % 5]) and (sublist[i % 5] == solution_idx[i % 5]) and (sublist[(i + 1) % 5] == solution_idx[(i + 1) % 5]) and (sublist[(i + 2) % 5] != solution_idx[(i + 2) % 5])]\n\n\t# Un-nest the previous list\n\tcommon3b_idx = list(itertools.chain.from_iterable(common3b_idx)) \n\n\treturn common3b_idx\n\ndef find_idx_common2b(i, list_kappa_idx, solution_idx):\n\t\"\"\"\n\tFind the scalar products for common function with 2 solution consecutive kappas. \n\n\tParameters:\n\ti \t\t\t -- integer\n\tlist_kappa_idx -- list\n\tsolution_idx -- list of Decimal\n\n\tReturn:\n\tcommon2b_idx0 -- list\n\tcommon2b_idx1 -- list\n\t\"\"\"\t\n\tcommon2b_idx = [sublist for sublist in list_kappa_idx if (sublist[(i - 1) % 5] != solution_idx[(i - 1) % 5]) and (sublist[i % 5] == solution_idx[i % 5]) and (sublist[(i + 1) % 5] != solution_idx[(i + 1) % 5])]\n\n\tcommon2b_idx0 = common2b_idx[0]\n\tcommon2b_idx1 = common2b_idx[1]\n\n\treturn common2b_idx0, common2b_idx1\n\ndef find_idx_nonsol(list_kappa_idx, solution_idx):\n\t\"\"\"\n\tFind the scalar products for nonsolutions. 
\n\n\tParameters:\n\tlist_kappa_idx -- list\n\tsolution_idx -- list of Decimal\n\n\tReturn:\n\tnonsol_idx -- list\n\t\"\"\"\t\n\tnonsol_idx = [sublist for sublist in list_kappa_idx if (sublist[0] != solution_idx[0]) and (sublist[1] != solution_idx[1]) and (sublist[2] != solution_idx[2]) and (sublist[3] != solution_idx[3]) and (sublist[4] != solution_idx[4]) and (sublist[4] != solution_idx[4])]\n\n\treturn nonsol_idx\n\ndef find_init(n, init_scalar, list_idx):\n\t\"\"\"\n\tFind the list scalar products for functions from init_scalar. \n\n\tParameters:\n\tn \t\t\t-- integer\n\tinit_scalar -- list of Decimal\n\tlist_idx \t-- list of list\n\n\tReturn:\n\tlist_init -- list of Decimal\n\t\"\"\"\t\n\tlist_init = []\n\n\tfor j in range(len(list_idx)):\n\t\tinit = [init_scalar[i][list_idx[j][i]] for i in range(n)]\n\t\tlist_init.append(init)\n\n\treturn list_init\n\ndef find_sq_w0(solution_init, norm):\n\t\"\"\"\n\tFind the fiber points for each DoM scalar product function.\n\t\n\tParameters:\n\tnorm \t\t -- Decimal\n\tsolution_init -- list of Decimal\n\n\tReturn:\n\tlist_tup_w0 -- list of Decimal\n\t\"\"\"\t\n\tlist_tup_w0 = []\n\n\tfor a in solution_init:\n\t\tif (a != norm):\n\t\t\tw0 = a / (a - norm)\n\n\t\t\tif (w0 > 0) & (w0 < 1):\n\t\t\t\tlist_tup_w0.append((w0, a))\n\t\t\telse:\n\t\t\t\tlist_tup_w0.append((0, a)) # by default 0 for the loop afterwards\n\t\telse:\n\t\t\tlist_tup_w0.append((0, a)) # by default 0 for the loop afterwards\n\t\n\treturn list_tup_w0 #(w0, scalar_product)\n\ndef find_wr_nonsol(norm, solution_init, nonsol_init):\n\t\"\"\"\n\tFind the point when for the scalar product of the solution \n\tequals the scalar product of a nonsolutions.\n\t\n\tF(w) = (A - D) w^2 + 2 (B + D) w + (C - D) = 0 for which w? \n\t\n\tParameters:\n\tnorm \t\t -- Decimal\n\tsolution_init -- list of Decimal\n\tnonsol_init -- list of Decimal\n\t\n\tReturn:\n\tw1 -- Decimal\n\tw2 -- Decimal\n\t\"\"\"\n\ta0 = solution_init[0]\n\ta1 = solution_init[1]\n\ta2 = solution_init[2]\n\ta3 = solution_init[3]\n\ta4 = solution_init[4]\n\n\tb = nonsol_init[0]\n\tc = nonsol_init[1]\n\td = nonsol_init[2]\n\te = nonsol_init[3]\n\tf = nonsol_init[4]\n\n\t# A = SUM (norm - ai)^2\n\tA = (norm - a0)**2 + (norm - a1)**2 + (norm - a2)**2 + (norm - a3)**2 + (norm - a4)**2\n\n\t# B = SUM (aj (norm - ai)\n\tB = a0 * (norm - a0) + a1 * (norm - a1) + a2 * (norm - a2) + a3 * (norm - a3) + a4 * (norm - a4)\n\n\t# C = SUM aj^2\n\tC = a0**2 + a1**2 + a2**2 + a3**2 + a4**2\n\t\n\t# D = b^2 + c^2 + d^2 + e^2 + f^2\n\tD = b**2 + c**2 + d**2 + e**2 + f**2\n\t\n\t# discriminant = 4 (B + D)^2 - 4 (A - D) (C - D)\n\tdiscriminant = Decimal(4) * ((B + D)**2) - (Decimal(4) * (A - D) * (C - D))\n\t\n\tw1 = Decimal()\n\tw2 = Decimal()\n\t\n\tif (A != D) and (discriminant >= 0):\n\t\t\n\t\tsqrt_discriminant = Decimal(discriminant).sqrt()\n\t\n\t\t# w1 = - 2(B + D) + sqrt_discriminant / 2 (A - D)\n\t\tw1 = (- Decimal(2) * (B + D) + sqrt_discriminant) / (Decimal(2) * (A - D))\n\n\t\t# w2 = - 2(B + D) - sqrt_discriminant / 2 (A - D)\n\t\tw2 = (- Decimal(2) * (B + D) - sqrt_discriminant) / (Decimal(2) * (A - D))\n\t\t\n\telse:\n\t\tw1 = None\n\t\tw2 = None\n\n\treturn w1, w2\n\ndef find_wr_common4(i, n, norm, solution_init, common4b_init):\n\t\"\"\"\n\tFind the point when for the scalar product of the solution\n\tequals the scalar product of an common guess with 4 consecutive kappa bits.\n\t\n\tF(w) = (A - E) w^2 + 2 (B - F) w + (C - G) = 0 for which w? 
\n\t\n\tParameter:\n\ti \t\t\t -- integer\n\tn \t\t\t -- integer\n\tnorm \t\t -- Decimal\n\tsolution_init -- list of Decimal\n\tcommon4b_init -- list of Decimal\n\t\n\tReturn:\n\tw1 -- Decimal\n\tw2 -- Decimal\n\t\"\"\"\n\ta0 = solution_init[0]\n\ta1 = solution_init[1]\n\ta2 = solution_init[2]\n\ta3 = solution_init[3]\n\ta4 = solution_init[4]\n\n\tai = common4b_init[i % n]\n\tai1 = common4b_init[(i + 1) % n]\n\tai2 = common4b_init[(i + 2) % n]\n\tb = common4b_init[(i + 3) % n]\n\tc = common4b_init[(i + 4) % n]\n\n\t# A = SUM (norm - ai)^2\n\tA = (norm - a0)**2 + (norm - a1)**2 + (norm - a2)**2 + (norm - a3)**2 + (norm - a4)**2\n\n\t# B = SUM (aj (norm - ai)\n\tB = a0 * (norm - a0) + a1 * (norm - a1) + a2 * (norm - a2) + a3 * (norm - a3) + a4 * (norm - a4)\n\n\t# C = SUM aj^2\n\tC = a0**2 + a1**2 + a2**2 + a3**2 + a4**2\n\t\n\t# E = SUM (norm - ai)^2\n\tE = (norm - ai)**2 + (norm - ai1)**2 + (norm - ai2)**2 + b**2 + c**2\n\n\t# F = SUM (aj (norm - ai)\n\tF = ai * (norm - ai) + ai1 * (norm - ai1) + ai2 * (norm - ai2) - b**2 - c**2\n\n\t# G = ai^2 + ai1^2 + ai2^2 + b^2 + c^2\n\tG = ai**2 + ai1**2 + ai2**2 + b**2 + c**2\n\n\t# discriminant = 4 (B - F)^2 - 4 (A - E) (C - G)\n\tdiscriminant = Decimal(4) * ((B - F)**2) - (Decimal(4) * (A - E) * (C - G))\n\t\n\tw1 = Decimal()\n\tw2 = Decimal()\n\t\n\tif (A != E) and (discriminant >= 0):\n\t\t\n\t\tsqrt_discriminant = Decimal(discriminant).sqrt()\n\t\n\t\t# w1 = - 2(B - F) + sqrt_discriminant / 2 (A - E)\n\t\tw1 = (- Decimal(2) * (B - F) + sqrt_discriminant) / (Decimal(2) * (A - E))\n\n\t\t# w2 = - 2(B - F) - sqrt_discriminant / 2 (A - E)\n\t\tw2 = (- Decimal(2) * (B - F) - sqrt_discriminant) / (Decimal(2) * (A - E))\n\t\t\n\telse:\n\t\tw1 = None\n\t\tw2 = None\n\n\treturn w1, w2\n\ndef find_wr_common3(i, n, norm, solution_init, common3b_init):\n\t\"\"\"\n\tFind the point when for the scalar product of the solution\n\tequals the scalar product of an common guess with 3 consecutive kappa bits.\n\t\n\tF(w) = (A - H) w^2 + 2 (B - I) w + (C - J) = 0 for which w? 
\n\t\n\tParameter:\n\ti \t\t\t -- integer\n\tn \t\t\t -- integer\n\tnorm \t\t -- Decimal\n\tsolution_init -- list of Decimal\n\tcommon3b_init -- list of Decimal\n\t\n\tReturn:\n\tw1 -- Decimal\n\tw2 -- Decimal\n\t\"\"\"\n\ta0 = solution_init[0]\n\ta1 = solution_init[1]\n\ta2 = solution_init[2]\n\ta3 = solution_init[3]\n\ta4 = solution_init[4]\n\n\tai = common3b_init[i % n]\n\tai1 = common3b_init[(i + 1) % n]\n\tb = common3b_init[(i + 2) % n]\n\tc = common3b_init[(i + 3) % n]\n\td = common3b_init[(i + 4) % n]\n\n\t# A = SUM (norm - ai)^2\n\tA = (norm - a0)**2 + (norm - a1)**2 + (norm - a2)**2 + (norm - a3)**2 + (norm - a4)**2\n\n\t# B = SUM (aj (norm - ai)\n\tB = a0 * (norm - a0) + a1 * (norm - a1) + a2 * (norm - a2) + a3 * (norm - a3) + a4 * (norm - a4)\n\n\t# C = SUM aj^2\n\tC = a0**2 + a1**2 + a2**2 + a3**2 + a4**2\n\t\n\t# H = (norm - ai)^2 + (norm - ai1)^2 + b^2 + c^2 + d^2\n\tH = (norm - ai)**2 + (norm - ai1)**2 + b**2 + c**2 + d**2\n\n\t# I = ai(norm - ai) + ai1(norm - ai1) - b^2 - c^2 - d^2\n\tI = (ai * (norm - ai) + ai1 * (norm - ai1) - b**2 - c**2 - d**2)\n\n\t# J = ai^2 + ai1^2 + b^2 + c^2 + d^2\n\tJ = ai**2 + ai1**2 + b**2 + c**2 + d**2\n\n\t# discriminant = 4 (B - F)^2 - 4 (A - E) (C - G)\n\tdiscriminant = Decimal(4) * ((B - I)**2) - (Decimal(4) * (A - H) * (C - J))\n\t\n\tw1 = Decimal()\n\tw2 = Decimal()\n\t\n\tif (A != H) and (discriminant >= 0):\n\t\t\n\t\tsqrt_discriminant = Decimal(discriminant).sqrt()\n\t\n\t\t# w1 = - 2(B - I) + sqrt_discriminant / 2 (A - H)\n\t\tw1 = (- Decimal(2) * (B - I) + sqrt_discriminant) / (Decimal(2) * (A - H))\n\n\t\t# w2 = - 2(B - I) - sqrt_discriminant / 2 (A - H)\n\t\tw2 = (- Decimal(2) * (B - I) - sqrt_discriminant) / (Decimal(2) * (A - H))\n\t\t\n\telse:\n\t\tw1 = None\n\t\tw2 = None\n\n\treturn w1, w2\n\ndef find_wr_common2(i, n, norm, solution_init, common2b_init):\n\t\"\"\"\n\tFind the point when for the scalar product of the solution\n\tequals the scalar product of an common guess with 2 consecutive kappa bits.\n\t\n\tF(w) = (A - K) w^2 + 2 (B - L) w + (C - M) = 0 for which w? 
\n\t\n\tParameter:\n\ti \t\t\t -- integer\n\tn \t\t\t -- integer\n\tnorm \t\t -- Decimal\n\tsolution_init -- list of Decimal\n\tcommon2b_init -- list of Decimal\n\t\n\tReturn:\n\tw1 -- Decimal\n\tw2 -- Decimal\n\t\"\"\"\n\ta0 = solution_init[0]\n\ta1 = solution_init[1]\n\ta2 = solution_init[2]\n\ta3 = solution_init[3]\n\ta4 = solution_init[4]\n\n\tai = common2b_init[i % n]\n\tb = common2b_init[(i + 1) % n]\n\tc = common2b_init[(i + 2) % n]\n\td = common2b_init[(i + 3) % n]\n\te = common2b_init[(i + 4) % n]\n\n\t# A = SUM (norm - ai)^2\n\tA = (norm - a0)**2 + (norm - a1)**2 + (norm - a2)**2 + (norm - a3)**2 + (norm - a4)**2\n\n\t# B = SUM (aj (norm - ai)\n\tB = a0 * (norm - a0) + a1 * (norm - a1) + a2 * (norm - a2) + a3 * (norm - a3) + a4 * (norm - a4)\n\n\t# C = SUM aj^2\n\tC = a0**2 + a1**2 + a2**2 + a3**2 + a4**2\n\t\n\t# K = (norm - ai)^2 + b^2 + c^2 + d^2 + e^2\n\tK = (norm - ai)**2 + b**2 + c**2 + d**2 + e**2\n\n\t# L = ai(norm - ai) - b^2 - c^2 - d^2 - e^2\n\tL = (ai * (norm - ai) - b**2 - c**2 - d**2 - e**2)\n\n\t# M = ai^2 + b^2 + c^2 + d^2 + e^2\n\tM = ai**2 + b**2 + c**2 + d**2 + e**2\n\n\t# discriminant = 4 (B - L)^2 - 4 (A - K) (C - M)\n\tdiscriminant = Decimal(4) * ((B - L)**2) - (Decimal(4) * (A - K) * (C - M))\n\t\n\tw1 = Decimal()\n\tw2 = Decimal()\n\t\n\tif (A != K) and (discriminant >= 0):\n\n\t\tsqrt_discriminant = Decimal(discriminant).sqrt()\n\n\t\t# w1 = - 2(B - L) + sqrt_discriminant / 2 (A - K)\n\t\tw1 = (- Decimal(2) * (B - L) + sqrt_discriminant) / (Decimal(2) * (A - K))\n\n\t\t# w2 = - 2(B - L) - sqrt_discriminant / 2 (A - K)\n\t\tw2 = (- Decimal(2) * (B - L) - sqrt_discriminant) / (Decimal(2) * (A - K))\n\t\t\n\telse:\n\t\tw1 = None\n\t\tw2 = None\n\n\treturn w1, w2\n\ndef append_wr(j, value, interval, wr):\n\t\"\"\"\n\tAppend value to wr if in interval. 
\n\n\tParameters:\n\tj \t\t -- integer\n\tvalue \t -- Decimal\n\tinterval -- list\n\twr \t\t -- list of Decimal\n\n\tReturn: None\n\t\"\"\"\n\tif (value != None) and (value > interval[0]) and (value < interval[1]):\n\t\twr.append((value, 'wr%s' % j))\n\ndef find_rank(wr):\n\t\"\"\"\n\tReturn the list of ranks for the solution kappa.\n\n\tParameter:\n\twr -- list of Decimal\n\n\tReturn:\n\trank -- list of integer\n\t\"\"\"\n\n\t# List of ranks\n\trank = []\n\n\t# If the list is not empty, retrieve the rank in [0,1]\n\tif wr:\n\n\t\t# Count number of rank increment ('wr1') \n\t\t# and rank decrement ('wr2')\n\t\tcounter_1 = sum(1 for tuple in wr if tuple[1] == ('wr1'))\n\t\tcounter_2 = sum(-1 for tuple in wr if tuple[1] == ('wr2'))\n\t\tnum_wr = counter_1 + counter_2\n\n\t\trank_init = 1 + num_wr\n\n\t\trank.append(rank_init)\n\n\t\tfor tuple in wr:\n\n\t\t\t# If 'wr1', rank increases\n\t\t\tif tuple[1] == ('wr1'):\n\t\t\t\trank.append(rank[-1] - 1)\n\t\t\t\t\n\t\t\t# If 'wr2', rank decreases\n\t\t\tif tuple[1] == ('wr2'):\n\t\t\t\trank.append(rank[-1] + 1)\n\telse:\n\t\trank = [1]\n\t\t\n\treturn rank, wr\n\ndef compute_rank_wr(wr):\n\t\"\"\"\n\tCompute intervals for each ranks in order to plot them.\n\n\tParameter:\n\twr -- list of Decimal\n\n\tReturn:\n\trank_wr -- list of tuples\n\t\"\"\"\n\n\t# Transform Decimal into float\n\tgetcontext().prec = 6\n\twr = [(float(w), string) for w, string in wr]\n\t\t\n\t# Sort by w\n\twr.sort(key=lambda x: x[0])\n\t\t\t\n\trank, wr = find_rank(wr)\n\t\n\t# Expand the lists for plotting\n\trank = [r for r in zip(rank, rank)]\n\twr = [i for i in zip(wr, wr)]\n\t\t\t\n\t# Un-nest the previous lists\n\trank = list(itertools.chain.from_iterable(rank))\n\twr = list(itertools.chain.from_iterable(wr))\n\t\t\t\n\t# Extract abscissas for intersection points\n\twr = [tuple[0] for tuple in wr]\n\twr.insert(0, 0)\n\twr.append(1)\n\t\t\n\t# Cut the edges\n\trank = rank[1:-1]\n\twr = wr[1:-1]\n\n\t# w_i = (-1)^i * w_i\n\t# To determine the intervals of the rank values:\n\t# - negative value is the beginning of the interval from the origin\n\t# - positive value is the end of the interval from the origin\n\twr = [((-1)**i)*wr[i] for i in range(len(wr))]\n\t\t\n\t# Group to save and manipulate later on\n\trank_wr = [i for i in zip(rank, wr)]\n\n\treturn rank_wr\n\ndef pr_success(df, pr_end, num_sim):\n\t\"\"\"\n\tComputes probability of success for a w value regarding the rank in order to plot it.\n\tCount down from the max w value to the min one the probability of success\n\taccording to the number of simulations (pr_sim).\n\tTherefore, we need to specify the end probability of success for a rank (pr_end).\n\n\tParameters:\n\tdf \t\t-- DataFrame\n\tpr_end -- integer\n\tnum_sim -- integer\n\n\tReturn:\n\tdf -- DataFrame\n\t\"\"\"\n\t# Probability of a simulation\n\tpr_sim = 1 / num_sim\n\n\t# Add a new pr_sucess column with initial value = 1 for rank 1 and = 0 for lower ranks. 
\n\tdf.loc[0, 'pr_success'] = df.loc[0, 'wr']\n\tdf.loc[0, 'pr_success'] = pr_end\n\t\n\t# Compute probabilities of success\n\tfor i in range(1, len(df)):\n\t\tif df.at[i, 'wr'] < 0:\n\t\t\tdf.loc[i, 'pr_success'] = df.loc[i-1, 'pr_success'] - pr_sim\n\t\telse:\n\t\t\tdf.loc[i, 'pr_success'] = df.loc[i-1, 'pr_success'] + pr_sim\n\n\t# Round float in pr_success column\n\tdecimals = 8 \n\tdf['pr_success'] = df['pr_success'].apply(lambda x: round(x, decimals))\n\n\t# Absolute value\n\tdf['wr'] = df['wr'].abs()\n\n\treturn df\n\ndef list_to_frames(num_rank, rank_wr_list):\n\t\"\"\"\n\tTransform list in Dataframes for plotting.\n\n\tParameters:\n\tnum_rank \t -- integer\n\trank_wr_list -- list of Decimal\n\n\tReturn:\n\tframes -- list of Dataframes\n\t\"\"\"\n\t# Prepare dataset by sorting by rank first\n\trank_wr_list = sorted(rank_wr_list)\n\trank_wr_df = pd.DataFrame(rank_wr_list, columns=['rank', 'wr'])\n\n\t# Drop rows with zero values for w\n\trank_wr_df = rank_wr_df[(rank_wr_df != 0).all(1)]\n\n\t# Sort by absolute value, descending order, for each rank\n\tframes = []\n\tfor rank in range(1, num_rank + 1):\n\t\tdf = rank_wr_df.loc[rank_wr_df['rank'] == rank]\n\t\tdf = df.iloc[(-df['wr'].abs()).argsort()].reset_index(drop=True)\n\t\tframes.append(df)\n\n\treturn frames\n\ndef create_fig(K, kappa, frames):\n\t\"\"\"\n\tCreate figure to plot or to save\n\n\tParameters:\n\tK \t -- string\n\tkappa -- string\n\tframes -- list of DataFrames\n\n\tReturn: None\n\t\"\"\"\n\tfig = plt.figure()\n\trank = 1\n\tfor frame in frames:\n\t\tplt.plot(frame['wr'], frame['pr_success'], '-', markersize=2, label='rank %s' % (rank))\n\t\trank += 1\n\n\tplt.legend(loc='upper right')\n\tplt.title(r'Combined DoM with squared scalar product | K=%s & kappa=%s' %(K, kappa))\n\tplt.xlabel('Weight w')\n\tplt.ylabel('Probability of success')\n\ndef write_csv(K, kappa, num_sim, frames):\n\t\"\"\"\n\tWrite Dataframes in CSV files.\n\n\tParameters:\n\tK \t\t-- string\n\tkappa \t-- string\n\tnum_sim -- integer\n\tframes -- list of DataFrames\n\n\tReturn: None\n\t\"\"\"\n\tnum_rank = 1\n\tfor df in frames:\n\t\tfile_name = r'./csv/1x5bits_sq_num-sim=%s_K=%s_kappa=%s_rank%s.csv' % (num_sim, K, kappa, num_rank)\n\t\tdf.to_csv(file_name, encoding='utf-8', index=False)\n\t\tnum_rank += 1\n\ndef sim_1x5bits(K, kappa, num_sim):\n\t\"\"\"\n\tCompute simulation scalar products and \n\treturn a list of ranks and intersection values for several simulations. 
\n\t\n\tParameters:\n\tK \t\t-- string\n\tkappa -- string\n\tnum_sim -- integer\n\t\n\tReturn:\n\trank_wr_list -- list of tuples\n\t\"\"\"\n\t# Length of signal part\n\tn = 5\n\n\t# Message mu\n\tmu = list(itertools.product([bool(0), bool(1)], repeat=n))\n\tnorm = Decimal(2**n)\n\n\t# Signal reference vectors\n\tS_ref = gen_S_ref(mu, n)\n\t\n\t# Save the results of simulations\n\trank_wr_list = []\n\t\n\tfor j in range(num_sim):\n\t\n\t\t# Noise power consumption for m bits\n\t\tR = noise(mu)\n\n\t\t# Initial values of scalar product\n\t\tscalar_init = gen_scalar_init(n, R, S_ref)\n\t\tinit_scalar = [sublist.tolist() for sublist in scalar_init]\n\n\t\t# List of indexes\n\t\tlist_kappa_idx = kappa_idx()\n\n\t\t# Retrieve idx of kappa's from solution kappa\n\t\tsolution_idx = [sublist for sublist in list_kappa_idx if sublist[5] == kappa]\n\t\tsolution_idx = list(itertools.chain.from_iterable(solution_idx)) # Un-nest the previous list\n\n\t\tcommon4b_idx = []\n\t\tfor j in range(5): # Five possibilities with four consecutive kappa bits in common\n\t\t\tcommon4b_idx += [find_idx_common4b(j, kappa, list_kappa_idx, solution_idx)]\n\n\t\tcommon3b_idx = []\n\t\tfor j in range(5): # Five possibilities with three consecutive kappa bits in common\n\t\t\tcommon3b_idx += [find_idx_common3b(j, list_kappa_idx, solution_idx)]\n\n\t\tcommon2b_idx = []\n\t\tfor j in range(5): # Ten possibilities with four consecutive kappa bits in common\n\t\t\tidx0, idx1 = find_idx_common2b(j, list_kappa_idx, solution_idx)\n\t\t\tcommon2b_idx.append(idx0)\n\t\t\tcommon2b_idx.append(idx1)\n\n\t\tnonsol_idx = find_idx_nonsol(list_kappa_idx, solution_idx)\n\n\t\t# ------------------------------------------------------------------------------------------- #\n\t\t\n\t\t# Retrieve corresponding scalar products\n\t\tsolution_init = [init_scalar[i][solution_idx[i]] for i in range(n)]\n\t\tcommon4b_init = find_init(n, init_scalar, common4b_idx)\n\t\tcommon3b_init = find_init(n, init_scalar, common3b_idx)\n\t\tcommon2b_init = find_init(n, init_scalar, common2b_idx)\n\t\tnonsol_init = find_init(n, init_scalar, nonsol_idx)\n\n\t\t# Determine the sign of initial solution value depending on the activity of the register at bit i\n\t\txor_solution(n, K, kappa, solution_init)\n\t\txor_common(3, n, K, kappa, common4b_init)\n\t\txor_common(2, n, K, kappa, common3b_init)\n\t\txor_common(1, n, K, kappa, common2b_init)\n\n\t\t# Find w0 for each bit of the register, tup = (w0, a)\n\t\t# Sort result by increasing order on w0\n\t\t# list_tup_w0 = find_w0(solution_init, len(mu))\n\t\t# list_tup_w0 = sorted(list_tup_w0, key=lambda x: x[0])\n\t\t# list_w0 = [tup[0] for tup in list_tup_w0]\n\n\t\t# ------------------------------------------------------------------------------------------- #\n\t\t\n\t\t# List of all intersections\n\t\twr = []\n\n\t\tinterval = [0, 1]\n\n\t\t# Intersections between solution function and other scalar product functions\n\t\tfor j in range(len(common4b_init)):\n\t\t\twr1_common4b, wr2_common4b = find_wr_common4(j, n, norm, solution_init, common4b_init[j])\n\t\t\tappend_wr(1, wr1_common4b, interval, wr)\n\t\t\tappend_wr(2, wr2_common4b, interval, wr)\n\n\t\tfor j in range(len(common3b_init)):\n\t\t\twr1_common3b, wr2_common3b = find_wr_common3(j, n, norm, solution_init, common3b_init[j])\n\t\t\tappend_wr(1, wr1_common3b, interval, wr)\n\t\t\tappend_wr(2, wr2_common3b, interval, wr)\n\n\t\tfor j in range(len(common2b_init) // 2):\n\t\t\tfor l in range(2):\n\t\t\t\twr1_common2b, wr2_common2b = find_wr_common2(j, n, norm, 
solution_init, common2b_init[2*j + l])\n\t\t\t\tappend_wr(1, wr1_common2b, interval, wr)\n\t\t\t\tappend_wr(2, wr2_common2b, interval, wr)\n\n\t\tfor j in range(len(nonsol_init)):\n\t\t\twr1_nonsol, wr2_nonsol = find_wr_nonsol(norm, solution_init, nonsol_init[j])\t\t\n\t\t\tappend_wr(1, wr1_nonsol, interval, wr)\n\t\t\tappend_wr(2, wr2_nonsol, interval, wr)\n\n\t\t# Determine the intervals for the ranks per noise vector R\n\t\trank_wr = compute_rank_wr(wr)\n\t\trank_wr_list += rank_wr\n\n\treturn rank_wr_list\n\ndef sim_1x5bits_to_csv(K, kappa, num_sim):\n\t\"\"\"\n\tPlot probabilities of success for all possibilities for several simulations. \n\t\n\tParameters:\n\tK \t\t-- string\n\tkappa -- string\n\tnum_sim -- integer\n\t\n\tReturn: None\n\t\"\"\"\n\trank_wr_list = sim_1x5bits(K, kappa, num_sim)\n\n\tnum_rank = 32\n\tframes = list_to_frames(num_rank, rank_wr_list)\n\n\t# Compute probabilities of success\n\tfor rank in range(num_rank):\n\t\tif rank == 0: # 1st rank\n\t\t\tframes[rank] = pr_success(frames[rank], 1, num_sim)\n\t\telse:\n\t\t\tframes[rank] = pr_success(frames[rank], 0, num_sim)\n\n\t# create_fig(K, kappa, frames)\n\t# plt.savefig('./plotSim/1x5bits_sq_num-sim=%s_K=%s_kappa=%s.png' % (num_sim, K, kappa))\n\t# plt.close(fig)\n\t# plt.show()\n\n\twrite_csv(K, kappa, num_sim, frames)\n\t\ndef main(unused_command_line_args):\n\n\tparser = argparse.ArgumentParser(description='K, kappa, and num_sim')\n\n\tparser.add_argument('K', metavar='K', type=str, default='000', help='5-bit value')\n\tparser.add_argument('kappa', metavar='kappa', type=str, default='000', help='5-bit value')\n\tparser.add_argument('num_sim', metavar='num_sim', type=int, default=100, help='number of simulations to perform')\n\n\targs = parser.parse_args()\n\n\tif len(args.K) != 5 or len(args.kappa) != 5:\n\t\tprint('\\n**ERROR**')\n\t\tprint('Required length of K and kappa: 5\\n')\n\n\telse:\n\t\t# Time option\n\t\tstart_t = time.perf_counter()\n\t\t\n\t\t# Initial i-th bit register state value\n\t\tK = args.K\n\n\t\t# Key value after linear layer\n\t\tkappa = args.kappa\n\n\t\t# Number of simulations\n\t\tnum_sim = args.num_sim\n\n\t\tsim_1x5bits_to_csv(K, kappa, num_sim)\n\t\t\n\t\t# Time option\n\t\tend_t = time.perf_counter()\n\t\tprint('time', end_t - start_t, '\\n')\n\nif __name__ == '__main__':\n\tsys.exit(main(sys.argv))\n" ]
[ [ "numpy.dot", "matplotlib.pylab.ylabel", "pandas.DataFrame", "matplotlib.pylab.legend", "matplotlib.pylab.figure", "matplotlib.pylab.xlabel", "matplotlib.pylab.title", "matplotlib.pylab.plot" ] ]
derhaudraufmann/kddm-eeg-eyestate
[ "7751751cf456132fec7f2e24ad7f56111c19facf" ]
[ "src/gradient_boost.py" ]
[ "# classification with Gradient boost, this approach in the end resulted in 74% accuracy and was used as final result\n\nimport numpy as np\nfrom sklearn.model_selection import KFold\n\nfrom sklearn.ensemble import GradientBoostingClassifier\n\nfrom src.util import extract_column, load_data\n\ndef gradient_boot():\n data, rawData = load_data()\n\n col0Data, col0Name = extract_column(rawData, 0)\n col1Data, col1Name = extract_column(rawData, 1)\n col2Data, col2Name = extract_column(rawData, 2)\n col3Data, col3Name = extract_column(rawData, 3)\n col4Data, col3Name = extract_column(rawData, 4)\n col5Data, col3Name = extract_column(rawData, 5)\n col6Data, col3Name = extract_column(rawData, 6)\n col7Data, col3Name = extract_column(rawData, 7)\n col8Data, col3Name = extract_column(rawData, 8)\n col9Data, col3Name = extract_column(rawData, 9)\n col10Data, col3Name = extract_column(rawData, 10)\n col11Data, col3Name = extract_column(rawData, 11)\n col12Data, col3Name = extract_column(rawData, 12)\n col13Data, col3Name = extract_column(rawData, 13)\n\n X = np.column_stack(\n (col0Data, col1Data, col2Data, col3Data, col4Data, col5Data, col6Data, col7Data, col8Data, col9Data, col10Data, col11Data, col12Data, col13Data))\n y = data[:, -1]\n\n\n X_window = np.reshape(X[:-4], (1872, 8, X.shape[1]))\n\n kfold = KFold(n_splits=5, random_state=1, shuffle=False)\n\n result = np.array([])\n for train_index, test_index in kfold.split(X_window):\n print('TRAIN:' + str(train_index) + 'TEST:' + str(test_index))\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n cls = GradientBoostingClassifier(n_estimators=500, learning_rate=0.125, min_samples_split=1000, min_samples_leaf=1, max_depth=5)\n cls.fit(X_train, y_train)\n train_score = cls.score(X_train, y_train)\n test_score = cls.score(X_test, y_test)\n print(\"Train Score for the ECG dataset is about: {}\".format(train_score))\n print(str(test_score))\n result = np.append(result, test_score)\n print(\"Overall results\")\n print(\"Mean:\" + str(np.mean(result)))\n print(\"Median:\" + str(np.median(result)))\n print(\"Min:\" + str(np.min(result)) + \" , max:\" + str(np.max(result)))\n\n\ngradient_boot()\n" ]
[ [ "numpy.max", "numpy.array", "numpy.reshape", "numpy.median", "numpy.min", "numpy.mean", "numpy.append", "sklearn.model_selection.KFold", "numpy.column_stack", "sklearn.ensemble.GradientBoostingClassifier" ] ]
simonedeldeo/DAIN
[ "273d1a26de22a4c22bac173fc5b8f97c9ed25b1e" ]
[ "train.py" ]
[ "import sys\nimport os\n\nimport threading\nimport torch\nfrom torch.autograd import Variable\nimport torch.utils.data\nfrom lr_scheduler import *\n\nimport numpy\nfrom AverageMeter import *\nfrom loss_function import *\nimport datasets\nimport balancedsampler\nimport networks\nfrom my_args import args\n\n\n\ndef train():\n torch.manual_seed(args.seed)\n\n model = networks.__dict__[args.netName](channel=args.channels,\n filter_size = args.filter_size ,\n timestep=args.time_step,\n training=True)\n if args.use_cuda:\n print(\"Turn the model into CUDA\")\n model = model.cuda()\n\n if not args.SAVED_MODEL==None:\n print(\"Fine tuning on \" + args.SAVED_MODEL)\n if not args.use_cuda:\n pretrained_dict = torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage)\n # model.load_state_dict(torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage))\n else:\n pretrained_dict = torch.load(args.SAVED_MODEL)\n # model.load_state_dict(torch.load(args.SAVED_MODEL))\n #print([k for k,v in pretrained_dict.items()])\n\n model_dict = model.state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict)\n # 3. load the new state dict\n model.load_state_dict(model_dict)\n pretrained_dict = None\n\n if type(args.datasetName) == list:\n train_sets, test_sets = [],[]\n for ii, jj in zip(args.datasetName, args.datasetPath):\n tr_s, te_s = datasets.__dict__[ii](jj, split = args.dataset_split,single = args.single_output, task = args.task)\n train_sets.append(tr_s)\n test_sets.append(te_s)\n train_set = torch.utils.data.ConcatDataset(train_sets)\n test_set = torch.utils.data.ConcatDataset(test_sets)\n else:\n train_set, test_set = datasets.__dict__[args.datasetName](args.datasetPath)\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size = args.batch_size,\n sampler=balancedsampler.RandomBalancedSampler(train_set, int(len(train_set) / args.batch_size )),\n num_workers= args.workers, pin_memory=True if args.use_cuda else False)\n\n val_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size,\n num_workers=args.workers, pin_memory=True if args.use_cuda else False)\n print('{} samples found, {} train samples and {} test samples '.format(len(test_set)+len(train_set),\n len(train_set),\n len(test_set)))\n\n\n # if not args.lr == 0:\n print(\"train the interpolation net\")\n optimizer = torch.optim.Adamax([\n {'params': model.initScaleNets_filter.parameters(), 'lr': args.filter_lr_coe * args.lr},\n {'params': model.initScaleNets_filter1.parameters(), 'lr': args.filter_lr_coe * args.lr},\n {'params': model.initScaleNets_filter2.parameters(), 'lr': args.filter_lr_coe * args.lr},\n {'params': model.ctxNet.parameters(), 'lr': args.ctx_lr_coe * args.lr},\n {'params': model.flownets.parameters(), 'lr': args.flow_lr_coe * args.lr},\n {'params': model.depthNet.parameters(), 'lr': args.depth_lr_coe * args.lr},\n {'params': model.rectifyNet.parameters(), 'lr': args.rectify_lr}\n ],\n lr=args.lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)\n\n\n scheduler = ReduceLROnPlateau(optimizer, 'min',factor=args.factor, patience=args.patience,verbose=True)\n\n print(\"*********Start Training********\")\n print(\"LR is: \"+ str(float(optimizer.param_groups[0]['lr'])))\n print(\"EPOCH is: \"+ str(int(len(train_set) / args.batch_size )))\n print(\"Num of EPOCH is: \"+ str(args.numEpoch))\n def 
count_network_parameters(model):\n\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n N = sum([numpy.prod(p.size()) for p in parameters])\n\n return N\n print(\"Num. of model parameters is :\" + str(count_network_parameters(model)))\n if hasattr(model,'flownets'):\n print(\"Num. of flow model parameters is :\" +\n str(count_network_parameters(model.flownets)))\n if hasattr(model,'initScaleNets_occlusion'):\n print(\"Num. of initScaleNets_occlusion model parameters is :\" +\n str(count_network_parameters(model.initScaleNets_occlusion) +\n count_network_parameters(model.initScaleNets_occlusion1) +\n count_network_parameters(model.initScaleNets_occlusion2)))\n if hasattr(model,'initScaleNets_filter'):\n print(\"Num. of initScaleNets_filter model parameters is :\" +\n str(count_network_parameters(model.initScaleNets_filter) +\n count_network_parameters(model.initScaleNets_filter1) +\n count_network_parameters(model.initScaleNets_filter2)))\n if hasattr(model, 'ctxNet'):\n print(\"Num. of ctxNet model parameters is :\" +\n str(count_network_parameters(model.ctxNet)))\n if hasattr(model, 'depthNet'):\n print(\"Num. of depthNet model parameters is :\" +\n str(count_network_parameters(model.depthNet)))\n if hasattr(model,'rectifyNet'):\n print(\"Num. of rectifyNet model parameters is :\" +\n str(count_network_parameters(model.rectifyNet)))\n\n training_losses = AverageMeter()\n auxiliary_data = []\n saved_total_loss = 10e10\n saved_total_PSNR = -1\n ikk = 0\n for kk in optimizer.param_groups:\n if kk['lr'] > 0:\n ikk = kk\n break\n\n for t in range(args.numEpoch):\n print(\"The id of this in-training network is \" + str(args.uid))\n print(args)\n #Turn into training mode\n model = model.train()\n\n for i, (X0_half,X1_half, y_half) in enumerate(train_loader):\n\n if i >= int(len(train_set) / args.batch_size ):\n #(0 if t == 0 else EPOCH):#\n break\n\n X0_half = X0_half.cuda() if args.use_cuda else X0_half\n X1_half = X1_half.cuda() if args.use_cuda else X1_half\n y_half = y_half.cuda() if args.use_cuda else y_half\n\n X0 = Variable(X0_half, requires_grad= False)\n X1 = Variable(X1_half, requires_grad= False)\n y = Variable(y_half,requires_grad= False)\n\n diffs, offsets,filters,occlusions = model(torch.stack((X0,y,X1),dim = 0))\n\n pixel_loss, offset_loss, sym_loss = part_loss(diffs,offsets,occlusions, [X0,X1],epsilon=args.epsilon)\n\n total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))\n\n training_losses.update(total_loss.item(), args.batch_size)\n if i % max(1, int(int(len(train_set) / args.batch_size )/500.0)) == 0:\n\n print(\"Ep [\" + str(t) +\"/\" + str(i) +\n \"]\\tl.r.: \" + str(round(float(ikk['lr']),7))+\n \"\\tPix: \" + str([round(x.item(),5) for x in pixel_loss]) +\n \"\\tTV: \" + str([round(x.item(),4) for x in offset_loss]) +\n \"\\tSym: \" + str([round(x.item(), 4) for x in sym_loss]) +\n \"\\tTotal: \" + str([round(x.item(),5) for x in [total_loss]]) +\n \"\\tAvg. 
Loss: \" + str([round(training_losses.avg, 5)]))\n\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n if t == 1:\n # delete the pre validation weights for cleaner workspace\n if os.path.exists(args.save_path + \"/epoch\" + str(0) +\".pth\" ):\n os.remove(args.save_path + \"/epoch\" + str(0) +\".pth\")\n\n if os.path.exists(args.save_path + \"/epoch\" + str(t-1) +\".pth\"):\n os.remove(args.save_path + \"/epoch\" + str(t-1) +\".pth\")\n torch.save(model.state_dict(), args.save_path + \"/epoch\" + str(t) +\".pth\")\n\n # print(\"\\t\\t**************Start Validation*****************\")\n #Turn into evaluation mode\n\n val_total_losses = AverageMeter()\n val_total_pixel_loss = AverageMeter()\n val_total_PSNR_loss = AverageMeter()\n val_total_tv_loss = AverageMeter()\n val_total_pws_loss = AverageMeter()\n val_total_sym_loss = AverageMeter()\n\n for i, (X0,X1,y) in enumerate(val_loader):\n if i >= int(len(test_set)/ args.batch_size):\n break\n\n with torch.no_grad():\n X0 = X0.cuda() if args.use_cuda else X0\n X1 = X1.cuda() if args.use_cuda else X1\n y = y.cuda() if args.use_cuda else y\n\n diffs, offsets,filters,occlusions = model(torch.stack((X0,y,X1),dim = 0))\n\n pixel_loss, offset_loss,sym_loss = part_loss(diffs, offsets, occlusions, [X0,X1],epsilon=args.epsilon)\n\n val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))\n\n per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2,\n dim=1),dim=1),dim=1)\n per_sample_pix_error = per_sample_pix_error.data # extract tensor\n psnr_loss = torch.mean(20 * torch.log(1.0/torch.sqrt(per_sample_pix_error)))/torch.log(torch.Tensor([10]))\n #\n\n val_total_losses.update(val_total_loss.item(),args.batch_size)\n val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)\n val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)\n val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)\n val_total_PSNR_loss.update(psnr_loss[0],args.batch_size)\n print(\".\",end='',flush=True)\n\n print(\"\\nEpoch \" + str(int(t)) +\n \"\\tlearning rate: \" + str(float(ikk['lr'])) +\n \"\\tAvg Training Loss: \" + str(round(training_losses.avg,5)) +\n \"\\tValidate Loss: \" + str([round(float(val_total_losses.avg), 5)]) +\n \"\\tValidate PSNR: \" + str([round(float(val_total_PSNR_loss.avg), 5)]) +\n \"\\tPixel Loss: \" + str([round(float(val_total_pixel_loss.avg), 5)]) +\n \"\\tTV Loss: \" + str([round(float(val_total_tv_loss.avg), 4)]) +\n \"\\tPWS Loss: \" + str([round(float(val_total_pws_loss.avg), 4)]) +\n \"\\tSym Loss: \" + str([round(float(val_total_sym_loss.avg), 4)])\n )\n\n auxiliary_data.append([t, float(ikk['lr']),\n training_losses.avg, val_total_losses.avg, val_total_pixel_loss.avg,\n val_total_tv_loss.avg,val_total_pws_loss.avg,val_total_sym_loss.avg])\n\n numpy.savetxt(args.log, numpy.array(auxiliary_data), fmt='%.8f', delimiter=',')\n training_losses.reset()\n\n print(\"\\t\\tFinished an epoch, Check and Save the model weights\")\n # we check the validation loss instead of training loss. 
OK~\n if saved_total_loss >= val_total_losses.avg:\n saved_total_loss = val_total_losses.avg\n torch.save(model.state_dict(), args.save_path + \"/best\"+\".pth\")\n print(\"\\t\\tBest Weights updated for decreased validation loss\\n\")\n\n else:\n print(\"\\t\\tWeights Not updated for undecreased validation loss\\n\")\n\n #schdule the learning rate\n scheduler.step(val_total_losses.avg)\n\n\n print(\"*********Finish Training********\")\n\nif __name__ == '__main__':\n sys.setrecursionlimit(100000)# 0xC00000FD exception for the recursive detach of gradients.\n threading.stack_size(200000000)# 0xC00000FD exception for the recursive detach of gradients.\n thread = threading.Thread(target=train)\n thread.start()\n thread.join()\n\n exit(0)\n" ]
[ [ "torch.utils.data.ConcatDataset", "numpy.array", "torch.sqrt", "torch.stack", "torch.autograd.Variable", "torch.no_grad", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.load", "torch.Tensor", "torch.mean" ] ]
jcasse/mlflow
[ "7bba35ceed7ac3219622583a2041cc4af4801159" ]
[ "tests/pytorch/test_pytorch_autolog.py" ]
[ "from distutils.version import LooseVersion\nimport pytest\nimport pytorch_lightning as pl\nimport torch\nfrom iris import IrisClassification\nimport mlflow\nimport mlflow.pytorch\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom mlflow.utils.file_utils import TempDir\nfrom iris_data_module import IrisDataModule\nfrom mlflow.utils.autologging_utils import BatchMetricsLogger\nfrom mlflow.pytorch._pytorch_autolog import _get_optimizer_name\nfrom unittest.mock import patch\n\nNUM_EPOCHS = 20\n\n\[email protected]\ndef pytorch_model():\n mlflow.pytorch.autolog()\n model = IrisClassification()\n dm = IrisDataModule()\n dm.prepare_data()\n dm.setup(stage=\"fit\")\n trainer = pl.Trainer(max_epochs=NUM_EPOCHS)\n trainer.fit(model, dm)\n client = mlflow.tracking.MlflowClient()\n run = client.get_run(client.list_run_infos(experiment_id=\"0\")[0].run_id)\n return trainer, run\n\n\[email protected]\[email protected](\"log_models\", [True, False])\ndef test_pytorch_autolog_log_models_configuration(log_models):\n mlflow.pytorch.autolog(log_models=log_models)\n model = IrisClassification()\n dm = IrisDataModule()\n dm.prepare_data()\n dm.setup(stage=\"fit\")\n trainer = pl.Trainer(max_epochs=NUM_EPOCHS)\n trainer.fit(model, dm)\n client = mlflow.tracking.MlflowClient()\n run = client.get_run(client.list_run_infos(experiment_id=\"0\")[0].run_id)\n run_id = run.info.run_id\n client = mlflow.tracking.MlflowClient()\n artifacts = [f.path for f in client.list_artifacts(run_id)]\n assert (\"model\" in artifacts) == log_models\n\n\ndef test_pytorch_autolog_logs_default_params(pytorch_model):\n _, run = pytorch_model\n data = run.data\n assert \"lr\" in data.params\n assert \"eps\" in data.params\n assert \"optimizer_name\" in data.params\n assert \"weight_decay\" in data.params\n assert \"betas\" in data.params\n\n\ndef test_pytorch_autolog_logs_expected_data(pytorch_model):\n _, run = pytorch_model\n data = run.data\n\n # Checking if metrics are logged\n assert \"loss\" in data.metrics\n assert \"val_loss\" in data.metrics\n\n # Testing optimizer parameters are logged\n assert \"optimizer_name\" in data.params\n assert data.params[\"optimizer_name\"] == \"Adam\"\n\n # Testing model_summary.txt is saved\n client = mlflow.tracking.MlflowClient()\n artifacts = client.list_artifacts(run.info.run_id)\n artifacts = map(lambda x: x.path, artifacts)\n assert \"model_summary.txt\" in artifacts\n\n\n# pylint: disable=unused-argument\ndef test_pytorch_autolog_persists_manually_created_run():\n with mlflow.start_run() as manual_run:\n mlflow.pytorch.autolog()\n model = IrisClassification()\n dm = IrisDataModule()\n dm.prepare_data()\n dm.setup(stage=\"fit\")\n trainer = pl.Trainer(max_epochs=NUM_EPOCHS)\n trainer.fit(model, dm)\n trainer.test()\n assert mlflow.active_run() is not None\n assert mlflow.active_run().info.run_id == manual_run.info.run_id\n\n\ndef test_pytorch_autolog_ends_auto_created_run(pytorch_model):\n assert mlflow.active_run() is None\n\n\[email protected]\ndef pytorch_model_with_callback(patience):\n mlflow.pytorch.autolog()\n model = IrisClassification()\n dm = IrisDataModule()\n dm.prepare_data()\n dm.setup(stage=\"fit\")\n early_stopping = EarlyStopping(\n monitor=\"val_loss\",\n mode=\"min\",\n min_delta=99999999, # forces early stopping\n patience=patience,\n verbose=True,\n )\n\n with TempDir() as tmp:\n checkpoint_callback = ModelCheckpoint(\n filepath=tmp.path(),\n save_top_k=1,\n verbose=True,\n 
monitor=\"val_loss\",\n mode=\"min\",\n prefix=\"\",\n )\n\n trainer = pl.Trainer(\n max_epochs=NUM_EPOCHS * 2,\n callbacks=[early_stopping],\n checkpoint_callback=checkpoint_callback,\n )\n trainer.fit(model, dm)\n\n client = mlflow.tracking.MlflowClient()\n run = client.get_run(client.list_run_infos(experiment_id=\"0\")[0].run_id)\n\n return trainer, run\n\n\[email protected](\"patience\", [3])\ndef test_pytorch_early_stop_artifacts_logged(pytorch_model_with_callback):\n _, run = pytorch_model_with_callback\n client = mlflow.tracking.MlflowClient()\n artifacts = client.list_artifacts(run.info.run_id)\n artifacts = map(lambda x: x.path, artifacts)\n assert \"restored_model_checkpoint\" in artifacts\n\n\[email protected](\"patience\", [3])\ndef test_pytorch_autolog_model_can_load_from_artifact(pytorch_model_with_callback):\n _, run = pytorch_model_with_callback\n run_id = run.info.run_id\n client = mlflow.tracking.MlflowClient()\n artifacts = client.list_artifacts(run_id)\n artifacts = map(lambda x: x.path, artifacts)\n assert \"model\" in artifacts\n model = mlflow.pytorch.load_model(\"runs:/\" + run_id + \"/model\")\n result = model(torch.Tensor([1.5, 2, 2.5, 3.5]).unsqueeze(0))\n assert result is not None\n\n\[email protected]\[email protected](\"log_models\", [True, False])\[email protected](\"patience\", [3])\ndef test_pytorch_with_early_stopping_autolog_log_models_configuration_with(log_models, patience):\n mlflow.pytorch.autolog(log_models=log_models)\n model = IrisClassification()\n dm = IrisDataModule()\n dm.prepare_data()\n dm.setup(stage=\"fit\")\n early_stopping = EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=patience, verbose=True)\n\n with TempDir() as tmp:\n checkpoint_callback = ModelCheckpoint(\n filepath=tmp.path(),\n save_top_k=1,\n verbose=True,\n monitor=\"val_loss\",\n mode=\"min\",\n prefix=\"\",\n )\n\n trainer = pl.Trainer(\n max_epochs=NUM_EPOCHS * 2,\n callbacks=[early_stopping],\n checkpoint_callback=checkpoint_callback,\n )\n trainer.fit(model, dm)\n\n client = mlflow.tracking.MlflowClient()\n run = client.get_run(client.list_run_infos(experiment_id=\"0\")[0].run_id)\n run_id = run.info.run_id\n client = mlflow.tracking.MlflowClient()\n artifacts = [f.path for f in client.list_artifacts(run_id)]\n assert (\"restored_model_checkpoint\" in artifacts) == log_models\n\n\[email protected](\"patience\", [0, 1, 5])\ndef test_pytorch_early_stop_params_logged(pytorch_model_with_callback, patience):\n _, run = pytorch_model_with_callback\n data = run.data\n assert \"monitor\" in data.params\n assert \"mode\" in data.params\n assert \"patience\" in data.params\n assert float(data.params[\"patience\"]) == patience\n assert \"min_delta\" in data.params\n assert \"stopped_epoch\" in data.params\n\n\[email protected](\"patience\", [3])\ndef test_pytorch_autolog_batch_metrics_logger_logs_expected_metrics(patience):\n patched_metrics_data = []\n\n # Mock patching BatchMetricsLogger.record_metrics()\n # to ensure that expected metrics are being logged.\n original = BatchMetricsLogger.record_metrics\n\n with patch(\n \"mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics\", autospec=True\n ) as record_metrics_mock:\n\n def record_metrics_side_effect(self, metrics, step=None):\n patched_metrics_data.extend(metrics.items())\n original(self, metrics, step)\n\n record_metrics_mock.side_effect = record_metrics_side_effect\n _, run = pytorch_model_with_callback(patience)\n\n patched_metrics_data = dict(patched_metrics_data)\n original_metrics = 
run.data.metrics\n\n for metric_name in original_metrics:\n assert metric_name in patched_metrics_data\n assert original_metrics[metric_name] == patched_metrics_data[metric_name]\n\n assert \"loss\" in original_metrics\n assert \"loss\" in patched_metrics_data\n\n\ndef test_pytorch_autolog_non_early_stop_callback_does_not_log(pytorch_model):\n trainer, run = pytorch_model\n client = mlflow.tracking.MlflowClient()\n metric_history = client.get_metric_history(run.info.run_id, \"loss\")\n assert trainer.max_epochs == NUM_EPOCHS\n assert len(metric_history) == NUM_EPOCHS\n\n\[email protected]\ndef pytorch_model_tests():\n model = IrisClassification()\n dm = IrisDataModule()\n dm.prepare_data()\n dm.setup(stage=\"fit\")\n trainer = pl.Trainer(max_epochs=NUM_EPOCHS)\n trainer.fit(model, dm)\n trainer.test()\n client = mlflow.tracking.MlflowClient()\n run = client.get_run(client.list_run_infos(experiment_id=\"0\")[0].run_id)\n return trainer, run\n\n\ndef test_pytorch_test_metrics_logged(pytorch_model_tests):\n _, run = pytorch_model_tests\n data = run.data\n assert \"test_loss\" in data.metrics\n assert \"test_acc\" in data.metrics\n\n\ndef test_get_optimizer_name():\n adam = torch.optim.Adam(torch.nn.Linear(1, 1).parameters())\n assert _get_optimizer_name(adam) == \"Adam\"\n\n\[email protected](\n LooseVersion(pl.__version__) < LooseVersion(\"1.1.0\"),\n reason=\"`LightningOptimizer` doesn't exist in pytorch-lightning < 1.1.0\",\n)\ndef test_get_optimizer_name_with_lightning_optimizer():\n from pytorch_lightning.core.optimizer import LightningOptimizer\n\n adam = torch.optim.Adam(torch.nn.Linear(1, 1).parameters())\n assert _get_optimizer_name(LightningOptimizer(adam)) == \"Adam\"\n" ]
[ [ "torch.nn.Linear", "torch.Tensor" ] ]
MinxZ/multi_label
[ "aed67d4bb4102962eb73b996288aa56983589858" ]
[ "multi_loss/train.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport multiprocessing as mp\nimport random\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend\nfrom keras.applications import *\nfrom keras.backend.common import (epsilon, floatx, image_data_format,\n image_dim_ordering, set_image_dim_ordering)\nfrom keras.callbacks import *\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.utils.generic_utils import CustomObjectScope\nfrom tqdm import tqdm\n\nfrom image import *\nfrom model import *\n\nmodel_name = 'ResNet50'\noptimizer = 2e-5\nlr = 2e-5\n\n\ndef run(model_name, optimizer, lr):\n if model_name == \"ResNet50\":\n print('\\n For Resnet')\n from keras.applications.imagenet_utils import preprocess_input\n elif model_name[:-3] == \"DenseNet\":\n print('\\n For DenseNet')\n from keras.applications.densenet import preprocess_input\n else:\n print('\\n For model = tf')\n from keras.applications.inception_v3 import preprocess_input\n\n # Load datasets\n x_train, x_val, x_test, y_train, y_val, y_test = load_data()\n epochs = 10000\n\n # Loading model\n model_config, fc, pred, layer_names, input_shape = load_model_config()\n MODEL = model_config[model_name][1]\n batch_size = model_config[model_name][0]\n\n def build_model():\n print('\\n Build model')\n h5_name = f'../models/{model_name}_{len(fc)}_fc.h5'\n checkpointer = ModelCheckpoint(\n filepath=h5_name, verbose=0, save_best_only=True)\n\n cnn_model = MODEL(\n include_top=False, input_shape=input_shape, weights='imagenet', pooling='avg')\n inputs = Input(shape=input_shape)\n x = cnn_model(inputs)\n model = tri_fc(inputs, x, fc, pred, layer_names)\n\n try:\n model.load_weights(\n f'../models/{len(fc)}_fc_{model_name}.h5', by_name=True)\n print('\\n Succeed on loading fc wight ')\n except:\n print('\\n Train fc')\n data_val = x_val\n split = int(data_val.shape[0] * 4 / 5)\n x_train_fc = data_val[:split]\n x_val_fc = data_val[split:]\n y_train_fc = []\n y_val_fc = []\n for x in range(3):\n y_train_fc.append(y_val[x][:split])\n y_val_fc.append(y_val[x][split:])\n\n fc_model_train(x_train_fc, y_train_fc, x_val_fc, y_val_fc, batch_size,\n cnn_model, fc, pred, layer_names, model_name, preprocess_input)\n # fc_model_train(x_train, y_train, x_val, y_val, batch_size,\n # cnn_model, fc, pred, layer_names, model_name, preprocess_input)\n model.load_weights(\n f'../models/{len(fc)}_fc_{model_name}.h5', by_name=True)\n return model, checkpointer\n\n print('\\n Loading model')\n try:\n with CustomObjectScope({'f1_loss': f1_loss, 'f1_score': f1_score}):\n model = load_model(\n f'../models/{model_name}_{len(fc)}_fc.h5')\n checkpointer = ModelCheckpoint(\n filepath=f'../models/{model_name}_{len(fc)}_fc_fine_tune.h5', verbose=0, save_best_only=True)\n print('\\n Ready to fine tune.')\n except:\n model, checkpointer = build_model()\n\n # callbacks\n\n datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n # preprocessing_function=get_random_eraser( p=0.2, v_l=0, v_h=255, pixel_level=True),\n rotation_range=30,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n activation_2 = ['softmax', 'sigmoid', 'sigmoid']\n for i in range(2):\n if i == 0:\n lossWeights = {\n f'{layer_names[0]}_{activation_2[0]}_{pred[0]}': 0.3,\n f'{layer_names[1]}_{activation_2[1]}_{pred[1]}': 10,\n 
f'{layer_names[2]}_{activation_2[2]}_{pred[2]}': 10}\n losses = {\n f'{layer_names[0]}_{activation_2[0]}_{pred[0]}': \"categorical_crossentropy\",\n f'{layer_names[1]}_{activation_2[1]}_{pred[1]}': 'binary_crossentropy',\n f'{layer_names[2]}_{activation_2[2]}_{pred[2]}': 'binary_crossentropy'}\n metrics = {\n f'{layer_names[0]}_{activation_2[0]}_{pred[0]}': [\"categorical_accuracy\"],\n f'{layer_names[1]}_{activation_2[1]}_{pred[1]}': [f1_score],\n f'{layer_names[2]}_{activation_2[2]}_{pred[2]}': [f1_score]}\n early_stopping = EarlyStopping(\n monitor='val_loss', patience=6, verbose=2, mode='auto')\n reduce_lr = ReduceLROnPlateau(\n factor=np.sqrt(0.1), patience=3, verbose=2)\n opt = Adam(lr=lr)\n print(f\"\\n {model_name}: Optimizer=\" +\n optimizer + \" lr=\" + str(lr) + \" \\n\")\n elif i == 1:\n lossWeights = {\n f'{layer_names[0]}_{activation_2[0]}_{pred[0]}': 0.2,\n f'{layer_names[1]}_{activation_2[1]}_{pred[1]}': 10,\n f'{layer_names[2]}_{activation_2[2]}_{pred[2]}': 10}\n losses = {\n f'{layer_names[0]}_{activation_2[0]}_{pred[0]}': \"categorical_crossentropy\",\n f'{layer_names[1]}_{activation_2[1]}_{pred[1]}': f1_loss,\n f'{layer_names[2]}_{activation_2[2]}_{pred[2]}': 'binary_crossentropy'}\n metrics = {\n f'{layer_names[0]}_{activation_2[0]}_{pred[0]}': [\"categorical_accuracy\"],\n f'{layer_names[1]}_{activation_2[1]}_{pred[1]}': [f1_score],\n f'{layer_names[2]}_{activation_2[2]}_{pred[2]}': [f1_score]}\n opt = SGD(lr=1e-5, momentum=0.9, nesterov=True)\n early_stopping = EarlyStopping(\n monitor='val_loss', patience=6, verbose=2, mode='auto')\n reduce_lr = ReduceLROnPlateau(\n factor=np.sqrt(0.1), patience=3, verbose=2)\n checkpointer = ModelCheckpoint(\n filepath=f'../models/{model_name}_{len(fc)}_fc_fine_tune.h5', verbose=0, save_best_only=True)\n\n model.compile(optimizer=opt, loss=losses,\n loss_weights=lossWeights, metrics=metrics)\n\n model.fit_generator(\n datagen.flow(x_train, y_train, batch_size=batch_size),\n steps_per_epoch=len(x_train) / batch_size / 5,\n validation_data=val_datagen.flow(\n x_val, y_val, batch_size=batch_size),\n validation_steps=len(x_val) / batch_size / 2,\n epochs=epochs,\n callbacks=[early_stopping, checkpointer, reduce_lr],\n max_queue_size=20,\n workers=8,\n use_multiprocessing=True)\n\n quit()\n\n\ndef parse_args():\n \"\"\" Parse command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Hyper parameter\")\n parser.add_argument(\n \"--model\", help=\"Model to use\", default=\"DenseNet169\", type=str)\n parser.add_argument(\n \"--optimizer\", help=\"which optimizer to use\", default=\"Adam\", type=str)\n parser.add_argument(\n \"--lr\", help=\"learning rate\", default=2e-5, type=float)\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n run(args.model, args.optimizer, args.lr)\n" ]
[ [ "numpy.sqrt" ] ]
quanpands/wflow
[ "b454a55e4a63556eaac3fbabd97f8a0b80901e5a" ]
[ "wflow/wflow/wflow_funcs.py" ]
[ "# Copyright (c) J. Schellekens 2005-2011\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"\nwflow_funcs - hydrological functions library\n---------------------------------------------\n\nIn addition this library contain a number of hydrological functions\nthat may be used within the wflow models\n\n\"\"\"\n\nfrom numba import jit\nimport math\nimport numpy as np\nimport pcraster as pcr\n\n\n@jit(nopython=True)\ndef _up_nb(ldd_f, idx0, shape, _ldd_us, sr=1):\n \"\"\"returns a numpy array with 1d indices of upstream neighbors on a ldd\n \"\"\"\n nrow, ncol = shape\n r = idx0 // ncol\n c = idx0 % ncol\n wdw_idx = list()\n i = 0\n for dr in range(-sr, sr+1):\n row = r + dr\n if row >= 0 and row < nrow:\n for dc in range(-sr, sr+1):\n col = c + dc\n if col >= 0 and col < ncol:\n idx = idx0 + dc + dr*ncol\n if ldd_f[idx] == _ldd_us[i]:\n wdw_idx.append(idx)\n i += 1\n else:\n i += sr*2+1\n return np.array(wdw_idx, dtype=np.int32)\n\n\n@jit(nopython=True)\ndef set_dd(ldd, _ldd_us, river, pit_value=5):\n \"\"\"set drainage direction network from downstream to upstream\n \"\"\"\n shape = ldd.shape\n ldd = ldd.flatten()\n river = np.concatenate((river, np.array([0], dtype=river.dtype)))\n nodes = list()\n nodes_up = list()\n rnodes = list()\n rnodes_up = list()\n \n idx_ds_ = np.where(ldd==np.array(pit_value).astype(ldd.dtype))[0].astype(np.int32)\n \n for c, idx_ in enumerate(idx_ds_):\n idx_ds = np.array([idx_])\n \n # move upstream\n while True:\n nodes.append(idx_ds)\n idx_r_ds = idx_ds[np.where(river[idx_ds])]\n if idx_r_ds.size > 0:\n rnodes.append(idx_r_ds)\n r_nbs_up = np.ones((idx_r_ds.size, 8), dtype=np.int32)*-1\n idx_next = list()\n nbs_up = np.ones((idx_ds.size, 8), dtype=np.int32)*-1\n j = 0\n for i, idx in enumerate(idx_ds):\n idx_up = _up_nb(ldd, idx, shape, _ldd_us)\n if np.any(idx_up):\n idx_next.extend(idx_up)\n nbs_up[i, :idx_up.size] = idx_up\n if river[idx]:\n idx_r_up = idx_up[np.where(river[idx_up])]\n r_nbs_up[j, :idx_r_up.size] = idx_r_up\n j = j + 1\n nodes_up.append(nbs_up)\n if idx_r_ds.size > 0:\n rnodes_up.append(r_nbs_up)\n if len(idx_next) == 0:\n break\n idx_ds = np.array(idx_next, dtype=np.int32)\n return nodes[::-1], nodes_up[::-1], rnodes[::-1], rnodes_up[::-1]\n\n\n@jit(nopython=True)\ndef kinematic_wave(Qin,Qold,q,alpha,beta,deltaT,deltaX):\n \n epsilon = 1e-12\n MAX_ITERS = 3000\n\n if ((Qin+Qold+q) == 0.):\n return 0.\n else:\n #common terms\n ab_pQ = alpha*beta*pow(((Qold+Qin)/2.),beta-1.)\n deltaTX = deltaT/deltaX\n C = deltaTX*Qin + alpha*pow(Qold,beta) + deltaT*q\n \n Qkx = (deltaTX * Qin + Qold * ab_pQ + deltaT * q) / (deltaTX + ab_pQ)\n \n if math.isnan(Qkx):\n Qkx = 0.\n \n Qkx = max(Qkx, 1e-30)\n fQkx = deltaTX * Qkx + alpha * pow(Qkx, beta) - C\n dfQkx = deltaTX + alpha * beta * pow(Qkx, beta - 1.)\n Qkx = Qkx - fQkx / dfQkx\n Qkx = max(Qkx, 1e-30)\n count = 0\n \n while abs(fQkx) > epsilon and count < MAX_ITERS:\n fQkx = deltaTX * Qkx + alpha * 
pow(Qkx, beta) - C\n dfQkx = deltaTX + alpha * beta * pow(Qkx, beta - 1.)\n Qkx = Qkx - fQkx / dfQkx\n Qkx = max(Qkx, 1e-30)\n count = count + 1\n \n return Qkx\n\n\n@jit(nopython=True)\ndef kin_wave(rnodes, rnodes_up, Qold, q, Alpha, Beta, DCL, River, Bw, AlpTermR, AlpPow, deltaT, it=1):\n \n acc_flow = np.zeros(Qold.size, dtype=np.float64)\n acc_flow = np.concatenate((acc_flow, np.array([0], dtype=np.float64)))\n\n\n for v in range(0,it):\n shape = Qold.shape\n # flat new state\n Qnew = np.zeros(Qold.size, dtype=np.float64)\n # append zero to end to deal with nodata (-1) in indices\n Qnew = np.concatenate((Qnew, np.array([0], dtype=np.float64)))\n\n for i in range(len(rnodes)):\n for j in range(len(rnodes[i])):\n idx = rnodes[i][j]\n nbs = rnodes_up[i][j]\n \n Qin = np.sum(Qnew[nbs])\n Qnew[idx] = kinematic_wave(Qin, Qold[idx], q[idx], Alpha[idx], Beta[idx], deltaT/it, DCL[idx])\n \n acc_flow[idx] = acc_flow[idx] + Qnew[idx] * (deltaT/it)\n WaterLevelR = (Alpha[idx] * np.power(Qnew[idx], Beta[idx])) / Bw[idx]\n Pr = Bw[idx] + (2.0 * WaterLevelR)\n Alpha[idx] = AlpTermR[idx] * np.power(Pr, AlpPow[idx])\n Qold[idx]= Qnew[idx]\n # remove last value from array and reshape to original format\n return acc_flow[:-1].reshape(shape)\n #return Qnew[:-1].reshape(shape)\n\n\n@jit(nopython=True)\ndef kinematic_wave_ssf(ssf_in, ssf_old, zi_old, r, Ks_hor, Ks ,slope ,neff, f, D, dt, dx, w, ssf_max):\n \n epsilon = 1e-6\n MAX_ITERS = 3000\n \n if (max(ssf_in+ssf_old+r,0.) == 0.):\n return 0., D, 0.\n else:\n #initial estimate\n ssf_n = (ssf_in + ssf_old)/2.\n count = 0 \n \n zi = np.log(f*ssf_n/(w*Ks_hor*Ks*slope) + np.exp(-f*D))/-f\n Cn = (Ks_hor*Ks*slope)/neff * np.exp(-f*zi) \n c = (dt/dx)*ssf_in + 1./Cn*ssf_n + dt*(r-(zi_old-zi)*neff*w)\n \n fQ = (dt/dx)*ssf_n + 1./Cn*ssf_n - c \n dfQ = (dt/dx) + 1./Cn \n ssf_n = ssf_n - (fQ/dfQ)\n \n if math.isnan(ssf_n):\n ssf_n = 0.\n ssf_n = max(ssf_n, 1e-30)\n \n while abs(fQ) > epsilon and count < MAX_ITERS:\n \n zi = np.log(f*ssf_n/(w*Ks_hor*Ks*slope) + np.exp(-f*D))/-f\n Cn = (Ks_hor*Ks*slope)/neff * np.exp(-f*zi) \n c = (dt/dx)*ssf_in + 1./Cn*ssf_n + dt*(r-(zi_old-zi)*neff*w)\n \n fQ = (dt/dx)*ssf_n + 1./Cn*ssf_n - c\n dfQ = (dt/dx) + 1./Cn \n ssf_n = ssf_n - (fQ/dfQ)\n \n if math.isnan(ssf_n):\n ssf_n = 0.\n ssf_n = max(ssf_n, 1e-30)\n\n count = count + 1\n \n ssf_n = min(ssf_n,(ssf_max*w))\n #exfilt = min(0,zi) * -neff\n exfilt = min(zi_old - (ssf_in + r*dx - ssf_n)/(w*dx)/neff,0.0) * -neff\n zi = max(0,zi)\n \n return ssf_n, zi, exfilt \n\n\ndef rainfall_interception_hbv(Rainfall, PotEvaporation, Cmax, InterceptionStorage):\n \"\"\"\n Returns:\n TF, Interception, IntEvap,InterceptionStorage\n \"\"\"\n Interception = pcr.min(\n Rainfall, Cmax - InterceptionStorage\n ) #: Interception in mm/timestep\n\n InterceptionStorage = (\n InterceptionStorage + Interception\n ) #: Current interception storage\n TF = Rainfall - Interception\n IntEvap = pcr.min(\n InterceptionStorage, PotEvaporation\n ) #: Evaporation from interception storage\n InterceptionStorage = InterceptionStorage - IntEvap\n\n return TF, Interception, IntEvap, InterceptionStorage\n\n\ndef rainfall_interception_gash(\n Cmax, EoverR, CanopyGapFraction, Precipitation, CanopyStorage, maxevap=9999\n):\n \"\"\"\n Interception according to the Gash model (For daily timesteps). 
\n \"\"\"\n # TODO: add other rainfall interception method (lui)\n # TODO: Include subdaily Gash model\n # TODO: add LAI variation in year\n # Hack for stemflow\n\n pt = 0.1 * CanopyGapFraction\n\n P_sat = pcr.max(\n pcr.scalar(0.0),\n pcr.cover(\n (-Cmax / EoverR) * pcr.ln(1.0 - (EoverR / (1.0 - CanopyGapFraction - pt))),\n pcr.scalar(0.0),\n ),\n )\n\n # large storms P > P_sat\n largestorms = Precipitation > P_sat\n\n Iwet = pcr.ifthenelse(\n largestorms,\n ((1 - CanopyGapFraction - pt) * P_sat) - Cmax,\n Precipitation * (1 - CanopyGapFraction - pt),\n )\n Isat = pcr.ifthenelse(largestorms, (EoverR) * (Precipitation - P_sat), 0.0)\n Idry = pcr.ifthenelse(largestorms, Cmax, 0.0)\n Itrunc = 0\n\n StemFlow = pt * Precipitation\n\n ThroughFall = Precipitation - Iwet - Idry - Isat - Itrunc - StemFlow\n Interception = Iwet + Idry + Isat + Itrunc\n\n # Non corect for area without any Interception (say open water Cmax -- zero)\n CmaxZero = Cmax <= 0.0\n ThroughFall = pcr.ifthenelse(CmaxZero, Precipitation, ThroughFall)\n Interception = pcr.ifthenelse(CmaxZero, pcr.scalar(0.0), Interception)\n StemFlow = pcr.ifthenelse(CmaxZero, pcr.scalar(0.0), StemFlow)\n\n # Now corect for maximum potential evap\n OverEstimate = pcr.ifthenelse(\n Interception > maxevap, Interception - maxevap, pcr.scalar(0.0)\n )\n Interception = pcr.min(Interception, maxevap)\n # Add surpluss to the thoughdfall\n ThroughFall = ThroughFall + OverEstimate\n\n return ThroughFall, Interception, StemFlow, CanopyStorage\n\n\ndef rainfall_interception_modrut(\n Precipitation, PotEvap, CanopyStorage, CanopyGapFraction, Cmax\n):\n \"\"\"\n Interception according to a modified Rutter model. The model is solved\n explicitly and there is no drainage below Cmax.\n \n Returns:\n - NetInterception: P - TF - SF (may be different from the actual wet canopy evaporation)\n - ThroughFall:\n - StemFlow:\n - LeftOver: Amount of potential eveporation not used\n - Interception: Actual wet canopy evaporation in this thimestep\n - CanopyStorage: Canopy storage at the end of the timestep\n \n \"\"\"\n\n ##########################################################################\n # Interception according to a modified Rutter model with hourly timesteps#\n ##########################################################################\n\n p = CanopyGapFraction\n pt = 0.1 * p\n\n # Amount of P that falls on the canopy\n Pfrac = pcr.max((1 - p - pt), 0) * Precipitation\n\n # S cannot be larger than Cmax, no gravity drainage below that\n DD = pcr.ifthenelse(CanopyStorage > Cmax, CanopyStorage - Cmax, 0.0)\n CanopyStorage = CanopyStorage - DD\n\n # Add the precipitation that falls on the canopy to the store\n CanopyStorage = CanopyStorage + Pfrac\n\n # Now do the Evap, make sure the store does not get negative\n dC = -1 * pcr.min(CanopyStorage, PotEvap)\n CanopyStorage = CanopyStorage + dC\n\n LeftOver = PotEvap + dC\n # Amount of evap not used\n\n # Now drain the canopy storage again if needed...\n D = pcr.ifthenelse(CanopyStorage > Cmax, CanopyStorage - Cmax, 0.0)\n CanopyStorage = CanopyStorage - D\n\n # Calculate throughfall\n ThroughFall = DD + D + p * Precipitation\n StemFlow = Precipitation * pt\n\n # Calculate interception, this is NET Interception\n NetInterception = Precipitation - ThroughFall - StemFlow\n Interception = -dC\n\n return NetInterception, ThroughFall, StemFlow, LeftOver, Interception, CanopyStorage\n\n\n# baseflow seperation methods\n# see http://mssanz.org.au/MODSIM97/Vol%201/Chapman.pdf\n\n\ndef bf_oneparam(discharge, k):\n bf = 
list(range(0, len(discharge)))\n for i in range(1, len(discharge)):\n bf[i] = (k * bf[i - 1] / (2.0 - k)) + ((1.0 - k) * discharge[i] / (2.0 - k))\n if bf[i] > discharge[i]:\n bf[i] = discharge[i]\n\n return bf\n\n\ndef bf_twoparam(discharge, k, C):\n bf = list(range(0, len(discharge)))\n for i in range(1, len(discharge)):\n bf[i] = (k * bf[i - 1] / (1.0 + C)) + ((C) * discharge[i] / (1.0 + C))\n if bf[i] > discharge[i]:\n bf[i] = discharge[i]\n\n return bf\n\n\ndef bf_threeparam(discharge, k, C, a):\n bf = list(range(0, len(discharge)))\n for i in range(1, len(discharge)):\n bf[i] = (k * bf[i - 1] / (1.0 + C)) + (\n (C) * discharge[i] + a * discharge[i - 1] / (1.0 + C)\n )\n if bf[i] > discharge[i]:\n bf[i] = discharge[i]\n\n return bf" ]
[ [ "numpy.array", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.exp", "numpy.any", "numpy.where", "numpy.power" ] ]
renqianluo/DAG2N_PTB
[ "fb7061e48de8f3e787159743d7a7d6e2f9f5dbd1" ]
[ "data/process.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\nimport numpy as np\n\n\ndef main():\n with open(\"train\") as finp:\n lines = finp.read().strip().replace(\"\\n\", \"<eos>\")\n words = lines.split(\" \")\n\n vocab, index = {}, {}\n for word in sorted(words):\n if word not in vocab:\n index[len(vocab)] = word\n vocab[word] = len(vocab)\n print(\"vocab size: {}\".format(len(vocab)))\n\n x_train = [vocab[word] for word in words] + [vocab[\"<eos>\"]]\n x_train = np.array(x_train, dtype=np.int32)\n\n with open(\"valid\") as finp:\n lines = finp.read().strip().replace(\"\\n\", \"<eos>\")\n words = lines.split(\" \")\n\n x_valid = [vocab[word] for word in words] + [vocab[\"<eos>\"]]\n x_valid = np.array(x_valid, dtype=np.int32)\n\n with open(\"test\") as finp:\n lines = finp.read().strip().replace(\"\\n\", \"<eos>\")\n words = lines.split(\" \")\n\n x_test = [vocab[word] for word in words] + [vocab[\"<eos>\"]]\n x_test = np.array(x_test, dtype=np.int32)\n\n print(\"train size: {}\".format(np.size(x_train)))\n print(\"valid size: {}\".format(np.size(x_valid)))\n print(\"test size: {}\".format(np.size(x_test)))\n\n with open(\"ptb.pkl\", \"w\") as fout:\n pickle.dump((x_train, x_valid, x_test, vocab, index), fout, protocol=2)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "numpy.size" ] ]
jaymwong/CppNumericalSolvers
[ "2a0f98e7c54c35325641e05c035e43cafd570808" ]
[ "tensorflow/configure.py" ]
[ "import tensorflow as tf\nimport sys\n\nif '__cxx11_abi_flag__' not in dir(tf):\n print(\"Cannot find the ABI version of TensorFlow.\")\n print(\"Your TensorFlow version is too old. Please upgrade to at least TF v1.4.\")\n sys.exit(1)\n\nwith open(\"tensorflow_config.txt\", \"w\") as f:\n print(\"TensorFlow_ABI: {}\".format(tf.__cxx11_abi_flag__))\n f.write(\"set(TensorFlow_ABI %i)\\n\" % tf.__cxx11_abi_flag__)\n print(\"TensorFlow_INCLUDE_DIRS: {}\".format(tf.sysconfig.get_include()))\n f.write(\"set(TensorFlow_INCLUDE_DIRS \\\"%s\\\")\\n\" % tf.sysconfig.get_include())\n\n" ]
[ [ "tensorflow.sysconfig.get_include" ] ]
dimitri-yatsenko/photix
[ "906e5637c8e8172e1f57c3a6f04b55db355effb2" ]
[ "photix/demix.py" ]
[ "import numpy as np\nimport tqdm\nimport datajoint as dj\nimport scipy\nfrom .sim import Fluorescence, Detection, Tissue\n\nschema = dj.schema('photixxx')\n\n\n@schema\nclass Sample(dj.Lookup):\n definition = \"\"\"\n sample : tinyint unsigned \n ---\n density : int # cells per cubic mm\n \"\"\"\n contents = [\n (0, 1000), (1, 3000), (2, 5000), (3, 10_000),\n (4, 20_000), (5, 35_000), (6, 50_000), (7, 75_000), (8, 100_000)]\n\n\n@schema\nclass IlluminationCycle(dj.Computed):\n definition = \"\"\"\n -> Fluorescence\n -> Detection\n ---\n nframes : smallint unsigned # number of illumination frames\n illumination : longblob # frames x emitters\n \"\"\"\n\n def make(self, key):\n emission = np.stack(\n [x for x in (Fluorescence.EField & key).fetch('emit_probabilities')]) # emitters x sources\n detection = np.stack(\n [x for x in (Detection.DField & key).fetch('detect_probabilities')]) # detectors x sources\n assert emission.dtype == np.float32 and detection.dtype == np.float32\n npoints, density = (Tissue & key).fetch1('npoints', 'density')\n target_rank = npoints / density * 120000\n illumination = np.identity(emission.shape[0], dtype=np.uint8)\n nframes = int(np.ceil(target_rank / detection.shape[0]))\n\n qq = emission @ detection.T\n qq = qq @ qq.T\n\n # combine illumination patterns with minimum overlap\n for _ in tqdm.tqdm(range(illumination.shape[0] - nframes)):\n i, j = np.triu_indices(qq.shape[1], 1)\n ix = np.argmin(qq[i, j])\n i, j = i[ix], j[ix]\n illumination[i] += illumination[j]\n illumination = np.delete(illumination, j, 0)\n qq[i, :] += qq[j, :]\n qq[:, i] += qq[:, j]\n qq = np.delete(qq, j, 0)\n qq = np.delete(qq, j, 1)\n\n self.insert1(dict(key, nframes=nframes, illumination=illumination))\n\n\n@schema\nclass Demix(dj.Computed):\n definition = \"\"\"\n -> IlluminationCycle\n -> Sample\n ---\n dt : float # s\n dark_noise : float # s^-1\n emitter_power : float # mW\n mean_fluorescence : float # fraction\n selection : longblob # selected cells\n mix_norm : longblob # cell's mixing vector norm\n demix_norm : longblob # cell's demixing vector norm\n bias_norm : longblob # cell's bias vector norm\n trans_bias_norm : longblob # don't use. Saved just in case of wrong axis choice\n total_power : float # mW\n \"\"\"\n\n def make(self, key):\n dt = 0.002 # (s) sample duration (one illumination cycle)\n dark_noise = 300 # counts per second\n seed = 0\n emitter_power = 1e-4 # 100 uW\n detector_efficiency = 0.6\n mean_fluorescence = 0.05 # e.g. 
0.03 = 0.05 times 60% detector efficiency\n photons_per_joule = 2.4e18\n\n # load the emission and detection matrices\n npoints, volume = (Tissue & key).fetch1('npoints', 'volume')\n target_density = (Sample & key).fetch1('density')\n\n selection = np.r_[:npoints] < int(np.round(target_density) * volume)\n np.random.seed(seed)\n np.random.shuffle(selection)\n\n illumination = (IlluminationCycle & key).fetch1('illumination')\n nframes = illumination.shape[0]\n illumination = emitter_power * illumination * dt / nframes # joules\n emission = photons_per_joule * mean_fluorescence * np.stack(\n [x[selection] for x in (Fluorescence.EField & key).fetch('emit_probabilities')]) # E-pixels x sources\n emission = illumination @ emission # photons per frame\n\n detection = detector_efficiency * np.stack(\n [x[selection] for x in (Detection.DField & key).fetch('detect_probabilities')]) # D-pixels x sources\n\n # construct the mixing matrix mix: nchannels x ncells\n # mix = number of photons from neuron per frame at full fluorescence\n ncells = detection.shape[1]\n ndetectors = detection.shape[0]\n nchannels = nframes * ndetectors\n mix = np.ndarray(dtype='float32', shape=(nchannels, ncells))\n for ichannel in range(0, nchannels, ndetectors):\n mix[ichannel:ichannel + ndetectors] = detection * emission[ichannel // ndetectors]\n\n # normalize channels by their noise\n nu = dark_noise * dt / nframes\n weights = 1 / np.sqrt(mix.sum(axis=1, keepdims=True) * mean_fluorescence + nu) # used to be axis=0\n mix *= weights\n\n # regularized demix matrix\n kmax = 1e6\n square = mix.T @ mix\n identity = np.identity(mix.shape[1])\n alpha = np.sqrt(scipy.linalg.eigh(\n square, eigvals_only=True, eigvals=(ncells - 1, ncells - 1))[0]) / (2 * kmax)\n square += alpha ** 2 * identity\n demix = np.linalg.inv(square) @ mix.T\n\n # bias matrix\n bias = demix @ mix - identity\n\n self.insert1(dict(\n key,\n dt=dt,\n dark_noise=dark_noise,\n emitter_power=emitter_power*1e3,\n mean_fluorescence=mean_fluorescence,\n selection=selection,\n total_power=illumination.sum()/dt*1e3,\n mix_norm=np.linalg.norm(mix, axis=0),\n demix_norm=np.linalg.norm(demix, axis=1),\n bias_norm=np.linalg.norm(bias, axis=1),\n trans_bias_norm=np.linalg.norm(bias, axis=0)))\n\n\n@schema\nclass Cosine(dj.Computed):\n definition = \"\"\"\n -> Demix\n ---\n cosines : longblob\n \"\"\"\n\n def make(self, key):\n max_bias = 0.01\n mix_norm, demix_norm, bias_norm = (Demix & key).fetch1('mix_norm', 'demix_norm', 'bias_norm')\n cosines = (bias_norm < max_bias) / (mix_norm * demix_norm)\n self.insert1(dict(key, cosines=cosines))\n\n\n@schema\nclass SpikeSNR(dj.Computed):\n definition = \"\"\"\n -> Demix\n ---\n snr : longblob\n tau : float\n delta : float\n rho : float\n avg_snr : float\n frac_above_1 : float\n \"\"\"\n\n def make(self, key):\n max_bias = 0.01\n tau = 1.5\n dt, mean_fluorescence, inner_count, selection, demix_norm, bias = (\n Demix * Tissue & key).fetch1(\n 'dt', 'mean_fluorescence',\n 'inner_count', 'selection', 'demix_norm', 'bias_norm')\n inner = selection.copy()\n inner[inner_count:] = False # exclude cells outside the probe\n inner = inner[selection]\n delta = mean_fluorescence * 0.4\n demix_norm, bias = (Demix & key).fetch1('demix_norm', 'bias_norm')\n h = np.exp(-np.r_[0:6 * tau:dt] / tau)\n rho = np.sqrt((h**2).sum())/h[0]\n snr = (bias < max_bias) * rho * delta / demix_norm\n\n self.insert1(dict(key,\n snr=snr, delta=delta, rho=rho, tau=tau, avg_snr=snr[inner].mean(),\n frac_above_1=(snr[inner] >= 1.0).mean()))\n" ]
[ [ "numpy.ceil", "numpy.delete", "numpy.triu_indices", "numpy.linalg.norm", "numpy.argmin", "scipy.linalg.eigh", "numpy.random.seed", "numpy.round", "numpy.exp", "numpy.random.shuffle", "numpy.identity", "numpy.ndarray", "numpy.linalg.inv" ] ]
Filco306/TopologyLayer
[ "3da7af35a58bd1438d28d6cca49b40f90cb7ee14" ]
[ "examples/paper/regression/rips.py" ]
[ "import numpy as np\n\nfrom problems import generate_rips_problem\nimport torch\nimport torch.nn as nn\nfrom topologylayer.nn import *\nfrom util import penalized_ls, run_trials, run_trials_ols, get_stats, gen_snr_stats, gen_dim_stats\nfrom penalties import NormLoss\n\n\nclass TopLoss(nn.Module):\n def __init__(self):\n super(TopLoss, self).__init__()\n self.pdfn = RipsLayer(maxdim=0)\n self.topfn = SumBarcodeLengths()\n\n def forward(self, beta):\n dgminfo = self.pdfn(beta)\n return self.topfn(dgminfo)\n\n\nclass TopLoss2(nn.Module):\n def __init__(self):\n super(TopLoss2, self).__init__()\n self.pdfn = RipsLayer(maxdim=0)\n self.topfn = PartialSumBarcodeLengths(dim=0, skip=2)\n\n def forward(self, beta):\n dgminfo = self.pdfn(beta)\n return self.topfn(dgminfo)\n\n# number of features\np = 100\n\ntpen1 = TopLoss() # sum of barcodes\ntpen2 = TopLoss2() # sum of all but top 2\nlpen1 = NormLoss(p=1) # L1 penalty\nlpen2 = NormLoss(p=2) # L2 penalty\n\n# run regularization trials\nsigma = 0.1\nlams = np.logspace(-3, 0, 10)\nns = np.arange(30, 150, 10)\n\n\ndef save_csvs(problem, pen, mses, qs, lamopt):\n fname = 'results/rips_' + problem + '_mses_' + pen + '.csv'\n np.savetxt(fname, mses, delimiter=',')\n fname = 'results/rips_' + problem + '_qs_' + pen + '.csv'\n np.savetxt(fname, qs, delimiter=',')\n fname = 'results/rips_' + problem + '_lam_' + pen + '.csv'\n np.savetxt(fname, lamopt, delimiter=',')\n\n\nproblem = '123'\nbeta0 = generate_rips_problem([1., 2., 3.], p)\nnp.savetxt('results/rips_' + problem + '_beta0.csv', beta0, delimiter=',')\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, None, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'ols', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, lpen1, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'lpen1', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, lpen2, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'lpen2', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, tpen1, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'tpen1', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, tpen2, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'tpen2', mses, qs, lamopt)\n\nproblem = '101'\nbeta0 = generate_rips_problem([-1., 0., 1.], p)\nnp.savetxt('results/rips_' + problem + '_beta0.csv', beta0, delimiter=',')\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, None, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'ols', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, lpen1, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'lpen1', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, lpen2, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'lpen2', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, tpen1, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'tpen1', mses, qs, lamopt)\nmses, qs, lamopt = gen_dim_stats(beta0, ns, sigma, lams, tpen2, ntrials=100, maxiter=200, ncv=50)\nsave_csvs(problem, 'tpen2', mses, qs, lamopt)\n" ]
[ [ "numpy.arange", "numpy.logspace", "numpy.savetxt" ] ]
Krissmedt/runko
[ "073306de9284f1502d0538d33545bc14c80e8b93" ]
[ "projects/kms_penning/col_pypic.py" ]
[ "# -*- coding: utf-8 -*-\n\n# system libraries\nfrom __future__ import print_function\nfrom mpi4py import MPI\nimport numpy as np\nimport sys, os\nimport matplotlib.pyplot as plt\nimport time\n\n# runko + auxiliary modules\nimport pytools # runko python tools\n\n# Runko-Python functionality by Krissmedt\nfrom pyhack.coll_setup import coll\nfrom pyhack.coll_pusher import *\nfrom pyhack.py_runko_aux_3d import *\n\n# problem specific modules\nnp.random.seed(1)\nfrom init_problem import Configuration_Gyro as Configuration\n\ndebug = False\n\ndef py_init(conf):\n t = [0]\n x = [np.array([conf.x_start])]\n y = [np.array([conf.NyMesh/2+ conf.NyMesh/4.])]\n z = [np.array([conf.NzMesh/2+ conf.NzMesh/4.])]\n vx = [np.array([conf.ux])]\n vy = [np.array([conf.uy])]\n vz = [np.array([conf.uz])]\n xres = [np.zeros(1,dtype=np.float)]\n vres = [np.zeros(1,dtype=np.float)]\n\n return t,x,y,z,vx,vy,vz,xres,vres\n\n\ndef debug_print(n, msg):\n if debug:\n print(\"{}: {}\".format(n.rank(), msg))\n sys.stdout.flush()\n\ndef direct_inject(grid, conf):\n cid = grid.id(0,0,0)\n c = grid.get_tile(cid)\n container = c.get_container(0)\n\n x = conf.x_start\n y = conf.NyMesh/2. + conf.NyMesh/4.\n z = conf.NzMesh/2. + conf.NzMesh/4.\n x01 = [x,y,z]\n\n vx = conf.ux\n vy = conf.uy\n vz = conf.uz\n u01 = [vx,vy,vz]\n\n container.add_particle(x01,u01,1.0)\n # container.add_particle(x02,u02,1.0)\n\n x0 = [x01]\n u0 = [u01]\n\n return x0,u0\n\n\n# Field initialization (guide field)\ndef insert_em(grid, conf):\n\n #into radians\n btheta = conf.btheta/180.*np.pi\n bphi = conf.bphi/180.*np.pi\n\n kk = 0\n for cid in grid.get_tile_ids():\n tile = grid.get_tile(cid)\n yee = tile.get_yee(0)\n\n ii,jj,kk = tile.index if conf.threeD else (*tile.index, 0)\n\n for n in range(conf.NzMesh):\n for m in range(-3, conf.NyMesh+3):\n for l in range(-3, conf.NxMesh+3):\n # get global coordinates\n iglob, jglob, kglob = pytools.ind2loc((ii, jj, kk), (l, m, n), conf)\n yee.bx[l,m,n] = 0. #conf.binit*np.cos(btheta)\n yee.by[l,m,n] = 0. 
#conf.binit*np.sin(btheta)*np.sin(bphi)\n yee.bz[l,m,n] = conf.binit #conf.binit*np.sin(btheta)*np.cos(bphi)\n\n yee.ex[l,m,n] = (conf.NxMesh/2.-iglob) * conf.einit\n yee.ey[l,m,n] = (conf.NyMesh/2.-jglob) * conf.einit #-beta*yee.bz[l,m,n]\n yee.ez[l,m,n] = -2*(conf.NzMesh/2.-kglob) * conf.einit #beta*yee.by[l,m,n]\n\n\nif __name__ == \"__main__\":\n\n do_plots = True\n do_print = True\n\n if MPI.COMM_WORLD.Get_rank() == 0:\n do_print = True\n\n if do_print:\n print(\"\")\n print(\"\")\n print(\"Running with {} MPI processes.\".format(MPI.COMM_WORLD.Get_size()))\n\n ##################################################\n # set up plotting and figure\n try:\n if do_plots:\n pass\n except:\n #print()\n pass\n\n\n # Timer for profiling\n timer = pytools.Timer()\n timer.start(\"total\")\n timer.start(\"init\")\n\n timer.do_print = do_print\n\n\n # parse command line arguments\n # parser = argparse.ArgumentParser(description='Simple PIC-Maxwell simulations')\n # parser.add_argument('--conf', dest='conf_filename', default=None,\n # help='Name of the configuration file (default: None)')\n args = pytools.parse_args()\n if args.conf_filename == None:\n conf = Configuration('gyration.ini', do_print=do_print)\n else:\n if do_print:\n print(\"Reading configuration setup from \", args.conf_filename)\n conf = Configuration(args.conf_filename, do_print=do_print)\n\n if conf.threeD:\n # 3D modules\n import pycorgi.threeD as pycorgi # corgi ++ bindings\n import pyrunko.pic.threeD as pypic # runko pic c++ bindings\n import pyrunko.fields.threeD as pyfld # runko fld c++ bindings\n\n elif conf.twoD:\n # 2D modules\n import pycorgi.twoD as pycorgi # corgi ++ bindings\n import pyrunko.pic.twoD as pypic # runko pic c++ bindings\n import pyrunko.fields.twoD as pyfld # runko fld c++ bindings\n\n grid = pycorgi.Grid(conf.Nx, conf.Ny, conf.Nz)\n\n xmin = 0.0\n xmax = conf.Nx*conf.NxMesh #XXX scaled length\n ymin = 0.0\n ymax = conf.Ny*conf.NyMesh\n grid.set_grid_lims(conf.xmin, conf.xmax, conf.ymin, conf.ymax,conf.zmin,conf.zmax)\n\n # compute initial mpi ranks using Hilbert's curve partitioning\n pytools.balance_mpi(grid, conf)\n\n # load pic tiles into grid\n pytools.pic.load_tiles(grid, conf)\n\n ##################################################\n # create output folders\n if grid.master():\n pytools.create_output_folders(conf)\n\n # get current restart file status\n io_stat = pytools.check_for_restart(conf)\n\n if io_stat[\"do_initialization\"]:\n if do_print:\n print(\"initializing simulation...\")\n lap = 1\n\n np.random.seed(1) # sync rnd generator seed for different mpi ranks\n\n # initialising solution arrays\n t,x,y,z,vx,vy,vz,xres,vres = py_init(conf)\n # injecting plasma particles\n prtcl_stat = direct_inject(grid,conf) #inject plasma particles individually by loc,vel\n if do_print:\n print(\"injected:\")\n print(\" e- prtcls: {}\".format(prtcl_stat[0]))\n print(\" e+ prtcls: {}\".format(prtcl_stat[1]))\n\n # inserting em grid\n insert_em(grid, conf)\n\n else:\n if do_print:\n print(\"restarting simulation from lap {}...\".format(io_stat[\"lap\"]))\n\n # read restart files\n pyfld.read_yee(grid, io_stat[\"read_lap\"], io_stat[\"read_dir\"])\n pypic.read_particles(grid, io_stat[\"read_lap\"], io_stat[\"read_dir\"])\n\n # step one step ahead\n lap = io_stat[\"lap\"] + 1\n\n\n\n #static load balancing setup; communicate neighbor info once\n debug_print(grid, \"analyze bcs\")\n grid.analyze_boundaries()\n debug_print(grid, \"send tiles\")\n grid.send_tiles()\n debug_print(grid, \"recv tiles\")\n 
grid.recv_tiles()\n MPI.COMM_WORLD.barrier()\n\n #sys.exit()\n\n debug_print(grid, \"init virs\")\n pytools.pic.load_virtual_tiles(grid, conf)\n\n\n timer.stop(\"init\")\n timer.stats(\"init\")\n\n\n # end of initialization\n ##################################################\n debug_print(grid, \"solvers\")\n\n\n # visualize initial condition\n if do_plots:\n try:\n plotNode( axs[0], grid, conf)\n #plotXmesh(axs[1], grid, conf, 0, \"x\")\n saveVisz(-1, grid, conf)\n except:\n pass\n\n\n Nsamples = conf.Nt\n pushloc = pypic.VerletLocPusher()\n pushvel = pypic.VerletVelPusher()\n\n\n fldprop = pyfld.FDTD2()\n # fldprop = pyfld.FDTD4()\n fintp = pypic.LinearInterpolator()\n currint = pypic.ZigZag()\n # analyzer = pypic.Analyzator()\n flt = pyfld.Binomial2(conf.NxMesh, conf.NyMesh, conf.NzMesh)\n\n #enhance numerical speed of light slightly to suppress numerical Cherenkov instability\n fldprop.corr = 1.0\n\n debug_print(grid, \"mpi_e\")\n grid.send_data(1)\n grid.recv_data(1)\n grid.wait_data(1)\n\n debug_print(grid, \"mpi_b\")\n grid.send_data(2)\n grid.recv_data(2)\n grid.wait_data(2)\n\n for tile in pytools.tiles_all(grid):\n tile.update_boundaries(grid)\n\n ##################################################\n sys.stdout.flush()\n\n#-----------------------------------------------------------------------------#\n################################ Simulation Loop ##############################\n#-----------------------------------------------------------------------------#\n col = coll(tile,dtf=conf.dtf,M=conf.M,K=0)\n\n time = lap*(conf.dtf*conf.cfl/conf.c_omp)\n for lap in range(lap, conf.Nt+1):\n debug_print(grid, \"lap_start\")\n #--------------------------------------------------\n #push particles\n timer.start_comp(\"push\")\n debug_print(grid, \"push\")\n\n for tile in pytools.tiles_local(grid):\n implicit_coll(tile,col,fintp,timer)\n timer.stop_comp(\"push\")\n\n ##################################################\n # particle communication (only local/boundary tiles)\n\n #--------------------------------------------------\n #local particle exchange (independent)\n timer.start_comp(\"check_outg_prtcls\")\n debug_print(grid, \"check_outg_prtcls\")\n\n for tile in pytools.tiles_local(grid):\n tile.check_outgoing_particles()\n\n timer.stop_comp(\"check_outg_prtcls\")\n\n #--------------------------------------------------\n # global mpi exchange (independent)\n timer.start_comp(\"pack_outg_prtcls\")\n debug_print(grid, \"pack_outg_prtcls\")\n\n for tile in pytools.tiles_boundary(grid):\n tile.pack_outgoing_particles()\n\n timer.stop_comp(\"pack_outg_prtcls\")\n\n # --------------------------------------------------\n # MPI global particle exchange\n # transfer primary and extra data\n t1 = timer.start_comp(\"mpi_prtcls\")\n grid.send_data(3)\n grid.recv_data(3)\n grid.wait_data(3)\n\n # orig just after send3\n grid.send_data(4)\n grid.recv_data(4)\n grid.wait_data(4)\n timer.stop_comp(t1)\n\n # --------------------------------------------------\n # global unpacking (independent)\n t1 = timer.start_comp(\"unpack_vir_prtcls\")\n for tile in pytools.tiles_virtual(grid):\n tile.unpack_incoming_particles()\n tile.check_outgoing_particles()\n timer.stop_comp(t1)\n\n # --------------------------------------------------\n # transfer local + global\n t1 = timer.start_comp(\"get_inc_prtcls\")\n for tile in pytools.tiles_local(grid):\n tile.get_incoming_particles(grid)\n timer.stop_comp(t1)\n\n # --------------------------------------------------\n # delete local transferred particles\n t1 = 
timer.start_comp(\"del_trnsfrd_prtcls\")\n for tile in pytools.tiles_local(grid):\n tile.delete_transferred_particles()\n timer.stop_comp(t1)\n\n # --------------------------------------------------\n # delete all virtual particles (because new prtcls will come)\n t1 = timer.start_comp(\"del_vir_prtcls\")\n for tile in pytools.tiles_virtual(grid):\n tile.delete_all_particles()\n timer.stop_comp(t1)\n\n ##################################################\n # filter\n timer.start_comp(\"filter\")\n\n #sweep over npasses times\n for fj in range(conf.npasses):\n\n #update global neighbors (mpi)\n grid.send_data(0)\n grid.recv_data(0)\n grid.wait_data(0)\n\n #get halo boundaries\n for cid in grid.get_local_tiles():\n tile = grid.get_tile(cid)\n tile.update_boundaries(grid)\n\n #filter each tile\n for cid in grid.get_local_tiles():\n tile = grid.get_tile(cid)\n flt.solve(tile)\n\n MPI.COMM_WORLD.barrier() # sync everybody\n\n\n # --------------------------------------------------\n timer.stop_comp(\"filter\")\n\n ##################################################\n # data reduction and I/O\n cid = grid.id(0,0,0)\n c = grid.get_tile(cid)\n container = c.get_container(0)\n\n t.append(time)\n x.append(container.loc(0))\n y.append(container.loc(1))\n z.append(container.loc(2))\n vx.append(container.vel(0))\n vy.append(container.vel(1))\n vz.append(container.vel(2))\n xres.append(np.linalg.norm(col.Rx,axis=1))\n vres.append(np.linalg.norm(col.Rv,axis=1))\n\n timer.lap(\"step\")\n if (lap % conf.interval == 0):\n debug_print(grid, \"io\")\n if do_print:\n print(\"--------------------------------------------------\")\n print(\"------ lap: {} / t: {}\".format(lap, time))\n\n print(\"------------------------------------------------------\")\n print(\"x-position:\" + str(x[lap]))\n print(\"y-position:\" + str(y[lap]))\n print(\"x-vel:\" + str(vx[lap]))\n print(\"y-vel:\" + str(vy[lap]))\n print(\"------------------------------------------------------\")\n\n #for cid in grid.get_tile_ids():\n # tile = grid.get_tile(cid)\n # tile.erase_temporary_arrays()\n\n timer.stats(\"step\")\n timer.comp_stats()\n timer.purge_comps()\n\n #analyze (independent)\n timer.start(\"io\")\n\n\n #--------------------------------------------------\n #2D plots\n if do_plots:\n try:\n pass\n\n except:\n #print()\n pass\n timer.stop(\"io\")\n\n\n timer.stats(\"io\")\n timer.start(\"step\") #refresh lap counter (avoids IO profiling)\n\n sys.stdout.flush()\n\n #next step\n time += conf.dtf*conf.cfl/conf.c_omp\n\n #end of loop\n\n timer.stop(\"total\")\n timer.stats(\"total\")\n\n filename = \"coll_M{0}_{1}\".format(conf.M,conf.name)\n output_sdc(t,x,y,z,vx,vy,vz,xres,vres,conf,filename)\n wp_dump(t,x,y,z,vx,vy,vz,conf,filename)\n\n print(\"\")\n print(\"------------------------------------- END ------------------------------------\")\n" ]
[ [ "numpy.random.seed", "numpy.array", "numpy.linalg.norm", "numpy.zeros" ] ]
WojciechKusa/datasets
[ "1406a04c3e911cec2680d8bc513653e0cafcaaa4" ]
[ "src/datasets/fingerprint.py" ]
[ "import inspect\nimport json\nimport os\nimport random\nimport shutil\nimport tempfile\nimport weakref\nfrom dataclasses import asdict\nfrom functools import wraps\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n\nimport numpy as np\nimport pyarrow as pa\nimport xxhash\n\nfrom datasets.table import ConcatenationTable, InMemoryTable, MemoryMappedTable, Table\n\nfrom .info import DatasetInfo\nfrom .utils.logging import get_logger\nfrom .utils.py_utils import dumps\n\n\nif TYPE_CHECKING:\n from .arrow_dataset import Dataset\n\n\nlogger = get_logger(__name__)\n\n\n# Fingerprinting allows to have one deterministic fingerprint per dataset state.\n# A dataset fingerprint is updated after each transform.\n# Re-running the same transforms on a dataset in a different session results in the same fingerprint.\n# This is possible thanks to a custom hashing function that works with most python objects.\n\n# Fingerprinting is the main mechanism that enables caching.\n# The caching mechanism allows to reload an existing cache file if it's already been computed.\n\n\n#################\n# Caching\n#################\n\n_CACHING_ENABLED = True\n_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional[\"_TempDirWithCustomCleanup\"] = None\n_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None\n\n\nclass _TempDirWithCustomCleanup:\n \"\"\"\n A temporary directory with a custom cleanup function.\n We need a custom temporary directory cleanup in order to delete the dataset objects that have\n cache files in the temporary directory before deleting the dorectory itself.\n \"\"\"\n\n def __init__(self, cleanup_func=None, *cleanup_func_args, **cleanup_func_kwargs):\n self.name = tempfile.mkdtemp()\n self._finalizer = weakref.finalize(self, self._cleanup)\n self._cleanup_func = cleanup_func\n self._cleanup_func_args = cleanup_func_args\n self._cleanup_func_kwargs = cleanup_func_kwargs\n\n def _cleanup(self):\n self._cleanup_func(*self._cleanup_func_args, **self._cleanup_func_kwargs)\n if os.path.exists(self.name):\n shutil.rmtree(self.name)\n\n def cleanup(self):\n if self._finalizer.detach():\n self._cleanup()\n\n\ndef maybe_register_dataset_for_temp_dir_deletion(dataset):\n \"\"\"\n This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order\n to properly delete them before deleting the temporary directory.\n The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.\n \"\"\"\n if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:\n return\n\n global _DATASETS_WITH_TABLE_IN_TEMP_DIR\n if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None:\n _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet()\n if any(\n Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file[\"filename\"]).parents\n for cache_file in dataset.cache_files\n ):\n _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)\n\n\ndef get_datasets_with_cache_file_in_temp_dir():\n return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []\n\n\ndef set_caching_enabled(boolean: bool):\n \"\"\"\n When applying transforms on a dataset, the data are stored in cache files.\n The caching mechanism allows to reload an existing cache file if it's already been computed.\n\n Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated\n after each transform.\n\n If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.\n More precisely, 
if the caching is disabled:\n - cache files are always recreated\n - cache files are written to a temporary directory that is deleted when session closes\n - cache files are named using a random hash instead of the dataset fingerprint\n - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes\n - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use\n the ``download_mode`` parameter in :func:`datasets.load_dataset`.\n \"\"\"\n global _CACHING_ENABLED\n _CACHING_ENABLED = bool(boolean)\n\n\ndef is_caching_enabled() -> bool:\n \"\"\"\n When applying transforms on a dataset, the data are stored in cache files.\n The caching mechanism allows to reload an existing cache file if it's already been computed.\n\n Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated\n after each transform.\n\n If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.\n More precisely, if the caching is disabled:\n - cache files are always recreated\n - cache files are written to a temporary directory that is deleted when session closes\n - cache files are named using a random hash instead of the dataset fingerprint\n - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes\n - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use\n the ``download_mode`` parameter in :func:`datasets.load_dataset`.\n \"\"\"\n global _CACHING_ENABLED\n return bool(_CACHING_ENABLED)\n\n\ndef get_temporary_cache_files_directory() -> str:\n \"\"\"Return a directory that is deleted when session closes.\"\"\"\n global _TEMP_DIR_FOR_TEMP_CACHE_FILES\n if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:\n\n # Avoids a PermissionError on Windows caused by the datasets referencing\n # the files from the cache directory on clean-up\n def cleanup_func():\n for dset in get_datasets_with_cache_file_in_temp_dir():\n dset.__del__()\n\n _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempDirWithCustomCleanup(cleanup_func=cleanup_func)\n return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name\n\n\n#################\n# Hashing\n#################\n\n\ndef hashregister(*types):\n def proxy(func):\n for t in types:\n Hasher.dispatch[t] = func\n return func\n\n return proxy\n\n\nclass Hasher:\n \"\"\"Hasher that accepts python objects as inputs.\"\"\"\n\n dispatch: Dict = {}\n\n def __init__(self):\n self.m = xxhash.xxh64()\n\n @classmethod\n def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:\n value = [value] if isinstance(value, bytes) else value\n m = xxhash.xxh64()\n for x in value:\n m.update(x)\n return m.hexdigest()\n\n @classmethod\n def hash_default(cls, value: Any) -> str:\n return cls.hash_bytes(dumps(value))\n\n @classmethod\n def hash(cls, value: Any) -> str:\n if type(value) in cls.dispatch:\n return cls.dispatch[type(value)](cls, value)\n else:\n return cls.hash_default(value)\n\n def update(self, value: Any) -> None:\n header_for_update = f\"=={type(value)}==\"\n value_for_update = self.hash(value)\n self.m.update(header_for_update.encode(\"utf8\"))\n self.m.update(value_for_update.encode(\"utf-8\"))\n\n def hexdigest(self) -> str:\n return self.m.hexdigest()\n\n\n# Register a new hasher can be useful for two possible reasons:\n# 1 - optimize the hashing of large amount of data (e.g. 
pa.Table)\n# 2 - take advantage of a custom serialization method (e.g. DatasetInfo)\n\n\n@hashregister(pa.Table, Table, InMemoryTable, MemoryMappedTable, ConcatenationTable)\ndef _hash_pa_table(hasher, value):\n def _hash_pa_array(value):\n if isinstance(value, pa.ChunkedArray):\n return hasher.hash_bytes(c.to_string().encode(\"utf-8\") for c in value.chunks)\n else:\n return hasher.hash_bytes(value.to_string().encode(\"utf-8\"))\n\n value = \"-\".join(col + \"-\" + _hash_pa_array(value[col]) for col in sorted(value.column_names))\n return hasher.hash_bytes(value.encode(\"utf-8\"))\n\n\n@hashregister(DatasetInfo)\ndef _hash_dataset_info(hasher, value):\n return hasher.hash_bytes(json.dumps(asdict(value), sort_keys=True).encode(\"utf-8\"))\n\n\n#################\n# Fingerprinting\n#################\n\n# we show a warning only once when fingerprinting fails to avoid spam\nfingerprint_warnings: Dict[str, bool] = {}\n\n\ndef generate_fingerprint(dataset) -> str:\n state = dataset.__dict__\n hasher = Hasher()\n for key in sorted(state):\n if key == \"_fingerprint\":\n continue\n hasher.update(key)\n hasher.update(state[key])\n # hash data files last modification timestamps as well\n for cache_file in dataset.cache_files:\n hasher.update(os.path.getmtime(cache_file[\"filename\"]))\n return hasher.hexdigest()\n\n\ndef generate_random_fingerprint(nbits=64) -> str:\n return f\"{random.getrandbits(nbits):0{nbits//4}x}\"\n\n\ndef update_fingerprint(fingerprint, transform, transform_args):\n global fingerprint_warnings\n hasher = Hasher()\n hasher.update(fingerprint)\n try:\n hasher.update(transform)\n except: # noqa various errors might raise here from pickle or dill\n if _CACHING_ENABLED:\n if not fingerprint_warnings.get(\"update_fingerprint_transform_hash_failed\", False):\n logger.warning(\n f\"Transform {transform} couldn't be hashed properly, a random hash was used instead. \"\n \"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. \"\n \"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. \"\n \"This warning is only showed once. Subsequent hashing failures won't be showed.\"\n )\n fingerprint_warnings[\"update_fingerprint_transform_hash_failed\"] = True\n else:\n logger.info(f\"Transform {transform} couldn't be hashed properly, a random hash was used instead.\")\n else:\n logger.info(\n f\"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled.\"\n )\n\n return generate_random_fingerprint()\n for key in sorted(transform_args):\n hasher.update(key)\n try:\n hasher.update(transform_args[key])\n except: # noqa various errors might raise here from pickle or dill\n if _CACHING_ENABLED:\n if not fingerprint_warnings.get(\"update_fingerprint_transform_hash_failed\", False):\n logger.warning(\n f\"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. \"\n \"Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. \"\n \"If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. \"\n \"This warning is only showed once. 
Subsequent hashing failures won't be showed.\"\n )\n fingerprint_warnings[\"update_fingerprint_transform_hash_failed\"] = True\n else:\n logger.info(\n f\"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead.\"\n )\n else:\n logger.info(\n f\"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled.\"\n )\n return generate_random_fingerprint()\n return hasher.hexdigest()\n\n\ndef fingerprint_transform(\n inplace: bool,\n use_kwargs: Optional[List[str]] = None,\n ignore_kwargs: Optional[List[str]] = None,\n fingerprint_names: Optional[List[str]] = None,\n randomized_function: bool = False,\n version: Optional[str] = None,\n):\n \"\"\"\n Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint``\n\n Args:\n inplace (``bool``): If inplace is True, the fingerprint of the dataset is updated inplace.\n Otherwise, a parameter \"new_fingerprint\" is passed to the wrapped method that should take care of\n setting the fingerprint of the returned Dataset.\n use_kwargs (Optional ``List[str]``): optional white list of argument names to take into account\n to update the fingerprint to the wrapped method that should take care of\n setting the fingerprint of the returned Dataset. By default all the arguments are used.\n ignore_kwargs (Optional ``List[str]``): optional black list of argument names to take into account\n to update the fingerprint. Note that ignore_kwargs prevails on use_kwargs.\n fingerprint_names (Optional ``List[str]``, defaults to [\"new_fingerprint\"]):\n If the dataset transforms is not inplace and returns a DatasetDict, then it can require\n several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names,\n one fingerprint named after each element of fingerprint_names is going to be passed.\n randomized_function (``bool``, defaults to False): If the dataset transform is random and has\n optional parameters \"seed\" and \"generator\", then you can set randomized_function to True.\n This way, even if users set \"seed\" and \"generator\" to None, then the fingerprint is\n going to be randomly generated depending on numpy's current state. In this case, the\n generator is set to np.random.default_rng(np.random.get_state()[1][0]).\n version (Optional ``str``): version of the transform. The version is taken into account when\n computing the fingerprint. If a datase transform changes (or at least if the output data\n that are cached changes), then one should increase the version. 
If the version stays the\n same, then old cached data could be reused that are not compatible with the new transform.\n It should be in the format \"MAJOR.MINOR.PATCH\".\n \"\"\"\n\n if use_kwargs is not None and not isinstance(use_kwargs, list):\n raise ValueError(f\"use_kwargs is supposed to be a list, not {type(use_kwargs)}\")\n\n if ignore_kwargs is not None and not isinstance(ignore_kwargs, list):\n raise ValueError(f\"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}\")\n\n if inplace and fingerprint_names:\n raise ValueError(f\"fingerprint_names are only used when inplace is False\")\n\n fingerprint_names = fingerprint_names if fingerprint_names is not None else [\"new_fingerprint\"]\n\n def _fingerprint(func):\n\n if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names):\n raise ValueError(\"function {func} is missing parameters {fingerprint_names} in signature\")\n\n if randomized_function: # randomized function have seed and generator parameters\n if \"seed\" not in func.__code__.co_varnames:\n raise ValueError(f\"'seed' must be in {func}'s signature\")\n if \"generator\" not in func.__code__.co_varnames:\n raise ValueError(f\"'generator' must be in {func}'s signature\")\n # this has to be outside the wrapper or since __qualname__ changes in multiprocessing\n transform = f\"{func.__module__}.{func.__qualname__}\"\n if version is not None:\n transform += f\"@{version}\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n kwargs_for_fingerprint = kwargs.copy()\n if args:\n params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD]\n self: \"Dataset\" = args[0]\n args = args[1:]\n params = params[1:]\n kwargs_for_fingerprint.update(zip(params, args))\n else:\n self: \"Dataset\" = kwargs.pop(\"self\")\n\n # keep the right kwargs to be hashed to generate the fingerprint\n\n if use_kwargs:\n kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}\n if ignore_kwargs:\n kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}\n if randomized_function: # randomized functions have `seed` and `generator` parameters\n if kwargs_for_fingerprint.get(\"seed\") is None and kwargs_for_fingerprint.get(\"generator\") is None:\n kwargs_for_fingerprint[\"generator\"] = np.random.default_rng(np.random.get_state()[1][0])\n\n # remove kwargs that are the default values\n\n default_values = {\n p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty\n }\n for default_varname, default_value in default_values.items():\n if (\n default_varname in kwargs_for_fingerprint\n and kwargs_for_fingerprint[default_varname] == default_value\n ):\n kwargs_for_fingerprint.pop(default_varname)\n\n # compute new_fingerprint and add it to the args of not in-place transforms\n if inplace:\n new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)\n else:\n for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes\n if kwargs.get(fingerprint_name) is None:\n kwargs_for_fingerprint[\"fingerprint_name\"] = fingerprint_name\n kwargs[fingerprint_name] = update_fingerprint(\n self._fingerprint, transform, kwargs_for_fingerprint\n )\n\n # Call actual function\n\n out = func(self, *args, **kwargs)\n\n # Update fingerprint of in-place transforms + update in-place history of transforms\n\n if inplace: # update after calling func so that the fingerprint 
doesn't change if the function fails\n self._fingerprint = new_fingerprint\n\n return out\n\n wrapper._decorator_name_ = \"fingerprint\"\n return wrapper\n\n return _fingerprint\n" ]
[ [ "numpy.random.get_state" ] ]
evopy/evopy
[ "1ca150c0a4bd76cde3e989aafa114dc476928201" ]
[ "test/test_random.py" ]
[ "\"\"\"Tests for (non-)deterministic behavior in evopy.\"\"\"\nimport numpy as np\nfrom nose.tools import raises\n\nfrom evopy.utils import random_with_seed\n\n\ndef random_integer_seed_test():\n \"\"\"Test if integers are correctly used.\"\"\"\n random = random_with_seed(42)\n assert random.randint(100) == 51\n\n\ndef random_state_seed_test():\n \"\"\"Test if states are correctly used.\"\"\"\n random = random_with_seed(np.random.RandomState(42))\n assert random.randint(100) == 51\n\n\ndef random_none_seed_test():\n \"\"\"Test if none is given the original random is used.\"\"\"\n np.random.seed(42)\n random = random_with_seed(None)\n assert random.randint(100) == 51\n\n\n@raises(ValueError)\ndef random_invalid_seed_test():\n \"\"\"Test if an error is raised when an incorrect parameter is supplied.\"\"\"\n random_with_seed(4.0)\n" ]
[ [ "numpy.random.seed", "numpy.random.RandomState" ] ]
rbtsbg/pgig
[ "d45199b88d5dfbfee27faf8df0e07a2a7afc1765" ]
[ "viz.py" ]
[ "import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom PIL import Image\nfrom matplotlib.colors import LinearSegmentedColormap, BoundaryNorm\nimport numpy as np\n\n\ndef pil_loader(path):\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\ndef plot_cam(attr):\n zero_val = -attr.min()\n attr -= attr.min()\n zero_val /= attr.max()\n attr /= (attr.max() + 1e-20)\n\n cmap=plt.get_cmap('RdBu_r')\n\n neg_bounds = np.linspace(0, zero_val, 100)\n pos_bounds = np.linspace(zero_val, 1, 100)\n bounds = np.concatenate((neg_bounds, pos_bounds))\n norm = BoundaryNorm(bounds, cmap.N)\n\n # plt.imshow(xi)\n p = plt.imshow(attr, interpolation='none', norm=norm, cmap=cmap)\n #plt.colorbar()\n return p\n\ndef plot_bbox(bboxes, xi, linewidth=1):\n ax = plt.gca()\n ax.imshow(xi)\n\n if not isinstance(bboxes[0], list):\n bboxes = [bboxes]\n\n for bbox in bboxes:\n rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1],\n linewidth=linewidth, edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n ax.axis('off')\n\n" ]
[ [ "numpy.concatenate", "matplotlib.patches.Rectangle", "matplotlib.pyplot.gca", "matplotlib.pyplot.get_cmap", "matplotlib.colors.BoundaryNorm", "numpy.linspace", "matplotlib.pyplot.imshow" ] ]
woodygzp/test1
[ "1d0f8d63b6a857eab7c2252531703acaae4979f9" ]
[ "bnn/src/training/characters.py" ]
[ "\n'''\nModified version of the mnist.py training script\nDesigned to be used with the NIST SD 19 Handprinted Forms and Characters dataset: https://www.nist.gov/srd/nist-special-database-19\nBest results have been achieved by cropping all images by bounding box of the character, and scaling to 28x28 to emulate the MNIST dataset.\nThis training script assumes \nFor related work released after this project, see https://www.nist.gov/itl/iad/image-group/emnist-dataset\n'''\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport time\n\nimport numpy as np\nnp.random.seed(1234) # for reproducibility\n\n# specifying the gpu to use\n# import theano.sandbox.cuda\n# theano.sandbox.cuda.use('gpu1') \nimport theano\nimport theano.tensor as T\n\nimport lasagne\n\nimport cPickle as pickle\nimport gzip\nimport glob\nfrom scipy import misc\n\nimport binary_net\n\nfrom pylearn2.datasets.mnist import MNIST\nfrom pylearn2.utils import serial\n\nfrom collections import OrderedDict\n#import matplotlib.pyplot as plt\n\ndef loaddata():\n '''\n Loads the NIST SD19 Character dataset, which must can be downloaded from https://www.nist.gov/srd/nist-special-database-19\n Assumes dataset is downloaded in the current directory (..../bnn/src/training) and ordered by class.\n '''\n\n classes = [\"30\", \"31\", \"32\", \"33\", \"34\", \"35\", \"36\", \"37\", \"38\", \"39\", #Digits\n\"41\", \"42\", \"43\", \"44\", \"45\", \"46\", \"47\", \"48\", \"49\", \"4a\", \"4b\", \"4c\", \"4d\", \"4e\", \"4f\", \"50\", \"51\", \"52\", \"53\", \"54\", \"55\", \"56\", \"57\", \"58\", \"59\", \"5a\", #Upper case\n\"61\", \"62\", \"63\", \"64\", \"65\", \"66\", \"67\", \"68\", \"69\", \"6a\", \"6b\", \"6c\", \"6d\", \"6e\", \"6f\", \"70\", \"71\", \"72\", \"73\", \"74\", \"75\", \"76\", \"77\", \"78\", \"79\", \"7a\"] #Lower case\n\n NumImagesPerClassTrain = 300\n NumImagesPerClassTest = 100\n NumImagesPerClassValidation = 50\n\n pngTrain = []\n pngTest = []\n pngValidation = []\n labelsTrain = []\n labelsTest = []\n labelsValidation = []\n\n for glyph in classes:\n i = 0\n print(\"Loading Glyph code: \"+glyph)\n for image_path in glob.glob(\"./by_class/\"+glyph+\"/train_\"+glyph+\"/*.png\"):\n if (i < NumImagesPerClassTrain):\n pngTrain.append(misc.imread(image_path)) \n labelsTrain.append(classes.index(glyph))\n i=i+1\n elif(i < (NumImagesPerClassTrain + NumImagesPerClassValidation)):\n pngValidation.append(misc.imread(image_path)) \n labelsValidation.append(classes.index(glyph))\n i=i+1\n else:\n break\n k = 0\n for image_path in glob.glob(\"./by_class/\"+glyph+\"/hsf_4/*.png\"):\n if (k < NumImagesPerClassTest):\n pngTest.append(misc.imread(image_path)) \n labelsTest.append(classes.index(glyph))\n k=k+1\n else:\n break\n\n labelsTrain = np.asarray(labelsTrain)\n labelsTrain = np.float32(np.eye(62)[labelsTrain])\n labelsTrain = 2*labelsTrain -1.\n\n imgTrain = np.asarray(pngTrain)\n imgTrain = np.float32(imgTrain)\n imgTrain = imgTrain[:,:,:,1]\n imgTrain = 1. - 2*(imgTrain[:,np.newaxis,:,:]/255.)#Normalize between -1 and 1 #need to split this operation up\n\n labelsTest = np.asarray(labelsTest)\n labelsTest = np.float32(np.eye(62)[labelsTest])\n labelsTest = 2*labelsTest -1.\n\n imgTest = np.asarray(pngTest)\n imgTest = np.float32(imgTest)\n imgTest = imgTest[:,:,:,1]\n imgTest = 1. - 2*(imgTest[:,np.newaxis,:,:]/255.)#Normalize. 
#Need to split up operations\n\n labelsValidation = np.asarray(labelsValidation)\n labelsValidation = np.float32(np.eye(62)[labelsValidation])\n labelsValidation = 2*labelsValidation - 1. #Normalize\n\n imgValidation = np.asarray(pngValidation)\n imgValidation = np.float32(imgValidation)\n imgValidation = imgValidation[:,:,:,1]\n imgValidation = 1. - 2*(imgValidation[:,np.newaxis,:,:]/255.)\n\n return (imgTrain, labelsTrain, imgTest, labelsTest, imgValidation, labelsValidation)\n\nif __name__ == \"__main__\":\n \n # BN parameters\n input_size = 128 #Standard NIST SD19 dataset is 128x128 pixels\n batch_size = 100\n print(\"batch_size = \"+str(batch_size))\n # alpha is the exponential moving average factor\n # alpha = .15\n alpha = .1\n print(\"alpha = \"+str(alpha))\n epsilon = 1e-4\n print(\"epsilon = \"+str(epsilon))\n \n # MLP parameters\n num_units = 1024\n print(\"num_units = \"+str(num_units))\n n_hidden_layers = 3\n print(\"n_hidden_layers = \"+str(n_hidden_layers))\n \n # Training parameters\n num_epochs = 1000\n print(\"num_epochs = \"+str(num_epochs))\n \n # Dropout parameters\n dropout_in = .2 # 0. means no dropout\n print(\"dropout_in = \"+str(dropout_in))\n dropout_hidden = .5\n print(\"dropout_hidden = \"+str(dropout_hidden))\n \n # BinaryOut\n activation = binary_net.binary_tanh_unit\n print(\"activation = binary_net.binary_tanh_unit\")\n # activation = binary_net.binary_sigmoid_unit\n # print(\"activation = binary_net.binary_sigmoid_unit\")\n \n # BinaryConnect\n binary = True\n print(\"binary = \"+str(binary))\n stochastic = False\n print(\"stochastic = \"+str(stochastic))\n # (-H,+H) are the two binary values\n # H = \"Glorot\"\n H = 1.\n print(\"H = \"+str(H))\n # W_LR_scale = 1. \n W_LR_scale = \"Glorot\" # \"Glorot\" means we are using the coefficients from Glorot's paper\n print(\"W_LR_scale = \"+str(W_LR_scale))\n \n # Decaying LR \n LR_start = .003\n print(\"LR_start = \"+str(LR_start))\n LR_fin = 0.0000003\n print(\"LR_fin = \"+str(LR_fin))\n LR_decay = (LR_fin/LR_start)**(1./num_epochs)\n print(\"LR_decay = \"+str(LR_decay))\n # BTW, LR decay might good for the BN moving average...\n \n save_path = \"char_parameters.npz\"\n print(\"save_path = \"+str(save_path))\n \n shuffle_parts = 1\n print(\"shuffle_parts = \"+str(shuffle_parts))\n \n print('Loading Character dataset...')\n\n train_setX, train_setY, test_setX, test_setY, valid_setX, valid_setY = loaddata()\n \n print('Building the MLP...') \n \n # Prepare Theano variables for inputs and targets\n input = T.tensor4('inputs')\n target = T.matrix('targets')\n LR = T.scalar('LR', dtype=theano.config.floatX)\n\n mlp = lasagne.layers.InputLayer(\n shape=(None, 1, input_size, input_size),\n input_var=input)\n \n mlp = lasagne.layers.DropoutLayer(\n mlp, \n p=dropout_in)\n \n for k in range(n_hidden_layers):\n\n mlp = binary_net.DenseLayer(\n mlp, \n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n nonlinearity=lasagne.nonlinearities.identity,\n num_units=num_units) \n \n mlp = lasagne.layers.BatchNormLayer(\n mlp,\n epsilon=epsilon, \n alpha=alpha)\n\n mlp = lasagne.layers.NonlinearityLayer(\n mlp,\n nonlinearity=activation)\n \n mlp = lasagne.layers.DropoutLayer(\n mlp, \n p=dropout_hidden)\n \n mlp = binary_net.DenseLayer(\n mlp, \n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n nonlinearity=lasagne.nonlinearities.identity,\n num_units=62) \n \n mlp = lasagne.layers.BatchNormLayer(\n mlp,\n epsilon=epsilon, \n alpha=alpha)\n\n train_output = 
lasagne.layers.get_output(mlp, deterministic=False)\n \n # squared hinge loss\n loss = T.mean(T.sqr(T.maximum(0.,1.-target*train_output)))\n \n if binary:\n \n # W updates\n W = lasagne.layers.get_all_params(mlp, binary=True)\n W_grads = binary_net.compute_grads(loss,mlp)\n updates = lasagne.updates.adam(loss_or_grads=W_grads, params=W, learning_rate=LR)\n updates = binary_net.clipping_scaling(updates,mlp)\n \n # other parameters updates\n params = lasagne.layers.get_all_params(mlp, trainable=True, binary=False)\n updates = OrderedDict(updates.items() + lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR).items())\n \n else:\n params = lasagne.layers.get_all_params(mlp, trainable=True)\n updates = lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR)\n\n test_output = lasagne.layers.get_output(mlp, deterministic=True)\n test_loss = T.mean(T.sqr(T.maximum(0.,1.-target*test_output)))\n test_err = T.mean(T.neq(T.argmax(test_output, axis=1), T.argmax(target, axis=1)),dtype=theano.config.floatX)\n \n # Compile a function performing a training step on a mini-batch (by giving the updates dictionary) \n # and returning the corresponding training loss:\n train_fn = theano.function([input, target, LR], loss, updates=updates)\n\n # Compile a second function computing the validation loss and accuracy:\n val_fn = theano.function([input, target], [test_loss, test_err])\n\n print('Training...')\n \n binary_net.train(\n train_fn,val_fn,\n mlp,\n batch_size,\n LR_start,LR_decay,\n num_epochs,\n train_setX,train_setY,\n valid_setX,valid_setY,\n test_setX,test_setY,\n save_path,\n shuffle_parts)" ]
[ [ "numpy.asarray", "numpy.random.seed", "numpy.eye", "scipy.misc.imread", "numpy.float32" ] ]
isce3-testing/isce3-circleci-poc
[ "ec1dfb6019bcdc7afb7beee7be0fa0ce3f3b87b3" ]
[ "tests/python/extensions/pybind/cuda/core/device.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy.testing as npt\nimport isce3.ext.isce3 as isce3\n\ndef test_get_device_count():\n count = isce3.cuda.core.get_device_count()\n assert(count >= 0)\n\ndef test_init():\n count = isce3.cuda.core.get_device_count()\n for d in range(count):\n device = isce3.cuda.core.Device(d)\n assert(device.id == d)\n\n print(\"Device %i\" % (device.id))\n print(\"--------\")\n print(\"name: %s\" % (device.name))\n print(\"compute: %s\" % (device.compute_capability))\n print(\"total mem (bytes): %i\" % (device.total_global_mem))\n\n assert(device.name != \"\")\n assert(device.total_global_mem > 0)\n assert(device.compute_capability.major >= 1)\n assert(device.compute_capability.minor >= 0)\n\ndef test_invalid_device():\n with npt.assert_raises(ValueError):\n device = isce3.cuda.core.Device(-1)\n\n count = isce3.cuda.core.get_device_count()\n with npt.assert_raises(ValueError):\n device = isce3.cuda.core.Device(count)\n\ndef test_get_device():\n device = isce3.cuda.core.get_device()\n assert(device.id >= 0)\n\ndef test_set_device():\n count = isce3.cuda.core.get_device_count()\n for d in range(count):\n device = isce3.cuda.core.Device(d)\n isce3.cuda.core.set_device(d)\n assert(isce3.cuda.core.get_device().id == d)\n\ndef test_comparison():\n device1 = isce3.cuda.core.Device(0)\n device2 = isce3.cuda.core.Device(0)\n assert(device1 == device2)\n\n count = isce3.cuda.core.get_device_count()\n if (count > 1):\n device3 = isce3.cuda.core.Device(1)\n assert(device1 != device3)\n" ]
[ [ "numpy.testing.assert_raises" ] ]
tmartins1996/6PM-clustering
[ "47147f88313a0e29d1a256120ef207add8ec8b88" ]
[ "6PM_n_clusters_Product.py" ]
[ "\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Dez 17 16:38:28 2017\r\n\r\n@group DM 2017 Semester 1, Group 2 \r\n\r\n@author: Martins T.\r\n@author: Mendes R.\r\n@author: Santos R.\r\n\r\n\r\ndataset - 2017/10/10\r\n\r\n\"\"\"\r\nprint(__doc__)\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.cluster.hierarchy import dendrogram, linkage\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nimport os\r\n\r\n\r\n\r\n\r\n#generating MinMax file\r\nif not(os.path.isfile('6PM_data_transformation.xlsx')):\r\n exec(open('6PM_data_preparation.py').read())\r\n\r\ndataset= pd.read_excel(\"6PM_data_transformation.xlsx\")\r\nfinalData = dataset[['MntAcessoriesPercent','MntBagsPercent','MntClothingPercent','MntAthleticPercent','MntShoesPercent']]\r\n\r\n\r\ndata= linkage(finalData, 'ward')\r\nplt.title('Hierarchical Clustering Dendrogram (truncated)')\r\nplt.xlabel('Observations')\r\nplt.ylabel('Distance')\r\ndendrogram(\r\n data,\r\n truncate_mode = 'lastp',\r\n p=20,\r\n show_leaf_counts=True,\r\n leaf_rotation=90.,\r\n leaf_font_size=12,\r\n show_contracted=True)\r\nplt.show()\r\n\r\n\r\n\"\"\"\r\nElbow Method\r\n\"\"\"\r\n\r\ncluster_range=range(1,10)\r\ncluster_errors = []\r\nfor num_clusters in cluster_range: \r\n clusters=KMeans(num_clusters)\r\n clusters.fit(finalData)\r\n cluster_errors.append(clusters.inertia_)\r\n \r\nclusters_df = pd.DataFrame({\"num_clusters\": cluster_range,\"cluster_errors\": cluster_errors})\r\nprint(clusters_df[0:10])\r\n\r\nplt.figure(figsize=(10,5))\r\nplt.xlabel(\"Clusters\")\r\nplt.ylabel(\"within-Cluster Sum of Squares\")\r\nplt.plot(clusters_df.num_clusters, clusters_df.cluster_errors, marker='o')" ]
[ [ "scipy.cluster.hierarchy.dendrogram", "scipy.cluster.hierarchy.linkage", "matplotlib.pyplot.xlabel", "pandas.read_excel", "matplotlib.pyplot.title", "pandas.DataFrame", "matplotlib.pyplot.plot", "sklearn.cluster.KMeans", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
yyywxk/RGTNet
[ "cb53cb5979caac87b10eae4c396e3f7ca3f10e6c" ]
[ "utils/loss.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DiceLoss(nn.Module):\n def __init__(self):\n super(DiceLoss, self).__init__()\n \n def forward(self, input, target):\n N = target.size(0)\n smooth = 1\n\n input_flat = input.view(N, -1)\n target_flat = target.view(N, -1)\n \n intersection = input_flat * target_flat\n \n loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) + target_flat.sum(1) + smooth)\n loss = 1 - loss.sum() / N\n \n return loss\n\n\nclass MulticlassDiceLoss(nn.Module):\n '''\n requires one hot encoded target. Applies DiceLoss on each class iteratively.\n requires input.shape[0:1] and target.shape[0:1] to be (N, C) where N is\n batch size and C is number of classes\n '''\n def __init__(self, n_classes, cuda):\n super(MulticlassDiceLoss, self).__init__()\n self.n_classes = n_classes\n self.cuda = cuda\n\n @staticmethod\n def to_one_hot(tensor, n_classes, cuda):\n n, h, w = tensor.size()\n if cuda:\n one_hot = torch.zeros(n, n_classes, h, w).cuda().scatter_(1, tensor.view(n, 1, h, w), 1)\n else:\n one_hot = torch.zeros(n, n_classes, h, w).scatter_(1, tensor.view(n, 1, h, w), 1)\n return one_hot\n \n def forward(self, input, target, weights=None):\n # logit => N x Classes x H x W\n # target => N x H x W\n pred = F.softmax(input, dim=1)\n target_onehot = self.to_one_hot(target, self.n_classes, self.cuda)\n C = target_onehot.shape[1]\n\n \n # C = target.shape[1]\n\n # if weights is None:\n # \tweights = torch.ones(C) #uniform weights for all classes\n \n dice = DiceLoss()\n totalLoss = 0\n \n for i in range(C):\n # diceLoss = dice(input[:, i], target[:, i])\n diceLoss = dice(pred[:, i], target_onehot[:, i])\n if weights is not None:\n diceLoss *= weights[i]\n totalLoss += diceLoss\n \n return totalLoss\n\n\n# Multiclass Smooth IOU loss\nclass SoftIoULoss(nn.Module):\n def __init__(self, n_classes, cuda):\n super(SoftIoULoss, self).__init__()\n self.n_classes = n_classes\n self.cuda = cuda\n\n @staticmethod\n def to_one_hot(tensor, n_classes, cuda, label_smoothing=1e-5):\n n, h, w = tensor.size()\n if cuda:\n one_hot = torch.zeros(n, n_classes, h, w).cuda().scatter_(1, tensor.view(n, 1, h, w), 1)\n else:\n one_hot = torch.zeros(n, n_classes, h, w).scatter_(1, tensor.view(n, 1, h, w), 1)\n\n one_hot = one_hot * (1 - label_smoothing) + label_smoothing / n_classes # label smoothing\n return one_hot\n\n def forward(self, input, target):\n # logit => N x Classes x H x W\n # target => N x H x W\n\n N = len(input)\n\n pred = F.softmax(input, dim=1)\n target_onehot = self.to_one_hot(target, self.n_classes, self.cuda)\n\n # Numerator Product\n inter = pred * target_onehot\n # Sum over all pixels N x C x H x W => N x C\n inter = inter.view(N, self.n_classes, -1).sum(2)\n\n # Denominator\n union = pred + target_onehot - (pred * target_onehot)\n # Sum over all pixels N x C x H x W => N x C\n union = union.view(N, self.n_classes, -1).sum(2)\n\n loss = inter / (union + 1e-16)\n\n # Return average loss over classes and batch\n return -loss.mean()\n\n\nclass SegmentationLosses(object):\n def __init__(self, weight=None, size_average=True, batch_average=True, ignore_index=255, cuda=False):\n self.ignore_index = ignore_index\n self.weight = weight\n # self.weight = torch.Tensor(np.array([0.1, 1.0]))\n self.size_average = size_average\n self.batch_average = batch_average\n self.cuda = cuda\n\n def build_loss(self, mode='dice'):\n \"\"\"Choices: ['ce' or 'focal']\"\"\"\n if mode == 'ce':\n return self.CrossEntropyLoss\n elif mode == 'focal':\n return 
self.FocalLoss\n elif mode == 'dice':\n return self.Dice_Loss\n elif mode == 'iou':\n return self.IouLoss\n else:\n raise NotImplementedError\n\n def CrossEntropyLoss(self, logit, target):\n n, c, h, w = logit.size()\n criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index,\n size_average=self.size_average)\n\n if self.cuda:\n criterion = criterion.cuda()\n\n loss = criterion(logit, target.long())\n\n if self.batch_average:\n loss /= n\n\n return loss\n\n def FocalLoss(self, logit, target, gamma=2, alpha=0.5):\n n, c, h, w = logit.size()\n criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index,\n size_average=self.size_average)\n if self.cuda:\n criterion = criterion.cuda()\n\n logpt = -criterion(logit, target.long())\n pt = torch.exp(logpt)\n if alpha is not None:\n logpt *= alpha\n loss = -((1 - pt) ** gamma) * logpt\n\n if self.batch_average:\n loss /= n\n\n return loss\n\n def Dice_Loss(self, logit, target):\n n, c, h, w = logit.size()\n criterion = MulticlassDiceLoss(n_classes=c, cuda=self.cuda)\n\n loss = criterion(logit, target.long())\n\n if self.batch_average:\n loss /= n\n\n return loss\n\n def IouLoss(self, logit, target):\n n, c, h, w = logit.size()\n criterion = SoftIoULoss(n_classes=c, cuda=self.cuda)\n\n loss = criterion(logit, target.long())\n\n if self.batch_average:\n loss /= n\n\n return loss\n\n\nif __name__ == \"__main__\":\n # loss = SegmentationLosses(cuda=True)\n # a = torch.rand(1, 3, 7, 7).cuda()\n # b = torch.rand(1, 7, 7).cuda()\n # print(loss.CrossEntropyLoss(a, b).item())\n # print(loss.FocalLoss(a, b, gamma=0, alpha=None).item())\n # print(loss.FocalLoss(a, b, gamma=2, alpha=0.5).item())\n # print(loss.Dice_Loss(a, b).item())\n # print(loss.IouLoss(a, b).item())\n\n loss = SegmentationLosses(cuda=False)\n a = torch.rand(1, 3, 7, 7)\n b = torch.rand(1, 7, 7)\n b[b > 0.7] = 2\n b[b <= 0.3] = 1\n b[b < 1] = 0\n # print(loss.Dice_Loss(a, b.long()).item())\n print(loss.IouLoss(a, b.long()).item())\n" ]
[ [ "torch.zeros", "torch.rand", "torch.nn.functional.softmax", "torch.exp", "torch.nn.CrossEntropyLoss" ] ]
thegreatwall/NSLS2
[ "bff36128cce475a0e7563d05b93aa63b6f706c01" ]
[ "skxray/spectroscopy.py" ]
[ "# ######################################################################\n# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #\n# National Laboratory. All rights reserved. #\n# #\n# Redistribution and use in source and binary forms, with or without #\n# modification, are permitted provided that the following conditions #\n# are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of the Brookhaven Science Associates, Brookhaven #\n# National Laboratory nor the names of its contributors may be used #\n# to endorse or promote products derived from this software without #\n# specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. #\n########################################################################\n\"\"\"\nThis module is for spectroscopy specific tools (spectrum fitting etc).\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom six.moves import zip\nimport numpy as np\nimport logging\nlogger = logging.getLogger(__name__)\nfrom scipy.integrate import simps\nfrom .fitting import fit_quad_to_peak\n\n\ndef align_and_scale(energy_list, counts_list, pk_find_fun=None):\n \"\"\"\n\n Parameters\n ----------\n energy_list : iterable of ndarrays\n list of ndarrays with the energy of each element\n\n counts_list : iterable of ndarrays\n list of ndarrays of counts/element\n\n pk_find_fun : function or None\n A function which takes two ndarrays and returns parameters\n about the largest peak. If None, defaults to `find_largest_peak`.\n For this demo, the output is (center, height, width), but this sould\n be pinned down better.\n\n Returns\n -------\n out_e : list of ndarray\n The aligned/scaled energy arrays\n\n out_c : list of ndarray\n The count arrays (should be the same as the input)\n \"\"\"\n if pk_find_fun is None:\n pk_find_fun = find_largest_peak\n\n base_sigma = None\n out_e, out_c = [], []\n for e, c in zip(energy_list, counts_list):\n E0, max_val, sigma = pk_find_fun(e, c)\n if base_sigma is None:\n base_sigma = sigma\n out_e.append((e - E0) * base_sigma / sigma)\n out_c.append(c)\n\n return out_e, out_c\n\n\ndef find_largest_peak(x, y, window=5):\n \"\"\"\n Finds and estimates the location, width, and height of\n the largest peak. Assumes the top of the peak can be\n approximated as a Gaussian. 
Finds the peak properties\n using least-squares fitting of a parabola to the log of\n the counts.\n\n The region around the peak can be approximated by\n Y = Y0 * exp(- (X - X0)**2 / (2 * sigma **2))\n\n Parameters\n ----------\n x : ndarray\n The independent variable\n\n y : ndarary\n Dependent variable sampled at positions X\n\n window : int, optional\n The size of the window around the maximum to use\n for the fitting\n\n\n Returns\n -------\n x0 : float\n The location of the peak\n\n y0 : float\n The magnitude of the peak\n\n sigma : float\n Width of the peak\n \"\"\"\n\n # make sure they are _really_ arrays\n x = np.asarray(x)\n y = np.asarray(y)\n\n # get the bin with the largest number of counts\n j = np.argmax(y)\n roi = slice(np.max(j - window, 0),\n j + window + 1)\n\n (w, x0, y0), r2 = fit_quad_to_peak(x[roi],\n np.log(y[roi]))\n\n return x0, np.exp(y0), 1/np.sqrt(-2*w)\n\n\ndef integrate_ROI_spectrum(bin_edges, counts, x_min, x_max):\n \"\"\"Integrate region(s) of histogram.\n\n If `x_min` and `x_max` are arrays/lists they must be equal in\n length. The values contained in the 'x_value_array' must be\n monotonic (up or down). The returned value is the sum of all the\n regions and a single scalar value is returned. Each region is\n computed independently, if regions overlap the overlapped area will\n be included multiple times in the final sum.\n\n `bin_edges` is an array of the left edges and the final right\n edges of the bins. `counts` is the value in each of those bins.\n\n The bins who's centers fall with in the integration limits are\n included in the sum.\n\n Parameters\n ----------\n bin_edges : array\n Independent variable, any unit.\n\n Must be one longer in length than counts\n\n counts : array\n Dependent variable, any units\n\n x_min : float or array\n The lower edge of the integration region(s).\n\n x_max : float or array\n The upper edge of the integration region(s).\n\n Returns\n -------\n float\n The totals integrated value in same units as `counts`\n\n \"\"\"\n bin_edges = np.asarray(bin_edges)\n return integrate_ROI(bin_edges[:-1] + np.diff(bin_edges),\n counts, x_min, x_max)\n\n\ndef _formatter_array_regions(x, centers, window=1, tab_count=0):\n \"\"\"Returns a formatted string of sub-sections of an array\n\n Each value in center generates a section of the string like:\n\n {tab_count*\\t}c : [x[c - n] ... x[c] ... x[c + n + 1]]\n\n\n Parameters\n ----------\n x : array\n The array to be looked into\n\n centers : iterable\n The locations to print out around\n\n window : int, optional\n how many values on either side of center to include\n\n defaults to 1\n\n tab_count : int, optional\n The number of tabs to pre-fix lines with\n\n default is 0\n\n Returns\n -------\n str\n The formatted string\n \"\"\"\n xl = len(x)\n x = np.asarray(x)\n header = (\"\\t\"*tab_count + 'center\\tarray values\\n' +\n \"\\t\"*tab_count + '------\\t------------\\n')\n return header + '\\n'.join([\"\\t\"*tab_count +\n \"{c}: \\t {vals}\".format(c=c,\n vals=x[np.max([0, c-window]):\n np.min([xl, c + window + 1])])\n for c in centers])\n\n\ndef integrate_ROI(x, y, x_min, x_max):\n \"\"\"Integrate region(s) of input data.\n\n If `x_min` and `x_max` are arrays/lists they must be equal in\n length. The values contained in the 'x' must be monotonic (up or\n down). The returned value is the sum of all the regions and a\n single scalar value is returned. 
Each region is computed\n independently, if regions overlap the overlapped area will be\n included multiple times in the final sum.\n\n This function assumes that `y` is a function of\n `x` sampled at `x`.\n\n Parameters\n ----------\n x : array\n Independent variable, any unit\n\n y : array\n Dependent variable, any units\n\n x_min : float or array\n The lower edge of the integration region(s)\n in units of x.\n\n x_max : float or array\n The upper edge of the integration region(s)\n in units of x.\n\n Returns\n -------\n float\n The totals integrated value in same units as `y`\n \"\"\"\n # make sure x (x-values) and y (y-values) are arrays\n x = np.asarray(x)\n y = np.asarray(y)\n\n if x.shape != y.shape:\n raise ValueError(\"Inputs (x and y) must be the same \"\n \"size. x.shape = {0} and y.shape = \"\n \"{1}\".format(x.shape, y.shape))\n\n # use np.sign() to obtain array which has evaluated sign changes in all\n # diff in input x_value array. Checks and tests are then run on the\n # evaluated sign change array.\n eval_x_arr_sign = np.sign(np.diff(x))\n\n # check to make sure no outliers exist which violate the monotonically\n # increasing requirement, and if exceptions exist, then error points to the\n # location within the source array where the exception occurs.\n if not np.all(eval_x_arr_sign == eval_x_arr_sign[0]):\n error_locations = np.where(eval_x_arr_sign != eval_x_arr_sign[0])[0]\n raise ValueError(\"Independent variable must be monotonically \"\n \"increasing. Erroneous values found at x-value \"\n \"array index locations:\\n\" +\n _formatter_array_regions(x, error_locations))\n\n # check whether the sign of all diff measures are negative in the\n # x. If so, then the input array for both x_values and\n # count are reversed so that they are positive, and monotonically increase\n # in value\n if eval_x_arr_sign[0] == -1:\n x = x[::-1]\n y = y[::-1]\n logging.debug(\"Input values for 'x' were found to be \"\n \"monotonically decreasing. The 'x' and \"\n \"'y' arrays have been reversed prior to \"\n \"integration.\")\n\n # up-cast to 1d and make sure it is flat\n x_min = np.atleast_1d(x_min).ravel()\n x_max = np.atleast_1d(x_max).ravel()\n\n # verify that the number of minimum and maximum boundary values are equal\n if len(x_min) != len(x_max):\n raise ValueError(\"integration bounds must have same lengths\")\n\n # verify that the specified minimum values are actually less than the\n # sister maximum value, and raise error if any minimum value is actually\n # greater than the sister maximum value.\n if np.any(x_min >= x_max):\n raise ValueError(\"All lower integration bounds must be less than \"\n \"upper integration bounds.\")\n\n # check to make sure that all specified minimum and maximum values are\n # actually contained within the extents of the independent variable array\n if np.any(x_min < x[0]):\n error_locations = np.where(x_min < x[0])[0]\n raise ValueError(\"Specified lower integration boundary values are \"\n \"outside the spectrum range. All minimum integration \"\n \"boundaries must be greater than, or equal to the \"\n \"lowest value in spectrum range. The erroneous x_min_\"\n \"array indices are:\\n\" +\n _formatter_array_regions(x_min,\n error_locations, window=0))\n\n if np.any(x_max > x[-1]):\n error_locations = np.where(x_max > x[-1])[0]\n raise ValueError(\"Specified upper integration boundary values \"\n \"are outside the spectrum range. 
All maximum \"\n \"integration boundary values must be less \"\n \"than, or equal to the highest value in the spectrum \"\n \"range. The erroneous x_max array indices are: \"\n \"\\n\" +\n _formatter_array_regions(x_max,\n error_locations, window=0))\n\n # find the bottom index of each integration bound\n bottom_indx = x.searchsorted(x_min)\n # find the top index of each integration bound\n # NOTE: +1 required for correct slicing for integration function\n top_indx = x.searchsorted(x_max) + 1\n\n # set up temporary variables\n accum = 0\n # integrate each region\n for bot, top in zip(bottom_indx, top_indx):\n # Note: If an odd number of intervals is specified, then the\n # even='avg' setting calculates and averages first AND last\n # N-2 intervals using trapezoidal rule.\n # If calculation speed become an issue, then consider changing\n # setting to 'first', or 'last' in which case trap rule is only\n # applied to either first or last N-2 intervals.\n accum += simps(y[bot:top], x[bot:top], even='avg')\n\n return accum\n" ]
[ [ "numpy.max", "scipy.integrate.simps", "numpy.asarray", "numpy.log", "numpy.exp", "numpy.min", "numpy.diff", "numpy.any", "numpy.where", "numpy.argmax", "numpy.atleast_1d", "numpy.sqrt", "numpy.all" ] ]
chawins/princeton_thesis
[ "114b1f9bc36742827c2cb285249ca30dba0ae85c" ]
[ "lib/tf_utils.py" ]
[ "\"\"\"\nAn additional utility file used for adversarial training.\nAuthor: Arjun Bhagoji ([email protected])\n\"\"\"\n\nimport sys\nimport time\n\nimport keras.backend as K\nimport numpy as np\nimport tensorflow as tf\nfrom keras.models import save_model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom lib.keras_utils import gen_adv_loss\nfrom parameters import BATCH_SIZE\nfrom tensorflow.python.platform import flags\n\nFLAGS = flags.FLAGS\nEVAL_FREQUENCY = 1000\nBATCH_EVAL_NUM = 100\n\n\ndef batch_eval(tf_inputs, tf_outputs, numpy_inputs):\n \"\"\"\n A helper function that computes a tensor on numpy inputs by batches.\n From: https://github.com/openai/cleverhans/blob/master/cleverhans/utils_tf.py\n \"\"\"\n\n n = len(numpy_inputs)\n assert n > 0\n assert n == len(tf_inputs)\n m = numpy_inputs[0].shape[0]\n for i in range(1, n):\n assert numpy_inputs[i].shape[0] == m\n\n out = []\n for _ in tf_outputs:\n out.append([])\n\n for start in range(0, m, BATCH_SIZE):\n batch = start // BATCH_SIZE\n\n # Compute batch start and end indices\n start = batch * BATCH_SIZE\n end = start + BATCH_SIZE\n numpy_input_batches = [numpy_input[start:end]\n for numpy_input in numpy_inputs]\n cur_batch_size = numpy_input_batches[0].shape[0]\n assert cur_batch_size <= BATCH_SIZE\n for e in numpy_input_batches:\n assert e.shape[0] == cur_batch_size\n\n feed_dict = dict(zip(tf_inputs, numpy_input_batches))\n feed_dict[K.learning_phase()] = 0\n numpy_output_batches = K.get_session().run(tf_outputs,\n feed_dict=feed_dict)\n for e in numpy_output_batches:\n assert e.shape[0] == cur_batch_size, e.shape\n for out_elem, numpy_output_batch in zip(out, numpy_output_batches):\n out_elem.append(numpy_output_batch)\n\n out = [np.concatenate(x, axis=0) for x in out]\n for e in out:\n assert e.shape[0] == m, e.shape\n return out\n\n\ndef tf_train(x, y, model, X_train, Y_train, x_advs=None, benign=None, cross_lip=None):\n\n generator = ImageDataGenerator()\n generator.fit(X_train)\n\n old_vars = set(tf.global_variables())\n train_size = Y_train.shape[0]\n\n # Generate cross-entropy loss for training\n logits = model(x)\n preds = K.softmax(logits)\n l1 = gen_adv_loss(logits, y, mean=True)\n\n # add adversarial training loss\n if x_advs is not None:\n l2 = gen_adv_loss(logits, y, mean=True)\n if benign == 0:\n loss = l2\n elif benign == 1:\n loss = 0.5 * (l1 + l2)\n else:\n l2 = tf.constant(0)\n loss = l1\n\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n # Run all the initializers to prepare the trainable parameters.\n K.get_session().run(tf.initialize_variables(\n set(tf.global_variables()) - old_vars))\n start_time = time.time()\n print('Initialized!')\n\n # Loop through training steps.\n num_steps = int(FLAGS.NUM_EPOCHS * train_size +\n BATCH_SIZE - 1) // BATCH_SIZE\n\n step = 0\n training_loss = 0\n epoch_count = 0\n step_old = 0\n for (batch_data, batch_labels) \\\n in generator.flow(X_train, Y_train, batch_size=BATCH_SIZE):\n\n if len(batch_data) < BATCH_SIZE:\n k = BATCH_SIZE - len(batch_data)\n batch_data = np.concatenate([batch_data, X_train[0:k]])\n batch_labels = np.concatenate([batch_labels, Y_train[0:k]])\n\n feed_dict = {x: batch_data,\n y: batch_labels,\n K.learning_phase(): 1}\n\n # Run the graph\n _, curr_loss, curr_l1, curr_l2, curr_preds, _ = \\\n K.get_session().run([optimizer, loss, l1, l2, preds]\n + [model.updates],\n feed_dict=feed_dict)\n training_loss += curr_loss\n\n epoch = float(step) * BATCH_SIZE / train_size\n if epoch >= epoch_count:\n epoch_count += 1\n elapsed_time = 
time.time() - start_time\n start_time = time.time()\n print('Step %d (epoch %d), %.2f s' %\n (step, epoch_count, elapsed_time))\n print('Training loss: %.3f' % (training_loss / (step - step_old)))\n training_loss = 0\n step_old = step\n print('Minibatch loss: %.3f (%.3f, %.3f)' %\n (curr_loss, curr_l1, curr_l2))\n\n _, _, minibatch_error = error_rate(curr_preds, batch_labels)\n\n print('Minibatch error: %.1f%%' % minibatch_error)\n\n # Save model every epoch\n save_model(model, './tmp/model_epoch{}_loss{:.3f}.ckpt'.format(\n epoch_count, curr_loss))\n\n sys.stdout.flush()\n\n step += 1\n if step == num_steps:\n break\n\n\ndef tf_test_error_rate(model, x, X_test, y_test):\n \"\"\"\n Compute test error.\n \"\"\"\n assert len(X_test) == len(y_test)\n\n # Predictions for the test set\n eval_prediction = K.softmax(model(x))\n\n predictions = batch_eval([x], [eval_prediction], [X_test])[0]\n\n return error_rate(predictions, y_test)\n\n\ndef error_rate(predictions, labels):\n \"\"\"\n Return the error rate in percent.\n \"\"\"\n\n assert len(predictions) == len(labels)\n\n preds = np.argmax(predictions, 1)\n\n orig = np.argmax(labels, 1)\n\n error_rate = 100.0 - (100.0 * np.sum(preds == orig) / predictions.shape[0])\n\n return preds, orig, error_rate\n" ]
[ [ "numpy.concatenate", "tensorflow.train.AdamOptimizer", "numpy.sum", "tensorflow.global_variables", "tensorflow.constant", "numpy.argmax" ] ]
student-work-agu-gis2021/lesson7-matplotlib-A5719050
[ "3914673641888338c7176f33afa276ee1fcea7fa" ]
[ "Exercise_7_problem_2.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# ## Problem 2 - Plotting temperatures \n# \n# In this problem we will plot monthly mean temperatures from the Helsinki-Vantaa airpot for the past 30 years.\n# \n# ## Input data\n# \n# File `data/helsinki-vantaa.csv` monthly average temperatures from Helsinki Vantaa airport. Column descriptions:\n# \n# ### Part 1\n# \n# Load the Helsinki temperature data (`data/helsinki-vantaa.csv`)\n# \n# - Read the data into variable called `data` using pandas\n# - Parse dates from the column `'DATE'` and set the dates as index in the dataframe \n\n# YOUR CODE HERE 1 to read the data into data and parse dates\nimport pandas as pd\nfp=\"data/helsinki-vantaa.csv\"\ndata=pd.read_csv(fp,parse_dates=['DATE'],index_col='DATE')\n# This test print should print first five rows\nprint(data.head())\n\n# Check the number of rows in the data frame\nprint(len(data))\n\n# ### Part 2\n# \n# Select data for a 30 year period (January 1988 - December 2018)\n# \n# - Store the selection in a new variable `selection`\n\n# YOUR CODE HERE 2\nselection=data.loc[(data.index>='1988-01-01')&(data.index<'2018-12-31')]\n# Check that the data was read in correctly:\nselection.head()\n\n# Check how many rows of data you selected:\nprint(\"Number of rows:\", len(selection))\n\n\n# ### Part 3\n# \n# #### Part 3.1\n# \n# Create a line plot that displays the temperatures (`TEMP_C`) for yeach month in the 30 year time period:\n# \n# #### Part 3.2\n# \n# Save your figure as PNG file called `temp_line_plot.png`.\n# \n\n# YOUR CODE HERE 3\nimport matplotlib.pyplot as plt\nselection=selection.sort_index()\nplt.plot(selection.index,selection['TEMP_C'],linestyle='solid',c='black',marker='o',markersize=3)\nplt.title(\"Helsinki-Vantaa Airport\")\nplt.xlabel(\"time\")\nplt.ylabel=(\"Temperature(Celsius\")\nplt.grid()\nplt.show()\n# Set output file name\noutputfp = \"\"\n\n# Save plot as image\n# YOUR CODE HERE 4\nplt.savefig(outputfp)\nimport os\n\n#Check that output file exists (also open the file and check that the plot looks ok!)\nos.path.exists(outputfp)\n\n\n# **REMINDER**: Don't forget to upload your figure and the modified notebook into your personal GitHub repository!\n# \n# ### Done!\n" ]
[ [ "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
goyeahia/SafeVid
[ "daeb2f15feac834fa3b5cea24e353f05f88dfcab" ]
[ "server.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 29 02:11:24 2018\n@author: Yeahia Sarker\n\"\"\"\n\nfrom imutils.video import FileVideoStream\nfrom imutils.video import FPS\nimport cv2\nimport imutils\nimport numpy as np\nimport pickle\nimport socket\nimport struct\nimport sys\nimport time\n\n\nclass pybroadcast_serverside:\n def __init__(self):\n \"\"\" Initializing Socket Server...\"\"\"\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n def host_server(self, host_ip, port):\n \"\"\" Fabricating Server Side....\"\"\"\n self.host_ip = host_ip\n self.port = port\n try:\n self.server.bind((host_ip, port))\n except Exception:\n print(\"Bind Error!\")\n print(\"The receiver couldn't connect\")\n print(\"Please check the host ip address\")\n self.server.close()\n sys.exit()\n self.server.listen(1)\n print(\"Server side is ready. Waiting for connection.... \" )\n print(\"Press ctrl + c to terminate the program\")\n try:\n self.__base, addr = self.server.accept()\n print(\"Connection has been established\")\n print('Got a connection from {}'.format(str(addr)))\n except Exception as e:\n print(e)\n except KeyboardInterrupt:\n self.server.close()\n sys.exit(\"Connection has been terminated\")\n\n def send_data(self):\n\n starting_video_stream = FileVideoStream(0).start()\n time.sleep(1.0)\n fps_timer = FPS().start() # Starting frame per second counter\n while True:\n frame = starting_video_stream.read()\n frame = imutils.resize(frame, width=450)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = np.dstack([frame, frame, frame])\n frame = cv2.medianBlur(frame,5) # blurring frames to reduce noise\n serialized_data = pickle.dumps(frame)\n new_serialized_data = struct.pack(\"H\", len(serialized_data)) + serialized_data\n self.__base.send(new_serialized_data)\n print(\"Sending frames.....\")\n cv2.imshow(\"[Pybroadcast] Server\", frame)\n cv2.waitKey(1)\n fps_timer.update() # Updating frame per second counter\n\nvideoserver = pybroadcast_serverside()\nvideoserver.host_server(\"192.168.0.104\",1111) # must assign the local addressS\n" ]
[ [ "numpy.dstack" ] ]
PacktPublishing/Hands-On-Python-Deep-Learning-for-Web
[ "bb111a073a1cedda19469b311e7b441b11adc533" ]
[ "Chapter11/app/app.py" ]
[ "from flask import Flask, request, jsonify, render_template\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\n\nnp.random.seed(5)\n\ndf = pd.read_csv(\"https://raw.githubusercontent.com/PacktPublishing/Hands-On-Python-Deep-Learning-for-Web/master/Chapter11/data/heart.csv\")\n\nX = df.drop(\"target\",axis=1)\ny = df[\"target\"]\n\nX = StandardScaler().fit_transform(X)\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.20,random_state=0)\n\nclf = MLPClassifier(max_iter=200)\n\nfor i in range(100):\n xt = X_train[i].reshape(1, -1)\n yt = y_train.values[[i]]\n clf = clf.partial_fit(xt, yt, classes=[0,1])\n if i > 0 and i % 25 == 0 or i == len(X_train) - 1:\n score = clf.score(X_test, y_test)\n print(\"Iters \", i, \": \", score)\n\nscore = clf.score(X_test, y_test)\n\napp = Flask(__name__)\n\nstart_at = 100\n\[email protected]('/train_batch', methods=['GET', 'POST'])\ndef train_batch():\n global start_at, clf, X_train, y_train, X_test, y_test, score\n for i in range(start_at, min(start_at+25, len(X_train))):\n xt = X_train[i].reshape(1, -1)\n yt = y_train.values[[i]]\n clf = clf.partial_fit(xt, yt, classes=[0,1])\n\n score = clf.score(X_test, y_test)\n\n start_at += 25\n\n response = {'result': float(round(score, 5)), 'remaining': len(X_train) - start_at}\n\n return jsonify(response)\n\[email protected]('/reset', methods=['GET', 'POST'])\ndef reset():\n global start_at, clf, X_train, y_train, X_test, y_test, score\n start_at = 0\n del clf\n clf = MLPClassifier(max_iter=200)\n for i in range(start_at, start_at+1):\n xt = X_train[i].reshape(1, -1)\n yt = y_train.values[[i]]\n clf = clf.partial_fit(xt, yt, classes=[0,1])\n\n score = clf.score(X_test, y_test)\n\n start_at += 1\n\n response = {'result': float(round(score, 5)), 'remaining': len(X_train) - start_at}\n\n return jsonify(response)\n\[email protected]('/')\ndef index():\n global score, X_train\n rem = (len(X_train) - start_at) > 0\n\n return render_template(\"index.html\", score=round(score, 5), remain = rem)\n\nif __name__ == '__main__':\n app.run()" ]
[ [ "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "sklearn.neural_network.MLPClassifier", "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
gaurav272333/RUA
[ "46892cbe38a24b2c4fb440a69ee0dde1674f6b8b" ]
[ "wrn2810_cifar100/rua/wrn2810_cifar100_rua.py" ]
[ "import math\nimport os\nimport random\nimport tempfile\n\nimport fastestimator as fe\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fastestimator.op.numpyop import NumpyOp\nfrom fastestimator.op.numpyop.meta import OneOf, Sometimes\nfrom fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\nfrom fastestimator.op.numpyop.univariate import ChannelTranspose, CoarseDropout, Normalize\nfrom fastestimator.op.tensorop.loss import CrossEntropy\nfrom fastestimator.op.tensorop.model import ModelOp, UpdateOp\nfrom fastestimator.schedule import cosine_decay\nfrom fastestimator.trace.adapt import LRScheduler\nfrom fastestimator.trace.io import BestModelSaver\nfrom fastestimator.trace.metric import Accuracy\nfrom PIL import Image, ImageEnhance, ImageOps, ImageTransform\nfrom sklearn.model_selection import train_test_split\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, dropRate=0.0):\n super(BasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.droprate = dropRate\n self.equalInOut = (in_planes == out_planes)\n self.convShortcut = (not self.equalInOut) and nn.Conv2d(\n in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False) or None\n\n def forward(self, x):\n if not self.equalInOut:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\n\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)\n\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):\n layers = []\n for i in range(int(nb_layers)):\n layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layer(x)\n\n\nclass WideResNet(nn.Module):\n def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):\n super(WideResNet, self).__init__()\n nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]\n assert ((depth - 4) % 6 == 0)\n n = (depth - 4) / 6\n block = BasicBlock\n # 1st conv before any network block\n self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)\n # 1st block\n self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)\n # 2nd block\n self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)\n # 3rd block\n self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(nChannels[3])\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(nChannels[3], num_classes)\n self.nChannels = nChannels[3]\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n 
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8, 1)\n out = out.view(-1, self.nChannels)\n return self.fc(out)\n\n\nclass Rotate(NumpyOp):\n \"\"\" rotate between 0 to 90 degree\n \"\"\"\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.degree = level * 3.0\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n degree = random.uniform(-self.degree, self.degree)\n im = im.rotate(degree)\n return np.copy(np.asarray(im))\n\n\nclass Identity(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n\n\nclass AutoContrast(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n im = ImageOps.autocontrast(im)\n return np.copy(np.asarray(im))\n\n\nclass Equalize(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n im = ImageOps.equalize(im)\n return np.copy(np.asarray(im))\n\n\nclass Posterize(NumpyOp):\n # resuce the number of bits for each channel, this may be inconsistent with original implementation\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.bit_loss_limit = level / 30 * 7\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n bits_to_keep = 8 - round(random.uniform(0, self.bit_loss_limit))\n im = ImageOps.posterize(im, bits_to_keep)\n return np.copy(np.asarray(im))\n\n\nclass Solarize(NumpyOp):\n # this may be inconsistent with original implementation\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.loss_limit = level / 30 * 256\n\n def forward(self, data, state):\n threshold = 256 - round(random.uniform(0, self.loss_limit))\n data = np.where(data < threshold, data, 255 - data)\n return data\n\n\nclass Sharpness(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.diff_limit = level / 30 * 0.9\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n factor = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)\n im = ImageEnhance.Sharpness(im).enhance(factor)\n return np.copy(np.asarray(im))\n\n\nclass Contrast(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.diff_limit = level / 30 * 0.9\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n factor = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)\n im = ImageEnhance.Contrast(im).enhance(factor)\n return np.copy(np.asarray(im))\n\n\nclass Color(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.diff_limit = level / 30 * 0.9\n\n 
def forward(self, data, state):\n im = Image.fromarray(data)\n factor = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)\n im = ImageEnhance.Color(im).enhance(factor)\n return np.copy(np.asarray(im))\n\n\nclass Brightness(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.diff_limit = level / 30 * 0.9\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n factor = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)\n im = ImageEnhance.Brightness(im).enhance(factor)\n return np.copy(np.asarray(im))\n\n\nclass ShearX(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.shear_coef = level / 30 * 0.5\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n shear_coeff = random.uniform(-self.shear_coef, self.shear_coef)\n width, height = im.size\n xshift = round(abs(shear_coeff) * width)\n new_width = width + xshift\n im = im.transform((new_width, height),\n ImageTransform.AffineTransform(\n (1.0, shear_coeff, -xshift if shear_coeff > 0 else 0.0, 0.0, 1.0, 0.0)),\n resample=Image.BICUBIC)\n im = im.resize((width, height))\n return np.copy(np.asarray(im))\n\n\nclass ShearY(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.shear_coef = level / 30 * 0.5\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n shear_coeff = random.uniform(-self.shear_coef, self.shear_coef)\n width, height = im.size\n yshift = round(abs(shear_coeff) * height)\n newheight = height + yshift\n im = im.transform((width, newheight),\n ImageTransform.AffineTransform(\n (1.0, 0.0, 0.0, shear_coeff, 1.0, -yshift if shear_coeff > 0 else 0.0)),\n resample=Image.BICUBIC)\n im = im.resize((width, height))\n return np.copy(np.asarray(im))\n\n\nclass TranslateX(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.level = level\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n width, height = im.size\n displacement = random.uniform(-self.level / 30 * height / 3, self.level / 30 * height / 3)\n im = im.transform((width, height),\n ImageTransform.AffineTransform((1.0, 0.0, displacement, 0.0, 1.0, 0.0)),\n resample=Image.BICUBIC)\n return np.copy(np.asarray(im))\n\n\nclass TranslateY(NumpyOp):\n def __init__(self, level, inputs=None, outputs=None, mode=None):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.level = level\n\n def forward(self, data, state):\n im = Image.fromarray(data)\n width, height = im.size\n displacement = random.uniform(-self.level / 30 * height / 3, self.level / 30 * height / 3)\n im = im.transform((width, height),\n ImageTransform.AffineTransform((1.0, 0.0, 0.0, 0.0, 1.0, displacement)),\n resample=Image.BICUBIC)\n return np.copy(np.asarray(im))\n\n\ndef get_estimator(level, epochs=200, batch_size=128, save_dir=tempfile.mkdtemp()):\n print(\"trying level {}\".format(level))\n # step 1: prepare dataset\n (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar100.load_data()\n x_train, x_eval, y_train, y_eval = train_test_split(x_train, y_train, test_size=0.1, random_state=24, stratify=y_train)\n train_data = fe.dataset.NumpyDataset({\"x\": x_train, \"y\": y_train})\n eval_data = fe.dataset.NumpyDataset({\"x\": x_eval, \"y\": y_eval})\n aug_options = [\n 
Rotate(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Identity(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n AutoContrast(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Equalize(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Posterize(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Solarize(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Sharpness(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Contrast(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Color(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n Brightness(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n ShearX(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n ShearY(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n TranslateX(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\"),\n TranslateY(level=level, inputs=\"x\", outputs=\"x\", mode=\"train\")\n ]\n max_N = min(5, len(aug_options))\n N = min(max_N, math.ceil(level / 30 * max_N))\n pipeline = fe.Pipeline(\n train_data=train_data,\n eval_data=eval_data,\n batch_size=batch_size,\n ops=[\n PadIfNeeded(min_height=40, min_width=40, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n RandomCrop(32, 32, image_in=\"x\", image_out=\"x\", mode=\"train\"),\n Sometimes(HorizontalFlip(image_in=\"x\", image_out=\"x\", mode=\"train\"))\n ] + [OneOf(*aug_options) for _ in range(N)] + [\n Normalize(inputs=\"x\", outputs=\"x\", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),\n CoarseDropout(inputs=\"x\", outputs=\"x\", mode=\"train\", max_holes=1),\n ChannelTranspose(inputs=\"x\", outputs=\"x\")\n ])\n\n # step 2: prepare network\n model = fe.build(model_fn=lambda: WideResNet(depth=28, num_classes=100, widen_factor=10),\n optimizer_fn=lambda x: torch.optim.SGD(x, lr=0.1, momentum=0.9, weight_decay=0.0005))\n\n network = fe.Network(ops=[\n ModelOp(model=model, inputs=\"x\", outputs=\"y_pred\"),\n CrossEntropy(inputs=(\"y_pred\", \"y\"), outputs=\"ce\", from_logits=True),\n UpdateOp(model=model, loss_name=\"ce\")\n ])\n\n # step 3 prepare estimator\n traces = [\n Accuracy(true_key=\"y\", pred_key=\"y_pred\"),\n LRScheduler(model=model, lr_fn=lambda epoch: cosine_decay(epoch, cycle_length=epochs, init_lr=0.1)),\n BestModelSaver(model=model, save_dir=save_dir, metric=\"accuracy\", save_best_mode=\"max\")\n ]\n estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces)\n return estimator\n\n\ndef evaluate_result(level, epochs=200):\n est = get_estimator(level=level, epochs=epochs)\n hist = est.fit(summary=\"exp\")\n best_acc = float(hist.history[\"eval\"][\"max_accuracy\"][epochs * 352])\n return best_acc\n\n\ndef gss(a, b, total_trial=10):\n results = {}\n h = b - a\n invphi = (math.sqrt(5) - 1) / 2\n invphi2 = (3 - math.sqrt(5)) / 2\n c = int(a + invphi2 * h)\n d = int(a + invphi * h)\n yc = evaluate_result(level=c)\n results[c] = yc\n yd = evaluate_result(level=d)\n results[d] = yd\n for i in range(total_trial - 2):\n if yc > yd:\n b = d\n d = c\n yd = yc\n h = invphi * h\n c = int(a + invphi2 * h)\n if c in results:\n yc = results[c]\n else:\n yc = evaluate_result(level=c)\n results[c] = yc\n else:\n a = c\n c = d\n yc = yd\n h = invphi * h\n d = int(a + invphi * h)\n if d in results:\n yd = results[d]\n else:\n yd = evaluate_result(level=d)\n results[d] = yd\n max_value_keys = [key for key in results.keys() if results[key] == max(results.values())]\n return max_value_keys[0], 
results[max_value_keys[0]]\n\n\nif __name__ == \"__main__\":\n best_level, best_acc = gss(a=1, b=30, total_trial=7)\n print(\"best level is {}, best accuracy is {}\".format(best_level, best_acc))\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.avg_pool2d", "numpy.asarray", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.nn.functional.dropout", "torch.nn.init.kaiming_normal_", "torch.optim.SGD", "tensorflow.keras.datasets.cifar100.load_data", "torch.nn.ReLU", "numpy.where", "torch.nn.Conv2d", "sklearn.model_selection.train_test_split" ] ]
CyprienGille/Atari-Freeway-Reinforcement-Learning-Project--RAM-only-
[ "fc3bd3590e764b3a185f11b94d6415a09a8a45c0" ]
[ "Freeway_ann/ann_training.py" ]
[ "\"\"\"\nScript to train the artificial neural network: run it to get a trained_agent saved to the working dir.\n\nMost interesting parameters are defined at the start of the script for easy changing.\nNote: This script assumes that you already created data with the data_creation.py script, or in a similar way.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\nif __name__ == \"__main__\":\n\n ######################### Constants/params ####################\n TRAIN_PROP = 0.80 # The rest will be reserved for validation\n BATCH_SIZE = 32 # not very important in our case\n EPOCHS = 300 # max number of epochs, almost never reached\n\n # how many epochs without improvement to wait before stopping training\n EARLY_STOPPING_PATIENCE = 30\n DROPOUT_RATE = 0.05\n ARRF_PATH = \"data/arrF.npy\"\n ARRL_PATH = \"data/arrL.npy\"\n\n PLOT_AFTER_TRAINING = True # plot accuracies and losses after training\n\n ######################### Data loading ####################\n features_array = np.load(ARRF_PATH)\n labels_array = np.load(ARRL_PATH)\n\n dataset = tf.data.Dataset.from_tensor_slices((features_array, labels_array))\n dataset_size = dataset.cardinality().numpy()\n\n dataset = dataset.shuffle(dataset_size + 1, seed=321)\n train_ds = dataset.take(int(TRAIN_PROP*dataset_size))\n val_ds = dataset.skip(int(TRAIN_PROP*dataset_size))\n train_ds = train_ds.batch(BATCH_SIZE)\n val_ds = val_ds.batch(BATCH_SIZE)\n\n ######################### Model definition ####################\n model = Sequential([\n layers.Dense(128, input_shape=(128,), activation='relu', name=\"input\"),\n layers.Dense(64, activation='relu', name=\"dense2\"),\n layers.Dropout(DROPOUT_RATE),\n layers.Dense(64, activation='relu', name=\"dense3\"),\n layers.Dropout(DROPOUT_RATE),\n layers.Dense(32, activation='relu', name=\"dense4\"),\n layers.Dropout(DROPOUT_RATE),\n layers.Dense(16, activation='relu', name=\"dense5\"),\n layers.Dense(3, activation=\"softmax\", name=\"output\")\n ])\n\n ######################### Training ####################\n # we choose the nadam optimizer because it empirically works very well with our data\n model.compile(optimizer='nadam', \n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n # Stop training and restore the best version of the model ever\n # when we spend too many epochs without improving (see the patience parameter)\n early_stop = keras.callbacks.EarlyStopping(monitor=\"val_accuracy\", \n patience=EARLY_STOPPING_PATIENCE, \n verbose=1, \n mode=\"max\", \n restore_best_weights=True)\n \n history = model.fit(train_ds,\n validation_data=val_ds,\n epochs=EPOCHS,\n callbacks=early_stop, \n verbose=2)\n \n # save the trained model for later use\n model.save(\"trained_agent\")\n\n\n ###################### Plotting Training Results ################################\n if PLOT_AFTER_TRAINING:\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n plt.figure(figsize=(10, 8))\n plt.subplot(1, 2, 1)\n plt.plot(acc, label='Training Accuracy')\n plt.plot(val_acc, label='Validation Accuracy')\n plt.legend(loc='lower right')\n plt.title('Training and Validation Accuracy')\n\n plt.subplot(1, 2, 2)\n plt.plot(loss, label='Training Loss')\n plt.plot(val_loss, label='Validation Loss')\n plt.legend(loc='upper right')\n 
plt.title('Training and Validation Loss')\n plt.show()\n" ]
[ [ "tensorflow.data.Dataset.from_tensor_slices", "matplotlib.pyplot.plot", "numpy.load", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.callbacks.EarlyStopping", "matplotlib.pyplot.show", "matplotlib.pyplot.subplot" ] ]
lierdakil/information-theory-2020
[ "e2792982bbd8f51869fc586e47275277a4058cdf" ]
[ "cyc.py" ]
[ "from sympy import mod_inverse\n\nclass ModArith:\n def __init__(self, val, mod):\n if isinstance(val, ModArith):\n assert(mod == val.m)\n self.val = val.val\n self.m = mod\n else:\n self.val = val % mod\n self.m = mod\n\n def __add__(self, other):\n assert(isinstance(other, ModArith))\n assert(self.m == other.m)\n return ModArith(self.val+other.val, self.m)\n\n def __sub__(self, other):\n assert(isinstance(other, ModArith))\n assert(self.m == other.m)\n return ModArith(self.m+self.val-other.val, self.m)\n\n def __mul__(self, other):\n assert(isinstance(other, ModArith))\n assert(self.m == other.m)\n return ModArith(self.val*other.val, self.m)\n\n def __divmod__(self, other):\n assert(isinstance(other, ModArith))\n assert(self.m == other.m)\n quot, rem = 0, self.val\n while rem >= other.val:\n rem = rem - other.val\n quot = quot+1\n return (ModArith(quot, self.m), ModArith(rem, self.m))\n\n def __eq__(self, other):\n assert(isinstance(other, ModArith))\n assert(self.m == other.m)\n return self.val == other.val\n\n def __neg__(self):\n return ModArith(self.m-self.val, self.m)\n\n def iszero(self):\n return self.val == 0\n\n def __repr__(self):\n return f\"{self.val}\"\n\n def zero(self):\n return ModArith(0, self.m)\n\n def inv(self):\n return ModArith(mod_inverse(self.val, self.m), self.m)\n\n\nimport itertools\n\ndef add_(a, b):\n if a is None: return b\n if b is None: return a\n return a+b\n\ndef sub_(a, b):\n if a is None: return -b\n if b is None: return a\n return a-b\n\nclass Poly:\n def __init__(self, c, ctor):\n self.ctor = ctor\n self.c = [ctor(i) for i in c]\n if len(self.c) > 0:\n while len(self.c) and self.c[0].iszero():\n self.c.pop(0)\n if len(self.c) > 0:\n self.order=len(self.c)-1\n else:\n self.order=0\n else:\n self.order=0\n\n def scale(self, x):\n return Poly([c*x for c in self.c], self.ctor)\n\n def __add__(self, other):\n cs = list(add_(a,b) for a, b in itertools.zip_longest(reversed(self.c), reversed(other.c)))\n return Poly(list(reversed(cs)), self.ctor)\n\n def __sub__(self, other):\n cs = list(sub_(a,b) for a, b in itertools.zip_longest(reversed(self.c), reversed(other.c)))\n return Poly(list(reversed(cs)), self.ctor)\n\n def __mul__(self, other):\n acc = Poly([], self.ctor)\n x = self\n for c in reversed(other.c):\n acc = acc + x.scale(c)\n x = Poly(x.c+[c.zero()], self.ctor)\n return acc\n\n def __mod__(self, other):\n return divmod(self, other)[1]\n\n def __neg__(self):\n return Poly([-c for c in self.c], self.ctor)\n\n def __divmod__(self, other):\n rem = self\n quot = Poly([], self.ctor)\n while len(rem.c) >= len(other.c):\n r = rem.c[0]\n o = other.c[0]\n order = len(rem.c) - len(other.c)\n c = r * o.inv()\n q = Poly([c]+order*[self.ctor(0)], self.ctor)\n quot = quot + q\n rem = rem - q*other\n return (quot, rem)\n\n def __repr__(self):\n if len(self.c) == 0: return \"0\"\n return ' + '.join(reversed([f\"{c} x^{n}\" if n > 0 else f\"{c}\" for n, c in enumerate(reversed(self.c))]))\n\nM = lambda x: ModArith(x, 7)\nP = lambda x: Poly(list(map(M, x)), M)\n\na = P([1,2,1,0])\nb = P([0,2,0,0])\nstr(a)\nstr(b)\nstr(divmod(a,b))\n\n# In[0]\n\nfrom hamming2 import allVectors\nimport numpy as np\n\ndef cycCode(gc, k, base):\n M = lambda x: ModArith(x, base)\n P = lambda x: Poly(x, M)\n g = P(gc)\n r = g.order\n n = k+r\n xr = P([1]+r*[0])\n\n def encode(m):\n z = P(m)*xr\n z = z - (z % g)\n cs = [c.val for c in z.c]\n while len(cs) < n:\n cs.insert(0, 0)\n return cs\n\n d = min(np.count_nonzero([i for i in encode(m)]) for m in itertools.islice(allVectors(k, base), 
1, None))\n ec = (d-1)//2\n print(f\"Code distance is {d}, can correct {ec} errors\")\n\n tbl = {}\n for ei in allVectors(n, base):\n if 0 < np.count_nonzero(ei) <= ec:\n si = P(ei) % g\n tbl[str(si)] = np.reshape(ei, n)\n print(f\"Table size is {len(tbl)}\")\n\n def decode(c):\n s = P(c) % g\n if all(c.iszero() for c in s.c):\n return c[:k]\n e = tbl[str(s)]\n c1 = c - e\n return c1[:k]\n\n return (encode, decode)\n\nencode, decode = cycCode(gc=[1,0,1,0,0,1,1,1], k=4, base=4)\n\n# In[0]\n\nc = encode([1,2,3,0])\nc[3] += 2\nc[5] += 2\ndecode(c)\n" ]
[ [ "numpy.count_nonzero", "numpy.reshape" ] ]
JobQiu/PrototypicalNetwork
[ "b46c34f8847946c4cd41774f4c8ee87c3486474c" ]
[ "data_loader/data_generator.py" ]
[ "import numpy as np\nfrom configs.config import MiniImageNetConfig\n\n\nclass DataGenerator:\n def __init__(self, config=MiniImageNetConfig()):\n self.config = config\n # load data here\n self.input = np.ones((500, 784))\n self.y = np.ones((500, 10))\n\n def next_batch(self, batch_size):\n idx = np.random.choice(500, batch_size)\n yield self.input[idx], self.y[idx]\n\n\nclass CompressedImageNetDataGenerator:\n\n def __init__(self, config):\n self.config = config\n self.train_images = np.concatenate([np.load('demo/mini-imagenet-train_{}.npy'.format(i)) for i in range(8)])\n self.test_images = np.concatenate([np.load('demo/mini-imagenet-test_{}.npy'.format(i)) for i in range(2)])\n self.val_images = np.concatenate([np.load('demo/mini-imagenet-val_{}.npy'.format(i)) for i in range(2)])\n\n def next_batch(self):\n config = self.config\n\n total_num_class = len(self.train_images)\n total_num_sample_per_class = self.train_images.shape[1]\n\n episode_classes = np.random.permutation(total_num_class)[:config.num_class_per_episode]\n support = np.zeros(shape=[config.num_class_per_episode, config.num_sample_per_class, config.image_height,\n config.image_width, config.image_channel_size], dtype=np.float32)\n\n # if config. image augmentation, use np. flip to get the flip image to feed todo\n # np.flip(A, axis=3)\n query = np.zeros(shape=[config.num_class_per_episode, config.num_query_per_class, config.image_height,\n config.image_width, config.image_channel_size], dtype=np.float32)\n\n for idx, epi_class in enumerate(episode_classes):\n selected = np.random.permutation(total_num_sample_per_class)[\n :config.num_sample_per_class + config.num_query_per_class]\n support[idx] = self.train_images[epi_class, selected[:config.num_sample_per_class]]\n query[idx] = self.train_images[epi_class, selected[config.num_sample_per_class:]]\n\n labels = np.tile(np.arange(config.num_class_per_episode)[:, np.newaxis],\n (1, config.num_query_per_class)).astype(np.uint8)\n yield support, query, labels\n\n def next_val_batch(self):\n config = self.config\n\n total_num_class = len(self.val_images)\n total_num_sample_per_class = self.val_images.shape[1]\n\n episode_classes = np.random.permutation(total_num_class)[:config.num_class_per_episode]\n support = np.zeros(shape=[config.num_class_per_episode, config.num_sample_per_class, config.image_height,\n config.image_width, config.image_channel_size], dtype=np.float32)\n\n # todo if config. image augmentation, use np. 
flip to get the flip image to feed\n # np.flip(A, axis=3)\n query = np.zeros(shape=[config.num_class_per_episode, config.num_query_per_class, config.image_height,\n config.image_width, config.image_channel_size], dtype=np.float32)\n\n for idx, epi_class in enumerate(episode_classes):\n selected = np.random.permutation(total_num_sample_per_class)[\n :config.num_sample_per_class + config.num_query_per_class]\n support[idx] = self.val_images[epi_class, selected[:config.num_sample_per_class]]\n query[idx] = self.val_images[epi_class, selected[config.num_sample_per_class:]]\n\n labels = np.tile(np.arange(config.num_class_per_episode)[:, np.newaxis],\n (1, config.num_query_per_class)).astype(np.uint8)\n yield support, query, labels\n\n def next_test_batch(self):\n config = self.config\n\n total_num_class = len(self.test_images)\n total_num_sample_per_class = self.test_images.shape[1]\n\n episode_classes = np.random.permutation(total_num_class)[:config.num_class_per_episode]\n support = np.zeros(shape=[config.num_class_per_episode, config.num_sample_per_class, config.image_height,\n config.image_width, config.image_channel_size], dtype=np.float32)\n\n # if config. image augmentation, use np. flip to get the flip image to feed todo\n # np.flip(A, axis=3)\n query = np.zeros(shape=[config.num_class_per_episode, config.num_query_per_class, config.image_height,\n config.image_width, config.image_channel_size], dtype=np.float32)\n\n for idx, epi_class in enumerate(episode_classes):\n selected = np.random.permutation(total_num_sample_per_class)[\n :config.num_sample_per_class + config.num_query_per_class]\n support[idx] = self.test_images[epi_class, selected[:config.num_sample_per_class]]\n query[idx] = self.test_images[epi_class, selected[config.num_sample_per_class:]]\n\n labels = np.tile(np.arange(config.num_class_per_episode)[:, np.newaxis],\n (1, config.num_query_per_class)).astype(np.uint8)\n yield support, query, labels\n" ]
[ [ "numpy.random.choice", "numpy.zeros", "numpy.random.permutation", "numpy.ones", "numpy.arange" ] ]
pomonam/Self-Tuning-Networks
[ "3fa949bb1da5beb2b4e7f1d07a26b819b42ad7f3" ]
[ "utils/cutout_utils.py" ]
[ "import numpy as np\n\nimport torch\n\n\nclass Cutout(object):\n # Contains a code from https://github.com/uoguelph-mlrg/Cutout\n \"\"\" Randomly mask out one or more patches from an image. \"\"\"\n def __init__(self, n_holes, length):\n \"\"\" Initialize a class CutOut.\n :param n_holes: int\n :param length: int\n \"\"\"\n self.n_holes = n_holes\n self.length = length\n\n def __call__(self, img):\n if self.length <= 0 or self.n_holes <= 0:\n return img\n\n h = img.size(1)\n w = img.size(2)\n mask = np.ones((h, w), np.float32)\n\n for n in range(self.n_holes):\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = int(np.clip(y - self.length / 2, 0, h))\n y2 = int(np.clip(y + self.length / 2, 0, h))\n x1 = int(np.clip(x - self.length / 2, 0, w))\n x2 = int(np.clip(x + self.length / 2, 0, w))\n\n mask[y1: y2, x1: x2] = 0.\n\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img = img * mask\n\n return img\n" ]
[ [ "numpy.clip", "numpy.ones", "numpy.random.randint", "torch.from_numpy" ] ]
jessie0306/MyCare
[ "fe8c3737835d08b9487227538a51a13e7c5717f8" ]
[ "mycare/chart_views.py" ]
[ "from django.shortcuts import render\nfrom mycare.models import Survey\nimport pandas as pd\nimport json\n\n# Create your views here.\ndef Chart(request):\n #DB안에 저장된 설문조사 결과 불러오기\n datas = Survey.objects.all() \n \n lst = []\n for d in datas:\n dic = {}\n dic['성별'] = d.gender\n dic['연령'] = d.age\n dic['샴푸 사용빈도'] = d.shampoo\n dic['펌 주기'] = d.perm\n dic['염색 주기'] = d.dye\n dic['현재 모발상태'] = d.current_hair\n dic['현재 사용중인 제품'] = d.product\n dic['두피케어 선호여부'] = d.care_prefer\n dic['제품 선택시 고려사항'] = d.buying_point\n dic['label'] = d.label\n lst.append(dic)\n \n #데이터프레임으로 저장\n survey = pd.DataFrame(lst)\n #print(survey)\n \n #========[유형별 빈도 파이차트 작성]========\n type_dic = {}\n for lbl in set(survey['label']):\n df = survey[survey['label']==lbl].drop(['label'], axis=1) #라벨컬럼 제외\n col_dic = {}\n for col in df.columns:\n x = df[col].value_counts()\n x_topn = x.head(4)\n \n if len(x) > 4: #상위 4개만 표출 나머지는 묶음 처리 \n x_topn['remaining {0} items'.format(len(x) - 4)] = sum(x[4:]) \n x = x_topn\n data = list(x)\n label = list(x.index)\n \n col_datas = {\n 'labels': label,\n 'datasets': [{\n 'label': 'count',\n 'data': data,\n 'backgroundColor': [\n 'rgb(253, 111, 150)',\n 'rgb(255, 235, 161)',\n 'rgb(149, 218, 193)',\n 'rgb(111, 105, 172)',\n 'rgb(30, 49, 99)'\n ],\n 'borderColor': [\n 'rgb(253, 111, 150)',\n 'rgb(255, 235, 161)',\n 'rgb(149, 218, 193)',\n 'rgb(111, 105, 172)',\n 'rgb(30, 49, 99)'\n ],\n 'borderWidth': 1\n }]\n }\n col_dic[col] = col_datas\n type_dic[lbl] = col_dic\n \n pie_json = json.dumps(type_dic) #json type으로 변환\n \n\n #=========[성별 두피유형 분포]=========\n type_male = survey[survey['성별'] == '남']\n m = type_male['label'].value_counts() # counts\n m_percentage = type_male['label'].value_counts(normalize=True).mul(100).round(2) # 백분율로\n\n female_type = survey[survey['성별'] == '여']\n fm = female_type['label'].value_counts()\n fm_percentage = female_type['label'].value_counts(normalize=True).mul(100).round(2)\n\n type_bysex_df = pd.concat([m, fm, m_percentage, fm_percentage], axis=1)\n type_bysex_df.columns = [\"남성\",\"여성\",\"남성(%)\", \"여성(%)\"]\n \n type_bysex_dic = {}\n for col in ['남성','여성']:\n x = type_bysex_df[col]\n x_topn = x.head(7)\n\n if len(x) > 7: #상위 7개만 표출 나머지는 묶음 처리\n x_topn['remaining {0} items'.format(len(x) - 7)] = sum(x[7:]) \n x = x_topn\n data = list(x)\n label = list(x.index)\n \n type_bysex_datas = {\n 'labels': label,\n 'datasets': [{\n 'label': 'count',\n 'data': data,\n 'backgroundColor': [\n 'rgb(255, 222, 125)',\n 'rgb(246, 65, 108)',\n 'rgb(248, 243, 212)',\n 'rgb(0, 184, 169)',\n 'rgb(150, 186, 255)',\n 'rgb(111, 105, 172)',\n 'rgb(61, 8, 123)',\n 'rgb(185, 122, 149)' \n ],\n 'borderColor': [\n 'rgb(255, 222, 125)',\n 'rgb(246, 65, 108)',\n 'rgb(248, 243, 212)',\n 'rgb(0, 184, 169)',\n 'rgb(150, 186, 255)',\n 'rgb(111, 105, 172)',\n 'rgb(61, 8, 123)',\n 'rgb(185, 122, 149)' \n ],\n 'borderWidth': 1\n }]\n }\n type_bysex_dic[col] = type_bysex_datas\n type_bysex_json = json.dumps(type_bysex_dic) #json type으로 변환\n \n \n #========성별 샴푸 고르는 기준========\n male = survey[survey['성별'] == '남']\n mm = male['제품 선택시 고려사항'].value_counts(normalize=True).mul(100).round(2)\n choice_bysex = pd.DataFrame({'남성': mm}).T\n \n female = survey[survey['성별'] == '여']\n ff = female['제품 선택시 고려사항'].value_counts(normalize=True).mul(100).round(2)\n choice_bysex = choice_bysex.append(ff)\n choice_bysex.index = ['남성','여성']\n \n data1 = list(choice_bysex['세정력'])\n data2 = list(choice_bysex['두피자극'])\n data3 = list(choice_bysex['머리결'])\n data4 = list(choice_bysex['향'])\n data5 = 
list(choice_bysex['헹굼후느낌'])\n data6 = list(choice_bysex['가격'])\n\n choice_bysex_datas = {\n 'labels': ['남성','여성'],\n 'datasets': [{\n 'label': '세정력',\n 'data': data1,\n 'backgroundColor': 'rgb(223, 46, 46)',\n 'hoverBackgroundColor': 'rgb(223, 46, 46)',\n 'borderWidth': 1,\n 'stack': 'combined'\n },{\n 'label': '두피자극',\n 'data': data2,\n 'backgroundColor': 'rgb(246, 209, 103)',\n 'hoverBackgroundColor': 'rgb(246, 209, 103)',\n 'borderWidth': 1,\n 'stack': 'combined' \n },{\n 'label': '머리결',\n 'data': data3,\n 'backgroundColor': 'rgb(255, 247, 174)',\n 'hoverBackgroundColor': 'rgb(255, 247, 174)',\n 'borderWidth': 1,\n 'stack': 'combined' \n },{\n 'label': '향',\n 'data': data4,\n 'backgroundColor': 'rgb(41, 127, 135)',\n 'hoverBackgroundColor': 'rgb(41, 127, 135)',\n 'borderWidth': 1,\n 'stack': 'combined' \n },{\n 'label': '헹굼후느낌',\n 'data': data5,\n 'backgroundColor': 'rgb(81, 45, 109)',\n 'hoverBackgroundColor': 'rgb(81, 45, 109)',\n 'borderWidth': 1,\n 'stack': 'combined' \n },{\n 'label': '가격',\n 'data': data6,\n 'backgroundColor': 'rgb(21, 0, 80)',\n 'hoverBackgroundColor': 'rgb(21, 0, 80)',\n 'borderWidth': 1,\n 'stack': 'combined' \n }]\n }\n choice_bysex_json = json.dumps(choice_bysex_datas)\n \n \n #========두피유형별 샴푸 고르는 기준========\n type1 = survey[survey['label'] == '양호']\n per1 = type1['제품 선택시 고려사항'].value_counts(normalize=True).mul(100).round(2)\n choice_byage_df = pd.DataFrame({'양호': per1}).T\n\n for i in ['건성','지성','민감성','지루성','염증성','비듬성','탈모성','복합성']:\n type_df = survey[survey['label'] == i]\n choice_byage_df = choice_byage_df.append(type_df['제품 선택시 고려사항'].value_counts(normalize=True).mul(100).round(2))\n\n choice_byage_df.index = [\"양호\",\"건성\",\"지성\",\"민감성\",\"지루성\",\"염증성\",\"비듬성\",\"탈모성\",\"복합성\"]\n choice_byage_df = choice_byage_df.fillna(0)\n \n label = list(choice_byage_df.index)\n data1 = list(choice_byage_df['세정력'])\n data2 = list(choice_byage_df['두피자극'])\n data3 = list(choice_byage_df['머리결'])\n data4 = list(choice_byage_df['향'])\n data5 = list(choice_byage_df['헹굼후느낌'])\n data6 = list(choice_byage_df['가격'])\n\n choice_byage_datas = {\n 'labels': list(choice_byage_df.index),\n 'datasets': [{\n 'label': '세정력',\n 'data': data1,\n 'backgroundColor': 'rgba(63,103,126,1)',\n 'hoverBackgroundColor': 'rgba(50,90,100,1)',\n 'borderWidth': 1\n },{\n 'label': '두피자극',\n 'data': data2,\n 'backgroundColor': 'rgba(163,11,126,1)',\n 'hoverBackgroundColor': 'rgba(50,98,10,1)',\n 'borderWidth': 1 \n },{\n 'label': '머리결',\n 'data': data3,\n 'backgroundColor': 'rgba(63,103,16,1)',\n 'hoverBackgroundColor': 'rgba(50,78,107,1)',\n 'borderWidth': 1 \n },{\n 'label': '향',\n 'data': data4,\n 'backgroundColor': 'rgba(63,103,126,1)',\n 'hoverBackgroundColor': 'rgba(50,90,100,1)',\n 'borderWidth': 1 \n },{\n 'label': '헹굼후느낌',\n 'data': data5,\n 'backgroundColor': 'rgba(12,33,16,1)',\n 'hoverBackgroundColor': 'rgba(50,100,11,1)',\n 'borderWidth': 1\n },{\n 'label': '가격',\n 'data': data6,\n 'backgroundColor': 'rgba(63,103,77,1)',\n 'hoverBackgroundColor': 'rgba(5,9,100,1)',\n 'borderWidth': 1 \n }]\n }\n choice_byage_json = json.dumps(choice_byage_datas)\n\n context = {'pie_json':pie_json, 'type_bysex_json':type_bysex_json, 'choice_bysex_json':choice_bysex_json,\n 'choice_byage_json':choice_byage_json}\n return render(request, 'statistics.html', context)\n\n\ndef HealthInform(request):\n \n return render(request, 'healthInform.html')\n\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
omegafragger/models
[ "6518e3e78d898398aa7c19c8cfe7133a859e60e6" ]
[ "research/bayesian_deeplab/model_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for DeepLab model and some helper functions.\"\"\"\n\nimport tensorflow as tf\n\nfrom deeplab import common\nfrom deeplab import model\n\n\nclass DeeplabModelTest(tf.test.TestCase):\n\n def testScaleDimensionOutput(self):\n self.assertEqual(161, model.scale_dimension(321, 0.5))\n self.assertEqual(193, model.scale_dimension(321, 0.6))\n self.assertEqual(241, model.scale_dimension(321, 0.75))\n\n def testWrongDeepLabVariant(self):\n model_options = common.ModelOptions([])._replace(\n model_variant='no_such_variant')\n with self.assertRaises(ValueError):\n model._get_logits(images=[], model_options=model_options)\n\n def testBuildDeepLabv2(self):\n batch_size = 2\n crop_size = [41, 41]\n\n # Test with two image_pyramids.\n image_pyramids = [[1], [0.5, 1]]\n\n # Test two model variants.\n model_variants = ['xception_65',\n 'mobilenet_v2']\n\n # Test with two output_types.\n outputs_to_num_classes = {'semantic': 3,\n 'direction': 2}\n\n expected_endpoints = [['merged_logits'],\n ['merged_logits',\n 'logits_0.50',\n 'logits_1.00']]\n expected_num_logits = [1, 3]\n\n for model_variant in model_variants:\n model_options = common.ModelOptions(outputs_to_num_classes)._replace(\n add_image_level_feature=False,\n aspp_with_batch_norm=False,\n aspp_with_separable_conv=False,\n model_variant=model_variant)\n\n for i, image_pyramid in enumerate(image_pyramids):\n g = tf.Graph()\n with g.as_default():\n with self.test_session(graph=g):\n inputs = tf.random_uniform(\n (batch_size, crop_size[0], crop_size[1], 3))\n outputs_to_scales_to_logits = model.multi_scale_logits(\n inputs, model_options, image_pyramid=image_pyramid)\n\n # Check computed results for each output type.\n for output in outputs_to_num_classes:\n scales_to_logits = outputs_to_scales_to_logits[output]\n self.assertListEqual(sorted(scales_to_logits.keys()),\n sorted(expected_endpoints[i]))\n\n # Expected number of logits = len(image_pyramid) + 1, since the\n # last logits is merged from all the scales.\n self.assertEqual(len(scales_to_logits), expected_num_logits[i])\n\n def testForwardpassDeepLabv3plus(self):\n crop_size = [33, 33]\n outputs_to_num_classes = {'semantic': 3}\n\n model_options = common.ModelOptions(\n outputs_to_num_classes,\n crop_size,\n output_stride=16\n )._replace(\n add_image_level_feature=True,\n aspp_with_batch_norm=True,\n logits_kernel_size=1,\n model_variant='mobilenet_v2') # Employ MobileNetv2 for fast test.\n\n g = tf.Graph()\n with g.as_default():\n with self.test_session(graph=g) as sess:\n inputs = tf.random_uniform(\n (1, crop_size[0], crop_size[1], 3))\n outputs_to_scales_to_logits = model.multi_scale_logits(\n inputs,\n model_options,\n image_pyramid=[1.0])\n\n sess.run(tf.global_variables_initializer())\n outputs_to_scales_to_logits = sess.run(outputs_to_scales_to_logits)\n\n # Check computed results 
for each output type.\n for output in outputs_to_num_classes:\n scales_to_logits = outputs_to_scales_to_logits[output]\n # Expect only one output.\n self.assertEquals(len(scales_to_logits), 1)\n for logits in scales_to_logits.values():\n self.assertTrue(logits.any())\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.random_uniform", "tensorflow.Graph", "tensorflow.global_variables_initializer", "tensorflow.test.main" ] ]
damon-demon/Black-Box-Defense
[ "b4e1b9e6e1703a8d1ba7535d531647abb9705fe9" ]
[ "archs/resnet.py" ]
[ "import torch\nimport torch.nn as nn\n\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=10, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None):\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple 
indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def forward(self, x):\n return self._forward_impl(x)\n\n\ndef _resnet(block, layers, **kwargs):\n model = ResNet(block, layers, **kwargs)\n return model\n\ndef ResNet50(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(Bottleneck, [3, 4, 6, 3], **kwargs)\n\ndef ResNet18(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-18 model 
from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(BasicBlock, [2, 2, 2, 2], **kwargs)" ]
[ [ "torch.nn.Linear", "torch.flatten", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.init.kaiming_normal_", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d" ] ]
prasunanand/dask-ml
[ "69680f79d0dff57bec50818998edc20b71b6846f" ]
[ "tests/test_partial.py" ]
[ "import dask\nimport dask.array as da\nimport dask.bag as db\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom dask.delayed import Delayed\nfrom sklearn.base import clone\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import SGDClassifier\n\nimport dask_ml.feature_extraction.text\nfrom dask_ml._partial import fit, predict\nfrom dask_ml.datasets import make_classification\nfrom dask_ml.wrappers import Incremental\n\nx = np.array([[1, 0], [2, 0], [3, 0], [4, 0], [0, 1], [0, 2], [3, 3], [4, 4]])\n\ny = np.array([1, 1, 1, 1, -1, -1, 0, 0])\n\nz = np.array([[1, -1], [-1, 1], [10, -10], [-10, 10]])\n\nX = da.from_array(x, chunks=(3, 2))\nY = da.from_array(y, chunks=(3,))\nZ = da.from_array(z, chunks=(2, 2))\n\n\ndef test_fit():\n with dask.config.set(scheduler=\"single-threaded\"):\n sgd = SGDClassifier(max_iter=5, tol=1e-3)\n\n sgd = fit(sgd, X, Y, classes=np.array([-1, 0, 1]))\n\n sol = sgd.predict(z)\n result = predict(sgd, Z)\n assert result.chunks == ((2, 2),)\n assert result.compute().tolist() == sol.tolist()\n\n\ndef test_no_compute():\n sgd = SGDClassifier(max_iter=5, tol=1e-3)\n\n result = fit(sgd, X, Y, classes=np.array([-1, 0, 1]), compute=False)\n assert isinstance(result, Delayed)\n\n\ndef test_fit_rechunking():\n n_classes = 2\n X, y = make_classification(chunks=20, n_classes=n_classes)\n X = X.rechunk({1: 10})\n\n assert X.numblocks[1] > 1\n\n clf = Incremental(SGDClassifier(max_iter=5, tol=1e-3))\n clf.fit(X, y, classes=list(range(n_classes)))\n\n\ndef test_fit_shuffle_blocks():\n N = 10\n X = da.from_array(1 + np.arange(N).reshape(-1, 1), chunks=1)\n y = da.from_array(np.ones(N), chunks=1)\n classes = [0, 1]\n\n sgd = SGDClassifier(\n max_iter=5, random_state=0, fit_intercept=False, shuffle=False, tol=1e-3\n )\n\n sgd1 = fit(clone(sgd), X, y, random_state=0, classes=classes)\n sgd2 = fit(clone(sgd), X, y, random_state=42, classes=classes)\n assert len(sgd1.coef_) == len(sgd2.coef_) == 1\n assert not np.allclose(sgd1.coef_, sgd2.coef_)\n\n X, y = make_classification(random_state=0, chunks=20)\n sgd_a = fit(clone(sgd), X, y, random_state=0, classes=classes, shuffle_blocks=False)\n sgd_b = fit(\n clone(sgd), X, y, random_state=42, classes=classes, shuffle_blocks=False\n )\n assert np.allclose(sgd_a.coef_, sgd_b.coef_)\n\n with pytest.raises(ValueError, match=\"cannot be used to seed\"):\n fit(\n sgd,\n X,\n y,\n classes=np.array([-1, 0, 1]),\n shuffle_blocks=True,\n random_state=da.random.RandomState(42),\n )\n\n\ndef test_dataframes():\n df = pd.DataFrame({\"x\": range(10), \"y\": [0, 1] * 5})\n ddf = dd.from_pandas(df, npartitions=2)\n\n with dask.config.set(scheduler=\"single-threaded\"):\n sgd = SGDClassifier(max_iter=5, tol=1e-3)\n\n sgd = fit(sgd, ddf[[\"x\"]], ddf.y, classes=[0, 1])\n\n sol = sgd.predict(df[[\"x\"]])\n result = predict(sgd, ddf[[\"x\"]])\n\n da.utils.assert_eq(sol, result)\n\n\ndef test_bag():\n x = db.from_sequence(range(10), npartitions=2)\n vect = dask_ml.feature_extraction.text.HashingVectorizer()\n vect = fit(vect, x, None)\n y = vect.transform(x)\n assert y.shape[1] == vect.n_features\n\n\ndef test_no_partial_fit_raises():\n X, y = make_classification(chunks=50)\n with pytest.raises(ValueError, match=\"RandomForestClassifier\"):\n fit(RandomForestClassifier(), X, y)\n" ]
[ [ "numpy.array", "sklearn.ensemble.RandomForestClassifier", "numpy.ones", "numpy.allclose", "sklearn.linear_model.SGDClassifier", "numpy.arange", "sklearn.base.clone" ] ]
zzb610/factest
[ "1e628f6fc885cd1975c2e68181caf40e2874dc08" ]
[ "factest/alphalens/tears.py" ]
[ "#\r\n# Copyright 2017 Quantopian, Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport matplotlib.gridspec as gridspec\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport warnings\r\n\r\nfrom . import plotting\r\nfrom . import performance as perf\r\nfrom . import utils\r\n\r\n\r\nclass GridFigure(object):\r\n \"\"\"\r\n It makes life easier with grid plots\r\n \"\"\"\r\n\r\n def __init__(self, rows, cols):\r\n self.rows = rows\r\n self.cols = cols\r\n self.fig = plt.figure(figsize=(14, rows * 7))\r\n self.gs = gridspec.GridSpec(rows, cols, wspace=0.4, hspace=0.3)\r\n self.curr_row = 0\r\n self.curr_col = 0\r\n\r\n def next_row(self):\r\n if self.curr_col != 0:\r\n self.curr_row += 1\r\n self.curr_col = 0\r\n subplt = plt.subplot(self.gs[self.curr_row, :])\r\n self.curr_row += 1\r\n return subplt\r\n\r\n def next_cell(self):\r\n if self.curr_col >= self.cols:\r\n self.curr_row += 1\r\n self.curr_col = 0\r\n subplt = plt.subplot(self.gs[self.curr_row, self.curr_col])\r\n self.curr_col += 1\r\n return subplt\r\n\r\n def close(self):\r\n plt.close(self.fig)\r\n self.fig = None\r\n self.gs = None\r\n\r\n\r\[email protected]\r\ndef create_summary_tear_sheet(\r\n factor_data, long_short=True, group_neutral=False\r\n):\r\n \"\"\"\r\n Creates a small summary tear sheet with returns, information, and turnover\r\n analysis.\r\n\r\n Parameters\r\n ----------\r\n factor_data : pd.DataFrame - MultiIndex\r\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\r\n containing the values for a single alpha factor, forward returns for\r\n each period, the factor quantile/bin that factor value belongs to, and\r\n (optionally) the group the asset belongs to.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n long_short : bool\r\n Should this computation happen on a long short portfolio? if so, then\r\n mean quantile returns will be demeaned across the factor universe.\r\n group_neutral : bool\r\n Should this computation happen on a group neutral portfolio? 
if so,\r\n returns demeaning will occur on the group level.\r\n \"\"\"\r\n\r\n # Returns Analysis\r\n mean_quant_ret, std_quantile = perf.mean_return_by_quantile(\r\n factor_data,\r\n by_group=False,\r\n demeaned=long_short,\r\n group_adjust=group_neutral,\r\n )\r\n\r\n mean_quant_rateret = mean_quant_ret.apply(\r\n utils.rate_of_return, axis=0, base_period=mean_quant_ret.columns[0]\r\n )\r\n\r\n mean_quant_ret_bydate, std_quant_daily = perf.mean_return_by_quantile(\r\n factor_data,\r\n by_date=True,\r\n by_group=False,\r\n demeaned=long_short,\r\n group_adjust=group_neutral,\r\n )\r\n\r\n mean_quant_rateret_bydate = mean_quant_ret_bydate.apply(\r\n utils.rate_of_return,\r\n axis=0,\r\n base_period=mean_quant_ret_bydate.columns[0],\r\n )\r\n\r\n compstd_quant_daily = std_quant_daily.apply(\r\n utils.std_conversion, axis=0, base_period=std_quant_daily.columns[0]\r\n )\r\n\r\n alpha_beta = perf.factor_alpha_beta(\r\n factor_data, demeaned=long_short, group_adjust=group_neutral\r\n )\r\n\r\n mean_ret_spread_quant, std_spread_quant = perf.compute_mean_returns_spread(\r\n mean_quant_rateret_bydate,\r\n factor_data[\"factor_quantile\"].max(),\r\n factor_data[\"factor_quantile\"].min(),\r\n std_err=compstd_quant_daily,\r\n )\r\n\r\n periods = utils.get_forward_returns_columns(factor_data.columns)\r\n periods = list(map(lambda p: pd.Timedelta(p).days, periods))\r\n\r\n fr_cols = len(periods)\r\n vertical_sections = 2 + fr_cols * 3\r\n gf = GridFigure(rows=vertical_sections, cols=1)\r\n\r\n plotting.plot_quantile_statistics_table(factor_data)\r\n\r\n plotting.plot_returns_table(\r\n alpha_beta, mean_quant_rateret, mean_ret_spread_quant\r\n )\r\n\r\n plotting.plot_quantile_returns_bar(\r\n mean_quant_rateret,\r\n by_group=False,\r\n ylim_percentiles=None,\r\n ax=gf.next_row(),\r\n )\r\n\r\n # Information Analysis\r\n ic = perf.factor_information_coefficient(factor_data)\r\n plotting.plot_information_table(ic)\r\n\r\n # Turnover Analysis\r\n quantile_factor = factor_data[\"factor_quantile\"]\r\n\r\n quantile_turnover = {\r\n p: pd.concat(\r\n [\r\n perf.quantile_turnover(quantile_factor, q, p)\r\n for q in range(1, int(quantile_factor.max()) + 1)\r\n ],\r\n axis=1,\r\n )\r\n for p in periods\r\n }\r\n\r\n autocorrelation = pd.concat(\r\n [\r\n perf.factor_rank_autocorrelation(factor_data, period)\r\n for period in periods\r\n ],\r\n axis=1,\r\n )\r\n\r\n plotting.plot_turnover_table(autocorrelation, quantile_turnover)\r\n\r\n plt.show()\r\n gf.close()\r\n\r\n\r\[email protected]\r\ndef create_returns_tear_sheet(\r\n factor_data, long_short=True, group_neutral=False, by_group=False, equal_weight=False\r\n):\r\n \"\"\"\r\n Creates a tear sheet for returns analysis of a factor.\r\n\r\n Parameters\r\n ----------\r\n factor_data : pd.DataFrame - MultiIndex\r\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\r\n containing the values for a single alpha factor, forward returns for\r\n each period, the factor quantile/bin that factor value belongs to,\r\n and (optionally) the group the asset belongs to.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n long_short : bool\r\n Should this computation happen on a long short portfolio? if so, then\r\n mean quantile returns will be demeaned across the factor universe.\r\n Additionally factor values will be demeaned across the factor universe\r\n when factor weighting the portfolio for cumulative returns plots\r\n group_neutral : bool\r\n Should this computation happen on a group neutral portfolio? 
if so,\r\n returns demeaning will occur on the group level.\r\n Additionally each group will weight the same in cumulative returns\r\n plots\r\n by_group : bool\r\n If True, display graphs separately for each group.\r\n \"\"\"\r\n\r\n factor_returns = perf.factor_returns(\r\n factor_data, long_short, group_neutral, equal_weight\r\n )\r\n\r\n mean_quant_ret, std_quantile = perf.mean_return_by_quantile(\r\n factor_data,\r\n by_group=False,\r\n demeaned=long_short,\r\n group_adjust=group_neutral,\r\n )\r\n\r\n mean_quant_rateret = mean_quant_ret.apply(\r\n utils.rate_of_return, axis=0, base_period=mean_quant_ret.columns[0]\r\n )\r\n\r\n mean_quant_ret_bydate, std_quant_daily = perf.mean_return_by_quantile(\r\n factor_data,\r\n by_date=True,\r\n by_group=False,\r\n demeaned=long_short,\r\n group_adjust=group_neutral,\r\n )\r\n\r\n mean_quant_rateret_bydate = mean_quant_ret_bydate.apply(\r\n utils.rate_of_return,\r\n axis=0,\r\n base_period=mean_quant_ret_bydate.columns[0],\r\n )\r\n\r\n compstd_quant_daily = std_quant_daily.apply(\r\n utils.std_conversion, axis=0, base_period=std_quant_daily.columns[0]\r\n )\r\n\r\n alpha_beta = perf.factor_alpha_beta(\r\n factor_data, factor_returns, long_short, group_neutral\r\n )\r\n\r\n mean_ret_spread_quant, std_spread_quant = perf.compute_mean_returns_spread(\r\n mean_quant_rateret_bydate,\r\n factor_data[\"factor_quantile\"].max(),\r\n factor_data[\"factor_quantile\"].min(),\r\n std_err=compstd_quant_daily,\r\n )\r\n\r\n fr_cols = len(factor_returns.columns)\r\n vertical_sections = 2 + fr_cols * 3\r\n gf = GridFigure(rows=vertical_sections, cols=1)\r\n\r\n plotting.plot_returns_table(\r\n alpha_beta, mean_quant_rateret, mean_ret_spread_quant\r\n )\r\n\r\n plotting.plot_quantile_returns_bar(\r\n mean_quant_rateret,\r\n by_group=False,\r\n ylim_percentiles=None,\r\n ax=gf.next_row(),\r\n )\r\n\r\n plotting.plot_quantile_returns_violin(\r\n mean_quant_rateret_bydate, ylim_percentiles=(1, 99), ax=gf.next_row()\r\n )\r\n\r\n trading_calendar = factor_data.index.levels[0].freq\r\n if trading_calendar is None:\r\n trading_calendar = pd.tseries.offsets.BDay()\r\n warnings.warn(\r\n \"'freq' not set in factor_data index: assuming business day\",\r\n UserWarning,\r\n )\r\n\r\n # Compute cumulative returns from daily simple returns, if '1D'\r\n # returns are provided.\r\n if \"1D\" in factor_returns:\r\n title = (\r\n \"Factor Weighted \"\r\n + (\"Group Neutral \" if group_neutral else \"\")\r\n + (\"Long/Short \" if long_short else \"\")\r\n + \"Portfolio Cumulative Return (1D Period)\"\r\n )\r\n\r\n plotting.plot_cumulative_returns(\r\n factor_returns[\"1D\"], period=\"1D\", title=title, ax=gf.next_row()\r\n )\r\n\r\n plotting.plot_cumulative_returns_by_quantile(\r\n mean_quant_ret_bydate[\"1D\"], period=\"1D\", ax=gf.next_row()\r\n )\r\n\r\n ax_mean_quantile_returns_spread_ts = [\r\n gf.next_row() for x in range(fr_cols)\r\n ]\r\n plotting.plot_mean_quantile_returns_spread_time_series(\r\n mean_ret_spread_quant,\r\n std_err=std_spread_quant,\r\n bandwidth=0.5,\r\n ax=ax_mean_quantile_returns_spread_ts,\r\n )\r\n\r\n plt.show()\r\n gf.close()\r\n\r\n if by_group:\r\n (\r\n mean_return_quantile_group,\r\n mean_return_quantile_group_std_err,\r\n ) = perf.mean_return_by_quantile(\r\n factor_data,\r\n by_date=False,\r\n by_group=True,\r\n demeaned=long_short,\r\n group_adjust=group_neutral,\r\n )\r\n\r\n mean_quant_rateret_group = mean_return_quantile_group.apply(\r\n utils.rate_of_return,\r\n axis=0,\r\n base_period=mean_return_quantile_group.columns[0],\r\n 
)\r\n\r\n num_groups = len(\r\n mean_quant_rateret_group.index.get_level_values(\"group\").unique()\r\n )\r\n\r\n vertical_sections = 1 + (((num_groups - 1) // 2) + 1)\r\n gf = GridFigure(rows=vertical_sections, cols=2)\r\n\r\n ax_quantile_returns_bar_by_group = [\r\n gf.next_cell() for _ in range(num_groups)\r\n ]\r\n plotting.plot_quantile_returns_bar(\r\n mean_quant_rateret_group,\r\n by_group=True,\r\n ylim_percentiles=(5, 95),\r\n ax=ax_quantile_returns_bar_by_group,\r\n )\r\n plt.show()\r\n gf.close()\r\n\r\n\r\[email protected]\r\ndef create_information_tear_sheet(\r\n factor_data, group_neutral=False, by_group=False\r\n):\r\n \"\"\"\r\n Creates a tear sheet for information analysis of a factor.\r\n\r\n Parameters\r\n ----------\r\n factor_data : pd.DataFrame - MultiIndex\r\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\r\n containing the values for a single alpha factor, forward returns for\r\n each period, the factor quantile/bin that factor value belongs to, and\r\n (optionally) the group the asset belongs to.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n group_neutral : bool\r\n Demean forward returns by group before computing IC.\r\n by_group : bool\r\n If True, display graphs separately for each group.\r\n \"\"\"\r\n\r\n ic = perf.factor_information_coefficient(factor_data, group_neutral)\r\n\r\n plotting.plot_information_table(ic)\r\n\r\n columns_wide = 2\r\n fr_cols = len(ic.columns)\r\n rows_when_wide = ((fr_cols - 1) // columns_wide) + 1\r\n vertical_sections = fr_cols + 3 * rows_when_wide + 2 * fr_cols\r\n gf = GridFigure(rows=vertical_sections, cols=columns_wide)\r\n\r\n ax_ic_ts = [gf.next_row() for _ in range(fr_cols)]\r\n plotting.plot_ic_ts(ic, ax=ax_ic_ts)\r\n\r\n ax_ic_hqq = [gf.next_cell() for _ in range(fr_cols * 2)]\r\n plotting.plot_ic_hist(ic, ax=ax_ic_hqq[::2])\r\n plotting.plot_ic_qq(ic, ax=ax_ic_hqq[1::2])\r\n\r\n if not by_group:\r\n\r\n mean_monthly_ic = perf.mean_information_coefficient(\r\n factor_data,\r\n group_adjust=group_neutral,\r\n by_group=False,\r\n by_time=\"M\",\r\n )\r\n ax_monthly_ic_heatmap = [gf.next_cell() for x in range(fr_cols)]\r\n plotting.plot_monthly_ic_heatmap(\r\n mean_monthly_ic, ax=ax_monthly_ic_heatmap\r\n )\r\n\r\n if by_group:\r\n mean_group_ic = perf.mean_information_coefficient(\r\n factor_data, group_adjust=group_neutral, by_group=True\r\n )\r\n\r\n plotting.plot_ic_by_group(mean_group_ic, ax=gf.next_row())\r\n\r\n plt.show()\r\n gf.close()\r\n\r\n\r\[email protected]\r\ndef create_turnover_tear_sheet(factor_data, turnover_periods=None):\r\n \"\"\"\r\n Creates a tear sheet for analyzing the turnover properties of a factor.\r\n\r\n Parameters\r\n ----------\r\n factor_data : pd.DataFrame - MultiIndex\r\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\r\n containing the values for a single alpha factor, forward returns for\r\n each period, the factor quantile/bin that factor value belongs to, and\r\n (optionally) the group the asset belongs to.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n turnover_periods : sequence[string], optional\r\n Periods to compute turnover analysis on. By default periods in\r\n 'factor_data' are used but custom periods can provided instead. This\r\n can be useful when periods in 'factor_data' are not multiples of the\r\n frequency at which factor values are computed i.e. 
the periods\r\n are 2h and 4h and the factor is computed daily and so values like\r\n ['1D', '2D'] could be used instead\r\n \"\"\"\r\n\r\n if turnover_periods is None:\r\n input_periods = utils.get_forward_returns_columns(\r\n factor_data.columns, require_exact_day_multiple=True,\r\n ).to_numpy()\r\n turnover_periods = utils.timedelta_strings_to_integers(input_periods)\r\n else:\r\n turnover_periods = utils.timedelta_strings_to_integers(\r\n turnover_periods,\r\n )\r\n\r\n quantile_factor = factor_data[\"factor_quantile\"]\r\n\r\n quantile_turnover = {\r\n p: pd.concat(\r\n [\r\n perf.quantile_turnover(quantile_factor, q, p)\r\n for q in quantile_factor.sort_values().unique().tolist()\r\n ],\r\n axis=1,\r\n )\r\n for p in turnover_periods\r\n }\r\n\r\n autocorrelation = pd.concat(\r\n [\r\n perf.factor_rank_autocorrelation(factor_data, period)\r\n for period in turnover_periods\r\n ],\r\n axis=1,\r\n )\r\n\r\n plotting.plot_turnover_table(autocorrelation, quantile_turnover)\r\n\r\n fr_cols = len(turnover_periods)\r\n columns_wide = 1\r\n rows_when_wide = ((fr_cols - 1) // 1) + 1\r\n vertical_sections = fr_cols + 3 * rows_when_wide + 2 * fr_cols\r\n gf = GridFigure(rows=vertical_sections, cols=columns_wide)\r\n\r\n for period in turnover_periods:\r\n if quantile_turnover[period].isnull().all().all():\r\n continue\r\n plotting.plot_top_bottom_quantile_turnover(\r\n quantile_turnover[period], period=period, ax=gf.next_row()\r\n )\r\n\r\n for period in autocorrelation:\r\n if autocorrelation[period].isnull().all():\r\n continue\r\n plotting.plot_factor_rank_auto_correlation(\r\n autocorrelation[period], period=period, ax=gf.next_row()\r\n )\r\n\r\n plt.show()\r\n gf.close()\r\n\r\n\r\[email protected]\r\ndef create_full_tear_sheet(factor_data,\r\n long_short=False,\r\n group_neutral=False,\r\n equal_weight=True,\r\n by_group=False):\r\n \"\"\"\r\n Creates a full tear sheet for analysis and evaluating single\r\n return predicting (alpha) factor.\r\n\r\n Parameters\r\n ----------\r\n factor_data : pd.DataFrame - MultiIndex\r\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\r\n containing the values for a single alpha factor, forward returns for\r\n each period, the factor quantile/bin that factor value belongs to, and\r\n (optionally) the group the asset belongs to.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n long_short : bool\r\n Should this computation happen on a long short portfolio?\r\n - See tears.create_returns_tear_sheet for details on how this flag\r\n affects returns analysis\r\n group_neutral : bool\r\n Should this computation happen on a group neutral portfolio?\r\n - See tears.create_returns_tear_sheet for details on how this flag\r\n affects returns analysis\r\n - See tears.create_information_tear_sheet for details on how this\r\n flag affects information analysis\r\n by_group : bool\r\n If True, display graphs separately for each group.\r\n \"\"\"\r\n\r\n plotting.plot_quantile_statistics_table(factor_data)\r\n create_returns_tear_sheet(\r\n factor_data, long_short, group_neutral, by_group, equal_weight, set_context=False\r\n )\r\n create_information_tear_sheet(\r\n factor_data, group_neutral, by_group, set_context=False\r\n )\r\n create_turnover_tear_sheet(factor_data, set_context=False)\r\n\r\n\r\[email protected]\r\ndef create_event_returns_tear_sheet(factor_data,\r\n returns,\r\n avgretplot=(5, 15),\r\n long_short=True,\r\n group_neutral=False,\r\n std_bar=True,\r\n by_group=False):\r\n \"\"\"\r\n Creates a tear sheet to 
view the average cumulative returns for a\r\n factor within a window (pre and post event).\r\n\r\n Parameters\r\n ----------\r\n factor_data : pd.DataFrame - MultiIndex\r\n A MultiIndex Series indexed by date (level 0) and asset (level 1),\r\n containing the values for a single alpha factor, the factor\r\n quantile/bin that factor value belongs to and (optionally) the group\r\n the asset belongs to.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n returns : pd.DataFrame\r\n A DataFrame indexed by date with assets in the columns containing daily\r\n returns.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n avgretplot: tuple (int, int) - (before, after)\r\n If not None, plot quantile average cumulative returns\r\n long_short : bool\r\n Should this computation happen on a long short portfolio? if so then\r\n factor returns will be demeaned across the factor universe\r\n group_neutral : bool\r\n Should this computation happen on a group neutral portfolio? if so,\r\n returns demeaning will occur on the group level.\r\n std_bar : boolean, optional\r\n Show plots with standard deviation bars, one for each quantile\r\n by_group : bool\r\n If True, display graphs separately for each group.\r\n \"\"\"\r\n\r\n before, after = avgretplot\r\n\r\n avg_cumulative_returns = perf.average_cumulative_return_by_quantile(\r\n factor_data,\r\n returns,\r\n periods_before=before,\r\n periods_after=after,\r\n demeaned=long_short,\r\n group_adjust=group_neutral,\r\n )\r\n\r\n num_quantiles = int(factor_data[\"factor_quantile\"].max())\r\n\r\n vertical_sections = 1\r\n if std_bar:\r\n vertical_sections += ((num_quantiles - 1) // 2) + 1\r\n cols = 2 if num_quantiles != 1 else 1\r\n gf = GridFigure(rows=vertical_sections, cols=cols)\r\n plotting.plot_quantile_average_cumulative_return(\r\n avg_cumulative_returns,\r\n by_quantile=False,\r\n std_bar=False,\r\n ax=gf.next_row(),\r\n )\r\n if std_bar:\r\n ax_avg_cumulative_returns_by_q = [\r\n gf.next_cell() for _ in range(num_quantiles)\r\n ]\r\n plotting.plot_quantile_average_cumulative_return(\r\n avg_cumulative_returns,\r\n by_quantile=True,\r\n std_bar=True,\r\n ax=ax_avg_cumulative_returns_by_q,\r\n )\r\n\r\n plt.show()\r\n gf.close()\r\n\r\n if by_group:\r\n groups = factor_data[\"group\"].unique()\r\n num_groups = len(groups)\r\n vertical_sections = ((num_groups - 1) // 2) + 1\r\n gf = GridFigure(rows=vertical_sections, cols=2)\r\n\r\n avg_cumret_by_group = perf.average_cumulative_return_by_quantile(\r\n factor_data,\r\n returns,\r\n periods_before=before,\r\n periods_after=after,\r\n demeaned=long_short,\r\n group_adjust=group_neutral,\r\n by_group=True,\r\n )\r\n\r\n for group, avg_cumret in avg_cumret_by_group.groupby(level=\"group\"):\r\n avg_cumret.index = avg_cumret.index.droplevel(\"group\")\r\n plotting.plot_quantile_average_cumulative_return(\r\n avg_cumret,\r\n by_quantile=False,\r\n std_bar=False,\r\n title=group,\r\n ax=gf.next_cell(),\r\n )\r\n\r\n plt.show()\r\n gf.close()\r\n\r\n\r\[email protected]\r\ndef create_event_study_tear_sheet(factor_data,\r\n returns,\r\n avgretplot=(5, 15),\r\n rate_of_ret=True,\r\n n_bars=50):\r\n \"\"\"\r\n Creates an event study tear sheet for analysis of a specific event.\r\n\r\n Parameters\r\n ----------\r\n factor_data : pd.DataFrame - MultiIndex\r\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\r\n containing the values for a single event, forward returns for each\r\n period, the factor quantile/bin that factor value belongs to, 
and\r\n (optionally) the group the asset belongs to.\r\n returns : pd.DataFrame, required only if 'avgretplot' is provided\r\n A DataFrame indexed by date with assets in the columns containing daily\r\n returns.\r\n - See full explanation in utils.get_clean_factor_and_forward_returns\r\n avgretplot: tuple (int, int) - (before, after), optional\r\n If not None, plot event style average cumulative returns within a\r\n window (pre and post event).\r\n rate_of_ret : bool, optional\r\n Display rate of return instead of simple return in 'Mean Period Wise\r\n Return By Factor Quantile' and 'Period Wise Return By Factor Quantile'\r\n plots\r\n n_bars : int, optional\r\n Number of bars in event distribution plot\r\n \"\"\"\r\n\r\n long_short = False\r\n\r\n plotting.plot_quantile_statistics_table(factor_data)\r\n\r\n gf = GridFigure(rows=1, cols=1)\r\n plotting.plot_events_distribution(\r\n events=factor_data[\"factor\"], num_bars=n_bars, ax=gf.next_row()\r\n )\r\n plt.show()\r\n gf.close()\r\n\r\n if returns is not None and avgretplot is not None:\r\n\r\n create_event_returns_tear_sheet(\r\n factor_data=factor_data,\r\n returns=returns,\r\n avgretplot=avgretplot,\r\n long_short=long_short,\r\n group_neutral=False,\r\n std_bar=True,\r\n by_group=False,\r\n )\r\n\r\n factor_returns = perf.factor_returns(\r\n factor_data, demeaned=False, equal_weight=True\r\n )\r\n\r\n mean_quant_ret, std_quantile = perf.mean_return_by_quantile(\r\n factor_data, by_group=False, demeaned=long_short\r\n )\r\n if rate_of_ret:\r\n mean_quant_ret = mean_quant_ret.apply(\r\n utils.rate_of_return, axis=0, base_period=mean_quant_ret.columns[0]\r\n )\r\n\r\n mean_quant_ret_bydate, std_quant_daily = perf.mean_return_by_quantile(\r\n factor_data, by_date=True, by_group=False, demeaned=long_short\r\n )\r\n if rate_of_ret:\r\n mean_quant_ret_bydate = mean_quant_ret_bydate.apply(\r\n utils.rate_of_return,\r\n axis=0,\r\n base_period=mean_quant_ret_bydate.columns[0],\r\n )\r\n\r\n fr_cols = len(factor_returns.columns)\r\n vertical_sections = 2 + fr_cols * 1\r\n gf = GridFigure(rows=vertical_sections + 1, cols=1)\r\n\r\n plotting.plot_quantile_returns_bar(\r\n mean_quant_ret, by_group=False, ylim_percentiles=None, ax=gf.next_row()\r\n )\r\n\r\n plotting.plot_quantile_returns_violin(\r\n mean_quant_ret_bydate, ylim_percentiles=(1, 99), ax=gf.next_row()\r\n )\r\n\r\n trading_calendar = factor_data.index.levels[0].freq\r\n if trading_calendar is None:\r\n trading_calendar = pd.tseries.offsets.BDay()\r\n warnings.warn(\r\n \"'freq' not set in factor_data index: assuming business day\",\r\n UserWarning,\r\n )\r\n\r\n plt.show()\r\n gf.close()\r\n" ]
[ [ "pandas.Timedelta", "pandas.tseries.offsets.BDay", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.subplot" ] ]
chen0040/mxnet-vqa
[ "9ad6d0b8019c647540cd19867a777e5da5928086" ]
[ "mxnet_vqa/library/vqa1.py" ]
[ "import mxnet as mx\nfrom mxnet import gluon, autograd, nd\nfrom mxnet.gluon import nn\nimport os\nimport numpy as np\nimport logging\n\nfrom mxnet_vqa.utils.glove_loader import GloveModel\nfrom mxnet_vqa.utils.image_utils import Vgg16FeatureExtractor\nfrom mxnet_vqa.utils.text_utils import word_tokenize\n\n\nclass Net1(gluon.Block):\n\n def __init__(self, nb_classes, **kwargs):\n super(Net1, self).__init__(**kwargs)\n self.nb_classes = nb_classes\n with self.name_scope():\n self.bn = nn.BatchNorm()\n self.dropout = nn.Dropout(.3)\n self.fc1 = nn.Dense(8192, activation='relu')\n self.fc2 = nn.Dense(self.nb_classes)\n\n def forward(self, x, *args, **kwargs):\n F = nd\n x1 = F.L2Normalization(x[0])\n x2 = F.L2Normalization(x[1])\n z = F.concat(x1, x2, dim=1)\n z = self.fc1(z)\n z = self.bn(z)\n z = self.dropout(z)\n z = self.fc2(z)\n return z\n\n\nclass VQANet(object):\n model_name = 'vqa-net-1'\n\n def __init__(self, model_ctx=mx.cpu(), data_ctx=mx.cpu()):\n self.model = None\n self.version = '0'\n self.model_ctx = model_ctx\n self.data_ctx = data_ctx\n self.input_mode_answer = 'int'\n self.input_mode_question = 'add'\n self.nb_classes = 1001\n self.meta = None\n self.glove_model = GloveModel()\n self.fe = Vgg16FeatureExtractor()\n\n def get_config_file_path(self, model_dir_path):\n return os.path.join(model_dir_path, VQANet.model_name + '-v' + self.version + '-config.npy')\n\n def get_params_file_path(self, model_dir_path):\n return os.path.join(model_dir_path, VQANet.model_name + '-v' + self.version + '-net.params')\n\n def evaluate_accuracy(self, data_iterator):\n metric = mx.metric.Accuracy()\n data_iterator.reset()\n for i, batch in enumerate(data_iterator):\n data1 = batch.data[0].as_in_context(self.model_ctx)\n data2 = batch.data[1]\n if len(data2.shape) == 3:\n # collapse the last 2 dimension\n data2 = data2.reshape((data2.shape[0], data2.shape[1] * data2.shape[2]))\n data2 = data2.as_in_context(self.model_ctx)\n data = [data1, data2]\n label = batch.label[0].as_in_context(self.model_ctx)\n output = self.model(data)\n\n # metric.update(preds=output, labels=label)\n metric.update([label], [output])\n return metric.get()[1]\n\n def load_model(self, model_dir_path):\n config = np.load(self.get_config_file_path(model_dir_path)).item()\n self.input_mode_answer = config['input_mode_answer']\n self.input_mode_question = config['input_mode_question']\n self.nb_classes = config['nb_classes']\n self.meta = config['meta']\n self.model = Net1(self.nb_classes)\n self.model.load_params(self.get_params_file_path(model_dir_path), ctx=self.model_ctx)\n\n def checkpoint(self, model_dir_path):\n self.model.save_params(self.get_params_file_path(model_dir_path))\n\n def save_history(self, history, model_dir_path):\n return np.save(os.path.join(model_dir_path, VQANet.model_name + '-v' + self.version + '-history.npy'), history)\n\n def fit(self, data_train, data_eva, meta, model_dir_path, epochs=10, learning_rate=0.01):\n\n config = dict()\n config['input_mode_answer'] = self.input_mode_answer\n config['input_mode_question'] = self.input_mode_question\n config['nb_classes'] = self.nb_classes\n config['meta'] = meta\n self.meta = meta\n np.save(self.get_config_file_path(model_dir_path), config)\n\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n\n self.model = Net1(self.nb_classes)\n self.model.collect_params().initialize(init=mx.init.Xavier(), ctx=self.model_ctx)\n trainer = gluon.Trainer(self.model.collect_params(), 'sgd', {'learning_rate': learning_rate})\n\n history = dict()\n history['train_acc'] = 
list()\n history['val_acc'] = list()\n\n moving_loss = 0.\n best_eva = 0\n for e in range(epochs):\n data_train.reset()\n for i, batch in enumerate(data_train):\n batch_size = batch.data[0].shape[0]\n\n data1 = batch.data[0].as_in_context(self.model_ctx)\n data2 = batch.data[1]\n if len(data2.shape) == 3:\n # collapse the last 2 dimension\n data2 = data2.reshape((batch_size, data2.shape[1] * data2.shape[2]))\n data2 = data2.as_in_context(self.model_ctx)\n data = [data1, data2]\n label = batch.label[0].as_in_context(self.model_ctx)\n with autograd.record():\n output = self.model(data)\n cross_entropy = loss(output, label)\n cross_entropy.backward()\n trainer.step(batch_size)\n\n if i == 0:\n moving_loss = np.mean(cross_entropy.asnumpy()[0])\n else:\n moving_loss = .99 * moving_loss + .01 * np.mean(cross_entropy.asnumpy()[0])\n if i % 200 == 0:\n logging.debug(\"Epoch %s, batch %s. Moving avg of loss: %s\", e, i, moving_loss)\n eva_accuracy = self.evaluate_accuracy(data_iterator=data_eva)\n train_accuracy = self.evaluate_accuracy(data_iterator=data_train)\n history['train_acc'].append(train_accuracy)\n history['val_acc'].append(eva_accuracy)\n print(\"Epoch %s. Loss: %s, Train_acc %s, Eval_acc %s\" % (e, moving_loss, train_accuracy, eva_accuracy))\n if eva_accuracy > best_eva:\n best_eva = eva_accuracy\n logging.info('Best validation acc found. Checkpointing...')\n self.checkpoint(model_dir_path)\n if e % 5 == 0:\n self.save_history(history, model_dir_path)\n\n self.save_history(history, model_dir_path)\n return history\n\n def predict_answer_class(self, img_path, question):\n f = self.fe.extract_image_features(img_path)\n questions_matrix_shape = self.meta['questions_matrix_shape']\n if len(questions_matrix_shape) == 2:\n max_seq_length = questions_matrix_shape[0]\n question_matrix = np.zeros(shape=(1, max_seq_length, 300))\n words = word_tokenize(question.lower())\n for i, word in enumerate(words[0:min(max_seq_length, len(words))]):\n question_matrix[0, i, :] = self.glove_model.encode_word(word)\n input_data = [f.as_in_context(self.model_ctx),\n nd.array(question_matrix, ctx=self.model_ctx).reshape(1, max_seq_length * 300)]\n output = self.model(input_data)\n return nd.argmax(output, axis=1).astype(np.uint8).asscalar()\n else:\n words = word_tokenize(question.lower())\n E = np.zeros(shape=(300, len(words)))\n for j, word in enumerate(words):\n E[:, j] = self.glove_model.encode_word(word)\n question_matrix = np.sum(E, axis=1)\n input_data = [f.as_in_context(self.model_ctx),\n nd.array(question_matrix, ctx=self.model_ctx).reshape(1, 300)]\n output = self.model(input_data)\n return nd.argmax(output, axis=1).astype(np.uint8).asscalar()\n\n def load_glove_300(self, data_dir_path):\n self.glove_model.load(data_dir_path, embedding_dim=300)" ]
[ [ "numpy.sum", "numpy.zeros" ] ]
cleverhans-lab/unrolling-sgd
[ "49e001f9cc77b61d65eac3bf26888b5183b73bef" ]
[ "BERT/regular_bert.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport copy\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom PyHessian.pyhessian import hessian # Hessian computation\nimport os\nimport argparse\nimport numpy as np\nimport random \n\n\nRANDOM_SEED = 100\ntorch.manual_seed(RANDOM_SEED)\ntorch.cuda.manual_seed(RANDOM_SEED)\ntorch.cuda.manual_seed_all(RANDOM_SEED)\nnp.random.seed(RANDOM_SEED)\nrandom.seed(RANDOM_SEED)\n\ndef validate(model,test_dataloader):\n correct = 0\n total = 0\n for images_test, labels_test in test_dataloader:\n images_test = images_test.to(device)\n labels_test = labels_test.to(device)\n outputs_test = model(images_test)\n\n\n _, predicted = torch.max(outputs_test.data, 1)\n total+= labels_test.squeeze().size(0)\n # for gpu, bring the predicted and labels back to cpu fro python operations to work\n correct+= (predicted == labels_test.squeeze()).sum()\n accuracy = 100 * correct.item()/total\n return accuracy\n\n#puts the weights into a list, but faster\ndef weights_to_list_fast(weights):\n with torch.no_grad():\n weights_list = []\n for weight in weights:\n list_t = weight.view(-1).tolist()\n weights_list = weights_list + list_t\n\n return weights_list\n\n\n\n#=====================================================================\n#=====================================================================\n#=====================================================================\n\n\nparser = argparse.ArgumentParser(description='Finetuning for verification error')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--model', default='resnet', type=str, help='resnet or vgg')\nparser.add_argument('--pretrain_epochs', default=10, type=int, help='number of pretraining epochs')\nparser.add_argument('--pretrain_batch_size', default=32, type=int, help='pretraining batch size')\nparser.add_argument('--weight_decay', default=0.0, type=float, help='number of finetuning epochs')\nargs = parser.parse_args()\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nlr = args.lr\nwd = args.weight_decay\nstd_reg = 0.0\ndef std_loss(x,y):\n log_prob = -1.0 * F.log_softmax(x, 1)\n loss = log_prob.gather(1, y.unsqueeze(1))\n loss = loss.mean()\n avg_std = torch.sum(torch.std(x, dim=1))/(len(x.view(-1)))\n loss = loss + std_reg*avg_std\n l2_norm = 0\n M_weights_tensor = [param for param in model.parameters()]\n curr_weights = weights_to_list_fast(M_weights_tensor)\n l2_norm = np.linalg.norm((np.array(curr_weights)))\n \n loss += l2_norm*wd\n return loss\n\nfrom torchvision import transforms, datasets\nimport torch.nn as nn\n\ntrain_y = torch.load(\"./imdb_bert_train_labels.pt\")\ntrain_x = torch.load(\"./imdb_bert_train_pooled.pt\")\n\ntest_y = torch.load(\"./imdb_bert_test_labels.pt\")\ntest_x = torch.load(\"./imdb_bert_test_pooled.pt\")\n\nclass CustomTextDataset(Dataset):\n def __init__(self, txt, labels):\n self.labels = labels\n self.text = txt\n def __len__(self):\n return len(self.labels)\n def __getitem__(self, idx):\n label = self.labels[idx]\n image = self.text[idx]\n sample = (image,label)\n return sample\n\ntrain_data = CustomTextDataset(train_x,train_y)\ntest_data= CustomTextDataset(test_x,test_y)\n\ntrain_dataloader = DataLoader(train_data,batch_size = args.pretrain_batch_size, shuffle = True)\ntest_dataloader = DataLoader(test_data,batch_size = 64, shuffle 
= False)\n\nmodel = torch.nn.Linear(768, 2)\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nmodel = model.to(device)\n\nif device == 'cuda':\n model = torch.nn.DataParallel(model)\n cudnn.benchmark = True\n\noptimizer = optim.SGD(model.parameters(), lr =lr)\n\ntest_acc_list = []\n\ntotal_len = len(train_dataloader)\n\nfor epoch in range(args.pretrain_epochs):\n train_correct = 0\n train_total = 0\n print(f'on pretraining epoch = {epoch}')\n for i, (inputs, labels) in enumerate(train_dataloader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n outputs =model(inputs)\n\n labels = labels.squeeze()\n loss = std_loss(outputs, labels)\n loss.backward()\n optimizer.step()\n acc = validate(model, test_dataloader)\n test_acc_list.append(acc) \nprint(f\"Testing accuracy after {args.pretrain_epochs} epoch of pretraining = {acc}\")\nfinal_test_acc = acc\n\nret = {}\nret['test_acc'] = test_acc_list\n\nimport pickle\nif not os.path.isdir('regular_results'):\n os.mkdir('regular_results')\npath = f'./regular_results/{args.model}_{args.lr}_{args.pretrain_batch_size}_{args.pretrain_epochs}_{std_reg}_{wd}.p'\npickle.dump(ret, open(path, 'wb'))\n\n\n\n\n\n\n" ]
[ [ "torch.nn.Linear", "numpy.array", "torch.cuda.manual_seed", "torch.cuda.manual_seed_all", "torch.max", "numpy.random.seed", "torch.no_grad", "torch.std", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "torch.nn.DataParallel" ] ]
studioalight/char-rnn-tensorflow
[ "6c1134982590c42787fc8af1181720eb88e55d6b" ]
[ "sample.py" ]
[ "from __future__ import print_function\nimport tensorflow as tf\n\nimport argparse\nimport os\nfrom six.moves import cPickle\n\nfrom model import Model\n\nfrom six import text_type\n\n\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--save_dir', type=str, default='save',\n help='model directory to store checkpointed models')\n parser.add_argument('-n', type=int, default=500,\n help='number of characters to sample')\n parser.add_argument('--prime', type=text_type, default=u' ',\n help='prime text')\n parser.add_argument('--sample', type=int, default=1,\n help='0 to use max at each timestep, 1 to sample at '\n 'each timestep, 2 to sample on spaces')\n\n args = parser.parse_args()\n sample(args)\n\n\ndef sample(args):\n with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:\n saved_args = cPickle.load(f)\n with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:\n chars, vocab = cPickle.load(f)\n model = Model(saved_args, training=False)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(tf.global_variables())\n ckpt = tf.train.get_checkpoint_state(args.save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(model.sample(sess, chars, vocab, args.n, args.prime,\n args.sample))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.Session", "tensorflow.global_variables_initializer", "tensorflow.train.get_checkpoint_state", "tensorflow.global_variables" ] ]
marcusabate/Kimera-LCD-ROS
[ "a6a98c109f6be0cf6c32423c74c70cfcfc9eba90" ]
[ "scripts/ros_lcd_data_provider.py" ]
[ "#! /usr/bin/env python\n\nimport rospy\nimport numpy as np\n\nimport message_filters\nfrom sensor_msgs.msg import Image, CameraInfo\nfrom geometry_msgs.msg import Pose\nimport tf.transformations as transformations\nfrom kimera_lcd_ros.msg import LcdInputPayload, StereoFrame, StereoMatchingParams, CameraParams\n\nclass RosLcdDataProviderNode:\n def __init__(self):\n\n self.image_sub_left = message_filters.Subscriber(\"left_cam\", Image)\n self.image_sub_right = message_filters.Subscriber(\"right_cam\", Image)\n\n self.ts = message_filters.TimeSynchronizer(\n [self.image_sub_left, self.image_sub_right], 10)\n self.ts.registerCallback(self.camera_cb)\n\n self.lcd_input_pub = rospy.Publisher(\"lcd_input\", LcdInputPayload, queue_size=1)\n\n self.L_Pose_R = Pose()\n self.left_camera_params = CameraParams()\n self.right_camera_params = CameraParams()\n self.stereo_matching_params = StereoMatchingParams()\n \n self.frame_id = 1\n self.cur_kf_id = 1\n\n self.stereoMatchingParamInit()\n self.parse_camera_data()\n \n def camera_cb(self, left_msg, right_msg):\n \"\"\" ROS callback for time-synchronized camera images. Left and right camera \n messages are synchronized and stored for use in LcdIputPayloads.\n\n param left_msg: A sensor_msgs.Image message representing the left camera.\n param right_msg: A sensor_msgs.Image message representing the right camera.\n \"\"\"\n if self.frame_id % 5 == 0:\n stereo_frame = StereoFrame()\n stereo_frame.header = left_msg.header\n stereo_frame.timestamp = RosLcdDataProviderNode.rosTimeToUint(left_msg.header.stamp)\n stereo_frame.id = self.cur_kf_id\n stereo_frame.left_image = left_msg\n stereo_frame.right_image = right_msg\n stereo_frame.left_camera_params = self.left_camera_params\n stereo_frame.right_camera_params = self.right_camera_params\n stereo_frame.stereo_matching_params = self.stereo_matching_params\n stereo_frame.L_Pose_R = self.L_Pose_R\n\n lcd_input_payload = LcdInputPayload()\n lcd_input_payload.header = stereo_frame.header\n lcd_input_payload.timestamp_kf = stereo_frame.timestamp\n lcd_input_payload.cur_kf_id = self.cur_kf_id\n lcd_input_payload.stereo_frame = stereo_frame\n lcd_input_payload.w_Pose_b_lkf = Pose()\n\n self.lcd_input_pub.publish(lcd_input_payload)\n\n self.cur_kf_id += 1\n self.frame_id += 1\n\n def parse_camera_data(self):\n \"\"\" Retrieve camera data from calibration files. 
\"\"\"\n rate = rospy.get_param(\"~camera_rate_hz\")\n resolution = rospy.get_param(\"~camera_resolution\")\n body_Pose_cam = body_Pose_cam = rospy.get_param(\"~calibration_to_body_frame\")\n distortion_model = rospy.get_param(\"~distortion_model\")\n\n for i in range(2):\n camera_name = \"\"\n camera_param_i = CameraParams()\n camera_param_i.frame_rate = rate\n camera_param_i.image_size = resolution\n camera_param_i.body_Pose_cam = body_Pose_cam\n camera_param_i.distortion_model = distortion_model\n\n if i == 0:\n camera_name = \"left_camera_\"\n else:\n camera_name = \"right_camera_\"\n \n camera_param_i.intrinsics = rospy.get_param(\"~\" + camera_name + \"intrinsics\")\n camera_param_i.extrinsics = rospy.get_param(\"~\" + camera_name + \"extrinsics\")\n camera_param_i.distortion_coefficients = rospy.get_param(\"~\" + camera_name + \"distortion_coefficients\")\n \n if (i == 0):\n self.left_camera_params = camera_param_i\n else:\n self.right_camera_params = camera_param_i\n\n L_Pose_R_mat = np.dot(np.linalg.inv(np.reshape(self.left_camera_params.extrinsics, (4,4))),\n np.reshape(self.right_camera_params.extrinsics, (4,4)))\n\n L_Pose_R_trans = L_Pose_R_mat[:3,3]\n self.L_Pose_R.position.x = L_Pose_R_trans[0]\n self.L_Pose_R.position.y = L_Pose_R_trans[1]\n self.L_Pose_R.position.z = L_Pose_R_trans[2]\n\n L_Pose_R_mat[:3,3] = np.array([0, 0, 0])\n L_Pose_R_quat = transformations.quaternion_from_matrix(L_Pose_R_mat)\n\n self.L_Pose_R.orientation.x = L_Pose_R_quat[0]\n self.L_Pose_R.orientation.y = L_Pose_R_quat[1]\n self.L_Pose_R.orientation.z = L_Pose_R_quat[2]\n self.L_Pose_R.orientation.w = L_Pose_R_quat[3]\n \n @staticmethod\n def rosTimeToUint(stamp):\n \"\"\"\n \"\"\"\n return stamp.to_nsec()\n\n def stereoMatchingParamInit(self):\n \"\"\"\n \"\"\"\n self.stereo_matching_params.tol_template_matching = 0.15\n self.stereo_matching_params.nominal_baseline = 0.11\n self.stereo_matching_params.templ_cols = 101\n self.stereo_matching_params.templ_rows = 11\n self.stereo_matching_params.stripe_extra_rows = 0\n self.stereo_matching_params.min_point_dist = 0.5\n self.stereo_matching_params.max_point_dist = 10\n self.stereo_matching_params.bidirectional_matching = False\n self.stereo_matching_params.subpixel_refinement = False\n self.stereo_matching_params.equalize_image = False\n self.stereo_matching_params.vision_sensor_type = 0\n self.stereo_matching_params.min_depth_factor = 0.3\n self.stereo_matching_params.map_depth_factor = 0.001\n\n\nif __name__ == \"__main__\":\n rospy.init_node(\"ros_lcd_data_provider_node\")\n ros_lcd_data_provider_node = RosLcdDataProviderNode()\n rospy.spin()\n" ]
[ [ "numpy.array", "numpy.reshape" ] ]
vmzhang/studyGroup
[ "d49ddc32bdd7ac91d73cb8890154e1965d1dcfd0" ]
[ "lessons/python/matplotlib/hwk2.2.py" ]
[ "#import necessary modules for poly1d and arange from numpy\nfrom numpy import poly1d,arange\nimport matplotlib.pyplot as plt\n\n#Generate an empty array for storing the roots\nroots=[]\n\n#Nested loops iterates through all the range of s and zeta passing the values\n#to the poly1d and solving the roots. The roots are then stored in the roots array\nfor s in arange(0.0,0.65,0.01):\n for z in arange(0.0,0.62,0.01):\n \n p=poly1d([1,0,z-2,z,0],variable='s')\n roots.append(p.r)\n \n#Generate empty array for storing non zero values of the roots \nnonzeros=[]\n#This loop extracts out the non zero values of the roots and appends it to a new array\nfor i in range(len(roots)-1):\n nonzeros.append(roots[i][0:3])\n\n#Plot zeros\nplt.plot(nonzeros)\n\n#save figure\n\nplt.savefig(\"roots.pdf\")\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.arange", "numpy.poly1d" ] ]
peisabelle/EVAP_data_worflow
[ "9b1b2ea1fbc35173ce31ed21c53b9271804fc5cb" ]
[ "process_micromet/rename_trim_vars.py" ]
[ "# -*- coding: utf-8 -*-\nimport pandas as pd\n\ndef rename_trim_vars(stationName,varNameExcelTab,df,tab):\n \"\"\"Rename variables according to an Excel spreadsheet. Trim the DataFrame\n in order to keep only the variable specified in the spreadsheet\n\n Parameters\n ----------\n stationName: name of the station\n varNameExcelTab: path and name of the Excel file that contains the\n variable description and their names\n df: pandas DataFrame that contains the variables\n tab: Excel spreadsheet table suffix that refers to the source of the data\n (either 'cs' for Campbell Scientific files, or 'eddypro' for EddyPro\n output files)\n\n Returns\n -------\n df: a nice and tidy pandas DataFrame\n \"\"\"\n\n print('Start renaming variables for station:', stationName, '...', end='\\r')\n # Import Excel documentation file\n xlsFile = pd.ExcelFile(varNameExcelTab)\n column_dic = pd.read_excel(xlsFile,stationName+'_'+tab)\n\n # Make translation dictionary from CS vars to DB vars\n lines_to_include = column_dic.iloc[:,0].str.contains('NA - Only stored as binary|Database variable name', regex=True)\n column_dic = column_dic[lines_to_include == False]\n column_dic = column_dic.iloc[:,[0,1]]\n column_dic.columns = ['db_name','cs_name']\n\n # Trim dataframe and rename columns\n idColumnsIntersect = column_dic.cs_name.isin(df.columns)\n df = df[column_dic.cs_name[idColumnsIntersect]]\n df.columns = column_dic.db_name[idColumnsIntersect]\n\n # Merge columns that have similar column name\n if df.keys().shape != df.keys().unique().shape:\n df = df.groupby(df.columns, axis=1).mean()\n print('Done!')\n\n return df" ]
[ [ "pandas.ExcelFile", "pandas.read_excel" ] ]
maxfrei750/ignite
[ "0e97cb289f64de9679a04f18969c6e761aa146b0" ]
[ "ignite/contrib/metrics/regression/fractional_absolute_error.py" ]
[ "\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\n\n\nclass FractionalAbsoluteError(_BaseRegression):\n r\"\"\"\n Calculates the Fractional Absolute Error.\n\n :math:`\\text{FAE} = \\frac{1}{n}\\sum_{j=1}^n\\frac{2 |A_j - P_j|}{|A_j| + |P_j|}`\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`__.\n\n - `update` must receive output of the form `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n __ https://arxiv.org/abs/1809.03006\n \"\"\"\n\n def reset(self):\n self._sum_of_errors = 0.0\n self._num_examples = 0\n\n def _update(self, output):\n y_pred, y = output\n errors = 2 * torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred)))\n self._sum_of_errors += torch.sum(errors).item()\n self._num_examples += y.shape[0]\n\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError('FractionalAbsoluteError must have at least '\n 'one example before it can be computed.')\n return self._sum_of_errors / self._num_examples\n" ]
[ [ "torch.abs", "torch.sum" ] ]
aangelopoulos/ltt
[ "c03d13073146ca9fb2da327de6e279b0663de31c" ]
[ "experiments/coco/src/grid_fig.py" ]
[ "import torch\nimport torchvision as tv\nfrom ASL.src.helper_functions.helper_functions import parse_args\nfrom ASL.src.loss_functions.losses import AsymmetricLoss, AsymmetricLossOptimized\nfrom ASL.src.models import create_model\nimport argparse\nimport numpy as np\nfrom scipy.stats import binom\nfrom PIL import Image\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom utils import *\nimport random\nimport pdb\n\nparser = argparse.ArgumentParser(description='ASL MS-COCO predictor')\n\nparser.add_argument('--model_path',type=str,default='./ASL/models_local/MS_COCO_TResNet_xl_640_88.4.pth')\nparser.add_argument('--dset_path',type=str,default='../data/')\nparser.add_argument('--model_name',type=str,default='tresnet_xl')\nparser.add_argument('--input_size',type=int,default=640)\nparser.add_argument('--dataset_type',type=str,default='MS-COCO')\nparser.add_argument('--batch_size',type=int,default=64)\nparser.add_argument('--th',type=float,default=0.6527)\n\ndef random_example(dataset, model, scores_to_labels, corr, classes_list):\n i = random.randint(0,len(dataset)-1) \n img = dataset[i][0]\n ann = dataset[i][1][0]\n\n labels = []\n annotations = dataset.coco.getAnnIds(imgIds=int(ann['image_id'])) \n for annotation in dataset.coco.loadAnns(annotations):\n labels = labels + [classes_list[corr[annotation['category_id']]]]\n labels = list(np.unique(np.array(labels)))\n est_labels = scores_to_labels(torch.sigmoid(model(img.unsqueeze(0).cuda()).cpu()))\n return img.permute(1,2,0), est_labels, [labels]\n\ndef gridplot_imgs(imgs,est_labels,labels,rows,cols):\n fig, axs = plt.subplots(nrows=rows,ncols=cols,figsize=(cols*10,rows*10))\n props = dict(boxstyle='round', facecolor='white', alpha=0.8)\n for idx, img in enumerate(imgs):\n r = idx//cols\n c = idx % cols\n axs[r,c].axis('off')\n axs[r,c].imshow(img, aspect='equal')\n corr_labelstr = \"\"\n est_labelstr = \"\"\n all_labelstr = \"\"\n fake_labelstr = \"\"\n num_labels = 0\n for i in range(len(est_labels[idx])):\n if est_labels[idx][i] in labels[idx]:\n corr_labelstr += est_labels[idx][i] + '\\n'\n est_labelstr = '\\n' + est_labelstr\n all_labelstr = '\\n' + all_labelstr \n fake_labelstr += est_labels[idx][i] + '\\n'\n else:\n est_labelstr += est_labels[idx][i] + '\\n'\n all_labelstr += '\\n'\n fake_labelstr += est_labels[idx][i] + '\\n'\n num_labels += 1\n\n for i in range(len(labels[idx])):\n if labels[idx][i] not in est_labels[idx]:\n all_labelstr += labels[idx][i] + '\\n'\n fake_labelstr += labels[idx][i] + '\\n'\n num_labels += 1\n\n # Remove last newline\n fake_labelstr = fake_labelstr[0:-1]\n all_labelstr = all_labelstr[0:-1]\n est_labelstr = est_labelstr[0:-1]\n corr_labelstr = corr_labelstr[0:-1] \n\n # Resize text\n fontsize = 32\n if(num_labels <= 5):\n fontsize = 48\n\n # Make a fake bbox first.\n axs[r,c].text(0.05,0.95,fake_labelstr,transform=axs[r,c].transAxes,fontsize=fontsize,color='#00000000',verticalalignment='top',bbox=props)\n axs[r,c].text(0.05,0.95,all_labelstr,transform=axs[r,c].transAxes,fontsize=fontsize,color='#ff4555',verticalalignment='top')\n axs[r,c].text(0.05,0.95,est_labelstr,transform=axs[r,c].transAxes,fontsize=fontsize,color='#40B5BC',verticalalignment='top')\n axs[r,c].text(0.05,0.95,corr_labelstr,transform=axs[r,c].transAxes,fontsize=fontsize,color='k',verticalalignment='top')\n\n plt.tight_layout()\n plt.subplots_adjust(wspace=0.05,hspace=0.05)\n plt.savefig('../outputs/coco_grid_fig.pdf')\n\nif __name__ == \"__main__\":\n with torch.no_grad():\n fix_randomness(seed=238)\n args = 
parse_args(parser)\n\n dataset = tv.datasets.CocoDetection('../data/val2017','../data/annotations_trainval2017/instances_val2017.json',transform=tv.transforms.Compose([tv.transforms.Resize((args.input_size, args.input_size)),\n tv.transforms.ToTensor()]))\n state = torch.load(args.model_path, map_location='cpu')\n classes_list = np.array(list(state['idx_to_class'].values()))\n args.num_classes = state['num_classes']\n model = create_model(args).cuda()\n model.load_state_dict(state['model'], strict=True)\n model.eval()\n print('Model Loaded')\n corr = get_correspondence(classes_list,dataset.coco.cats)\n\n rows = 2\n cols = 5 \n\n def scores_to_labels(x):\n tw = torch.where(x > args.th)\n est_labels = [[]]*x.shape[0]\n\n for k in tw[0].unique():\n est_labels[k] = [classes_list[idx] for idx in tw[1][tw[0]==0]]\n\n return est_labels\n\n imgs = []\n est_labels = []\n labels = []\n for i in range(rows*cols):\n img, est_label, label = random_example(dataset,model,scores_to_labels,corr,classes_list)\n imgs = imgs + [img]\n est_labels = est_labels + est_label\n labels = labels + label\n gridplot_imgs(imgs,est_labels,labels,rows,cols)\n" ]
[ [ "numpy.array", "matplotlib.pyplot.savefig", "torch.no_grad", "matplotlib.pyplot.subplots", "matplotlib.pyplot.tight_layout", "torch.load", "matplotlib.pyplot.subplots_adjust", "torch.where" ] ]
j-c-cook/pygfunction
[ "73cb9292fc39a068bd3d4ebe66b07ec9c8903c8d" ]
[ "pygfunction/examples/load_aggregation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\" Example of simulation of a geothermal system.\n\n The g-function of a single borehole is calculated for boundary condition of\n uniform borehole wall temperature along the borehole. Then, the borehole\n wall temperature variations resulting from a time-varying load profile\n are simulated using the aggregation method of Claesson and Javed (2012).\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.constants import pi\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import fftconvolve\n\nimport pygfunction as gt\n\n\ndef main():\n # -------------------------------------------------------------------------\n # Simulation parameters\n # -------------------------------------------------------------------------\n\n # Borehole dimensions\n D = 4.0 # Borehole buried depth (m)\n H = 150.0 # Borehole length (m)\n r_b = 0.075 # Borehole radius (m)\n\n # Ground properties\n alpha = 1.0e-6 # Ground thermal diffusivity (m2/s)\n k_s = 2.0 # Ground thermal conductivity (W/m.K)\n T_g = 10.0 # Undisturbed ground temperature (degC)\n\n # Number of segments per borehole\n nSegments = 12\n\n # Simulation parameters\n dt = 3600. # Time step (s)\n tmax = 20.*8760. * 3600. # Maximum time (s)\n Nt = int(np.ceil(tmax/dt)) # Number of time steps\n time = dt * np.arange(1, Nt+1)\n\n # Evaluate heat extraction rate\n Q_b = synthetic_load(time/3600.)\n\n # Load aggregation scheme\n LoadAgg = gt.load_aggregation.ClaessonJaved(dt, tmax)\n\n # -------------------------------------------------------------------------\n # Calculate g-function\n # -------------------------------------------------------------------------\n\n # The field contains only one borehole\n boreField = [gt.boreholes.Borehole(H, D, r_b, x=0., y=0.)]\n # Get time values needed for g-function evaluation\n time_req = LoadAgg.get_times_for_simulation()\n # Calculate g-function\n gFunc = gt.gfunction.uniform_temperature(boreField, time_req, alpha,\n nSegments=nSegments)\n # Initialize load aggregation scheme\n LoadAgg.initialize(gFunc/(2*pi*k_s))\n\n # -------------------------------------------------------------------------\n # Simulation\n # -------------------------------------------------------------------------\n\n T_b = np.zeros(Nt)\n for i in range(Nt):\n # Increment time step by (1)\n LoadAgg.next_time_step(time[i])\n\n # Apply current load\n LoadAgg.set_current_load(Q_b[i]/H)\n\n # Evaluate borehole wall temeprature\n deltaT_b = LoadAgg.temporal_superposition()\n T_b[i] = T_g - deltaT_b\n\n # -------------------------------------------------------------------------\n # Calculate exact solution from convolution in the Fourier domain\n # -------------------------------------------------------------------------\n\n # Heat extraction rate increment\n dQ = np.zeros(Nt)\n dQ[0] = Q_b[0]\n # Interpolated g-function\n g = interp1d(time_req, gFunc)(time)\n for i in range(1, Nt):\n dQ[i] = Q_b[i] - Q_b[i-1]\n\n # Convolution in Fourier domain\n T_b_exact = T_g - fftconvolve(dQ, g/(2.0*pi*k_s*H), mode='full')[0:Nt]\n\n # -------------------------------------------------------------------------\n # plot results\n # -------------------------------------------------------------------------\n\n # Configure figure and axes\n fig = gt.utilities._initialize_figure()\n\n ax1 = fig.add_subplot(311)\n # Axis labels\n ax1.set_xlabel(r'$t$ [hours]')\n ax1.set_ylabel(r'$Q_b$ [W]')\n gt.utilities._format_axes(ax1)\n\n\n hours = np.array([(j+1)*dt/3600. 
for j in range(Nt)])\n ax1.plot(hours, Q_b)\n\n ax2 = fig.add_subplot(312)\n # Axis labels\n ax2.set_xlabel(r'$t$ [hours]')\n ax2.set_ylabel(r'$T_b$ [degC]')\n gt.utilities._format_axes(ax2)\n\n ax2.plot(hours, T_b)\n ax2.plot(hours, T_b_exact, 'k.')\n\n ax3 = fig.add_subplot(313)\n # Axis labels\n ax3.set_xlabel(r'$t$ [hours]')\n ax3.set_ylabel(r'Error [degC]')\n gt.utilities._format_axes(ax3)\n\n ax3.plot(hours, T_b - T_b_exact)\n\n # Adjust to plot window\n plt.tight_layout()\n\n return\n\n\ndef synthetic_load(x):\n \"\"\"\n Synthetic load profile of Bernier et al. (2004).\n\n Returns load y (in watts) at time x (in hours).\n \"\"\"\n A = 2000.0\n B = 2190.0\n C = 80.0\n D = 2.0\n E = 0.01\n F = 0.0\n G = 0.95\n\n func = (168.0-C)/168.0\n for i in [1,2,3]:\n func += 1.0/(i*pi)*(np.cos(C*pi*i/84.0)-1.0) \\\n *(np.sin(pi*i/84.0*(x-B)))\n func = func*A*np.sin(pi/12.0*(x-B)) \\\n *np.sin(pi/4380.0*(x-B))\n\n y = func + (-1.0)**np.floor(D/8760.0*(x-B))*abs(func) \\\n + E*(-1.0)**np.floor(D/8760.0*(x-B))/np.sign(np.cos(D*pi/4380.0*(x-F))+G)\n return -y\n\n\n# Main function\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.ceil", "scipy.interpolate.interp1d", "numpy.sin", "scipy.signal.fftconvolve", "numpy.zeros", "numpy.arange", "matplotlib.pyplot.tight_layout", "numpy.cos", "numpy.floor" ] ]
imatge-upc/munegc
[ "92a820c1665e760bc7736595dd5dced19df448c1" ]
[ "Fusion2D3DMUNEGC/torch_geometric_extension/munegc.py" ]
[ "\"\"\"\n 2D–3D Geometric Fusion network using Multi-Neighbourhood Graph Convolution for RGB-D indoor scene classification\n 2021 Albert Mosella-Montoro <[email protected]>\n\"\"\"\n\nimport torch\nimport torch_geometric\n\nfrom .agc import create_agc\nfrom .graph_reg import GraphReg\nfrom .graph_reg import numberEdgeAttr\n\n\nclass MUNEGC(torch.nn.Module):\n def __init__(self, in_channels, out_channels,\n neighs=9, rad_neigh=None,\n fnetw=[128], edge_attr=['posspherical'], edge_attr_feat=None,\n fnet_llbias=True, fnet_tanh=True,\n aggr='avg', bias=False, flow='source_to_target'):\n\n super(MUNEGC, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.n_neighs = neighs\n self.rad_neigh = rad_neigh\n\n self.edge_attr = edge_attr\n self.edge_attr_feat = edge_attr_feat if (edge_attr_feat is not None) else edge_attr\n self.flow = flow\n self.aggr = aggr.strip().lower()\n\n self.knn = True if (rad_neigh is None) else False\n\n self.fnetw_geo = [numberEdgeAttr(self.edge_attr, self.in_channels)] + fnetw\n self.fnetw_feat = [numberEdgeAttr(self.edge_attr_feat, self.in_channels)] + fnetw\n\n self.agc_geo = create_agc(self.in_channels, self.out_channels,\n self.fnetw_geo,\n fnet_llbias=fnet_llbias,\n fnet_tanh=fnet_tanh, bias=bias, flow=flow)\n\n self.agc_feat = create_agc(self.in_channels, self.out_channels,\n self.fnetw_feat,\n fnet_llbias=fnet_llbias,\n fnet_tanh=fnet_tanh, bias=bias, flow=flow)\n\n self.graph_gen_geo = GraphReg(n_neigh=neighs, rad_neigh=rad_neigh, knn=self.knn,\n self_loop=True, edge_attr=self.edge_attr,\n flow=flow)\n\n self.gen_edge_attr_feat = GraphReg(knn=None, edge_attr=self.edge_attr_feat,\n flow=flow)\n\n def forward(self, data):\n data = self.graph_gen_geo(data)\n x_geo = self.agc_geo(data.x, data.edge_index, data.edge_attr.float())\n\n data.edge_index = None\n data.edge_attr = None\n\n data.edge_index = torch_geometric.nn.knn_graph(data.x, self.n_neighs,\n data.batch, loop=True,\n flow=self.flow)\n data = self.gen_edge_attr_feat(data)\n\n x_feat = self.agc_feat(data.x, data.edge_index, data.edge_attr.float())\n\n if self.aggr == 'avg':\n data.x = (x_geo + x_feat)/2\n\n elif self.aggr == 'max':\n data.x = torch.max(x_geo, x_feat).squeeze(-1)\n else:\n raise RuntimeError('Invalid aggregation')\n data.edge_index = None\n data.edge_attr = None\n\n return data\n\n def extra_repr(self):\n s = '{}({}, {}'.format(self.__class__.__name__, self.in_channels,\n self.out_channels)\n s += ', aggr: {}'.format(self.aggr)\n s += ')'\n return s.format(**self.__dict__)\n" ]
[ [ "torch.max" ] ]
correac/commah
[ "fc2531c1cea90f144a8c8dea4ed414d1044b83b7" ]
[ "examples.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport commah\n\n\ndef runcommand(cosmology='WMAP5'):\n \"\"\" Example interface commands \"\"\"\n\n # Return the WMAP5 cosmology concentration predicted for\n # z=0 range of masses\n Mi = [1e8, 1e9, 1e10]\n zi = 0\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n\n print(output['c'].flatten())\n\n # Return the WMAP5 cosmology concentration predicted for\n # z=0 range of masses AND cosmological parameters\n Mi = [1e8, 1e9, 1e10]\n zi = 0\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n output, cosmo = commah.run(cosmology=cosmology, zi=zi, Mi=Mi,\n retcosmo=True)\n\n print(output['c'].flatten())\n print(cosmo)\n\n # Return the WMAP5 cosmology concentration predicted for MW\n # mass (2e12 Msol) across redshift\n Mi = 2e12\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=0, Mi=Mi, z=z)\n for zval in z:\n print(\"M(z=0)=%s has c(z=%s)=%s\"\n % (Mi, zval, output[output['z'] == zval]['c'].flatten()))\n\n # Return the WMAP5 cosmology concentration predicted for MW\n # mass (2e12 Msol) across redshift\n Mi = 2e12\n zi = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n for zval in zi:\n print(\"M(z=%s)=%s has concentration %s\"\n % (zval, Mi, output[(output['zi'] == zval) &\n (output['z'] == zval)]['c'].flatten()))\n\n # Return the WMAP5 cosmology concentration and\n # rarity of high-z cluster\n Mi = 2e14\n zi = 6\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['c'].flatten())\n print(\"Mass variance sigma of haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['sig'].flatten())\n print(\"Fluctuation for haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['nu'].flatten())\n\n # Return the WMAP5 cosmology accretion rate prediction\n # for haloes at range of redshift and mass\n Mi = [1e8, 1e9, 1e10]\n zi = [0]\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, z=z)\n for Mval in Mi:\n print(\"dM/dt for halo of mass %s at z=%s across redshift %s is: \"\n % (Mval, zi, z))\n print(output[output['Mi'] == Mval]['dMdt'].flatten())\n\n # Return the WMAP5 cosmology Halo Mass History for haloes with M(z=0) = 1e8\n M = [1e8]\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n print(\"Halo Mass History for z=0 mass of %s across z=%s\" % (M, z))\n output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)\n print(output['Mz'].flatten())\n\n # Return the WMAP5 cosmology formation redshifts for haloes at\n # range of redshift and mass\n M = [1e8, 1e9, 1e10]\n z = [0]\n print(\"Formation Redshifts for haloes of mass %s at z=%s\" % (M, z))\n output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)\n for Mval in M:\n print(output[output['Mi'] == Mval]['zf'].flatten())\n\n return(\"Done\")\n\n\ndef plotcommand(cosmology='WMAP5', plotname=None):\n \"\"\" Example ways to interrogate the dataset and plot the commah output \"\"\"\n\n # Plot the c-M relation as a functon of redshift\n xarray = 10**(np.arange(1, 15, 0.2))\n yval = 'c'\n\n # Specify the redshift range\n zarray = np.arange(0, 5, 0.5)\n\n xtitle = r\"Halo Mass (M$_{sol}$)\"\n ytitle = r\"Concentration\"\n linelabel = \"z=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n plt.ylim([2, 
30])\n\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray)\n\n # Access the column yval from the data file\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray, label=linelabel+str(zval), color=colors[zind])\n # Overplot the D08 predictions in black\n ax.plot(xarray, commah.commah.cduffy(zval, xarray), color=\"black\")\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_CM_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_CM_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the c-z relation as a function of mass (so always Mz=M0)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'c'\n\n # Specify the mass range\n zarray = 10**np.arange(6, 14, 2)\n\n xtitle = r\"Redshift\"\n ytitle = r\"NFW Concentration\"\n linelabel = r\"log$_{10}$ M$_{z}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)\n\n # Access the column yval from the data file\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colours\n ax.plot(xarray, yarray,\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_Cz_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_Cz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the zf-z relation for different masses (so always Mz=M0)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'zf'\n\n # Specify the mass range\n zarray = 10**np.arange(6, 14, 2)\n\n xtitle = r\"Redshift\"\n ytitle = r\"Formation Redshift\"\n linelabel = r\"log$_{10}$ M$_{z}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray,\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n leg = ax.legend(loc=2)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_zfz_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_zfz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the dM/dt-z relation for different 
masses (so always Mz=M0)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'dMdt'\n\n # Specify the mass range\n zarray = 10**np.arange(10, 14, 0.5)\n\n xtitle = r\"log$_{10}$ (1+z)\"\n ytitle = r\"log$_{10}$ Accretion Rate M$_{sol}$ yr$^{-1}$\"\n linelabel = r\"log$_{10}$ M$_z$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n cosmo = commah.getcosmo(cosmology)\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval,\n com=False, mah=True)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(np.log10(xarray+1.), np.log10(yarray),\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n # Plot the semi-analytic approximate formula from Correa et al 2015b\n semianalytic_approx = 71.6 * (zval / 1e12) * (cosmo['h'] / 0.7) *\\\n (-0.24 + 0.75 * (xarray + 1)) * np.sqrt(\n cosmo['omega_M_0'] * (xarray + 1)**3 + cosmo['omega_lambda_0'])\n\n ax.plot(np.log10(xarray + 1), np.log10(semianalytic_approx),\n color='black')\n\n leg = ax.legend(loc=2)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_dMdtz_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_dMdtz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the dMdt-M relation as a function of redshift\n xarray = 10**(np.arange(10, 14, 0.5))\n yval = 'dMdt'\n\n # Specify the redshift range\n zarray = np.arange(0, 5, 0.5)\n\n xtitle = r\"Halo Mass M$_{sol}$\"\n ytitle = r\"Accretion Rate M$_{sol}$ yr$^{-1}$\"\n linelabel = \"z=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,\n com=False, mah=True)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray, label=linelabel+str(zval),\n color=colors[zind],)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n leg = ax.legend(loc=2)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_MAH_M_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_MAH_M_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the (dM/M)dt-M relation as a function of redshift\n xarray = 10**(np.arange(10, 14, 0.5))\n yval = 'dMdt'\n\n # Specify the redshift range\n zarray = np.arange(0, 5, 0.5)\n\n xtitle = r\"Halo Mass M$_{sol}$\"\n ytitle = r\"Specific Accretion Rate yr$^{-1}$\"\n linelabel = \"z=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,\n mah=True, com=False)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different 
colour\n ax.plot(xarray, yarray/xarray, label=linelabel+str(zval),\n color=colors[zind],)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_specificMAH_M_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_specificMAH_M_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the Mz-z relation as a function of mass\n # (so mass is decreasing to zero as z-> inf)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'Mz'\n\n # Specify the mass range\n zarray = 10**np.arange(10, 14, 0.5)\n\n xtitle = r\"Redshift\"\n ytitle = r\"M(z) (M$_{sol}$)\"\n linelabel = r\"log$_{10}$ M$_{0}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray,\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n ax.set_yscale('log')\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_Mzz_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_Mzz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the Mz/M0-z relation as a function of mass\n xarray = 10**(np.arange(0, 1, 0.02)) - 1\n yval = 'Mz'\n\n # Specify the mass range\n zarray = 10**np.arange(10, 14, 0.5)\n\n xtitle = r\"Redshift\"\n ytitle = r\"log$_{10}$ M(z)/M$_{0}$\"\n linelabel = r\"log$_{10}$ M$_{0}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, np.log10(yarray/zval),\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n leg = ax.legend(loc=3)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_MzM0z_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_MzM0z_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n return(\"Done\")\n" ]
[ [ "matplotlib.pyplot.ylim", "matplotlib.pyplot.figure", "numpy.arange", "numpy.sqrt", "matplotlib.pyplot.show", "numpy.log10" ] ]
ssinad/gcp
[ "50db0f72db5bc907cefd4f7b38e34dadb6ccc0b9" ]
[ "examples/Covid-19/LA-covid19-indicators/ca_reopening_tiers.py" ]
[ "\"\"\"\nFunctions to see how CA counties are meeting the\nCA Department of Public Health's reopening metrics\n\nhttps://www.cdph.ca.gov/Programs/CID/DCDC/Pages/COVID-19/COVID19CountyMonitoringOverview.aspx\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nimport default_parameters\nimport utils\n\nfulldate_format = default_parameters.fulldate_format\ntime_zone = default_parameters.time_zone\nstart_date = default_parameters.start_date\nyesterday_date = default_parameters.yesterday_date\ntoday_date = default_parameters.today_date\none_week_ago = default_parameters.one_week_ago\ntwo_weeks_ago = default_parameters.two_weeks_ago\nthree_weeks_ago = default_parameters.three_weeks_ago\ntwo_days_ago = default_parameters.two_days_ago\nthree_days_ago = default_parameters.three_days_ago\neight_days_ago = default_parameters.eight_days_ago\nnine_days_ago = default_parameters.nine_days_ago\n\nS3_FILE_PATH = \"s3://public-health-dashboard/jhu_covid19/\"\n\n# Units for case rates are per 100k\nPOP_RATE = 100_000\n\n# LA County population (using ca_county_pop_crosswalk)\nLA_POP = 10_257_557\n\n#---------------------------------------------------------------#\n# Case Rate (CA counties)\n#---------------------------------------------------------------# \n# Case Rate (per 100k) is calculated as 7-day average with 7-day lag\n# Calculated off of daily new cases\ndef case_rate(county_state_name, start_date, time_period):\n df = prep_case_rate(county_state_name, start_date, time_period)\n \n pop = (pd.read_parquet(f'{S3_FILE_PATH}ca_county_pop_crosswalk.parquet')\n .rename(columns = {\"county_fips\": \"fips\"})\n [[\"fips\", \"county_pop2020\"]]\n )\n \n df = pd.merge(df, pop, on = \"fips\", how = \"left\", validate = \"m:1\")\n \n # Calculate 7-day average of new cases\n extract_col = [\"new_cases\"]\n county_pop = df.county_pop2020.iloc[0]\n new_cases_avg7 = df[extract_col].mean()\n \n # Convert to cases per 100k\n cases_per100k = (new_cases_avg7 / county_pop * POP_RATE).round(2).iloc[0]\n \n return cases_per100k\n\n \n\"\"\"\nSub-functions for case rate\n\"\"\"\ndef prep_case_rate(county_state_name, start_date, time_period):\n df = utils.prep_county(county_state_name, start_date) \n \n if time_period == \"today\":\n df = df[(df.date <= yesterday_date) & (df.date > one_week_ago)]\n \n if time_period == \"one_week_ago\":\n df = df[(df.date <= one_week_ago) & (df.date > two_weeks_ago)]\n\n if time_period == \"two_weeks_ago\": \n df = df[(df.date <= two_weeks_ago) & (df.date > three_weeks_ago)]\n \n return df \n\n\n\n#---------------------------------------------------------------#\n# Test Positivity Rate (LA County)\n#---------------------------------------------------------------# \n# Test Positivity is calculated as 7-day average with 7-day lag\n# Testing particularly is lagged; we know this to be true for LA County's data\ndef positive_rate(start_date, time_period):\n df = prep_test_rate(start_date, time_period) \n \n # Calculate 7-day average of test positivity\n extract_col1 = [\"County_Positive\"]\n extract_col2 = [\"County_Performed\"]\n \n positivity_rate = (df[extract_col1].sum().iloc[0]) / (df[extract_col2].sum().iloc[0]) \n positivity_rate = positivity_rate.round(3)\n \n return positivity_rate\n\n\n\"\"\"\nSub-functions for testing and test positivity rates\n\"\"\"\ndef prep_test_rate(start_date, time_period):\n df = utils.prep_testing(start_date)\n \n if time_period == \"one_week_ago\":\n df = df[(df.date <= one_week_ago) & (df.date > two_weeks_ago)]\n\n if time_period == \"two_weeks_ago\": \n 
df = df[(df.date <= two_weeks_ago) & (df.date > three_weeks_ago)]\n \n return df\n\n\n#---------------------------------------------------------------#\n# Test Rate (LA County)\n#---------------------------------------------------------------# \n# Test Rate is calculated as 7-day average with 7-day lag\n# It is per 100k. It's only used to adjust for test positivity rate.\n# Testing particularly is lagged; we know this to be true for LA County's data\ndef test_rate(start_date, time_period):\n df = prep_test_rate(start_date, time_period) \n \n # Calculate 7-day average of tests conducted\n extract_col = [\"County_Performed\"]\n tests_avg7 = df[extract_col].mean()\n \n tests_per100k = (tests_avg7 / LA_POP * POP_RATE).round(2).iloc[0]\n \n return tests_per100k" ]
[ [ "pandas.read_parquet", "pandas.merge" ] ]
hojeong3709/ML
[ "2fdd8d22dc5584103397559cb23a6efa8bb59637" ]
[ "tensorflow-learning/tensorflow-lab/cost-function/min-squared-error/mini-exam/1.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 문제1\n# simple3 파일을 읽어\n# hx = wx + b\n# w, b 값을 구하고 x가 5인 경우의 값을 예측하시오.\n\nf = np.loadtxt(\"simple3.txt\", dtype=np.int32, skiprows=1, delimiter=\",\")\nprint(f)\n\nx = f[:, 0]\ny = f[:, 1]\n\nprint(x)\nprint(y)\n\nX = tf.placeholder(dtype=tf.float32)\nY = tf.constant(y, dtype=tf.float32)\n\n# xaiver or he 알고리즘으로 적절한 초기값을 지정할 수 있다.\nW = tf.Variable(1, dtype=tf.float32)\nb = tf.Variable(1, dtype=tf.float32)\n\nhx = W * X + b\n\ncost = tf.reduce_mean(tf.square(hx - y))\n\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\ntrain = optimizer.minimize(cost)\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(100):\n _train, _cost = sess.run([train, cost], feed_dict={X: x})\n print(i, _cost)\n plt.plot(i, _cost, \"ro\")\n\nprint(\"W : \", sess.run(W))\nprint(\"b : \", sess.run(b))\n\nprint(\"X가 5인경우 값 : \", sess.run(hx, feed_dict={X: 5}))\n\n# 문제2\n# 위의 결과로 그래프를 그리시오.\n# x축, y축 (hx)\nplt.show()\n\nplt.plot(x, sess.run(hx, feed_dict={X: x}))\nplt.show()\n\n# 문제3\n# y = (5x +2 ) ^ 2를 미분하시오\n# 코딩이 아닌 도출과정을 적으시오.\n\n# 풀이 ==> 2 * (5x + 2) * 5 (편미분)" ]
[ [ "tensorflow.Session", "tensorflow.Variable", "matplotlib.pyplot.plot", "tensorflow.constant", "numpy.loadtxt", "tensorflow.placeholder", "matplotlib.pyplot.show", "tensorflow.global_variables_initializer", "tensorflow.square", "tensorflow.train.GradientDescentOptimizer" ] ]
WolfNiu/polite-dialogue-generation
[ "18f3b1376ea48e3c4aef6360777ac0224470fcb6" ]
[ "src/model/continuous-LFT.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"\nDon't forget to append end token for Subtle dataset!!\n\"\"\"\n\n# Imports for compatibility between Python 2&3\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom six.moves import xrange\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport sys\nfrom pprint import pprint\nfrom pathlib import Path\nfrom math import exp\nimport itertools\nimport string\nfrom pprint import pprint\nsys.path.extend([\".\", \"../\", \"../..\"])\nfrom src.model.seq2seq_politeness import Seq2Seq\nfrom src.model.util import gpu_config \nfrom src.basic.util import (shuffle, remove_duplicates, pad,\n unzip_lst, zip_lsts,\n prepend, append,\n load_pickle, load_pickles, \n dump_pickle, dump_pickles, \n build_dict, read_lines, write_lines, \n group_lst, decode2string)\nimport argparse\n\n\n# In[ ]:\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Train/Val/Test continuous-LFT model\")\n parser.add_argument(\n \"--test\", action=\"store_true\",\n help=\"whether we are testing, default to False\")\n parser.add_argument(\n \"--ckpt_generator\", type=str, default=\"ckpt/seq2seq_RL_pretrain_3\",\n help=\"path to model files\")\n args = parser.parse_args()\n return args\n\nargs = parse_args()\ninfer_only = args.test\nforce_restore_point = args.ckpt_generator\n\n\n# In[2]:\n\n\nstart_epoch = 0\ntotal_epochs = 40\n\ndebugging = False\npretrain = False\ncontinuous_label = True\nclip_loss = False\nreorganize = False\n\ngpu_start_index = 0\nlearning_rate = 0.001\nmonotonic_attention = True\noutput_layer = False\n\nget_PPL = False\nPPL_all = False\n\nforce_restore = True\n\n\"\"\"\ngpu configurations\n\"\"\"\nnum_gpus = 1\nbatch_size = 96\nassert batch_size % num_gpus == 0\nbatch_size_per_gpu = batch_size // num_gpus\n\nextra_str = \"_cont_LFT\"\n\nif monotonic_attention:\n extra_str += \"_monotonic\"\n (\"Applying monotonic attention.\")\n \nif pretrain:\n extra_str += \"_pretrain\"\n \nif debugging:\n batch_size = 4\n\n\n# In[3]:\n\n\n\"\"\"\nLoad pickled lists\n\"\"\"\ndata_path = \"data/MovieTriples/\"\npoliteness_path = \"data/Stanford_politeness_corpus/\"\nckpt_path = \"ckpt/\"\n\n\"\"\"\nLoad pickled lists\n\"\"\"\nfilenames = [\n \"vocab_all.pkl\",\n \"shared_vocab_politeness.pkl\", \"new_vocab_politeness.pkl\",\n \"shared_vocab_movie.pkl\", \"new_vocab_movie.pkl\",\n \"embedding_word2vec_politeness.pkl\", \"embedding_word2vec_movie.pkl\",\n \"movie_train_source.pkl\", \"movie_train_target.pkl\",\n \"movie_valid_source.pkl\", \"movie_valid_target.pkl\",\n \"movie_test_source.pkl\", \"movie_test_target.pkl\",\n \"polite_movie_target.pkl\", \"neutral_movie_target.pkl\", \"rude_movie_target.pkl\"]\n \nfiles = [\n os.path.join(politeness_path if \"politeness\" in filename else data_path, filename) \n for filename in filenames]\n\n# Load files\ndata = load_pickles(files)\n\nvocab = data[0]\nshared_vocab_politeness = data[1]\nnew_vocab_politeness = data[2]\nshared_vocab_movie = data[3]\nnew_vocab_movie = data[4]\nembedding_word2vec_politeness = data[5]\nembedding_word2vec_movie = data[6]\n\nsource_train = data[7] \ntarget_train = data[8] \nsource_valid = data[9]\ntarget_valid = data[10]\nsource_test = data[11]\ntarget_test = data[12]\ntriple_lsts = data[13:]\n\n\n# In[4]:\n\n\ndef zip_remove_duplicates_unzip(lsts):\n zipped = zip_lsts(lsts)\n zipped_without_duplicates = remove_duplicates(zipped) \n unzipped = unzip_lst(zipped_without_duplicates)\n return 
unzipped\n\n\n# In[5]:\n\n\nif not pretrain:\n [source_train, target_train] = zip_remove_duplicates_unzip([source_train, target_train])\n print(len(source_train))\n\n\n# In[6]:\n\n\nshared_vocab_size_politeness = len(shared_vocab_politeness)\nshared_vocab_size_movie = len(shared_vocab_movie)\n\nspecial_tokens = [\n \"UNK_TOKEN\", \"START_TOKEN\", \"END_TOKEN\",\n \"<polite>\", \"<neutral>\", \"<rude>\"]\n\nvocab_size = len(vocab)\n\n# Index vocabulary\nindex2token = {i: token for (i, token) in enumerate(vocab)}\ntoken2index = {token: i for (i, token) in enumerate(vocab)}\n\n[unk_token, start_token, end_token,\n polite_label, neutral_label, rude_label] = [token2index[token] \n for token in special_tokens]\n\nlabels = [polite_label, neutral_label, rude_label]\nnum_labels = len(labels)\n\nnew_vocab_size_politeness = len(new_vocab_politeness)\nnew_vocab_size_movie = len(new_vocab_movie)\nassert (1 + shared_vocab_size_politeness + new_vocab_size_politeness # +1 for \"UNK\"\n + shared_vocab_size_movie + new_vocab_size_movie) == vocab_size\n\n\n# In[7]:\n\n\ntags = [\"<person>\", \"<number>\", \"<continued_utterance>\"]\nner_tokens = [token2index[token] for token in tags]\nunk_indices = [unk_token] + ner_tokens\n\n\n# In[8]:\n\n\n# LFT_data_file = \"/playpen/home/tongn/Stanford_politeness_corpus/\" + \"LFT_continuous_label_train.pkl\"\n# LFT_data = load_pickle(LFT_data_file)\nLFT_data = list(itertools.chain(*triple_lsts))\n\n\n# In[9]:\n\n\nif reorganize:\n words_path = \"/home/tongn/politeness-generation/src/data/\"\n polite_lst = set(\n [token2index[word] \n for word in read_lines(words_path + \"polite_words.txt\")\n if word in vocab])\n rude_lst = set(\n [token2index[word] \n for word in read_lines(words_path + \"swear_words_wikitionary.txt\")\n if word in vocab])\n \n LFT_examples = []\n counter = 0\n for (source, target, score) in LFT_data:\n if len(set(target).intersection(set(polite_lst))) > 0 and score < 0.8:\n LFT_examples.append([1.0, source, target])\n counter += 1\n elif len(set(target).intersection(set(rude_lst))) > 0 and score > 0.2:\n LFT_examples.append([0.0, source, target])\n counter += 1\n else:\n LFT_examples.append([source, target, score])\n print(\"Moved %d examples\" % counter)\nelse:\n LFT_examples = LFT_data\n\n\n# In[11]:\n\n\n\"\"\"\nShared hyperparameters\n\"\"\"\nbeam_width = 2\nlength_penalty_weight = 1.0\nclipping_threshold = 5.0 # threshold for gradient clipping\nembedding_size = 300\n\n\"\"\"\nseq2seq hyperparameters\n\"\"\"\nhidden_size = 512\nnum_layers = 2\nmax_iterations = 34\ndropout_rate = 0.2\nattention_size = 512\nattention_layer_size = 256\n\n\n# In[12]:\n\n\nif debugging:\n if pretrain:\n data_dict = {\n \"train\": (source_train[:8], target_train[:8])}\n else:\n data_dict = {\n \"train\": (source_train[:8], target_train[:8]),\n \"valid\": (source_valid[:8], target_valid[:8]),\n \"test\": (source_test[:8], target_test[:8])}\nelse:\n if pretrain:\n data_dict = {\n \"train\": (source_train, target_train)}\n else:\n data_dict = {\n \"train\": (source_train, target_train),\n \"valid\": (source_valid, target_valid),\n \"test\": (source_test, target_test)}\n\n\n# In[13]:\n\n\ndef build_model():\n tf.reset_default_graph()\n graph = tf.Graph()\n with graph.as_default():\n model = Seq2Seq(\n batch_size,\n shared_vocab_size_politeness, new_vocab_size_politeness,\n shared_vocab_size_movie, new_vocab_size_movie,\n embedding_word2vec_politeness, embedding_word2vec_movie, \n embedding_size, hidden_size, num_layers,\n max_iterations,\n start_token, end_token, 
unk_indices,\n attention_size=attention_size, \n attention_layer_size=attention_layer_size,\n beam_width=beam_width, length_penalty_weight=length_penalty_weight,\n gpu_start_index=gpu_start_index, \n num_gpus=num_gpus,\n learning_rate=learning_rate, \n clipping_threshold=clipping_threshold,\n monotonic_attention=monotonic_attention,\n continuous_label=continuous_label,\n output_layer=output_layer,\n clip_loss=clip_loss)\n saver_seq2seq = tf.train.Saver(var_list=model.trainable_variables)\n if start_epoch == 0:\n exclude_indices = [5]\n if monotonic_attention:\n exclude_indices.extend([37, 38, 39])\n restore_vars = [\n var\n for (i, var) in enumerate(model.trainable_variables)\n if i not in exclude_indices]\n saver_restore = tf.train.Saver(var_list=restore_vars)\n else:\n saver_restore = saver_seq2seq\n print(\"Done building model graph.\")\n return (model, graph, saver_seq2seq, saver_restore)\n\n(model, graph, saver_seq2seq, saver_restore) = build_model()\n\n\n# In[14]:\n\n\n# pprint(list(enumerate(model.trainable_variables)))\n# input(\"pause\")\n\n\n# In[15]:\n\n\ndef run_seq2seq(sess, mode, epoch, feed_score=1.0):\n \"\"\"see if we need to append end_token\"\"\" \n is_training = (mode == \"train\")\n \n if is_training:\n (source_lst, target_lst, score_lst) = unzip_lst(LFT_examples) \n else:\n (source_lst, target_lst) = data_dict[mode]\n score_lst = [feed_score] * len(source_lst)\n \n# source_lst = source_lst[:batch_size * 2]\n# target_lst = target_lst[:batch_size * 2]\n# score_lst = score_lst[:batch_size * 2]\n \n num_examples = len(source_lst)\n assert num_examples >= batch_size\n num_batches = num_examples // batch_size\n \n keep_prob = (1 - dropout_rate) if is_training else 1.0\n start_tokens = [start_token] * batch_size\n \n total_loss = 0.0\n num_tokens = 0\n zipped_lst = []\n for i in range(num_batches):\n start = i * batch_size\n end = start + batch_size\n \n sources = source_lst[start:end]\n source_lengths = list(map(len, sources))\n targets = target_lst[start:end]\n target_lengths = list(map(len, targets))\n \n scores = score_lst[start:end]\n \n feed_dict = {\n model.source: pad(sources, source_lengths),\n model.source_length: source_lengths,\n model.target: pad(targets, target_lengths),\n model.target_length: target_lengths,\n model.start_tokens: start_tokens,\n model.keep_prob: keep_prob,\n model.is_training: is_training,\n model.score: scores}\n \n if is_training:\n fetches = [model.batch_total_loss, model.batch_num_tokens, model.apply_gradients_op]\n else:\n fetches = [model.batch_sample_ids_beam, model.batch_final_lengths_beam]\n \n result = sess.run(fetches, feed_dict=feed_dict)\n \n if is_training:\n total_loss += result[0]\n num_tokens += result[1]\n print(\"Epoch (%s) %d Batch %d perplexity: %.2f\" % \n (mode, epoch, i, exp(result[0] / result[1])))\n print(\"Perplexity so far:\", exp(total_loss / num_tokens))\n else:\n print(\"Finished testing batch %d\" % i)\n responses = [response[:length] \n for (response, length) \n in zip(result[0].tolist(), result[1].tolist())]\n zipped = zip_lsts([sources, targets, responses])\n zipped_lst.extend(zipped)\n \n if is_training:\n print(\"Epoch (%s) %d average perplexity: %.2f\" % \n (mode, epoch, exp(total_loss / num_tokens)))\n if not get_PPL:\n saver_seq2seq.save(sess, \"%sseq2seq_RL%s_%d\" % (ckpt_path, extra_str, epoch))\n print(\"Checkpoint saved for epoch %d.\" % epoch)\n \n return zipped_lst\n\n\n# In[16]:\n\n\nconfig = gpu_config()\n\nnum_epochs = total_epochs - start_epoch\nassert num_epochs >= 0\n\nwith 
tf.Session(graph=graph, config=config) as sess:\n sess.run(tf.global_variables_initializer())\n print(\"Initialized.\")\n \n if force_restore or start_epoch > 0:\n if force_restore:\n restore_ckpt = force_restore_point\n else:\n restore_ckpt = \"%sseq2seq_RL%s_%d\" % (ckpt_path, extra_str, start_epoch - 1)\n \n saver_restore.restore(sess, restore_ckpt)\n print(\"Restored from\", restore_ckpt)\n \n for i in xrange(num_epochs):\n if not infer_only:\n mode = \"train\"\n run_seq2seq(sess, mode, i + start_epoch)\n \n if pretrain:\n continue\n \n mode = \"valid\"\n score_range = [1.0]\n zipped = run_seq2seq(sess, mode, i + start_epoch, feed_score=score_range[0])\n \n if infer_only and not get_PPL and (i + start_epoch - 1) % 5 == 0: # for getting perplexity of test data, use train branch\n print(\"Inferring on test set...\")\n mode = \"test\"\n\n responses_lst = []\n source_lst = []\n target_lst = []\n score_range = list(np.arange(0.0, 1.1, 0.5))\n for score in score_range:\n zipped_responses = run_seq2seq(\n sess, mode, i + start_epoch, feed_score=score)\n (source_lst, target_lst, responses) = unzip_lst(zipped_responses)\n responses_lst.append(responses)\n num_responses = len(responses_lst[0]) \n\n zipped = zip_lsts([source_lst, target_lst] + responses_lst)\n \n flattened = [decode2string(index2token, sent, end_token=end_token, remove_END_TOKEN=True) \n for tp in zipped for sent in tp]\n\n # now we mark sentences that are generated by our model\n num_lines = len(score_range) + 2\n marked_G = [(\"G: \" + sent)\n if k % num_lines == 1 else sent\n for (k, sent) in enumerate(flattened)]\n\n marked_M = [(\"M: \" + sent) \n if k % num_lines in range(2, num_lines) else sent\n for (k, sent) in enumerate(marked_G)]\n \n filename = (\"%sseq2seq_RL_%s_result%s_%d.txt\" % \n (\"output/\", mode, extra_str, i + start_epoch))\n\n write_lines(filename, marked_M)\n\n # only need 1 epoch for inferring or getting PPL\n if infer_only or get_PPL: \n break\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "tensorflow.Graph", "tensorflow.Session", "tensorflow.reset_default_graph", "tensorflow.train.Saver", "numpy.arange", "tensorflow.global_variables_initializer" ] ]
SU-CVR-21/DF-VO
[ "ac5f4656036e899ac4beac1afb5fbd7f7e1659f1" ]
[ "run.py" ]
[ "''''''\n'''\n@Author: Huangying Zhan ([email protected])\n@Date: 2019-09-01\n@Copyright: Copyright (C) Huangying Zhan 2020. All rights reserved. Please refer to the license file.\n@LastEditTime: 2020-06-02\n@LastEditors: Huangying Zhan\n@Description: This API runs DF-VO.\n'''\n\nimport argparse\nimport numpy as np\nimport os\nimport random\nimport torch\nimport libs.dfvo\nimport sys\n\nprint(sys.path)\nfrom libs.dfvo import DFVO\nfrom libs.general.utils import mkdir_if_not_exists\nfrom libs.general.configuration import ConfigLoader\n\n\nconfig_loader = ConfigLoader()\n\ndef read_cfgs():\n \"\"\"Parse arguments and laod configurations\n\n Returns\n -------\n args : args\n arguments\n cfg : edict\n configuration dictionary\n \"\"\"\n ''' Argument Parsing '''\n parser = argparse.ArgumentParser(description='VO system')\n parser.add_argument(\"-s\", \"--seq\", \n default=None, help=\"sequence\")\n parser.add_argument(\"-d\", \"--default_configuration\", type=str, \n default=\"options/kitti/kitti_default_configuration.yml\",\n help=\"default configuration files\")\n parser.add_argument(\"-c\", \"--configuration\", type=str,\n default=None,\n help=\"custom configuration file\")\n parser.add_argument(\"--no_confirm\", action=\"store_true\",\n help=\"no confirmation questions\")\n args = parser.parse_args()\n\n ''' Read configuration '''\n # read default and custom config, merge cfgs\n config_files = [args.default_configuration, args.configuration]\n cfg = config_loader.merge_cfg(config_files)\n if args.seq is not None:\n if cfg.dataset == \"kitti_odom\":\n cfg.seq = \"{:02}\".format(int(args.seq))\n else:\n cfg.seq = args.seq\n cfg.seq = str(cfg.seq)\n\n ''' double check result directory '''\n if args.no_confirm:\n mkdir_if_not_exists(cfg.directory.result_dir)\n cfg.no_confirm = True\n else:\n cfg.no_confirm = False\n continue_flag = input(\"Save result in {}? [y/n]\".format(cfg.directory.result_dir))\n if continue_flag == \"y\":\n mkdir_if_not_exists(cfg.directory.result_dir)\n else:\n exit()\n return args, cfg\n\n\nif __name__ == '__main__':\n # Read config\n args, cfg = read_cfgs()\n\n # Set random seed\n SEED = cfg.seed\n np.random.seed(SEED)\n torch.cuda.manual_seed(SEED)\n torch.manual_seed(SEED)\n\n # setup DFVO\n vo = DFVO(cfg)\n vo.main()\n\n # Save configuration file\n cfg_path = os.path.join(cfg.directory.result_dir, 'configuration_{}.yml'.format(cfg.seq))\n config_loader.save_cfg([args.default_configuration, args.configuration], file_path=cfg_path)\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.cuda.manual_seed" ] ]
Spacider/comp9444_assignment
[ "149db9a562c579d03b3ea06c9de2020c8f3ef310" ]
[ "asmt1/encoder_main.py" ]
[ "# encoder_main.py\n# COMP9444, CSE, UNSW\n\nfrom __future__ import print_function\nimport torch.utils.data\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport argparse\n\nfrom asmt1.encoder_model import EncModel, plot_hidden\nfrom asmt1.encoder import star16, heart18, target1, target2\n\n# command-line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--target',type=str,default='input',help='input, star16, heart18, target1 or target2')\nparser.add_argument('--dim',type=int,default=9,help='input dimension')\nparser.add_argument('--plot',default=False,action='store_true',help='show intermediate plots')\nparser.add_argument('--epochs',type=int, default=1000000, help='max epochs')\nparser.add_argument('--stop',type=float, default=0.02, help='loss to stop at')\nparser.add_argument('--lr', type=float, default =0.4, help='learning rate')\nparser.add_argument('--mom',type=float, default=0.9, help='momentum')\nparser.add_argument('--init',type=float, default=0.001, help='initial weights')\nparser.add_argument('--cuda',default=False,action='store_true',help='use cuda')\n\nargs = parser.parse_args()\n\n# choose CPU or CUDA\nif args.cuda:\n device = 'cuda'\nelse:\n device = 'cpu'\n\n# load specified target values\nif args.target == 'input':\n target = torch.eye(args.dim)\nelif args.target == 'star16':\n target = star16\nelif args.target == 'heart18':\n target = heart18\nelif args.target == 'target1':\n target = target1\nelif args.target == 'target2':\n target = target2\nelse:\n print('Unknown target: %s' %args.target )\n exit()\n\nnum_in = target.size()[0]\nnum_out = target.size()[1]\n\n# input is one-hot with same number of rows as target\ninput = torch.eye(num_in)\n\nxor_dataset = torch.utils.data.TensorDataset(input,target)\ntrain_loader = torch.utils.data.DataLoader(xor_dataset,batch_size=num_in)\n\n# create neural network according to model specification\nnet = EncModel(num_in,2,num_out).to(device) # CPU or GPU\n\n# initialize weights, but set biases to zero\nnet.in_hid.weight.data.normal_(0,args.init)\nnet.hid_out.weight.data.normal_(0,args.init)\nnet.in_hid.bias.data.zero_()\nnet.hid_out.bias.data.zero_()\n\n# SGD optimizer\noptimizer = torch.optim.SGD(net.parameters(),lr=args.lr,momentum=args.mom)\n\n# plot only at selected epochs\ndef plot_epoch( epoch ):\n return epoch in [50,100,150,200,300,500,700,1000,\n 1500,2000,3000,5000,7000,10000,\n 15000,20000,30000,50000,70000,100000,\n 150000,200000,300000,500000,700000,1000000]\n\nloss = 1.0\nepoch = 0\nwhile epoch < args.epochs and loss > args.stop:\n epoch = epoch+1\n for batch_id, (data,target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad() # zero the gradients\n output = net(data) # apply network\n loss = F.binary_cross_entropy(output,target)\n loss.backward() # compute gradients\n optimizer.step() # update weights\n if epoch % 10 == 0:\n print('ep%3d: loss = %7.4f' % (epoch, loss.item()))\n if args.plot and plot_epoch( epoch ):\n plot_hidden(net)\n plt.show()\n\nplot_hidden(net)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "torch.nn.functional.binary_cross_entropy" ] ]
AlexeyReshetnyak/ozon_prediction
[ "457824ee5ea575a44838b9b33ff83f633341bfb2" ]
[ "ozon_prediction.py" ]
[ "#!/usr/bin/env python3\n# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display # TODO: is it needed?\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score, mean_squared_error\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.neural_network import MLPRegressor\n\ndef main():\n\n \"\"\"\n About the data. There are several files corresponding to different weather\n stations here. How to deal with them is not yet entirely clear. Maybe they\n need to be combined, maybe build models for each station separately.\n While we take some one station and work with it. For example file\n PRSA_Data_Aotizhongxin_20130301-20170228.csv.\n \"\"\"\n\n file = './data/PRSA_Data_20130301-20170228/PRSA_Data_Aotizhongxin_20130301-20170228.csv'\n #TODO: fit all to 79 cols\n #TODO: download, and unzip data from web\n data = pd.read_csv(file)\n\n # Drop some non useful columns.\n cols_to_drop = ['No', 'year', 'month', 'day', 'hour', 'wd', 'station']\n data = data.drop(cols_to_drop, axis=1)\n data.info()\n\n print('Are there any duplicated values in data ? : {}\\n'.format(data.duplicated().any()))\n print('The total number of null values in each colum:')\n display(data.isnull().sum())\n data.fillna(value=data.mean(), inplace=True)\n display(data.isnull().any())\n\n # Let's do a little visualization\n plt.figure(figsize=(12, 5))\n sns.histplot(data['O3'], bins=100)\n plt.title('Ozon dencity', fontsize=16)\n plt.show()\n #It gives nothing special, data as data.\n\n # Lets see corellation between the features of the data\n plt.figure(figsize=(13, 9))\n correlation_data = data[['PM2.5', 'PM10', 'SO2', 'NO2',\n 'CO', 'O3', 'TEMP', 'PRES',\n 'DEWP', 'RAIN', 'WSPM']]\n sns.heatmap(correlation_data.corr(), cmap=plt.cm.Reds, annot=True)\n plt.title('Heatmap displaying the correlation matrix of the variables',\n fontsize=16)\n plt.show()\n\n \"\"\"\n We see that only two pairs of features correlate well.\n PM 10, PM2.5 and TEMP, DEWP with coefficients 0.87, 0.82,\n respectively. Not much to care about. Just ignore it.\n \"\"\"\n\n # Now we will split data to predictor, and outcome featires\n X = data.drop('O3', axis=1)\n y = data['O3'].to_numpy()\n\n # Preprocessin\n X_scaled = preprocessing.scale(X)\n\n \"\"\"\n Split to train and test. It's demanded by the task test data\n is between 01.06.2016 to 30.11.2016, so split it\n manually see nedeed period. 
It's between 28513 and 32904 rows.\n \"\"\"\n d1 = 28513; d2 = 32904\n X_test = X_scaled[d1:d2, :]\n y_test = y[d1:d2]\n\n X_train = np.concatenate((X_scaled[0:d1, :], X_scaled[d2:, :]), axis=0)\n y_train = np.concatenate((y[0:d1], y[d2:]), axis=0)\n \n # Let's try linear regression\n lin_model = LinearRegression()\n lin_model.fit(X_train,y_train)\n\n prediction = lin_model.predict(X_test)\n mse = mean_squared_error(y_test, prediction)\n accuracy = r2_score(y_test, prediction)\n\n print('Lenear regression Mean Squared Error (MSE): {}'.format(np.sqrt(mse)))\n print('Lenear regression model accuracy: {}\\n'.format(accuracy))\n #Accuracy is about 0.6, very poor, let's try another model\n\n decision_tree = DecisionTreeRegressor()\n decision_tree.fit(X_train, y_train)\n\n tree_pred = decision_tree.predict(X_test)\n tree_mse = mean_squared_error(y_test, tree_pred)\n tree_accuracy = r2_score(y_test, tree_pred)\n\n print('Decision tree Root Mean Squared Error: {}'.format(np.sqrt(tree_mse)))\n print('Decision tree model accuracy: {}\\n'.format(tree_accuracy))\n #Accuracy is about the same as lin. regression, let's try another model\n\n forest = RandomForestRegressor(n_estimators=100,\n max_depth=7,\n max_features='auto',\n min_samples_split=7,\n min_samples_leaf=3)\n\n forest.fit(X_train, y_train)\n forest_pred = forest.predict(X_test)\n\n forest_mse = mean_squared_error(y_test, forest_pred)\n forest_accuracy = r2_score(y_test, forest_pred)\n\n print('Random forest Root Mean Squared Error: {}'.format(np.sqrt(forest_mse)))\n print('Random forest model accuracy: {}\\n'.format(forest_accuracy))\n # Accuracy is about 0.74\n\n grad_boost = GradientBoostingRegressor(n_estimators=100,\n max_depth=7,\n max_features='auto',\n min_samples_split=7,\n min_samples_leaf=3,\n learning_rate=0.1)\n\n grad_boost.fit(X_train, y_train)\n\n gboost_pred = grad_boost.predict(X_test)\n gboost_mse = mean_squared_error(y_test, gboost_pred)\n gboost_accuracy = r2_score(y_test, gboost_pred)\n\n print('Gradient boosting Root Mean Squared Error: {}'.format(np.sqrt(gboost_mse)))\n print('Gradient boosting Overall model accuracy:{}\\n'.format(gboost_accuracy))\n # Accuracy is about 0.76\n\n params = {'max_depth':[3,4,5,6,7,8,9],\n 'max_features':['auto','sqrt','log2'],\n 'min_samples_split':[2,3,4,5,6,7,8,9,10],\n 'min_samples_leaf':[2,3,4,5,6,7,8,9,10]}\n params['learning_rate'] = np.linspace(0.1, 1, 10)\n\n gradient_boosting = GradientBoostingRegressor()\n gboost_search = RandomizedSearchCV(gradient_boosting, params, n_jobs=-1,\n cv=5, verbose=1)\n gboost_search.fit(X_train, y_train)\n\n gboost_search_pred = gboost_search.predict(X_test)\n gboost_search_mse = mean_squared_error(y_test, gboost_search_pred)\n gboost_search_accuracy = r2_score(y_test, gboost_search_pred)\n\n print('Gradient Boosting with search Root Mean Squared Error: {}'.format(np.sqrt(gboost_search_mse)))\n print('Gradient Boosting with search Overall model accuracy: {}\\n'.format(gboost_search_accuracy))\n # Accuracy is about 0.73\n\n ann = MLPRegressor(hidden_layer_sizes=(500, 100), max_iter=1200)\n ann.fit(X_train, y_train)\n ann_pred = ann.predict(X_test)\n ann_score = ann.score(X_test, y_test)\n ann_mse = mean_squared_error(y_test, ann_pred)\n ann_accuracy = r2_score(y_test, ann_pred)\n\n print('ANN Root Mean Squared Error: {}'.format(np.sqrt(ann_mse)))\n print('ANN Overall model accuracy: {}\\n'.format(ann_accuracy))\n # Accuracy is about 0.75\n \"\"\"\n So several methods have been tried, we can say by brute force. The accuracy\n is about 0.76. 
The average result, but for the first approximation it will\n do fine.\n \"\"\"\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.concatenate", "sklearn.metrics.mean_squared_error", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.figure", "sklearn.preprocessing.scale", "sklearn.model_selection.RandomizedSearchCV", "sklearn.neural_network.MLPRegressor", "sklearn.ensemble.RandomForestRegressor", "numpy.sqrt", "sklearn.tree.DecisionTreeRegressor", "sklearn.metrics.r2_score", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
mateusz-kosior/tensorflow-onnx
[ "b64622565223ce79596b60ace6d44ee06af72d52" ]
[ "tests/test_optimizers.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n\"\"\"Unit Tests for optimizers such as TransposeOptimizer.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nfrom onnx import helper, TensorProto, OperatorSetIdProto\nfrom tf2onnx import utils, constants\nfrom tf2onnx.graph import GraphUtil\nfrom backend_test_base import Tf2OnnxBackendTestBase\nfrom common import unittest_main, group_nodes_by_type, check_opset_min_version, check_opset_max_version\n\n\n# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test\n\nclass OptimizerTests(Tf2OnnxBackendTestBase):\n \"\"\"Run original model proto and modified model proto with onnxruntime, compare the results.\"\"\"\n\n def run_and_compare(self, output_names_with_port, onnx_feed_dict, origin_proto, op_type,\n remaining_op_num, debug=False, rtol=1e-07):\n utils.make_sure(op_type is not None, \"op_type should be specified\")\n utils.make_sure(remaining_op_num is not None, \"remaining_op_num should be specified\")\n\n origin_model_path = self.save_onnx_model(origin_proto, onnx_feed_dict, postfix=\"_origin\")\n\n new_proto = GraphUtil.optimize_model_proto(origin_proto)\n\n self.assertTrue(new_proto, msg=\"model proto after optimizer should not be None\")\n\n new_model_path = self.save_onnx_model(new_proto, onnx_feed_dict, postfix=\"_opt\")\n current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)\n\n self.assertTrue(current[op_type] == remaining_op_num,\n msg=\"Expect \" + str(remaining_op_num) + \" \" + op_type + \" ops left, but actually \" + str(\n current[op_type]) + \" left\")\n\n if self.config.is_onnxruntime_backend:\n expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict, output_names_with_port)\n actual = self.run_onnxruntime(new_model_path, onnx_feed_dict, output_names_with_port)\n else:\n raise ValueError(\"only onnxruntime is supported to test transpose optimizer\")\n\n for expected_val, actual_val in zip(expected, actual):\n self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=1e-5)\n self.assertEqual(expected_val.dtype, actual_val.dtype)\n self.assertEqual(expected_val.shape, actual_val.shape)\n\n return new_proto\n\n @staticmethod\n def _make_onnx_const(np_val, output_name):\n node = helper.make_node(\n 'Constant',\n inputs=[],\n outputs=[output_name],\n value=helper.make_tensor(\n name=output_name,\n data_type=utils.map_numpy_to_onnx_dtype(np_val.dtype),\n dims=np_val.shape,\n vals=np_val.flatten().astype(np_val.dtype),\n ),\n )\n return node\n\n def make_model(self, graph, producer_name=\"onnx-tests\"):\n imp = OperatorSetIdProto()\n imp.version = self.config.opset\n model_proto = helper.make_model(graph, producer_name=producer_name, opset_imports=[imp])\n try:\n model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.config.opset, model_proto.ir_version)\n except: # pylint: disable=bare-except\n pass\n return model_proto\n\n # Tranpose Optimizer Tests Start\n\n def run_transpose_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,\n remaining_transpose_num=None, debug=False, rtol=1e-07):\n return self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type=\"Transpose\",\n remaining_op_num=remaining_transpose_num, debug=debug, rtol=rtol)\n\n def check_transpose_perm(self, model_proto, expected_perm):\n for node in model_proto.graph.node:\n if node.op_type == \"Transpose\":\n perm = 
list(node.attribute[0].ints)\n self.assertEqual(perm, expected_perm)\n\n def test_transpose_with_concat(self):\n input_shape = (2, 3, 4, 5)\n perm = [0, 3, 1, 2]\n input_shape_with_trans = [input_shape[i] for i in perm]\n for axis in [0, 1, 2, 3]:\n output_before_trans = list(input_shape)\n output_before_trans[axis] *= 2\n output_shape = [output_before_trans[i] for i in [0, 3, 1, 2]]\n node1 = helper.make_node(\"Transpose\", [\"input_data1\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Concat\", [\"Y\", \"input_data2\"], [\"Z\"], axis=axis, name=\"concat\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"test_transpose_with_concat\",\n [helper.make_tensor_value_info(\"input_data1\", TensorProto.FLOAT, input_shape_with_trans),\n helper.make_tensor_value_info(\"input_data2\", TensorProto.FLOAT, input_shape),\n ],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, output_shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n feed_dict = {\"input_data1\": np.random.randn(*input_shape_with_trans).astype(np.float32),\n \"input_data2\": np.random.randn(*input_shape).astype(np.float32),\n }\n self.run_transpose_compare([\"res\"], feed_dict, model_proto, remaining_transpose_num=1)\n\n def test_transpose_with_add1(self):\n # when transpose follows with a broadcasting op\n # reshape is needed when switching transpose with this op and op need broadcast its inputs\n node1 = helper.make_node(\"Transpose\", [\"input_data1\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Add\", [\"Y\", \"input_data2\"], [\"Z\"], name=\"add\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"transpose_with_shape\",\n [helper.make_tensor_value_info(\"input_data1\", TensorProto.FLOAT, (2, 3, 4, 5)),\n helper.make_tensor_value_info(\"input_data2\", TensorProto.FLOAT, (3,)),\n ],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n feed_dict = {\"input_data1\": np.random.randn(2, 3, 4, 5).astype(np.float32),\n \"input_data2\": np.random.randn(3).astype(np.float32),\n }\n self.run_transpose_compare([\"res\"], feed_dict, model_proto, remaining_transpose_num=0)\n\n def test_transpose_with_add2(self):\n node1 = helper.make_node(\"Transpose\", [\"input_data1\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Add\", [\"Y\", \"input_data2\"], [\"Z\"], name=\"add\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"transpose_with_shape\",\n [helper.make_tensor_value_info(\"input_data1\", TensorProto.FLOAT, (2, 3, 4, 5)),\n helper.make_tensor_value_info(\"input_data2\", TensorProto.FLOAT, (2, 4, 5, 3)),\n ],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n feed_dict = {\"input_data1\": np.random.randn(2, 3, 4, 5).astype(np.float32),\n \"input_data2\": np.random.randn(2, 4, 5, 3).astype(np.float32),\n }\n self.run_transpose_compare([\"res\"], feed_dict, model_proto, remaining_transpose_num=1)\n\n def test_transpose_relu(self):\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], 
perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Relu\", [\"Y\"], [\"Z\"], name=\"relu\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"relu-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_leaky_relu(self):\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"LeakyRelu\", [\"Y\"], [\"Z\"], alpha=0.02, name=\"relu\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"LeakyRelu-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n @check_opset_min_version(10, \"Slice in opset 10 can accept dymaic 'start' and 'ends'\")\n def test_transpose_slice(self):\n starts = np.array([0, 0, 0, 0], dtype=np.int64)\n ends = np.array([1, 2, 1, 2], dtype=np.int64)\n axes = np.array([0, 1, 2, 3], dtype=np.int64)\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Slice\", [\"Y\", \"starts\", \"ends\", \"axes\"], [\"Z\"], name=\"relu\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"relu-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (1, 2, 2, 1))],\n [\n helper.make_tensor(\"starts\", TensorProto.INT64, starts.shape, starts),\n helper.make_tensor(\"ends\", TensorProto.INT64, ends.shape, ends),\n helper.make_tensor(\"axes\", TensorProto.INT64, axes.shape, axes)\n ]\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n @check_opset_min_version(8, \"Max in opset 10 supports broadcasting\")\n def test_transpose_max(self):\n const_1_val = [2.0]\n const_1 = helper.make_tensor(\"const_1\", TensorProto.FLOAT, (1,), const_1_val)\n const_1_node = helper.make_node(\"Constant\", [], [\"const_1\"], value=const_1, name=\"const_1\")\n\n const_2_val = np.random.randn(2, 4, 5, 3).astype(np.float32)\n const_2 = helper.make_tensor(\"const_2\", TensorProto.FLOAT, (2, 4, 5, 3), const_2_val.flatten())\n const_2_node = helper.make_node(\"Constant\", [], [\"const_2\"], value=const_2, name=\"const_2\")\n\n const_3_val = np.random.randn(2, 4, 5, 3).astype(np.float32)\n const_3 = helper.make_tensor(\"const_3\", TensorProto.FLOAT, (2, 4, 5, 3), const_3_val.flatten())\n const_3_node = helper.make_node(\"Constant\", [], [\"const_3\"], value=const_3, name=\"const_3\")\n\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 
1], name=\"trans_1\")\n node2 = helper.make_node(\"Max\", [\"Y\", \"const_3\", \"const_2\", \"const_1\"], [\"Z\"], name=\"max\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [const_1_node, const_2_node, const_3_node, node1, node2, node3],\n \"Max-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n @check_opset_min_version(8, \"Max in opset 10 supports broadcasting\")\n def test_transpose_max_input_non_const(self):\n const_1_val = [2.0]\n const_1 = helper.make_tensor(\"const_1\", TensorProto.FLOAT, (1,), const_1_val)\n const_1_node = helper.make_node(\"Constant\", [], [\"const_1\"], value=const_1, name=\"const_1\")\n\n const_2_val = np.random.randn(2, 4, 5, 3).astype(np.float32)\n const_2 = helper.make_tensor(\"const_2\", TensorProto.FLOAT, (2, 4, 5, 3), const_2_val.flatten())\n const_2_node = helper.make_node(\"Constant\", [], [\"const_2\"], value=const_2, name=\"const_2\")\n\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Max\", [\"Y\", \"non_const\", \"const_2\", \"const_1\"], [\"Z\"], name=\"max\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"Z1\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [const_1_node, const_2_node, node1, node2, node3],\n \"Max-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5)),\n helper.make_tensor_value_info(\"non_const\", TensorProto.FLOAT, (2, 4, 5, 3))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32),\n \"non_const\": np.random.randn(2, 4, 5, 3).astype(np.float32)},\n model_proto, remaining_transpose_num=1)\n\n def test_transpose_merge(self):\n node0 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y_1\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Mul\", [\"Y\", \"Y_1\"], [\"OUT\"], name=\"mul\")\n\n graph = helper.make_graph(\n [node0, node1, node2],\n \"transpose-merge-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"OUT\", TensorProto.FLOAT, (2, 4, 5, 3))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"OUT\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=1)\n\n def test_transpose_with_shape(self):\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Shape\", [\"Y\"], [\"Z\"], name=\"shape\")\n\n graph = helper.make_graph(\n [node1, node2],\n \"transpose_with_shape\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z\", TensorProto.INT64, [4])],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z\"], {\"X\": np.random.randn(2, 3, 4, 
5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_with_identity(self):\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Identity\", [\"Y\"], [\"Z\"], name=\"identity\")\n\n graph = helper.make_graph(\n [node1, node2],\n \"transpose_with_identity\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z\", TensorProto.FLOAT, (2, 4, 5, 3))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=1)\n\n def test_transpose_with_squeeze1(self):\n # squeeze the first dim\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Squeeze\", [\"Y\"], [\"Z\"], name=\"squeeze\", axes=[0])\n\n graph = helper.make_graph(\n [node1, node2],\n \"transpose_with_squeeze\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (1, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z\", TensorProto.FLOAT, (4, 5, 3))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n model_after_opt = self.run_transpose_compare([\"Z\"], {\"X\": np.random.randn(1, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=1)\n self.check_transpose_perm(model_after_opt, [1, 2, 0])\n\n def test_transpose_with_squeeze2(self):\n # squeeze the second dim\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Squeeze\", [\"Y\"], [\"Z\"], name=\"squeeze\", axes=[1])\n\n graph = helper.make_graph(\n [node1, node2],\n \"transpose_with_squeeze\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (3, 4, 1, 5))],\n [helper.make_tensor_value_info(\"Z\", TensorProto.FLOAT, (3, 5, 4))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n model_after_opt = self.run_transpose_compare([\"Z\"], {\"X\": np.random.randn(3, 4, 1, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=1)\n self.check_transpose_perm(model_after_opt, [0, 2, 1])\n\n def test_transpose_with_squeeze3(self):\n # squeeze the last dim\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Squeeze\", [\"Y\"], [\"Z\"], name=\"squeeze\", axes=[3])\n\n graph = helper.make_graph(\n [node1, node2],\n \"transpose_with_squeeze\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (3, 1, 4, 5))],\n [helper.make_tensor_value_info(\"Z\", TensorProto.FLOAT, (3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z\"], {\"X\": np.random.randn(3, 1, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_with_squeeze4(self):\n # squeeze the two dims\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans\")\n node2 = helper.make_node(\"Squeeze\", [\"Y\"], [\"Z\"], name=\"squeeze\", axes=[1, 3])\n\n graph = helper.make_graph(\n [node1, node2],\n \"transpose_with_squeeze\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (3, 1, 1, 5))],\n [helper.make_tensor_value_info(\"Z\", TensorProto.FLOAT, (3, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Z\"], {\"X\": 
np.random.randn(3, 1, 1, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_with_loop(self):\n def _define_loop_graph(external_inputs):\n # external_inputs: external node which will be used by this graph\n # graph without loop carried\n # computation\n # for(...){a = external_inputs[i]; b = trans(a), c = squeeze(b)}, c is scan output\n node1 = helper.make_node(\"Gather\", [external_inputs[0], \"loop_iter_num\"], [\"Y0\"])\n node2 = helper.make_node(\"Transpose\", [\"Y0\"], [\"Z0\"], perm=[0, 2, 3, 1])\n # graph output\n node3 = helper.make_node(\"Squeeze\", [\"Z0\"], [\"scan_output\"], axes=[0])\n node4 = helper.make_node(\"Identity\", [\"loop_condition\"], [\"loop_cond_output\"])\n node5 = helper.make_node(\"Identity\", [\"loop_condition\"], [\"loop_carried_output\"])\n\n graph = helper.make_graph(\n [node1, node2, node3, node4, node5],\n \"loop_subgraph\",\n [helper.make_tensor_value_info(\"loop_iter_num\", TensorProto.INT64, (1,)), # iteration_num\n helper.make_tensor_value_info(\"loop_condition\", TensorProto.BOOL, ()), # condition\n helper.make_tensor_value_info(\"loop_carried\", TensorProto.BOOL, ()) # loop_carried\n ],\n [helper.make_tensor_value_info(\"loop_cond_output\", TensorProto.BOOL, ()),\n helper.make_tensor_value_info(\"loop_carried_output\", TensorProto.BOOL, ()),\n helper.make_tensor_value_info(\"scan_output\", TensorProto.FLOAT, [\"unknown\"] * 3)\n ],\n )\n return graph\n\n def _make_loop(external_inputs, outputs):\n trip_cnt = self._make_onnx_const(np.array(10, dtype=np.int64), \"trip_cnt\")\n cond = self._make_onnx_const(np.array(True, dtype=np.bool), \"cond\")\n sub_graph = _define_loop_graph(external_inputs)\n loop_node = helper.make_node(\"Loop\", [\"trip_cnt\", \"cond\", \"cond\"], outputs,\n name=\"loop\", body=sub_graph)\n return trip_cnt, cond, loop_node\n\n nodes = _make_loop([\"array\"], [\"loop_carried\", \"scan_out\"])\n res = helper.make_node(\"Transpose\", [\"scan_out\"], [\"Y\"], perm=[0, 3, 1, 2], name=\"trans\")\n\n graph = helper.make_graph(\n [*nodes, res],\n \"transpose_with_loop\",\n [helper.make_tensor_value_info(\"array\", TensorProto.FLOAT, [\"unknow\"] * 4)],\n [helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, [\"unknow\"] * 4)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Y\"], {\"array\": np.random.randn(10, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_trans_with_sub(self):\n io_shape = [2, 3, 4, 5]\n const_shapes = [[2, 4, 5, 3], [4, 5, 3], [5, 3], [3]]\n for trans_is_first_input in [True, False]:\n for const_shape in const_shapes:\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_a\")\n const_tensor = helper.make_tensor(name='const', data_type=TensorProto.FLOAT, dims=const_shape,\n vals=np.random.randn(*const_shape).flatten().astype(np.float32))\n node2 = helper.make_node(\"Constant\", [], [\"const\"], value=const_tensor, name=\"const\")\n if trans_is_first_input:\n node3 = helper.make_node(\"Sub\", [\"Y\", \"const\"], [\"Z\"], name=\"sub\")\n else:\n node3 = helper.make_node(\"Sub\", [\"const\", \"Y\"], [\"Z\"], name=\"sub\")\n\n node4 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_b\")\n graph = helper.make_graph(\n [node1, node2, node3, node4],\n \"test_trans_with_sub\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, io_shape)],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, 
io_shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_trans_with_sub_input_non_const(self):\n io_shape = [2, 3, 4, 5]\n non_const_shapes = [[2, 4, 5, 3], [4, 5, 3], [5, 3]]\n for trans_is_first_input in [True, False]:\n for non_const_shape in non_const_shapes:\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_a\")\n if trans_is_first_input:\n node2 = helper.make_node(\"Sub\", [\"Y\", \"non_const\"], [\"Z\"], name=\"sub\")\n else:\n node2 = helper.make_node(\"Sub\", [\"non_const\", \"Y\"], [\"Z\"], name=\"sub\")\n\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_b\")\n graph = helper.make_graph(\n [node1, node2, node3],\n \"test_trans_with_sub_input_non_const\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, io_shape),\n helper.make_tensor_value_info(\"non_const\", TensorProto.FLOAT, non_const_shape)],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, io_shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32),\n \"non_const\": np.random.randn(*non_const_shape).astype(np.float32)},\n model_proto, remaining_transpose_num=1)\n\n def test_transpose_add_with_input_non_const(self):\n\n node0 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node1 = helper.make_node(\"Add\", [\"Y\", \"A\"], [\"Z\"], name=\"add\")\n node2 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node0, node1, node2],\n \"transpose-add-test-input-non-const\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (1, 1, 3, 3)),\n helper.make_tensor_value_info(\"A\", TensorProto.FLOAT, (1, 3, 3, 1))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 1, 3, 3))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(1, 1, 3, 3).astype(np.float32),\n \"A\": np.random.randn(1, 3, 3, 1).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_add_with_input_const(self):\n const_1_val = np.random.randn(1, 3, 3, 1).astype(np.float32)\n const_1 = helper.make_tensor(\"const_1\", TensorProto.FLOAT, (1, 3, 3, 1), const_1_val.flatten())\n const_1_node = helper.make_node(\"Constant\", [], [\"const_1\"], value=const_1, name=\"const_1\")\n\n node0 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node1 = helper.make_node(\"Add\", [\"Y\", \"const_1\"], [\"Z\"], name=\"add\")\n node2 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [const_1_node, node0, node1, node2],\n \"transpose-add-test-input-const\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (1, 1, 3, 3))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 1, 3, 3))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(1, 1, 3, 3).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_add_with_conv_1(self):\n # case where bias's dim is 1D and can be merged into Conv\n 
const_b_val = np.random.randn(1, 1, 1, 16).astype(np.float32)\n const_b = helper.make_tensor(\"const_b\", TensorProto.FLOAT, (1, 1, 1, 16), const_b_val.flatten())\n const_b_node = helper.make_node(\"Constant\", [], [\"const_b\"], value=const_b, name=\"const_b\")\n\n node0 = helper.make_node(\"Conv\", [\"x\", \"W\"], [\"X\"], name=\"conv\", pads=[0, 0, 0, 0])\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Add\", [\"Y\", \"const_b\"], [\"Z\"], name=\"add\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [const_b_node, node0, node1, node2, node3],\n \"transpose-add-test-with-conv-1\",\n [helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, (1, 5, 3, 3)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, (16, 5, 3, 3))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 16, 1, 1))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"x\": np.random.randn(1, 5, 3, 3).astype(np.float32),\n \"W\": np.random.randn(16, 5, 3, 3).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_add_with_conv_2(self):\n # case where bias's dim is not 1D and can't be merged into Conv\n # add handler just remove the transpose around Add node\n const_b_val = np.random.randn(1, 3, 3, 1).astype(np.float32)\n const_b = helper.make_tensor(\"const_b\", TensorProto.FLOAT, (1, 3, 3, 1), const_b_val.flatten())\n const_b_node = helper.make_node(\"Constant\", [], [\"const_b\"], value=const_b, name=\"const_b\")\n\n node0 = helper.make_node(\"Conv\", [\"x\", \"W\"], [\"X\"], name=\"conv\", pads=[0, 0, 0, 0])\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node2 = helper.make_node(\"Add\", [\"Y\", \"const_b\"], [\"Z\"], name=\"add\")\n node3 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [const_b_node, node0, node1, node2, node3],\n \"transpose-add-test-with-conv-2\",\n [helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, (1, 1, 5, 5)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, (1, 1, 3, 3))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 1, 3, 3))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"x\": np.random.randn(1, 1, 5, 5).astype(np.float32),\n \"W\": np.random.randn(1, 1, 3, 3).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n @check_opset_max_version(10, \"pad\")\n def test_transpose_pad(self):\n node0 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node1 = helper.make_node(\"Pad\", [\"Y\"], [\"Z\"], pads=[1, 0, 1, 3, 0, 0, 2, 0], name=\"pad\")\n node2 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node0, node1, node2],\n \"transpose-pad-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (1, 3, 4, 5))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (2, 6, 4, 8))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(1, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n @check_opset_min_version(11, \"pad\")\n def 
test_transpose_pad11(self):\n\n pads_val = np.array([1, 0, 1, 3, 0, 0, 2, 0], dtype=np.int64)\n pads_tensor = helper.make_tensor(\"Pads\", TensorProto.INT64, [8], pads_val)\n pads_const = helper.make_node(\"Constant\", [], [\"Pads\"], value=pads_tensor, name=\"Pads\")\n\n node0 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node1 = helper.make_node(\"Pad\", [\"Y\", \"Pads\"], [\"Z\"], name=\"pad\")\n node2 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node0, node1, node2, pads_const],\n \"transpose-pad-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (1, 3, 4, 5))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (2, 6, 4, 8))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(1, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_transpose_reducemean(self):\n node0 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node1 = helper.make_node(\"ReduceMean\", [\"Y\"], [\"Z\"], axes=[1, 2], keepdims=1, name=\"reducemean\")\n node2 = helper.make_node(\"Transpose\", [\"Z\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_2\")\n\n graph = helper.make_graph(\n [node0, node1, node2],\n \"transpose-reducemean-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (1, 3, 4, 5))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 3, 1, 1))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(1, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_trans_output_as_graph_outputs(self):\n \"\"\"\n If transpose's output is graph's output, don't optimize it.\n \"\"\"\n trans = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], name=\"trans\", perm=[0, 2, 3, 1])\n graph_proto = helper.make_graph(\n [trans],\n \"trans-to-graph-output\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, (2, 4, 5, 3))],\n )\n\n graph = GraphUtil.create_graph_from_onnx_graph(graph_proto)\n # remove identity to graph output\n identity_op = graph.get_node_by_output(graph.outputs[0])\n graph.outputs = [identity_op.input[0]]\n graph.remove_node(identity_op.name)\n\n optimized_graph = GraphUtil.optimize_graph(graph)\n\n self.assertTrue(optimized_graph, msg=\"graph after optimizer should not be None\")\n\n trans_cnt = len(group_nodes_by_type(optimized_graph)[\"Transpose\"])\n\n self.assertTrue(trans_cnt == 1, msg=\"Expect 1 Transpose ops left, but actually \" + str(trans_cnt) + \" left\")\n\n def test_trans_can_be_replaced_with_reshape1(self):\n # test trans-NHWC\n input_shapes_np = [(2, 3, 4, 1), (2, 1, 1, 4), (2, 3, 4, 1)]\n input_shapes = [(2, 3, 4, 1), (2, 1, 1, 4), (2, -1, -1, 1)]\n perm = (0, 3, 1, 2)\n for input_shape_np, input_shape in zip(input_shapes_np, input_shapes):\n result_shape = [input_shape[i] for i in perm]\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=perm, name=\"trans\")\n graph = helper.make_graph(\n [node1],\n \"test_trans_can_be_replaced_with_reshape\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, input_shape)],\n [helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, result_shape)],\n )\n\n model_proto = self.make_model(graph, 
producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Y\"], {\"X\": np.random.randn(*input_shape_np).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_trans_can_be_replaced_with_reshape2(self):\n # test trans-NCHW\n input_shapes_np = [(2, 1, 3, 4), (2, 4, 1, 1), (2, 1, 3, 4)]\n input_shapes = [(2, 1, 3, 4), (2, 4, 1, 1), (2, 1, -1, -1)]\n perm = (0, 2, 3, 1)\n for input_shape_np, input_shape in zip(input_shapes_np, input_shapes):\n result_shape = [input_shape[i] for i in perm]\n node1 = helper.make_node(\"Transpose\", [\"X\"], [\"Y\"], perm=perm, name=\"trans\")\n graph = helper.make_graph(\n [node1],\n \"test_trans_can_be_replaced_with_reshape\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, input_shape)],\n [helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, result_shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"Y\"], {\"X\": np.random.randn(*input_shape_np).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_two_transposes_switch_with_mul(self):\n const_node = self._make_onnx_const(np.array(np.random.random(6), dtype=np.float32), \"const_10\")\n node0 = helper.make_node(\"Transpose\", [\"u1\"], [\"v1\"], perm=[0, 2, 3, 1], name=\"trans_0\")\n node1 = helper.make_node(\"Transpose\", [\"u2\"], [\"v2\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n\n node2 = helper.make_node(\"Mul\", [\"v1\", \"v2\"], [\"x\"], name=\"mul_1\")\n node3 = helper.make_node(\"Mul\", [\"x\", const_node.output[0]], [\"y\"], name=\"mul_2\")\n node4 = helper.make_node(\"Transpose\", [\"y\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_3\")\n\n graph = helper.make_graph(\n [const_node, node0, node1, node2, node3, node4],\n \"test-transpose-mul\",\n [helper.make_tensor_value_info(\"u1\", TensorProto.FLOAT, (1, 6, 8, 9)),\n helper.make_tensor_value_info(\"u2\", TensorProto.FLOAT, (1, 6, 8, 9))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 6, 8, 9))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"u1\": np.random.randn(1, 6, 8, 9).astype(np.float32),\n \"u2\": np.random.randn(1, 6, 8, 9).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_many_transposes_and_constant_switch_with_sum(self):\n constnode = self._make_onnx_const(np.array(np.random.random((1, 8, 9, 6)), dtype=np.float32), \"v4\")\n node0 = helper.make_node(\"Transpose\", [\"u1\"], [\"v1\"], perm=[0, 2, 3, 1], name=\"trans_0\")\n node1 = helper.make_node(\"Transpose\", [\"u2\"], [\"v2\"], perm=[0, 2, 3, 1], name=\"trans_1\")\n node11 = helper.make_node(\"Transpose\", [\"u3\"], [\"v3\"], perm=[0, 2, 3, 1], name=\"trans_2\")\n\n node2 = helper.make_node(\"Sum\", [\"v1\", \"v2\", \"v3\", \"v4\"], [\"x\"], name=\"sum_1\")\n node3 = helper.make_node(\"Sum\", [\"x\", \"v1\"], [\"y\"], name=\"sum_2\")\n node4 = helper.make_node(\"Transpose\", [\"y\"], [\"res\"], perm=[0, 3, 1, 2], name=\"trans_4\")\n\n graph = helper.make_graph(\n [constnode, node0, node1, node11, node2, node3, node4],\n \"test-transpose-mul\",\n [helper.make_tensor_value_info(\"u1\", TensorProto.FLOAT, (1, 6, 8, 9)),\n helper.make_tensor_value_info(\"u2\", TensorProto.FLOAT, (1, 6, 8, 9)),\n helper.make_tensor_value_info(\"u3\", TensorProto.FLOAT, (1, 6, 8, 9))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 6, 8, 9))],\n )\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], 
{\"u1\": np.random.randn(1, 6, 8, 9).astype(np.float32),\n \"u2\": np.random.randn(1, 6, 8, 9).astype(np.float32),\n \"u3\": np.random.randn(1, 6, 8, 9).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n # Tranpose Optimizer Tests End\n\n # Identity Optimizer Tests Start\n\n def run_identity_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,\n remaining_identity_num=None, debug=False, rtol=1e-07):\n self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type=\"Identity\",\n remaining_op_num=remaining_identity_num, debug=debug, rtol=rtol)\n\n def test_identity_non_graph_output(self):\n node1 = helper.make_node(\"Add\", [\"X\", \"X\"], [\"Y\"], name=\"add\")\n node2 = helper.make_node(\"Identity\", [\"Y\"], [\"Z\"], name=\"identity\")\n node3 = helper.make_node(\"Shape\", [\"Z\"], [\"Z1\"], name=\"shape\")\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"identity-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.INT64, [4])],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_identity_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_identity_num=0)\n\n def test_identity_unremovable_identity(self):\n # should not remove!!\n node1 = helper.make_node(\"Identity\", [\"X\"], [\"Y\"], name=\"identity\")\n\n graph = helper.make_graph(\n [node1],\n \"identity-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_identity_compare([\"Y\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_identity_num=1)\n\n def test_identity_output_as_multiple_graph_outputs(self):\n # handle case like this, both Identity nodes are graph outputs,\n # Add\n # / \\\n # Identity Identity\n # We at most can remove one Identity for this case.\n node1 = helper.make_node(\"Add\", [\"X\", \"X\"], [\"Y\"], name=\"identity\")\n node2 = helper.make_node(\"Identity\", [\"Y\"], [\"Z1\"], name=\"identity2\")\n node3 = helper.make_node(\"Identity\", [\"Y\"], [\"Z2\"], name=\"identity3\")\n graph = helper.make_graph(\n [node1, node2, node3],\n \"identity-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.FLOAT, (2, 3, 4, 5)),\n helper.make_tensor_value_info(\"Z2\", TensorProto.FLOAT, (2, 3, 4, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_identity_compare([\"Z1\", \"Z2\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_identity_num=1)\n\n def test_identity_in_subgraph_non_graph_output(self):\n node1 = helper.make_node(\"Add\", [\"X\", \"X\"], [\"Y\"], name=\"add\")\n\n iter_num_value = np.array(1, dtype=np.int64)\n node2 = helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['iterate_num_value'],\n value=helper.make_tensor(\n name='iterate_num_value',\n data_type=TensorProto.INT64,\n dims=iter_num_value.shape,\n vals=iter_num_value.flatten().astype(np.int64),\n ),\n )\n\n cond_value = np.array(True, dtype=np.bool)\n node3 = helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['cond_value'],\n value=helper.make_tensor(\n name='cond_value',\n data_type=TensorProto.BOOL,\n dims=iter_num_value.shape,\n 
vals=cond_value.flatten().astype(np.bool),\n ),\n )\n\n # sub graph\n sub_node1 = helper.make_node(\"Add\", [\"loop_var_1\", \"loop_var_1\"], [\"SubY\"], name=\"sub_add\")\n sub_node2 = helper.make_node(\"Identity\", [\"SubY\"], [\"SubIdentity1\"], name=\"sub_identity_1\")\n sub_node3 = helper.make_node(\"Identity\", [\"SubIdentity1\"], [\"loop_var_out_1\"], name=\"sub_identity_2\")\n sub_node4 = helper.make_node(\"Identity\", [\"loop_condition\"], [\"loop_cond_output\"], name=\"sub_identity_3\")\n sub_graph = helper.make_graph(\n [sub_node1, sub_node2, sub_node3, sub_node4],\n \"identity_subgraph-test\",\n [helper.make_tensor_value_info(\"loop_iter_num\", TensorProto.INT64, (1,)), # iteration_num\n helper.make_tensor_value_info(\"loop_condition\", TensorProto.BOOL, ()), # condition\n helper.make_tensor_value_info(\"loop_var_1\", TensorProto.FLOAT, ()), # loop-carried dependency\n ],\n [helper.make_tensor_value_info(\"loop_cond_output\", TensorProto.BOOL, ()),\n helper.make_tensor_value_info(\"loop_var_out_1\", TensorProto.FLOAT, ())\n ],\n )\n # sub graph ends\n\n loop_node = helper.make_node(\"Loop\", [\"iterate_num_value\", \"cond_value\", \"Y\"], [\"loop_var_1_output\"],\n name=\"loop\", body=sub_graph)\n\n node4 = helper.make_node(\"Identity\", [\"loop_var_1_output\"], [\"Z\"], name=\"identity\")\n node5 = helper.make_node(\"Shape\", [\"Z\"], [\"Z1\"], name=\"shape\")\n\n graph = helper.make_graph(\n [node1, node2, node3, loop_node, node4, node5],\n \"identity-test\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (2, 3, 4, 5))],\n [helper.make_tensor_value_info(\"Z1\", TensorProto.INT64, [4])],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_identity_compare([\"Z1\"], {\"X\": np.random.randn(2, 3, 4, 5).astype(np.float32)},\n model_proto, remaining_identity_num=0)\n\n # Identity Optimizer Tests End\n\n # Merge Duplicated Nodes Optimizer Tests Start\n\n def run_merge_duplicated_nodes_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,\n op_type=None, remaining_op_num=None, debug=False, rtol=1e-07,\n graph_validator=None):\n new_proto = self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type=op_type,\n remaining_op_num=remaining_op_num, debug=debug, rtol=rtol)\n if graph_validator:\n self.assertTrue(graph_validator(new_proto.graph))\n\n def test_duplicated_duplicated_input(self):\n # same input or not\n node0 = helper.make_node('Add', inputs=[\"X\", \"X\"], outputs=[\"value0\"])\n node1 = helper.make_node('Add', inputs=[\"X\", \"X\"], outputs=[\"value1\"])\n node2 = helper.make_node('Add', inputs=[\"value1\", \"X\"], outputs=[\"value2\"])\n node3 = helper.make_node(\"Mul\", [\"value0\", \"value2\"], [\"value3\"])\n node4 = helper.make_node(\"Mul\", [\"value1\", \"value3\"], [\"OUT\"])\n\n graph = helper.make_graph(\n [node0, node1, node2, node3, node4],\n \"test_duplicated_duplicated_input\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (5, 5))],\n [helper.make_tensor_value_info(\"OUT\", TensorProto.FLOAT, (5, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_merge_duplicated_nodes_compare([\"OUT\"], {\"X\": np.random.randn(5, 5).astype(np.float32)}, model_proto,\n op_type=\"Add\", remaining_op_num=2)\n\n def test_duplicated_duplicated_attributes(self):\n # same attr or not\n node0 = helper.make_node('ReduceSum', inputs=[\"X\"], outputs=[\"value0\"], axes=[0], keepdims=0)\n node1 = helper.make_node('ReduceSum', inputs=[\"X\"], 
outputs=[\"value1\"], axes=[0], keepdims=0)\n node2 = helper.make_node('ReduceSum', inputs=[\"X\"], outputs=[\"value2\"], axes=[1], keepdims=0)\n node3 = helper.make_node('Add', inputs=[\"value0\", \"value1\"], outputs=[\"value3\"])\n node4 = helper.make_node(\"Mul\", [\"value2\", \"value3\"], [\"OUT\"])\n\n graph = helper.make_graph(\n [node0, node1, node2, node3, node4],\n \"test_duplicated_duplicated_attributes\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (5, 5))],\n [helper.make_tensor_value_info(\"OUT\", TensorProto.FLOAT, (5,))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_merge_duplicated_nodes_compare([\"OUT\"], {\"X\": np.random.randn(5, 5).astype(np.float32)}, model_proto,\n op_type=\"ReduceSum\", remaining_op_num=2)\n\n def _check_initializer_num(self, graph_proto, num):\n print(len(graph_proto.initializer))\n return num == len(graph_proto.initializer)\n\n def test_duplicated_duplicated_constant(self):\n const_val = np.array([1, 2, 3], dtype=np.float32)\n tensor_1 = helper.make_tensor(\"tensor_1\", TensorProto.FLOAT, const_val.shape, const_val)\n tensor_2 = helper.make_tensor(\"tensor_2\", TensorProto.FLOAT, const_val.shape, const_val)\n tensor_3 = helper.make_tensor(\"tensor_3\", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)\n tensor_4 = helper.make_tensor(\"tensor_4\", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)\n node0 = helper.make_node('Constant', inputs=[], outputs=[\"value0\"], value=tensor_1)\n node1 = helper.make_node('Constant', inputs=[], outputs=[\"value1\"], value=tensor_2)\n node2 = helper.make_node('Constant', inputs=[], outputs=[\"value2\"], value=tensor_3)\n node3 = helper.make_node('Constant', inputs=[], outputs=[\"value3\"], value=tensor_4)\n node4 = helper.make_node(\"Mul\", [\"value0\", \"value1\"], [\"output1\"])\n node5 = helper.make_node(\"Mul\", [\"value2\", \"output1\"], [\"output2\"])\n node6 = helper.make_node(\"Mul\", [\"value3\", \"output2\"], [\"OUT\"])\n\n graph = helper.make_graph(\n [node0, node1, node2, node3, node4, node5, node6],\n \"test_duplicated_duplicated_constant\",\n [],\n [helper.make_tensor_value_info(\"OUT\", TensorProto.FLOAT, (3,))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_merge_duplicated_nodes_compare([\"OUT\"], {}, model_proto, op_type=\"Constant\", remaining_op_num=0,\n graph_validator=lambda g: self._check_initializer_num(g, 1))\n\n def test_duplicated_duplicated_constant_and_initializer(self):\n const_val = np.array([1, 2, 3], dtype=np.float32)\n tensor_1 = helper.make_tensor(\"value0\", TensorProto.FLOAT, const_val.shape, const_val)\n tensor_2 = helper.make_tensor(\"value1\", TensorProto.FLOAT, const_val.shape, const_val)\n tensor_3 = helper.make_tensor(\"value2\", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)\n tensor_4 = helper.make_tensor(\"value3\", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)\n node0 = helper.make_node('Constant', inputs=[], outputs=[\"value0\"], value=tensor_1)\n node1 = helper.make_node('Constant', inputs=[], outputs=[\"value1\"], value=tensor_2)\n node4 = helper.make_node(\"Mul\", [\"value0\", \"value1\"], [\"output1\"])\n node5 = helper.make_node(\"Mul\", [\"value2\", \"output1\"], [\"output2\"])\n node6 = helper.make_node(\"Mul\", [\"value3\", \"output2\"], [\"OUT\"])\n\n graph = helper.make_graph(\n [node0, node1, node4, node5, node6],\n \"test_duplicated_duplicated_constant\",\n 
[helper.make_tensor_value_info(\"value2\", TensorProto.FLOAT, (3,))],\n [helper.make_tensor_value_info(\"OUT\", TensorProto.FLOAT, (3,))],\n [tensor_3, tensor_4]\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_merge_duplicated_nodes_compare([\"OUT\"], {}, model_proto, op_type=\"Constant\", remaining_op_num=0,\n graph_validator=lambda g: self._check_initializer_num(g, 2))\n\n def test_duplicated_node_is_graph_output(self):\n node0 = helper.make_node('Add', inputs=[\"X\", \"X\"], outputs=[\"value0\"])\n node1 = helper.make_node('Add', inputs=[\"X\", \"X\"], outputs=[\"value1\"])\n node2 = helper.make_node('Add', inputs=[\"value1\", \"X\"], outputs=[\"value2\"])\n\n graph = helper.make_graph(\n [node0, node1, node2],\n \"test_duplicated_node_is_graph_output\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (5, 5))],\n [helper.make_tensor_value_info(\"value1\", TensorProto.FLOAT, (5, 5)),\n helper.make_tensor_value_info(\"value2\", TensorProto.FLOAT, (5, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_merge_duplicated_nodes_compare([\"value1\", \"value2\"],\n {\"X\": np.random.randn(5, 5).astype(np.float32)}, model_proto,\n op_type=\"Add\", remaining_op_num=2)\n\n @check_opset_min_version(10, \"Dropout in opset 10 produces mask of 'bool' type\")\n def test_duplicated_different_output_length(self):\n node0 = helper.make_node('Dropout', inputs=[\"X\"], outputs=[\"value0\"])\n node1 = helper.make_node('Dropout', inputs=[\"X\"], outputs=[\"value1\", \"mask\"])\n node2 = helper.make_node('Dropout', inputs=[\"value1\"], outputs=[\"value2\"])\n\n graph = helper.make_graph(\n [node0, node1, node2],\n \"test_duplicated_different_output_length\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (5,))],\n [helper.make_tensor_value_info(\"value1\", TensorProto.FLOAT, (5,)),\n helper.make_tensor_value_info(\"mask\", TensorProto.BOOL, (5,)),\n helper.make_tensor_value_info(\"value2\", TensorProto.FLOAT, (5,))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_merge_duplicated_nodes_compare([\"value1\", \"mask\", \"value2\"],\n {\"X\": np.random.randn(5).astype(np.float32)},\n model_proto,\n op_type=\"Dropout\", remaining_op_num=2)\n\n def test_duplicated_need_multiple_run(self):\n node00 = helper.make_node('Log', inputs=[\"X\"], outputs=[\"value00\"])\n node01 = helper.make_node('Log', inputs=[\"value00\"], outputs=[\"value01\"])\n node02 = helper.make_node('Log', inputs=[\"value01\"], outputs=[\"value02\"])\n\n node10 = helper.make_node('Log', inputs=[\"X\"], outputs=[\"value10\"])\n node11 = helper.make_node('Log', inputs=[\"value10\"], outputs=[\"value11\"])\n node12 = helper.make_node('Log', inputs=[\"value11\"], outputs=[\"value12\"])\n\n res = helper.make_node('Add', inputs=[\"value02\", \"value12\"], outputs=[\"res\"])\n\n graph = helper.make_graph(\n [node00, node01, node02, node10, node11, node12, res],\n \"test_duplicated_node_is_graph_output\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (5,))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (5,))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_merge_duplicated_nodes_compare([\"res\"], {\"X\": np.random.randn(5).astype(np.float32)},\n model_proto,\n op_type=\"Log\", remaining_op_num=3)\n\n # Merge Duplicated Nodes Optimizer Tests End\n\n # Const Fold Optimizer Tests Start\n\n def test_const_fold_trans_with_const1(self):\n shape = (6, 6)\n 
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,\n vals=np.random.randn(*shape).flatten().astype(np.float32))\n node1 = helper.make_node(\"Constant\", [], [\"const\"], value=const_tensor)\n node2 = helper.make_node(\"Transpose\", [\"const\"], [\"value1\"])\n node3 = helper.make_node(\"Add\", [\"value1\", \"X\"], [\"res\"])\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"test_const_fold_trans_with_const1\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, shape)],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(*shape).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_const_fold_trans_with_const2(self):\n # need multiple optimization run\n shape = (6, 6)\n const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,\n vals=np.random.randn(*shape).flatten().astype(np.float32))\n node1 = helper.make_node(\"Constant\", [], [\"const\"], value=const_tensor)\n node2 = helper.make_node(\"Transpose\", [\"const\"], [\"value1\"])\n node3 = helper.make_node(\"Transpose\", [\"value1\"], [\"value2\"])\n node4 = helper.make_node(\"Add\", [\"value2\", \"X\"], [\"res\"])\n\n graph = helper.make_graph(\n [node1, node2, node3, node4],\n \"test_const_fold_trans_with_const2\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, shape)],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"X\": np.random.randn(*shape).astype(np.float32)},\n model_proto, remaining_transpose_num=0)\n\n def test_const_fold_node_is_output(self):\n # need multiple optimization run\n shape = (6, 6)\n const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,\n vals=np.random.randn(*shape).flatten().astype(np.float32))\n node1 = helper.make_node(\"Constant\", [], [\"const\"], value=const_tensor)\n node2 = helper.make_node(\"Transpose\", [\"const\"], [\"value1\"])\n node3 = helper.make_node(\"Transpose\", [\"value1\"], [\"res\"])\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"test_const_fold_node_is_output\",\n [],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {},\n model_proto, remaining_transpose_num=0)\n\n def test_const_fold_unsqueeze_with_const(self):\n shape = (6, 6)\n const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,\n vals=np.random.randn(*shape).flatten().astype(np.float32))\n node1 = helper.make_node(\"Constant\", [], [\"const\"], value=const_tensor)\n node2 = helper.make_node(\"Unsqueeze\", [\"const\"], [\"value1\"], axes=[0, 2, 3])\n node3 = helper.make_node(\"Add\", [\"value1\", \"X\"], [\"res\"])\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"test_const_fold_unsqueeze_with_const\",\n [helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, (1,))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (1, 6, 1, 1, 6))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_and_compare([\"res\"], {\"X\": np.random.randn(1).astype(np.float32)}, model_proto,\n \"Unsqueeze\", 0)\n\n def 
test_const_fold_cast_with_const(self):\n shape = (6, 6)\n const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,\n vals=np.random.randn(*shape).flatten().astype(np.float32))\n node1 = helper.make_node(\"Constant\", [], [\"const\"], value=const_tensor)\n node2 = helper.make_node(\"Cast\", [\"const\"], [\"value1\"], to=TensorProto.INT64)\n node3 = helper.make_node(\"Add\", [\"value1\", \"X\"], [\"res\"])\n\n graph = helper.make_graph(\n [node1, node2, node3],\n \"test_const_fold_cast_with_const\",\n [helper.make_tensor_value_info(\"X\", TensorProto.INT64, shape)],\n [helper.make_tensor_value_info(\"res\", TensorProto.INT64, shape)],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_and_compare([\"res\"], {\"X\": np.random.randn(*shape).astype(np.int64)}, model_proto,\n \"Cast\", 0)\n\n # Const Fold Optimizer Tests End\n\n def test_transpose_back_to_back_non_const(self):\n\n node0 = helper.make_node(\"Transpose\", [\"u\"], [\"v\"], perm=[0, 2, 3, 1], name=\"trans_0\")\n node1 = helper.make_node(\"Transpose\", [\"v\"], [\"w\"], perm=[0, 3, 1, 2], name=\"trans_1\")\n node2 = helper.make_node(\"Transpose\", [\"w\"], [\"x\"], perm=[0, 3, 2, 1], name=\"trans_2\")\n node3 = helper.make_node(\"Transpose\", [\"x\"], [\"res\"], perm=[1, 3, 0, 2], name=\"trans_3\")\n\n graph = helper.make_graph(\n [node0, node1, node2, node3],\n \"test-transpose-back-to-back-non-const\",\n [helper.make_tensor_value_info(\"u\", TensorProto.FLOAT, (5, 5, 5, 5))],\n [helper.make_tensor_value_info(\"res\", TensorProto.FLOAT, (5, 5, 5, 5))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n self.run_transpose_compare([\"res\"], {\"u\": np.random.randn(5, 5, 5, 5).astype(np.float32)},\n model_proto, remaining_transpose_num=1)\n\n @check_opset_min_version(9, \"string type tensor\")\n def test_cast_back_to_back_non_const_mixed_types(self):\n node0 = helper.make_node(\"Cast\", [\"u\"], [\"v\"], to=11, name=\"cast_0\") # double\n node1 = helper.make_node(\"Cast\", [\"v\"], [\"w\"], to=6, name=\"cast_1\") # int32\n node2 = helper.make_node(\"Cast\", [\"w\"], [\"x\"], to=1, name=\"cast_2\") # float\n node3 = helper.make_node(\"Cast\", [\"x\"], [\"res\"], to=7, name=\"cast_3\") # int64\n\n node4 = helper.make_node(\"Cast\", [\"w\"], [\"w2\"], to=6, name=\"cast_4\") # int32\n node5 = helper.make_node(\"Cast\", [\"w2\"], [\"res2\"], to=7, name=\"cast_5\") # int64\n\n node6 = helper.make_node(\"Cast\", [\"x\"], [\"x2\"], to=9, name=\"cast_6\") # bool\n # TODO: uncomment below after fix\n # https://github.com/microsoft/onnxruntime/issues/2338\n # node7 = helper.make_node(\"Cast\", [\"x2\"], [\"x3\"], to=8, name=\"cast_7\") # string\n node8 = helper.make_node(\"Cast\", [\"x2\"], [\"res3\"], to=3, name=\"cast_8\") # int8\n\n graph = helper.make_graph(\n [node0, node1, node2, node3, node4, node5, node6, node8],\n \"test-cast-back-to-back-non-const\",\n [helper.make_tensor_value_info(\"u\", TensorProto.FLOAT, (1, 2, 3))],\n [helper.make_tensor_value_info(\"res\", TensorProto.INT64, (1, 2, 3)),\n helper.make_tensor_value_info(\"res2\", TensorProto.INT64, (1, 2, 3)),\n helper.make_tensor_value_info(\"res3\", TensorProto.INT8, (1, 2, 3))],\n )\n\n model_proto = self.make_model(graph, producer_name=\"onnx-tests\")\n\n self.run_and_compare([\"res\", \"res2\", \"res3\"], {\"u\": np.random.randn(1, 2, 3).astype(np.float32)}, model_proto,\n \"Cast\", 5)\n\n\nif __name__ == \"__main__\":\n unittest_main()\n" ]
[ [ "numpy.random.random", "numpy.array", "numpy.random.randn" ] ]
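The tf2onnx optimizer tests in the record above (identity removal, duplicate-node merging, constant folding) all build tiny graphs with onnx.helper. As a minimal illustration of what the constant-folding cases check, the sketch below shows the before/after shape of folding a Transpose over a Constant. The node and tensor names here are made up, and this is not the optimizer's API, only the graph pattern the tests assert on (remaining_transpose_num=0).

# Illustrative sketch only: the pattern behind test_const_fold_trans_with_const1.
# Names (const_w, folded_w, w, w_t) are hypothetical.
import numpy as np
from onnx import helper, TensorProto

w = np.random.randn(6, 6).astype(np.float32)

# Before folding: a Constant node feeding a Transpose node.
const_node = helper.make_node(
    "Constant", [], ["const_w"],
    value=helper.make_tensor("w", TensorProto.FLOAT, w.shape, w.flatten()))
trans_node = helper.make_node("Transpose", ["const_w"], ["folded_w"])

# After folding: both nodes collapse into one pre-transposed constant,
# which is why the tests expect remaining_transpose_num=0.
folded_node = helper.make_node(
    "Constant", [], ["folded_w"],
    value=helper.make_tensor("w_t", TensorProto.FLOAT, w.T.shape,
                             w.T.flatten()))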
suzrz/nnvis
[ "a984449fb5c30b8ed2d2c451c36a53b16792cc67" ]
[ "nnvis/prelim.py" ]
[ "import logging\nimport copy\nimport torch\nimport numpy as np\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch import optim\nfrom nnvis import paths, net, data_loader\n\nlogger = logging.getLogger(\"vis_net\")\n\n\ndef pre_train_subset(model, device, subset_list, epochs, test_loader):\n \"\"\"\n Function to examine impact of different sizes of training subset.\n\n :param model: NN model\n :param device: device to be used\n :param subset_list: list of subsets sizes to be examinated\n :param epochs: number of training epoch\n :param test_loader: test dataset loader\n \"\"\"\n logger.info(\"Subset preliminary experiment started\")\n if paths.train_subs_loss.exists() and paths.train_subs_acc.exists():\n return\n\n loss_list = []\n acc_list = []\n theta_i = copy.deepcopy(torch.load(paths.init_state))\n theta_f = copy.deepcopy(torch.load(paths.final_state))\n\n for n_samples in subset_list:\n model.load_state_dict(theta_i)\n\n optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5) # set optimizer\n scheduler = StepLR(optimizer, step_size=1, gamma=0.7) # set scheduler\n\n for epoch in range(1, epochs):\n train_loader, test_loader = data_loader.data_load(train_samples=n_samples)\n\n net.train(model, train_loader, optimizer, device, epoch)\n net.test(model, test_loader, device)\n\n scheduler.step()\n logger.debug(f\"Finished epoch for tranining subset {epoch}, {n_samples}\")\n\n loss, acc = net.test(model, test_loader, device)\n\n loss_list.append(loss)\n acc_list.append(acc)\n\n np.savetxt(paths.train_subs_loss, loss_list)\n np.savetxt(paths.train_subs_acc, acc_list)\n\n model.load_state_dict(theta_f)\n\n\ndef pre_test_subset(model, device, subset_list):\n \"\"\"\n Function examines impact of test dataset size on stability of measurements\n\n :param model: NN model\n :param device: device to be used\n :param subset_list: list of subset sizes to be examined\n \"\"\"\n if paths.test_subs_loss.exists() and paths.test_subs_acc.exists():\n return\n\n subset_losses = []\n subset_accs = []\n theta_f = copy.deepcopy(torch.load(paths.final_state))\n\n model.load_state_dict(theta_f)\n\n for n_samples in subset_list:\n losses = []\n accs = []\n for x in range(10):\n _, test_loader = data_loader.data_load(test_samples=n_samples) # choose random data each time\n loss, acc = net.test(model, test_loader, device)\n losses.append(loss)\n accs.append(acc)\n logger.info(f\"Subset size: {n_samples}\\n\"\n f\"Validation loss: {loss}\\n\"\n f\"Accuracy: {acc}\\n\")\n\n subset_losses.append(losses)\n subset_accs.append(accs)\n\n np.savetxt(paths.test_subs_loss, subset_losses)\n np.savetxt(paths.test_subs_acc, subset_accs)\n\n\ndef pre_epochs(model, device, epochs_list):\n \"\"\"\n Function examines performance of the model after certain number of epochs\n\n :param model: NN model\n :param device: device to be used\n :param epochs_list: list of epochs numbers after which will be the model evaluated\n \"\"\"\n logger.info(\"Epochs performance experiment started.\")\n if paths.epochs_loss.exists() and paths.epochs_acc.exists():\n return\n\n loss_list = []\n acc_list = []\n\n theta_i = copy.deepcopy(torch.load(paths.init_state))\n\n model.load_state_dict(theta_i)\n optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5) # set optimizer\n scheduler = StepLR(optimizer, step_size=1, gamma=0.7) # set scheduler\n train_loader, test_loader = data_loader.data_load()\n\n for epoch in range(max(epochs_list) + 1):\n net.train(model, train_loader, optimizer, device, epoch)\n net.test(model, test_loader, 
device)\n\n        scheduler.step()\n\n        logger.debug(f\"Finished epoch {epoch}\")\n        if epoch in epochs_list:\n            loss, acc = net.test(model, test_loader, device)\n\n            loss_list.append(loss)\n            acc_list.append(acc)\n            logger.info(f\"Performance of the model for epoch {epoch}\"\n                        f\"Validation loss: {loss}\"\n                        f\"Accuracy: {acc}\")\n\n    np.savetxt(paths.epochs_loss, loss_list)\n    np.savetxt(paths.epochs_acc, acc_list)\n" ]
[ [ "torch.optim.lr_scheduler.StepLR", "numpy.savetxt", "torch.load" ] ]
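pre_train_subset and pre_epochs in the record above lean on data_loader.data_load(train_samples=n) to hand back a reduced training set. That helper is not reproduced in this record, so the following is only a guess at one way such a subset loader could be assembled with torch.utils.data.Subset; the toy tensors, sizes, and names are placeholders.

# Hypothetical stand-in for data_loader.data_load(train_samples=n);
# the real nnvis loader is not shown above.
import numpy as np
import torch
from torch.utils.data import DataLoader, Subset, TensorDataset

full_train = TensorDataset(torch.randn(1000, 1, 28, 28),
                           torch.randint(0, 10, (1000,)))
n_samples = 200
idx = np.random.choice(len(full_train), size=n_samples, replace=False)
train_loader = DataLoader(Subset(full_train, idx.tolist()),
                          batch_size=64, shuffle=True)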
PanagiotisP/demucs
[ "d115d0773ca08a081f5b6bfe274cf0e4ed9e2677" ]
[ "result_table.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport gzip\nimport json\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport treetable as tt\n\nBASELINES = [\n 'WaveUNet',\n 'MMDenseLSTM',\n 'OpenUnmix',\n 'IRM2',\n]\nEVALS = Path(\"evals\")\nLOGS = Path(\"logs\")\nBASELINE_EVALS = Path(\"baselines\")\nSTD_KEY = \"seed\"\n\nparser = argparse.ArgumentParser(\"result_table.py\")\nparser.add_argument(\"-p\",\n \"--paper\",\n action=\"store_true\",\n help=\"show results from the paper experiment\")\nparser.add_argument(\"-i\", \"--individual\", action=\"store_true\", help=\"no aggregation by seed\")\nparser.add_argument(\"-l\", \"--latex\", action=\"store_true\", help=\"output easy to copy latex\")\nparser.add_argument(\"metric\", default=\"SDR\", nargs=\"?\")\nargs = parser.parse_args()\n\nif args.paper:\n EVALS = Path(\"results/evals\")\n LOGS = Path(\"results/logs\")\n\n\ndef read_track(metric, results, pool=np.nanmedian):\n all_metrics = {}\n for target in results[\"targets\"]:\n source = target[\"name\"]\n metrics = [frame[\"metrics\"][metric] for frame in target[\"frames\"]]\n metrics = pool(metrics)\n all_metrics[source] = metrics\n return all_metrics\n\n\ndef read(metric, path, pool=np.nanmedian):\n all_metrics = defaultdict(list)\n for f in path.iterdir():\n if f.name.endswith(\".json.gz\"):\n results = json.load(gzip.open(f, \"r\"))\n metrics = read_track(metric, results, pool=pool)\n for source, value in metrics.items():\n all_metrics[source].append(value)\n return {key: np.array(value) for key, value in all_metrics.items()}\n\n\nall_stats = defaultdict(list)\n# for name in BASELINES:\n# all_stats[name] = [read(args.metric, BASELINE_EVALS / name / \"test\")]\nfor path in EVALS.iterdir():\n results = path / \"results\" / \"test\"\n if not results.exists():\n continue\n if not args.paper and not (LOGS / (path.name + \".done\")).exists():\n continue\n name = path.name\n model = \"Demucs\"\n if \"tasnet\" in name:\n model = \"Tasnet\"\n if name == \"default\":\n parts = []\n else:\n parts = [p.split(\"=\") for p in name.split(\" \") if \"tasnet\" not in p]\n if not args.individual:\n parts = [(k, v) for k, v in parts if k != STD_KEY]\n name = model + \" \" + \" \".join(f\"{k}={v}\" for k, v in parts)\n stats = read(args.metric, results)\n # if (not stats or len(stats[\"drums\"]) != 50):\n # print(f\"Missing stats for {results}\", file=sys.stderr)\n # else:\n all_stats[name].append(stats)\n\nmetrics = [tt.leaf(\"score\", \".2f\"), tt.leaf(\"std\", \".2f\")]\nsources = [\"drums\", \"bass\", \"other\", \"vocals\"]\nsources = [\"accompaniment\", \"vocals\"]\n\n# mytable = tt.table([tt.leaf(\"name\"), tt.group(\"all\", metrics + [tt.leaf(\"count\")])] +\nmytable = tt.table([tt.leaf(\"name\"), tt.group(\"all\", metrics)] +\n [tt.group(source, metrics) for idx, source in enumerate(sources)])\n\nlines = []\nfor name, stats in all_stats.items():\n line = {\"name\": name}\n if 'accompaniment' in stats:\n del stats['accompaniment']\n alls = []\n for source in sources:\n stat = [np.nanmedian(s[source]) for s in stats]\n alls.append(stat)\n line[source] = {\"score\": np.mean(stat), \"std\": np.std(stat) / len(stat)**0.5}\n alls = np.array(alls)\n line[\"all\"] = {\n \"score\": alls.mean(),\n \"std\": alls.mean(0).std() / alls.shape[1]**0.5,\n \"count\": alls.shape[1]\n }\n 
lines.append(line)\n\n\ndef latex_number(m):\n out = f\"{m['score']:.2f}\"\n if m[\"std\"] > 0:\n std = \"{:.2f}\".format(m[\"std\"])[1:]\n out += f\" $\\\\scriptstyle\\\\pm {std}$\"\n return out\n\n\nlines.sort(key=lambda x: -x[\"all\"][\"score\"])\nif args.latex:\n for line in lines:\n cols = [\n line['name'],\n latex_number(line[\"all\"]),\n latex_number(line[\"drums\"]),\n latex_number(line[\"bass\"]),\n latex_number(line[\"other\"]),\n latex_number(line[\"vocals\"])\n ]\n print(\" & \".join(cols) + r\" \\\\\")\nelse:\n print(tt.treetable(lines, mytable, colors=['33', '0']))\n" ]
[ [ "numpy.std", "numpy.array", "numpy.mean", "numpy.nanmedian" ] ]
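The result_table.py record above pools per-frame metrics with nanmedian per track, then reports a mean and a standard error across seeds (the score/std fields of each output line). The few lines below just replay that aggregation on toy numbers to make the arithmetic concrete.

# Toy replay of the aggregation used above: median over tracks per run,
# then mean +/- standard error over runs (seeds).
import numpy as np

per_run_track_sdr = [np.array([5.1, 6.0, np.nan, 5.7]),   # seed 1
                     np.array([5.3, 5.9, 6.2, 5.5])]      # seed 2
stat = [np.nanmedian(s) for s in per_run_track_sdr]
score = np.mean(stat)
std_err = np.std(stat) / len(stat) ** 0.5
print(f"{score:.2f} +/- {std_err:.2f}")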
lharri73/ray
[ "3e42f54910ee8fecdd09a9a69e4852d6fb946e6d" ]
[ "python/ray/experimental/data/impl/shuffle.py" ]
[ "import math\nfrom typing import TypeVar, List, Optional\n\nimport numpy as np\n\nimport ray\nfrom ray.experimental.data.block import Block, BlockAccessor, BlockMetadata\nfrom ray.experimental.data.impl.progress_bar import ProgressBar\nfrom ray.experimental.data.impl.block_list import BlockList\nfrom ray.experimental.data.impl.arrow_block import DelegatingArrowBlockBuilder\nfrom ray.experimental.data.impl.remote_fn import cached_remote_fn\n\nT = TypeVar(\"T\")\n\n\ndef simple_shuffle(input_blocks: BlockList[T],\n output_num_blocks: int,\n *,\n random_shuffle: bool = False,\n random_seed: Optional[int] = None) -> BlockList[T]:\n input_num_blocks = len(input_blocks)\n\n shuffle_map = cached_remote_fn(_shuffle_map).options(\n num_returns=output_num_blocks)\n shuffle_reduce = cached_remote_fn(_shuffle_reduce, num_returns=2)\n\n map_bar = ProgressBar(\"Shuffle Map\", position=0, total=input_num_blocks)\n\n shuffle_map_out = [\n shuffle_map.remote(block, i, output_num_blocks, random_shuffle,\n random_seed) for i, block in enumerate(input_blocks)\n ]\n if output_num_blocks == 1:\n # Handle the num_returns=1 edge case which doesn't return a list.\n shuffle_map_out = [[x] for x in shuffle_map_out]\n map_bar.block_until_complete([x[0] for x in shuffle_map_out])\n map_bar.close()\n\n # Randomize the reduce order of the blocks.\n if random_shuffle:\n random = np.random.RandomState(random_seed)\n random.shuffle(shuffle_map_out)\n\n reduce_bar = ProgressBar(\n \"Shuffle Reduce\", position=0, total=output_num_blocks)\n shuffle_reduce_out = [\n shuffle_reduce.remote(\n *[shuffle_map_out[i][j] for i in range(input_num_blocks)])\n for j in range(output_num_blocks)\n ]\n new_blocks, new_metadata = zip(*shuffle_reduce_out)\n reduce_bar.block_until_complete(list(new_blocks))\n new_metadata = ray.get(list(new_metadata))\n reduce_bar.close()\n\n return BlockList(list(new_blocks), list(new_metadata))\n\n\ndef _shuffle_map(block: Block, idx: int, output_num_blocks: int,\n random_shuffle: bool,\n random_seed: Optional[int]) -> List[Block]:\n block = BlockAccessor.for_block(block)\n\n # Randomize the distribution of records to blocks.\n if random_shuffle:\n seed_i = random_seed + idx if random_seed is not None else None\n block = block.random_shuffle(seed_i)\n block = BlockAccessor.for_block(block)\n\n slice_sz = max(1, math.ceil(block.num_rows() / output_num_blocks))\n slices = []\n for i in range(output_num_blocks):\n slices.append(block.slice(i * slice_sz, (i + 1) * slice_sz, copy=True))\n\n # Randomize the distribution order of the blocks (this matters when\n # some blocks are larger than others).\n if random_shuffle:\n random = np.random.RandomState(seed_i)\n random.shuffle(slices)\n\n num_rows = sum(BlockAccessor.for_block(s).num_rows() for s in slices)\n assert num_rows == block.num_rows(), (num_rows, block.num_rows())\n # Needed to handle num_returns=1 edge case in Ray API.\n if len(slices) == 1:\n return slices[0]\n else:\n return slices\n\n\ndef _shuffle_reduce(*mapper_outputs: List[Block]) -> (Block, BlockMetadata):\n builder = DelegatingArrowBlockBuilder()\n for block in mapper_outputs:\n builder.add_block(block)\n new_block = builder.build()\n accessor = BlockAccessor.for_block(new_block)\n new_metadata = BlockMetadata(\n num_rows=accessor.num_rows(),\n size_bytes=accessor.size_bytes(),\n schema=accessor.schema(),\n input_files=None)\n return new_block, new_metadata\n" ]
[ [ "numpy.random.RandomState" ] ]
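simple_shuffle in the record above is a two-phase map/reduce: every map task slices its input block into output_num_blocks pieces, and reduce task j concatenates the j-th slice from every map output. The Ray-free sketch below shows just that index arithmetic, with plain lists standing in for blocks (no BlockAccessor, no remote tasks, no random shuffling).

# Local sketch of the two-phase shuffle; lists stand in for blocks.
import math

def local_shuffle(blocks, output_num_blocks):
    map_out = []
    for block in blocks:
        sz = max(1, math.ceil(len(block) / output_num_blocks))
        map_out.append([block[i * sz:(i + 1) * sz]
                        for i in range(output_num_blocks)])
    # reduce j concatenates the j-th slice of every map output
    return [sum((m[j] for m in map_out), [])
            for j in range(output_num_blocks)]

print(local_shuffle([[1, 2, 3, 4], [5, 6, 7]], output_num_blocks=2))
# [[1, 2, 5, 6], [3, 4, 7]]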
ryandesign/yap
[ "9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214" ]
[ "packages/python/yap_kernel/yap_ipython/core/tests/test_completer.py" ]
[ "# encoding: utf-8\n\"\"\"Tests for the yap_ipython tab-completion machinery.\"\"\"\n\n# Copyright (c) yap_ipython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nimport sys\nimport textwrap\nimport unittest\n\nfrom contextlib import contextmanager\n\nimport nose.tools as nt\n\nfrom traitlets.config.loader import Config\nfrom yap_ipython import get_ipython\nfrom yap_ipython.core import completer\nfrom yap_ipython.external.decorators import knownfailureif\nfrom yap_ipython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory\nfrom yap_ipython.utils.generics import complete_object\nfrom yap_ipython.testing import decorators as dec\n\nfrom yap_ipython.core.completer import (\n Completion, provisionalcompleter, match_dict_keys, _deduplicate_completions)\nfrom nose.tools import assert_in, assert_not_in\n\n#-----------------------------------------------------------------------------\n# Test functions\n#-----------------------------------------------------------------------------\n\n@contextmanager\ndef greedy_completion():\n ip = get_ipython()\n greedy_original = ip.Completer.greedy\n try:\n ip.Completer.greedy = True\n yield\n finally:\n ip.Completer.greedy = greedy_original\n\ndef test_protect_filename():\n if sys.platform == 'win32':\n pairs = [('abc','abc'),\n (' abc','\" abc\"'),\n ('a bc','\"a bc\"'),\n ('a bc','\"a bc\"'),\n (' bc','\" bc\"'),\n ]\n else:\n pairs = [('abc','abc'),\n (' abc',r'\\ abc'),\n ('a bc',r'a\\ bc'),\n ('a bc',r'a\\ \\ bc'),\n (' bc',r'\\ \\ bc'),\n # On posix, we also protect parens and other special characters.\n ('a(bc',r'a\\(bc'),\n ('a)bc',r'a\\)bc'),\n ('a( )bc',r'a\\(\\ \\)bc'),\n ('a[1]bc', r'a\\[1\\]bc'),\n ('a{1}bc', r'a\\{1\\}bc'),\n ('a#bc', r'a\\#bc'),\n ('a?bc', r'a\\?bc'),\n ('a=bc', r'a\\=bc'),\n ('a\\\\bc', r'a\\\\bc'),\n ('a|bc', r'a\\|bc'),\n ('a;bc', r'a\\;bc'),\n ('a:bc', r'a\\:bc'),\n (\"a'bc\", r\"a\\'bc\"),\n ('a*bc', r'a\\*bc'),\n ('a\"bc', r'a\\\"bc'),\n ('a^bc', r'a\\^bc'),\n ('a&bc', r'a\\&bc'),\n ]\n # run the actual tests\n for s1, s2 in pairs:\n s1p = completer.protect_filename(s1)\n nt.assert_equal(s1p, s2)\n\n\ndef check_line_split(splitter, test_specs):\n for part1, part2, split in test_specs:\n cursor_pos = len(part1)\n line = part1+part2\n out = splitter.split_line(line, cursor_pos)\n nt.assert_equal(out, split)\n\n\ndef test_line_split():\n \"\"\"Basic line splitter test with default specs.\"\"\"\n sp = completer.CompletionSplitter()\n # The format of the test specs is: part1, part2, expected answer. Parts 1\n # and 2 are joined into the 'line' sent to the splitter, as if the cursor\n # was at the end of part1. 
So an empty part2 represents someone hitting\n # tab at the end of the line, the most common case.\n t = [('run some/scrip', '', 'some/scrip'),\n ('run scripts/er', 'ror.py foo', 'scripts/er'),\n ('echo $HOM', '', 'HOM'),\n ('print sys.pa', '', 'sys.pa'),\n ('print(sys.pa', '', 'sys.pa'),\n (\"execfile('scripts/er\", '', 'scripts/er'),\n ('a[x.', '', 'x.'),\n ('a[x.', 'y', 'x.'),\n ('cd \"some_file/', '', 'some_file/'),\n ]\n check_line_split(sp, t)\n # Ensure splitting works OK with unicode by re-running the tests with\n # all inputs turned into unicode\n check_line_split(sp, [ map(str, p) for p in t] )\n\n\ndef test_custom_completion_error():\n \"\"\"Test that errors from custom attribute completers are silenced.\"\"\"\n ip = get_ipython()\n class A(object): pass\n ip.user_ns['a'] = A()\n \n @complete_object.when_type(A)\n def complete_A(a, existing_completions):\n raise TypeError(\"this should be silenced\")\n \n ip.complete(\"a.\")\n\n\ndef test_unicode_completions():\n ip = get_ipython()\n # Some strings that trigger different types of completion. Check them both\n # in str and unicode forms\n s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']\n for t in s + list(map(str, s)):\n # We don't need to check exact completion values (they may change\n # depending on the state of the namespace, but at least no exceptions\n # should be thrown and the return value should be a pair of text, list\n # values.\n text, matches = ip.complete(t)\n nt.assert_true(isinstance(text, str))\n nt.assert_true(isinstance(matches, list))\n\ndef test_latex_completions():\n from yap_ipython.core.latex_symbols import latex_symbols\n import random\n ip = get_ipython()\n # Test some random unicode symbols\n keys = random.sample(latex_symbols.keys(), 10)\n for k in keys:\n text, matches = ip.complete(k)\n nt.assert_equal(len(matches),1)\n nt.assert_equal(text, k)\n nt.assert_equal(matches[0], latex_symbols[k])\n # Test a more complex line\n text, matches = ip.complete(u'print(\\\\alpha')\n nt.assert_equal(text, u'\\\\alpha')\n nt.assert_equal(matches[0], latex_symbols['\\\\alpha'])\n # Test multiple matching latex symbols\n text, matches = ip.complete(u'\\\\al')\n nt.assert_in('\\\\alpha', matches)\n nt.assert_in('\\\\aleph', matches)\n\n\n\n\ndef test_back_latex_completion():\n ip = get_ipython()\n\n # do not return more than 1 matches fro \\beta, only the latex one.\n name, matches = ip.complete('\\\\β')\n nt.assert_equal(len(matches), 1)\n nt.assert_equal(matches[0], '\\\\beta')\n\ndef test_back_unicode_completion():\n ip = get_ipython()\n \n name, matches = ip.complete('\\\\Ⅴ')\n nt.assert_equal(len(matches), 1)\n nt.assert_equal(matches[0], '\\\\ROMAN NUMERAL FIVE')\n\n\ndef test_forward_unicode_completion():\n ip = get_ipython()\n \n name, matches = ip.complete('\\\\ROMAN NUMERAL FIVE')\n nt.assert_equal(len(matches), 1)\n nt.assert_equal(matches[0], 'Ⅴ')\n\[email protected](sys.platform == 'win32', 'Fails if there is a C:\\\\j... 
path')\ndef test_no_ascii_back_completion():\n ip = get_ipython()\n with TemporaryWorkingDirectory(): # Avoid any filename completions\n # single ascii letter that don't have yet completions\n for letter in 'jJ' :\n name, matches = ip.complete('\\\\'+letter)\n nt.assert_equal(matches, [])\n\n\n\n\nclass CompletionSplitterTestCase(unittest.TestCase):\n def setUp(self):\n self.sp = completer.CompletionSplitter()\n\n def test_delim_setting(self):\n self.sp.delims = ' '\n nt.assert_equal(self.sp.delims, ' ')\n nt.assert_equal(self.sp._delim_expr, '[\\ ]')\n\n def test_spaces(self):\n \"\"\"Test with only spaces as split chars.\"\"\"\n self.sp.delims = ' '\n t = [('foo', '', 'foo'),\n ('run foo', '', 'foo'),\n ('run foo', 'bar', 'foo'),\n ]\n check_line_split(self.sp, t)\n\n\ndef test_has_open_quotes1():\n for s in [\"'\", \"'''\", \"'hi' '\"]:\n nt.assert_equal(completer.has_open_quotes(s), \"'\")\n\n\ndef test_has_open_quotes2():\n for s in ['\"', '\"\"\"', '\"hi\" \"']:\n nt.assert_equal(completer.has_open_quotes(s), '\"')\n\n\ndef test_has_open_quotes3():\n for s in [\"''\", \"''' '''\", \"'hi' 'ipython'\"]:\n nt.assert_false(completer.has_open_quotes(s))\n\n\ndef test_has_open_quotes4():\n for s in ['\"\"', '\"\"\" \"\"\"', '\"hi\" \"ipython\"']:\n nt.assert_false(completer.has_open_quotes(s))\n\n\n@knownfailureif(sys.platform == 'win32', \"abspath completions fail on Windows\")\ndef test_abspath_file_completions():\n ip = get_ipython()\n with TemporaryDirectory() as tmpdir:\n prefix = os.path.join(tmpdir, 'foo')\n suffixes = ['1', '2']\n names = [prefix+s for s in suffixes]\n for n in names:\n open(n, 'w').close()\n\n # Check simple completion\n c = ip.complete(prefix)[1]\n nt.assert_equal(c, names)\n\n # Now check with a function call\n cmd = 'a = f(\"%s' % prefix\n c = ip.complete(prefix, cmd)[1]\n comp = [prefix+s for s in suffixes]\n nt.assert_equal(c, comp)\n\n\ndef test_local_file_completions():\n ip = get_ipython()\n with TemporaryWorkingDirectory():\n prefix = './foo'\n suffixes = ['1', '2']\n names = [prefix+s for s in suffixes]\n for n in names:\n open(n, 'w').close()\n\n # Check simple completion\n c = ip.complete(prefix)[1]\n nt.assert_equal(c, names)\n\n # Now check with a function call\n cmd = 'a = f(\"%s' % prefix\n c = ip.complete(prefix, cmd)[1]\n comp = set(prefix+s for s in suffixes)\n nt.assert_true(comp.issubset(set(c)))\n\n\ndef test_quoted_file_completions():\n ip = get_ipython()\n with TemporaryWorkingDirectory():\n name = \"foo'bar\"\n open(name, 'w').close()\n\n # Don't escape Windows\n escaped = name if sys.platform == \"win32\" else \"foo\\\\'bar\"\n\n # Single quote matches embedded single quote\n text = \"open('foo\"\n c = ip.Completer._complete(cursor_line=0,\n cursor_pos=len(text),\n full_text=text)[1]\n nt.assert_equal(c, [escaped])\n\n # Double quote requires no escape\n text = 'open(\"foo'\n c = ip.Completer._complete(cursor_line=0,\n cursor_pos=len(text),\n full_text=text)[1]\n nt.assert_equal(c, [name])\n\n # No quote requires an escape\n text = '%ls foo'\n c = ip.Completer._complete(cursor_line=0,\n cursor_pos=len(text),\n full_text=text)[1]\n nt.assert_equal(c, [escaped])\n\n\ndef test_jedi():\n \"\"\"\n A couple of issue we had with Jedi\n \"\"\"\n ip = get_ipython()\n\n def _test_complete(reason, s, comp, start=None, end=None):\n l = len(s)\n start = start if start is not None else l\n end = end if end is not None else l\n with provisionalcompleter():\n completions = set(ip.Completer.completions(s, l))\n assert_in(Completion(start, end, comp), 
completions, reason)\n\n def _test_not_complete(reason, s, comp):\n l = len(s)\n with provisionalcompleter():\n completions = set(ip.Completer.completions(s, l))\n assert_not_in(Completion(l, l, comp), completions, reason)\n\n import jedi\n jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])\n if jedi_version > (0, 10):\n yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'\n yield _test_complete, 'can infer first argument', 'a=(1,\"foo\");a[0].', 'real'\n yield _test_complete, 'can infer second argument', 'a=(1,\"foo\");a[1].', 'capitalize'\n yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2\n\n yield _test_not_complete, 'does not mix types', 'a=(1,\"foo\");a[0].', 'capitalize'\n\ndef test_completion_have_signature():\n \"\"\"\n Lets make sure jedi is capable of pulling out the signature of the function we are completing.\n \"\"\"\n ip = get_ipython()\n with provisionalcompleter():\n completions = ip.Completer.completions('ope', 3)\n c = next(completions) # should be `open`\n assert 'file' in c.signature, \"Signature of function was not found by completer\"\n assert 'encoding' in c.signature, \"Signature of function was not found by completer\"\n\n\ndef test_deduplicate_completions():\n \"\"\"\n Test that completions are correctly deduplicated (even if ranges are not the same)\n \"\"\"\n ip = get_ipython()\n ip.ex(textwrap.dedent('''\n class Z:\n zoo = 1\n '''))\n with provisionalcompleter():\n l = list(_deduplicate_completions('Z.z', ip.Completer.completions('Z.z', 3)))\n\n assert len(l) == 1, 'Completions (Z.z<tab>) correctly deduplicate: %s ' % l\n assert l[0].text == 'zoo' # and not `it.accumulate`\n\n\ndef test_greedy_completions():\n \"\"\"\n Test the capability of the Greedy completer. \n\n Most of the test here do not really show off the greedy completer, for proof\n each of the text bellow now pass with Jedi. The greedy completer is capable of more. 
\n\n See the :any:`test_dict_key_completion_contexts`\n\n \"\"\"\n ip = get_ipython()\n ip.ex('a=list(range(5))')\n _,c = ip.complete('.',line='a[0].')\n nt.assert_false('.real' in c,\n \"Shouldn't have completed on a[0]: %s\"%c)\n with greedy_completion(), provisionalcompleter():\n def _(line, cursor_pos, expect, message, completion):\n _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)\n with provisionalcompleter():\n completions = ip.Completer.completions(line, cursor_pos)\n nt.assert_in(expect, c, message%c)\n nt.assert_in(completion, completions)\n\n yield _, 'a[0].', 5, 'a[0].real', \"Should have completed on a[0].: %s\", Completion(5,5, 'real')\n yield _, 'a[0].r', 6, 'a[0].real', \"Should have completed on a[0].r: %s\", Completion(5,6, 'real')\n\n if sys.version_info > (3, 4):\n yield _, 'a[0].from_', 10, 'a[0].from_bytes', \"Should have completed on a[0].from_: %s\", Completion(5, 10, 'from_bytes')\n\n\ndef test_omit__names():\n # also happens to test IPCompleter as a configurable\n ip = get_ipython()\n ip._hidden_attr = 1\n ip._x = {}\n c = ip.Completer\n ip.ex('ip=get_ipython()')\n cfg = Config()\n cfg.IPCompleter.omit__names = 0\n c.update_config(cfg)\n with provisionalcompleter():\n s,matches = c.complete('ip.')\n completions = set(c.completions('ip.', 3))\n\n nt.assert_in('ip.__str__', matches)\n nt.assert_in(Completion(3, 3, '__str__'), completions)\n \n nt.assert_in('ip._hidden_attr', matches)\n nt.assert_in(Completion(3,3, \"_hidden_attr\"), completions)\n\n\n cfg = Config()\n cfg.IPCompleter.omit__names = 1\n c.update_config(cfg)\n with provisionalcompleter():\n s,matches = c.complete('ip.')\n completions = set(c.completions('ip.', 3))\n\n nt.assert_not_in('ip.__str__', matches)\n nt.assert_not_in(Completion(3,3,'__str__'), completions)\n\n # nt.assert_in('ip._hidden_attr', matches)\n nt.assert_in(Completion(3,3, \"_hidden_attr\"), completions)\n\n cfg = Config()\n cfg.IPCompleter.omit__names = 2\n c.update_config(cfg)\n with provisionalcompleter():\n s,matches = c.complete('ip.')\n completions = set(c.completions('ip.', 3))\n\n nt.assert_not_in('ip.__str__', matches)\n nt.assert_not_in(Completion(3,3,'__str__'), completions)\n\n nt.assert_not_in('ip._hidden_attr', matches)\n nt.assert_not_in(Completion(3,3, \"_hidden_attr\"), completions)\n\n with provisionalcompleter():\n s,matches = c.complete('ip._x.')\n completions = set(c.completions('ip._x.', 6))\n\n nt.assert_in('ip._x.keys', matches)\n nt.assert_in(Completion(6,6, \"keys\"), completions)\n\n del ip._hidden_attr\n del ip._x\n\n\ndef test_limit_to__all__False_ok():\n \"\"\"\n Limit to all is deprecated, once we remove it this test can go away. 
\n \"\"\"\n ip = get_ipython()\n c = ip.Completer\n ip.ex('class D: x=24')\n ip.ex('d=D()')\n cfg = Config()\n cfg.IPCompleter.limit_to__all__ = False\n c.update_config(cfg)\n s, matches = c.complete('d.')\n nt.assert_in('d.x', matches)\n\n\ndef test_get__all__entries_ok():\n class A(object):\n __all__ = ['x', 1]\n words = completer.get__all__entries(A())\n nt.assert_equal(words, ['x'])\n\n\ndef test_get__all__entries_no__all__ok():\n class A(object):\n pass\n words = completer.get__all__entries(A())\n nt.assert_equal(words, [])\n\n\ndef test_func_kw_completions():\n ip = get_ipython()\n c = ip.Completer\n ip.ex('def myfunc(a=1,b=2): return a+b')\n s, matches = c.complete(None, 'myfunc(1,b')\n nt.assert_in('b=', matches)\n # Simulate completing with cursor right after b (pos==10):\n s, matches = c.complete(None, 'myfunc(1,b)', 10)\n nt.assert_in('b=', matches)\n s, matches = c.complete(None, 'myfunc(a=\"escaped\\\\\")string\",b')\n nt.assert_in('b=', matches)\n #builtin function\n s, matches = c.complete(None, 'min(k, k')\n nt.assert_in('key=', matches)\n\n\ndef test_default_arguments_from_docstring():\n ip = get_ipython()\n c = ip.Completer\n kwd = c._default_arguments_from_docstring(\n 'min(iterable[, key=func]) -> value')\n nt.assert_equal(kwd, ['key'])\n #with cython type etc\n kwd = c._default_arguments_from_docstring(\n 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\\n')\n nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])\n #white spaces\n kwd = c._default_arguments_from_docstring(\n '\\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\\n')\n nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])\n\ndef test_line_magics():\n ip = get_ipython()\n c = ip.Completer\n s, matches = c.complete(None, 'lsmag')\n nt.assert_in('%lsmagic', matches)\n s, matches = c.complete(None, '%lsmag')\n nt.assert_in('%lsmagic', matches)\n\n\ndef test_cell_magics():\n from yap_ipython.core.magic import register_cell_magic\n\n @register_cell_magic\n def _foo_cellm(line, cell):\n pass\n \n ip = get_ipython()\n c = ip.Completer\n\n s, matches = c.complete(None, '_foo_ce')\n nt.assert_in('%%_foo_cellm', matches)\n s, matches = c.complete(None, '%%_foo_ce')\n nt.assert_in('%%_foo_cellm', matches)\n\n\ndef test_line_cell_magics():\n from yap_ipython.core.magic import register_line_cell_magic\n\n @register_line_cell_magic\n def _bar_cellm(line, cell):\n pass\n \n ip = get_ipython()\n c = ip.Completer\n\n # The policy here is trickier, see comments in completion code. 
The\n # returned values depend on whether the user passes %% or not explicitly,\n # and this will show a difference if the same name is both a line and cell\n # magic.\n s, matches = c.complete(None, '_bar_ce')\n nt.assert_in('%_bar_cellm', matches)\n nt.assert_in('%%_bar_cellm', matches)\n s, matches = c.complete(None, '%_bar_ce')\n nt.assert_in('%_bar_cellm', matches)\n nt.assert_in('%%_bar_cellm', matches)\n s, matches = c.complete(None, '%%_bar_ce')\n nt.assert_not_in('%_bar_cellm', matches)\n nt.assert_in('%%_bar_cellm', matches)\n\n\ndef test_magic_completion_order():\n ip = get_ipython()\n c = ip.Completer\n\n # Test ordering of line and cell magics.\n text, matches = c.complete(\"timeit\")\n nt.assert_equal(matches, [\"%timeit\", \"%%timeit\"])\n\n\ndef test_magic_completion_shadowing():\n ip = get_ipython()\n c = ip.Completer\n\n # Before importing matplotlib, %matplotlib magic should be the only option.\n text, matches = c.complete(\"mat\")\n nt.assert_equal(matches, [\"%matplotlib\"])\n\n # The newly introduced name should shadow the magic.\n ip.run_cell(\"matplotlib = 1\")\n text, matches = c.complete(\"mat\")\n nt.assert_equal(matches, [\"matplotlib\"])\n\n # After removing matplotlib from namespace, the magic should again be\n # the only option.\n del ip.user_ns[\"matplotlib\"]\n text, matches = c.complete(\"mat\")\n nt.assert_equal(matches, [\"%matplotlib\"])\n\ndef test_magic_completion_shadowing_explicit():\n \"\"\"\n If the user try to complete a shadowed magic, and explicit % start should\n still return the completions.\n \"\"\"\n ip = get_ipython()\n c = ip.Completer\n\n # Before importing matplotlib, %matplotlib magic should be the only option.\n text, matches = c.complete(\"%mat\")\n nt.assert_equal(matches, [\"%matplotlib\"])\n\n ip.run_cell(\"matplotlib = 1\")\n\n # After removing matplotlib from namespace, the magic should still be\n # the only option.\n text, matches = c.complete(\"%mat\")\n nt.assert_equal(matches, [\"%matplotlib\"])\n\ndef test_magic_config():\n ip = get_ipython()\n c = ip.Completer\n\n s, matches = c.complete(None, 'conf')\n nt.assert_in('%config', matches)\n s, matches = c.complete(None, 'conf')\n nt.assert_not_in('AliasManager', matches)\n s, matches = c.complete(None, 'config ')\n nt.assert_in('AliasManager', matches)\n s, matches = c.complete(None, '%config ')\n nt.assert_in('AliasManager', matches)\n s, matches = c.complete(None, 'config Ali')\n nt.assert_list_equal(['AliasManager'], matches)\n s, matches = c.complete(None, '%config Ali')\n nt.assert_list_equal(['AliasManager'], matches)\n s, matches = c.complete(None, 'config AliasManager')\n nt.assert_list_equal(['AliasManager'], matches)\n s, matches = c.complete(None, '%config AliasManager')\n nt.assert_list_equal(['AliasManager'], matches)\n s, matches = c.complete(None, 'config AliasManager.')\n nt.assert_in('AliasManager.default_aliases', matches)\n s, matches = c.complete(None, '%config AliasManager.')\n nt.assert_in('AliasManager.default_aliases', matches)\n s, matches = c.complete(None, 'config AliasManager.de')\n nt.assert_list_equal(['AliasManager.default_aliases'], matches)\n s, matches = c.complete(None, 'config AliasManager.de')\n nt.assert_list_equal(['AliasManager.default_aliases'], matches)\n\n\ndef test_magic_color():\n ip = get_ipython()\n c = ip.Completer\n\n s, matches = c.complete(None, 'colo')\n nt.assert_in('%colors', matches)\n s, matches = c.complete(None, 'colo')\n nt.assert_not_in('NoColor', matches)\n s, matches = c.complete(None, '%colors') # No trailing 
space\n nt.assert_not_in('NoColor', matches)\n s, matches = c.complete(None, 'colors ')\n nt.assert_in('NoColor', matches)\n s, matches = c.complete(None, '%colors ')\n nt.assert_in('NoColor', matches)\n s, matches = c.complete(None, 'colors NoCo')\n nt.assert_list_equal(['NoColor'], matches)\n s, matches = c.complete(None, '%colors NoCo')\n nt.assert_list_equal(['NoColor'], matches)\n\n\ndef test_match_dict_keys():\n \"\"\"\n Test that match_dict_keys works on a couple of use case does return what\n expected, and does not crash\n \"\"\"\n delims = ' \\t\\n`!@#$^&*()=+[{]}\\\\|;:\\'\",<>?'\n\n\n keys = ['foo', b'far']\n assert match_dict_keys(keys, \"b'\", delims=delims) == (\"'\", 2 ,['far'])\n assert match_dict_keys(keys, \"b'f\", delims=delims) == (\"'\", 2 ,['far'])\n assert match_dict_keys(keys, 'b\"', delims=delims) == ('\"', 2 ,['far'])\n assert match_dict_keys(keys, 'b\"f', delims=delims) == ('\"', 2 ,['far'])\n\n assert match_dict_keys(keys, \"'\", delims=delims) == (\"'\", 1 ,['foo'])\n assert match_dict_keys(keys, \"'f\", delims=delims) == (\"'\", 1 ,['foo'])\n assert match_dict_keys(keys, '\"', delims=delims) == ('\"', 1 ,['foo'])\n assert match_dict_keys(keys, '\"f', delims=delims) == ('\"', 1 ,['foo'])\n \n match_dict_keys\n\n\ndef test_dict_key_completion_string():\n \"\"\"Test dictionary key completion for string keys\"\"\"\n ip = get_ipython()\n complete = ip.Completer.complete\n\n ip.user_ns['d'] = {'abc': None}\n\n # check completion at different stages\n _, matches = complete(line_buffer=\"d[\")\n nt.assert_in(\"'abc'\", matches)\n nt.assert_not_in(\"'abc']\", matches)\n\n _, matches = complete(line_buffer=\"d['\")\n nt.assert_in(\"abc\", matches)\n nt.assert_not_in(\"abc']\", matches)\n\n _, matches = complete(line_buffer=\"d['a\")\n nt.assert_in(\"abc\", matches)\n nt.assert_not_in(\"abc']\", matches)\n\n # check use of different quoting\n _, matches = complete(line_buffer=\"d[\\\"\")\n nt.assert_in(\"abc\", matches)\n nt.assert_not_in('abc\\\"]', matches)\n\n _, matches = complete(line_buffer=\"d[\\\"a\")\n nt.assert_in(\"abc\", matches)\n nt.assert_not_in('abc\\\"]', matches)\n\n # check sensitivity to following context\n _, matches = complete(line_buffer=\"d[]\", cursor_pos=2)\n nt.assert_in(\"'abc'\", matches)\n\n _, matches = complete(line_buffer=\"d['']\", cursor_pos=3)\n nt.assert_in(\"abc\", matches)\n nt.assert_not_in(\"abc'\", matches)\n nt.assert_not_in(\"abc']\", matches)\n\n # check multiple solutions are correctly returned and that noise is not\n ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,\n 5: None}\n\n _, matches = complete(line_buffer=\"d['a\")\n nt.assert_in(\"abc\", matches)\n nt.assert_in(\"abd\", matches)\n nt.assert_not_in(\"bad\", matches)\n assert not any(m.endswith((']', '\"', \"'\")) for m in matches), matches\n\n # check escaping and whitespace\n ip.user_ns['d'] = {'a\\nb': None, 'a\\'b': None, 'a\"b': None, 'a word': None}\n _, matches = complete(line_buffer=\"d['a\")\n nt.assert_in(\"a\\\\nb\", matches)\n nt.assert_in(\"a\\\\'b\", matches)\n nt.assert_in(\"a\\\"b\", matches)\n nt.assert_in(\"a word\", matches)\n assert not any(m.endswith((']', '\"', \"'\")) for m in matches), matches\n\n # - can complete on non-initial word of the string\n _, matches = complete(line_buffer=\"d['a w\")\n nt.assert_in(\"word\", matches)\n\n # - understands quote escaping\n _, matches = complete(line_buffer=\"d['a\\\\'\")\n nt.assert_in(\"b\", matches)\n\n # - default quoting should work like repr\n _, matches = 
complete(line_buffer=\"d[\")\n nt.assert_in(\"\\\"a'b\\\"\", matches)\n\n # - when opening quote with \", possible to match with unescaped apostrophe\n _, matches = complete(line_buffer=\"d[\\\"a'\")\n nt.assert_in(\"b\", matches)\n\n # need to not split at delims that readline won't split at\n if '-' not in ip.Completer.splitter.delims:\n ip.user_ns['d'] = {'before-after': None}\n _, matches = complete(line_buffer=\"d['before-af\")\n nt.assert_in('before-after', matches)\n\ndef test_dict_key_completion_contexts():\n \"\"\"Test expression contexts in which dict key completion occurs\"\"\"\n ip = get_ipython()\n complete = ip.Completer.complete\n d = {'abc': None}\n ip.user_ns['d'] = d\n\n class C:\n data = d\n ip.user_ns['C'] = C\n ip.user_ns['get'] = lambda: d\n\n def assert_no_completion(**kwargs):\n _, matches = complete(**kwargs)\n nt.assert_not_in('abc', matches)\n nt.assert_not_in('abc\\'', matches)\n nt.assert_not_in('abc\\']', matches)\n nt.assert_not_in('\\'abc\\'', matches)\n nt.assert_not_in('\\'abc\\']', matches)\n\n def assert_completion(**kwargs):\n _, matches = complete(**kwargs)\n nt.assert_in(\"'abc'\", matches)\n nt.assert_not_in(\"'abc']\", matches)\n\n # no completion after string closed, even if reopened\n assert_no_completion(line_buffer=\"d['a'\")\n assert_no_completion(line_buffer=\"d[\\\"a\\\"\")\n assert_no_completion(line_buffer=\"d['a' + \")\n assert_no_completion(line_buffer=\"d['a' + '\")\n\n # completion in non-trivial expressions\n assert_completion(line_buffer=\"+ d[\")\n assert_completion(line_buffer=\"(d[\")\n assert_completion(line_buffer=\"C.data[\")\n\n # greedy flag\n def assert_completion(**kwargs):\n _, matches = complete(**kwargs)\n nt.assert_in(\"get()['abc']\", matches)\n \n assert_no_completion(line_buffer=\"get()[\")\n with greedy_completion():\n assert_completion(line_buffer=\"get()[\")\n assert_completion(line_buffer=\"get()['\")\n assert_completion(line_buffer=\"get()['a\")\n assert_completion(line_buffer=\"get()['ab\")\n assert_completion(line_buffer=\"get()['abc\")\n\n\n\ndef test_dict_key_completion_bytes():\n \"\"\"Test handling of bytes in dict key completion\"\"\"\n ip = get_ipython()\n complete = ip.Completer.complete\n\n ip.user_ns['d'] = {'abc': None, b'abd': None}\n\n _, matches = complete(line_buffer=\"d[\")\n nt.assert_in(\"'abc'\", matches)\n nt.assert_in(\"b'abd'\", matches)\n\n if False: # not currently implemented\n _, matches = complete(line_buffer=\"d[b\")\n nt.assert_in(\"b'abd'\", matches)\n nt.assert_not_in(\"b'abc'\", matches)\n\n _, matches = complete(line_buffer=\"d[b'\")\n nt.assert_in(\"abd\", matches)\n nt.assert_not_in(\"abc\", matches)\n\n _, matches = complete(line_buffer=\"d[B'\")\n nt.assert_in(\"abd\", matches)\n nt.assert_not_in(\"abc\", matches)\n\n _, matches = complete(line_buffer=\"d['\")\n nt.assert_in(\"abc\", matches)\n nt.assert_not_in(\"abd\", matches)\n\n\ndef test_dict_key_completion_unicode_py3():\n \"\"\"Test handling of unicode in dict key completion\"\"\"\n ip = get_ipython()\n complete = ip.Completer.complete\n\n ip.user_ns['d'] = {u'a\\u05d0': None}\n\n # query using escape\n if sys.platform != 'win32':\n # Known failure on Windows\n _, matches = complete(line_buffer=\"d['a\\\\u05d0\")\n nt.assert_in(\"u05d0\", matches) # tokenized after \\\\\n\n # query using character\n _, matches = complete(line_buffer=\"d['a\\u05d0\")\n nt.assert_in(u\"a\\u05d0\", matches)\n \n with greedy_completion():\n # query using escape\n _, matches = complete(line_buffer=\"d['a\\\\u05d0\")\n 
nt.assert_in(\"d['a\\\\u05d0']\", matches) # tokenized after \\\\\n\n # query using character\n _, matches = complete(line_buffer=\"d['a\\u05d0\")\n nt.assert_in(u\"d['a\\u05d0']\", matches)\n \n\n\[email protected]_without('numpy')\ndef test_struct_array_key_completion():\n \"\"\"Test dict key completion applies to numpy struct arrays\"\"\"\n import numpy\n ip = get_ipython()\n complete = ip.Completer.complete\n ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])\n _, matches = complete(line_buffer=\"d['\")\n nt.assert_in(\"hello\", matches)\n nt.assert_in(\"world\", matches)\n # complete on the numpy struct itself\n dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),\n ('my_data', '>f4', 5)])\n x = numpy.zeros(2, dtype=dt)\n ip.user_ns['d'] = x[1]\n _, matches = complete(line_buffer=\"d['\")\n nt.assert_in(\"my_head\", matches)\n nt.assert_in(\"my_data\", matches)\n # complete on a nested level\n with greedy_completion():\n ip.user_ns['d'] = numpy.zeros(2, dtype=dt)\n _, matches = complete(line_buffer=\"d[1]['my_head']['\")\n nt.assert_true(any([\"my_dt\" in m for m in matches]))\n nt.assert_true(any([\"my_df\" in m for m in matches]))\n\n\[email protected]_without('pandas')\ndef test_dataframe_key_completion():\n \"\"\"Test dict key completion applies to pandas DataFrames\"\"\"\n import pandas\n ip = get_ipython()\n complete = ip.Completer.complete\n ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})\n _, matches = complete(line_buffer=\"d['\")\n nt.assert_in(\"hello\", matches)\n nt.assert_in(\"world\", matches)\n\n\ndef test_dict_key_completion_invalids():\n \"\"\"Smoke test cases dict key completion can't handle\"\"\"\n ip = get_ipython()\n complete = ip.Completer.complete\n\n ip.user_ns['no_getitem'] = None\n ip.user_ns['no_keys'] = []\n ip.user_ns['cant_call_keys'] = dict\n ip.user_ns['empty'] = {}\n ip.user_ns['d'] = {'abc': 5}\n\n _, matches = complete(line_buffer=\"no_getitem['\")\n _, matches = complete(line_buffer=\"no_keys['\")\n _, matches = complete(line_buffer=\"cant_call_keys['\")\n _, matches = complete(line_buffer=\"empty['\")\n _, matches = complete(line_buffer=\"name_error['\")\n _, matches = complete(line_buffer=\"d['\\\\\") # incomplete escape\n\nclass KeyCompletable(object):\n def __init__(self, things=()):\n self.things = things\n\n def _ipython_key_completions_(self):\n return list(self.things)\n\ndef test_object_key_completion():\n ip = get_ipython()\n ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])\n\n _, matches = ip.Completer.complete(line_buffer=\"key_completable['qw\")\n nt.assert_in('qwerty', matches)\n nt.assert_in('qwick', matches)\n\n\nclass NamedInstanceMetaclass(type):\n def __getitem__(cls, item):\n return cls.get_instance(item)\n\nclass NamedInstanceClass(object, metaclass=NamedInstanceMetaclass):\n def __init__(self, name):\n if not hasattr(self.__class__, 'instances'):\n self.__class__.instances = {}\n self.__class__.instances[name] = self\n\n @classmethod\n def _ipython_key_completions_(cls):\n return cls.instances.keys()\n\n @classmethod\n def get_instance(cls, name):\n return cls.instances[name]\n\ndef test_class_key_completion():\n ip = get_ipython()\n NamedInstanceClass('qwerty')\n NamedInstanceClass('qwick')\n ip.user_ns['named_instance_class'] = NamedInstanceClass\n\n _, matches = ip.Completer.complete(line_buffer=\"named_instance_class['qw\")\n nt.assert_in('qwerty', matches)\n nt.assert_in('qwick', matches)\n\ndef test_tryimport():\n \"\"\"\n Test that try-import 
don't crash on trailing dot, and import modules before\n \"\"\"\n from yap_ipython.core.completerlib import try_import\n assert(try_import(\"yap_ipython.\"))\n\n\ndef test_aimport_module_completer():\n ip = get_ipython()\n _, matches = ip.complete('i', '%aimport i')\n nt.assert_in('io', matches)\n nt.assert_not_in('int', matches)\n\ndef test_nested_import_module_completer():\n ip = get_ipython()\n _, matches = ip.complete(None, 'import yap_ipython.co', 17)\n nt.assert_in('yap_ipython.core', matches)\n nt.assert_not_in('import yap_ipython.core', matches)\n nt.assert_not_in('yap_ipython.display', matches)\n\ndef test_import_module_completer():\n ip = get_ipython()\n _, matches = ip.complete('i', 'import i')\n nt.assert_in('io', matches)\n nt.assert_not_in('int', matches)\n\ndef test_from_module_completer():\n ip = get_ipython()\n _, matches = ip.complete('B', 'from io import B', 16)\n nt.assert_in('BytesIO', matches)\n nt.assert_not_in('BaseException', matches)\n\ndef test_snake_case_completion():\n ip = get_ipython()\n ip.user_ns['some_three'] = 3\n ip.user_ns['some_four'] = 4\n _, matches = ip.complete(\"s_\", \"print(s_f\")\n nt.assert_in('some_three', matches)\n nt.assert_in('some_four', matches)\n" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.dtype", "numpy.zeros" ] ]
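test_match_dict_keys in the record above pins down how string and bytes dict keys are filtered against a partially typed, possibly b-prefixed quote. The standalone function below is a deliberately simplified stand-in that reproduces the quote detection and prefix filtering those asserts exercise; the real completer.match_dict_keys also handles delimiters and escaping and returns a match offset, which is omitted here.

# Simplified stand-in for the behaviour test_match_dict_keys asserts on;
# not the real yap_ipython/IPython implementation.
def simple_match_dict_keys(keys, prefix):
    quote = '"' if '"' in prefix else "'"
    typed = prefix.lstrip('bB').strip('\'"')
    want_bytes = prefix[:1] in ('b', 'B')
    matches = []
    for key in keys:
        if isinstance(key, bytes) == want_bytes:
            text = key.decode() if isinstance(key, bytes) else key
            if text.startswith(typed):
                matches.append(text)
    return quote, matches

print(simple_match_dict_keys(['foo', b'far'], "b'f"))  # ("'", ['far'])
print(simple_match_dict_keys(['foo', b'far'], '"f'))   # ('"', ['foo'])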
finncatling/lap-risk
[ "afda480bfa42bae0ce25c12129031971e517545f" ]
[ "tests/test_indications.py" ]
[ "from typing import Tuple, List, Set\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom utils import indications\n\n\[email protected]()\ndef columns_and_indication_subset() -> Tuple[List[str], Set[str]]:\n return (\n [\n 'S03PreOpArterialBloodLactate',\n 'S03PreOpLowestAlbumin',\n 'S03Sodium',\n 'S03Pulse',\n 'S03SystolicBloodPressure',\n 'S05Ind_Peritonitis',\n 'S05Ind_Perforation',\n 'S05Ind_AbdominalAbscess',\n ],\n {\n 'S05Ind_Peritonitis',\n 'S05Ind_Perforation',\n 'S05Ind_AbdominalAbscess',\n }\n )\n\n\ndef test_get_indication_variable_names_with_list(columns_and_indication_subset):\n columns, indication_columns = columns_and_indication_subset\n output = indications.get_indication_variable_names(columns)\n assert isinstance(output, list)\n assert set(output) == indication_columns\n\n\ndef test_get_indication_variable_names_with_pandas_columns(\n columns_and_indication_subset\n):\n columns, indication_columns = columns_and_indication_subset\n df = pd.DataFrame(data=np.zeros((2, len(columns))),\n columns=columns)\n output = indications.get_indication_variable_names(df.columns)\n assert isinstance(output, list)\n assert set(output) == indication_columns\n\n\[email protected]()\ndef indications_df_fixture() -> pd.DataFrame:\n return pd.DataFrame({\n 'S05Ind_0': [1, 1, 1, 1, 0, 0, 0, 0],\n 'S05Ind_1': [0, 0, 1, 0, 0, 0, 0, 0],\n 'S05Ind_2': [0, 0, 1, 0, 0, 0, 0, 0],\n 'S05Ind_3': [0, 0, 0, 1, 0, 1, 1, 1]\n })\n\n\[email protected]()\ndef common_single_indications_fixture() -> List[str]:\n return ['S05Ind_3', 'S05Ind_0']\n\n\[email protected]()\ndef ohe_single_indications_df_fixture() -> pd.DataFrame:\n return pd.DataFrame({\n 'S05Ind_3': [0, 0, 0, 0, 0, 1, 1, 1],\n 'S05Ind_0': [1, 1, 0, 0, 0, 0, 0, 0]\n })\n\n\ndef test_get_common_single_indications(\n indications_df_fixture,\n common_single_indications_fixture\n):\n common_single_inds = indications.get_common_single_indications(\n indication_df=indications_df_fixture,\n frequency_threshold=2\n )\n assert common_single_inds == common_single_indications_fixture\n\n\ndef test_ohe_single_indications(\n indications_df_fixture,\n common_single_indications_fixture,\n ohe_single_indications_df_fixture\n):\n ohe_ind_df = indications.ohe_single_indications(\n indication_df=indications_df_fixture,\n indication_subset_names=common_single_indications_fixture\n )\n assert ohe_single_indications_df_fixture.equals(ohe_ind_df)\n\n\ndef test_ohe_to_single_column():\n ohe_df = pd.DataFrame({\n 'a_0': [0, 1, 0, 1],\n 'a_1': [1, 0, 0, 0],\n 'a_2': [0, 0, 1, 0],\n 'disregard': [0.1, 8.4, 3.2, 12.],\n })\n single_a_column_df = indications.ohe_to_single_column(\n df=ohe_df,\n variable_name='a',\n categories=['a_0', 'a_1', 'a_2']\n )\n assert pd.DataFrame({\n 'disregard': [0.1, 8.4, 3.2, 12.],\n 'a': ['a_1', 'a_0', 'a_2', 'a_0']\n }).equals(single_a_column_df)\n\n\nclass TestIndicationNameProcessor:\n @pytest.fixture(scope='class')\n def inp_fixture(\n self\n ) -> indications.IndicationNameProcessor:\n return indications.IndicationNameProcessor(\n multi_category_levels={\n 'Indication': (\n \"S05Ind_SmallBowelObstruction\",\n \"S05Ind_IntestinalObstruction\",\n \"S05Ind_Ischaemia\",\n \"S05Ind_Missing\"\n )},\n max_line_length=12)\n\n def test_names(self, inp_fixture):\n assert inp_fixture.names == [\n \"S05Ind_SmallBowelObstruction\",\n \"S05Ind_IntestinalObstruction\",\n \"S05Ind_Ischaemia\"\n ]\n\n def test_sanitized(self, inp_fixture):\n assert inp_fixture.sanitized == [\n \"Small bowel\\nobstruction\",\n \"Intestinal\\nobstruction\",\n 
\"Ischaemia\"\n ]\n" ]
[ [ "pandas.DataFrame" ] ]
balbok0/SMPyBandits
[ "35e675bde29dafbec68288fcfcd14ef3b0f058b2" ]
[ "SMPyBandits/Policies/DoublingTrickWrapper.py" ]
[ "# -*- coding: utf-8 -*-\nr\"\"\" A policy that acts as a wrapper on another policy `P`, assumed to be *horizon dependent* (has to known :math:`T`), by implementing a \"doubling trick\":\n\n- starts to assume that :math:`T=T_0=1000`, and run the policy :math:`P(T_0)`, from :math:`t=1` to :math:`t=T_0`,\n- if :math:`t > T_0`, then the \"doubling trick\" is performed, by either re-initializing or just changing the parameter `horizon` of the policy P, for instance with :math:`T_2 = 10 \\times T_0`,\n- and keep doing this until :math:`t = T`.\n\n.. note::\n\n This is implemented in a very generic way, with simply a function `next_horizon(horizon)` that gives the next horizon to try when crossing the current guess.\n It can be a simple linear function (`next_horizon(horizon) = horizon + 100`), a geometric growth to have the \"real\" doubling trick (`next_horizon(horizon) = horizon * 10`), or even functions growing exponentially fast (`next_horizon(horizon) = horizon ** 1.1`, `next_horizon(horizon) = horizon ** 1.5`, `next_horizon(horizon) = horizon ** 2`).\n\n.. note::\n\n My guess is that this \"doubling trick\" wrapping policy can only be efficient (for stochastic problems) if:\n\n - the underlying policy `P` is a very efficient horizon-dependent algorithm, e.g., the :class:`Policies.ApproximatedFHGittins`,\n - the growth function `next_horizon` is growing faster than any geometric rate, so that the number of refresh is :math:`o(\\log T)` and not :math:`O(\\log T)`.\n\n.. seealso::\n\n Reference: [[What the Doubling Trick Can or Can't Do for Multi-Armed Bandits, Lilian Besson and Emilie Kaufmann, 2018]](https://hal.inria.fr/hal-01736357), to be presented soon.\n\n.. warning::\n\n Interface: If `FULL_RESTART=False` (default), the underlying algorithm is recreated at every breakpoint,\n instead its attribute `horizon` or `_horizon` is updated. Be sure that this is enough to really\n change the internal value used by the policy. Some policy use T only once to compute others parameters,\n which should be updated as well. A manual implementation of the `__setattr__` method can help.\n\"\"\"\nfrom __future__ import division, print_function # Python 2 compatibility\n\n__author__ = \"Lilian Besson\"\n__version__ = \"0.9\"\n\n\nimport numpy as np\ntry:\n from .BaseWrapperPolicy import BaseWrapperPolicy\n from .UCBH import UCBH\nexcept ImportError:\n from BaseWrapperPolicy import BaseWrapperPolicy\n from UCBH import UCBH\ntry:\n from .usenumba import jit # Import numba.jit or a dummy jit(f)=f\nexcept (ValueError, ImportError, SystemError):\n from usenumba import jit # Import numba.jit or a dummy jit(f)=f\n\n\n#: Default horizon-dependent policy\ndefault_horizonDependent_policy = UCBH\n\n#: Default constant to know what to do when restarting the underlying policy with a new horizon parameter.\n#:\n#: - `True` means that a new policy, initialized from scratch, will be created at every breakpoint.\n#: - `False` means that the same policy object is used but just its attribute `horizon` is updated (default).\nFULL_RESTART = True\nFULL_RESTART = False\n\n\n\n#: Default horizon, used for the first step.\nDEFAULT_FIRST_HORIZON = 200\n\n\n#: Default stepsize for the arithmetic horizon progression.\nARITHMETIC_STEP = 10 * DEFAULT_FIRST_HORIZON\nARITHMETIC_STEP = 1 * DEFAULT_FIRST_HORIZON\n\n\n@jit\ndef next_horizon__arithmetic(i, horizon):\n r\"\"\" The arithmetic horizon progression function:\n\n .. 
math::\n\n T &\\mapsto T + 100,\\\\\n T_i &:= T_0 + 100 \\times i.\n \"\"\"\n return horizon + ARITHMETIC_STEP\n\nnext_horizon__arithmetic.__latex_name__ = \"arithm\"\nnext_horizon__arithmetic.__latex_name__ = r\"$T_i = {} + {} \\times i$\".format(DEFAULT_FIRST_HORIZON, ARITHMETIC_STEP)\n\n\n#: Default multiplicative constant for the geometric horizon progression.\nGEOMETRIC_STEP = 2\n\n\n@jit\ndef next_horizon__geometric(i, horizon):\n r\"\"\" The geometric horizon progression function:\n\n .. math::\n\n T &\\mapsto T \\times 2,\\\\\n T_i &:= T_0 2^i.\n \"\"\"\n return horizon * GEOMETRIC_STEP\n\nnext_horizon__geometric.__latex_name__ = \"geom\"\nnext_horizon__geometric.__latex_name__ = r\"$T_i = {} \\times {}^i$\".format(DEFAULT_FIRST_HORIZON, GEOMETRIC_STEP)\n\n\n#: Default exponential constant for the exponential horizon progression.\nEXPONENTIAL_STEP = 1.5\n\n\n@jit\ndef next_horizon__exponential(i, horizon):\n r\"\"\" The exponential horizon progression function:\n\n .. math::\n\n T &\\mapsto \\left\\lfloor T^{1.5} \\right\\rfloor,\\\\\n T_i &:= \\left\\lfloor T_0^{1.5^i} \\right\\rfloor.\n \"\"\"\n return int(np.floor(horizon ** EXPONENTIAL_STEP))\n\nnext_horizon__exponential.__latex_name__ = \"exp\"\nnext_horizon__exponential.__latex_name__ = r\"$T_i = {}^{}$\".format(DEFAULT_FIRST_HORIZON, r\"{%.3g^i}\" % EXPONENTIAL_STEP)\n\n\n#: Default exponential constant for the slow exponential horizon progression.\nSLOW_EXPONENTIAL_STEP = 1.1\n\n\n@jit\ndef next_horizon__exponential_slow(i, horizon):\n r\"\"\" The exponential horizon progression function:\n\n .. math::\n\n T &\\mapsto \\left\\lfloor T^{1.1} \\right\\rfloor,\\\\\n T_i &:= \\left\\lfloor T_0^{1.1^i} \\right\\rfloor.\n \"\"\"\n return int(np.floor(horizon ** SLOW_EXPONENTIAL_STEP))\n\nnext_horizon__exponential_slow.__latex_name__ = \"slow exp\"\nnext_horizon__exponential_slow.__latex_name__ = r\"$T_i = {}^{}$\".format(DEFAULT_FIRST_HORIZON, r\"{%.3g^i}\" % SLOW_EXPONENTIAL_STEP)\n\n\n#: Default exponential constant for the fast exponential horizon progression.\nFAST_EXPONENTIAL_STEP = 2\n\n\n@jit\ndef next_horizon__exponential_fast(i, horizon):\n r\"\"\" The exponential horizon progression function:\n\n .. math::\n\n T &\\mapsto \\lfloor T^{2} \\rfloor,\\\\\n T_i &:= \\lfloor T_0^{2^i} \\rfloor.\n \"\"\"\n return int(np.floor(horizon ** 2))\n\nnext_horizon__exponential_fast.__latex_name__ = \"fast exp\"\nnext_horizon__exponential_fast.__latex_name__ = r\"$T_i = {}^{}$\".format(DEFAULT_FIRST_HORIZON, r\"{%.3g^i}\" % FAST_EXPONENTIAL_STEP)\n\n\n#: Default constant :math:`\\alpha` for the generic exponential sequence.\nALPHA = 2\n#: Default constant :math:`\\beta` for the generic exponential sequence.\nBETA = 2\n\ndef next_horizon__exponential_generic(i, horizon):\n r\"\"\" The generic exponential horizon progression function:\n\n .. 
math:: T_i := \\left\\lfloor \\frac{T_0}{a} a^{b^i} \\right\\rfloor.\n \"\"\"\n return int((DEFAULT_FIRST_HORIZON / ALPHA) * ALPHA ** (BETA ** i))\n # return int(ALPHA * np.floor(horizon ** BETA))\n\nnext_horizon__exponential_generic.__latex_name__ = r\"exp $a={:.3g}$, $b={:.3g}$\".format(ALPHA, BETA)\nnext_horizon__exponential_generic.__latex_name__ = r\"$T_i = ({}/{}) {}^{}$\".format(DEFAULT_FIRST_HORIZON, ALPHA, ALPHA, r\"{%.3g^i}\" % BETA)\n\n\n#: Chose the default horizon growth function.\n# default_next_horizon = next_horizon__arithmetic\n# default_next_horizon = next_horizon__geometric\n# default_next_horizon = next_horizon__geometric\n# default_next_horizon = next_horizon__exponential_fast\ndefault_next_horizon = next_horizon__exponential_slow\n\n\n# --- Utility function\n\ndef breakpoints(next_horizon, first_horizon, horizon, debug=False):\n r\"\"\" Return the list of restart point (breakpoints), if starting from ``first_horizon`` to ``horizon`` with growth function ``next_horizon``.\n\n - Also return the gap between the last guess for horizon and the true horizon. This gap should not be too large.\n - Nicely print all the values if ``debug=True``.\n\n - First examples:\n\n >>> first_horizon = 1000\n >>> horizon = 30000\n >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon) # doctest: +ELLIPSIS\n ([1000, 1200, 1400, ..., 29800, 30000], 0)\n >>> breakpoints(next_horizon__geometric, first_horizon, horizon)\n ([1000, 2000, 4000, 8000, 16000, 32000], 2000)\n >>> breakpoints(next_horizon__exponential, first_horizon, horizon)\n ([1000, 31622], 1622)\n >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)\n ([1000, 1995, 4265, 9838, 24671, 67827], 37827)\n >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)\n ([1000, 1000000], 970000)\n\n - Second examples:\n\n >>> first_horizon = 5000\n >>> horizon = 1000000\n >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon) # doctest: +ELLIPSIS\n ([5000, 5200, ..., 999600, 999800, 1000000], 0)\n >>> breakpoints(next_horizon__geometric, first_horizon, horizon)\n ([5000, 10000, 20000, 40000, 80000, 160000, 320000, 640000, 1280000], 280000)\n >>> breakpoints(next_horizon__exponential, first_horizon, horizon)\n ([5000, 353553, 210223755], 209223755)\n >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)\n ([5000, 11718, 29904, 83811, 260394, 906137, 3572014], 2572014)\n >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)\n ([5000, 25000000], 24000000)\n\n - Third examples:\n\n >>> first_horizon = 10\n >>> horizon = 1123456\n >>> breakpoints(next_horizon__arithmetic, first_horizon, horizon) # doctest: +ELLIPSIS\n ([10, 210, 410, ..., 1123210, 1123410, 1123610], 154)\n >>> breakpoints(next_horizon__geometric, first_horizon, horizon)\n ([10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240, 20480, 40960, 81920, 163840, 327680, 655360, 1310720], 187264)\n >>> breakpoints(next_horizon__exponential, first_horizon, horizon)\n ([10, 31, 172, 2255, 107082, 35040856], 33917400)\n >>> breakpoints(next_horizon__exponential_slow, first_horizon, horizon)\n ([10, 12, 15, 19, 25, 34, 48, 70, 107, 170, 284, 499, 928, 1837, 3895, 8903, 22104, 60106, 180638, 606024, 2294768], 1171312)\n >>> breakpoints(next_horizon__exponential_fast, first_horizon, horizon)\n ([10, 100, 10000, 100000000], 98876544)\n \"\"\"\n i = 0\n t = max(first_horizon, 2)\n times = [t]\n if debug: print(\"\\n\\nFor the growth function {}, named '{}', first guess of the horizon = {} and true 
horizon = {} ...\\n ==> The times will be:\".format(next_horizon, getattr(next_horizon, '__latex_name__', '?'), first_horizon, horizon))\n while t < horizon:\n t = next_horizon(i, t)\n i += 1\n times.append(t)\n if debug: print(\" The {}th breakpoint is {} ...\".format(i, t)) # DEBUG\n assert horizon <= t, \"Error: the last guess for horizon = {} was found smaller than the true horizon = {}...\".format(t, horizon) # DEBUG\n gap = t - horizon\n if debug: print(\"This last guess for horizon = {} gives a gap = {} against the true horizon {}. Relative difference = {:.3%}...\".format(t, gap, horizon, gap / float(horizon))) # DEBUG\n return times, gap\n\n\n# --- Experimental code to plot some doubling sequences and\n# check numerically some inequalities :\n# like controlling a sum Sigma_i=0^n u_i by a constant times to last term u_n\n# and controlling the last term u_{L_T} as a function of T.\n\n\n#: The constant c in front of the function f.\nconstant_c_for_the_functions_f = 1.0\nconstant_c_for_the_functions_f = 0.1\nconstant_c_for_the_functions_f = 0.5\n\n\ndef function_f__for_geometric_sequences(i, c=constant_c_for_the_functions_f):\n r\"\"\" For the *geometric* doubling sequences, :math:`f(i) = c \\times \\log(i)`.\"\"\"\n if i <= 0: return 0.0\n return c * np.log(i)\n\n\ndef function_f__for_exponential_sequences(i, c=constant_c_for_the_functions_f):\n r\"\"\" For the *exponential* doubling sequences, :math:`f(i) = c \\times i`.\"\"\"\n return c * i\n\n\ndef function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.5, e=0.0):\n r\"\"\" For a certain *generic* family of doubling sequences, :math:`f(i) = c \\times i^{d} \\times (\\log(i))^{e}`.\n\n - ``d, e = 0, 1`` gives :func:`function_f__for_geometric_sequences`,\n - ``d, e = 1, 0`` gives :func:`function_f__for_geometric_sequences`,\n - ``d, e = 0.5, 0`` gives an intermediate sequence, growing faster than any geometric sequence and slower than any exponential sequence,\n - any other combination has not been studied yet.\n\n .. warning:: ``d`` should most probably be smaller than 1.\n \"\"\"\n i = float(i)\n if i <= 0: return 0.0\n if e == 0:\n assert d > 0, \"Error: invalid value of d = {} for function_f__for_generic_sequences.\".format(d) # DEBUG\n return c * (i ** d)\n if d == 0:\n assert e > 0, \"Error: invalid value of e = {} for function_f__for_generic_sequences.\".format(e) # DEBUG\n return c * ((np.log(i)) ** e)\n return c * (i ** d) * ((np.log(i)) ** e)\n\n\ndef function_f__for_intermediate_sequences(i):\n return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.5, e=0.0)\n\ndef function_f__for_intermediate2_sequences(i):\n return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.3333, e=0.0)\n\ndef function_f__for_intermediate3_sequences(i):\n return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.6667, e=0.0)\n\ndef function_f__for_intermediate4_sequences(i):\n return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=0.5, e=0.5)\n\ndef function_f__for_intermediate5_sequences(i):\n return function_f__for_generic_sequences(i, c=constant_c_for_the_functions_f, d=1, e=-1)\n\n\n#: Value of the parameter :math:`\\alpha` for the :func:`Ti_from_f` function.\nalpha_for_Ti = 0.1\nalpha_for_Ti = 1.0\nalpha_for_Ti = 0.5\n\n\ndef Ti_from_f(f, alpha=alpha_for_Ti, *args, **kwargs):\n r\"\"\" For any non-negative and increasing function :math:`f: i \\mapsto f(i)`, the corresponding sequence is defined by:\n\n .. 
math:: \\forall i\\in\\mathbb{N},\\; T_i := \\lfloor \\exp(\\alpha \\times \\exp(f(i))) \\rfloor.\n\n .. warning:: :math:`f(i)` can need other parameters, see the examples above. They can be given as ``*args`` or ``**kwargs`` to :func:`Ti_from_f`.\n\n .. warning:: it should be computed otherwise, I should give :math:`i \\mapsto \\exp(f(i))` instead of :math:`f: i \\mapsto f(i)`. I need to try as much as possible to reduce the risk of overflow errors!\n \"\"\"\n # WARNING don't forget the floor!\n def Ti(i):\n this_Ti = np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)):\n this_Ti = int(this_Ti)\n # print(\" For f = {}, i = {} gives Ti = {}\".format(f, i, this_Ti)) # DEBUG\n return this_Ti\n return Ti\n\n\ndef Ti_geometric(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):\n \"\"\" Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_geometric_sequences`.\"\"\"\n f = function_f__for_geometric_sequences\n this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)\n return this_Ti\nTi_geometric.__latex_name__ = r\"$f(i)=\\log(i)$\"\n\ndef Ti_exponential(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):\n \"\"\" Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_exponential_sequences`.\"\"\"\n f = function_f__for_exponential_sequences\n this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)\n return this_Ti\nTi_exponential.__latex_name__ = r\"$f(i)=i$\"\n\ndef Ti_intermediate_sqrti(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):\n \"\"\" Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate_sequences`.\"\"\"\n f = function_f__for_intermediate_sequences\n this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)\n return this_Ti\nTi_intermediate_sqrti.__latex_name__ = r\"$f(i)=\\sqrt{i}$\"\n\ndef Ti_intermediate_i13(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):\n \"\"\" Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate2_sequences`.\"\"\"\n f = function_f__for_intermediate2_sequences\n this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)\n return this_Ti\nTi_intermediate_i13.__latex_name__ = r\"$f(i)=i^{1/3}$\"\n\ndef Ti_intermediate_i23(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):\n \"\"\" Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate3_sequences`.\"\"\"\n f = function_f__for_intermediate3_sequences\n this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)\n return this_Ti\nTi_intermediate_i23.__latex_name__ = r\"$f(i)=i^{2/3}$\"\n\ndef Ti_intermediate_i12_logi12(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):\n \"\"\" Sequence :math:`T_i` generated from the function :math:`f` = 
:func:`function_f__for_intermediate4_sequences`.\"\"\"\n f = function_f__for_intermediate4_sequences\n this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)\n return this_Ti\nTi_intermediate_i12_logi12.__latex_name__ = r\"$f(i)=\\sqrt{i \\log(i)}$\"\n\ndef Ti_intermediate_i_by_logi(i, horizon, alpha=alpha_for_Ti, first_horizon=DEFAULT_FIRST_HORIZON, *args, **kwargs):\n \"\"\" Sequence :math:`T_i` generated from the function :math:`f` = :func:`function_f__for_intermediate5_sequences`.\"\"\"\n f = function_f__for_intermediate5_sequences\n this_Ti = first_horizon + np.floor(np.exp(alpha * np.exp(f(float(i + 1), *args, **kwargs))))\n if not (np.isinf(this_Ti) or np.isnan(this_Ti)): this_Ti = int(this_Ti)\n return this_Ti\nTi_intermediate_i_by_logi.__latex_name__ = r\"$f(i)=i / \\log(i)$\"\n\n\ndef last_term_operator_LT(Ti, max_i=10000):\n r\"\"\" For a certain function representing a doubling sequence, :math:`T: i \\mapsto T_i`, this :func:`last_term_operator_LT` function returns the function :math:`L: T \\mapsto L_T`, defined as:\n\n .. math:: \\forall T\\in\\mathbb{N},\\; L_T := \\min\\{ i \\in\\mathbb{N},\\; T \\leq T_i \\}.\n\n :math:`L_T` is the only integer which satisfies :math:`T_{L_T - 1} < T \\leq T_{L_T}`.\n \"\"\"\n def LT(T, max_i=max_i):\n i = 0\n while Ti(i) < T:\n i += 1\n if i >= max_i:\n raise ValueError(\"LT(T={T}) was unable to find a i <= {max_i} such that T_i >= T.\".format(T, max_i)) # DEBUG\n assert Ti(i - 1) < T <= Ti(i), \"Error: i = {} was computed as LT for T = {} and Ti = {} but does not satisfy T_(i-1) < T <= T(i)\".format(i, T, Ti) # DEBUG\n # print(\" For LT: i = {} was computed as LT for T = {} and Ti = {} and satisfies T(i-1) = {} < T <= T(i) = {}\".format(i, T, Ti, Ti(i-1), Ti(i))) # DEBUG\n return i\n return LT\n\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef plot_doubling_sequences(\n i_min=1, i_max=30,\n list_of_f=(\n function_f__for_geometric_sequences,\n function_f__for_intermediate_sequences,\n function_f__for_intermediate2_sequences,\n function_f__for_intermediate3_sequences,\n function_f__for_intermediate4_sequences,\n function_f__for_exponential_sequences,\n ),\n label_of_f=(\n \"Geometric doubling (d=0, e=1)\",\n \"Intermediate doubling (d=1/2, e=0)\",\n \"Intermediate doubling (d=1/3, e=0)\",\n \"Intermediate doubling (d=2/3, e=0)\",\n \"Intermediate doubling (d=1/2, e=1/2)\",\n \"Exponential doubling (d=1, e=0)\",\n ),\n *args, **kwargs\n ):\n r\"\"\" Display a plot to illustrate the values of the :math:`T_i` as a function of :math:`i` for some i.\n\n - Can accept many functions f (and labels).\n \"\"\"\n # Make unique markers\n nb = len(list_of_f)\n allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']\n longlist = allmarkers * (1 + int(nb / float(len(allmarkers)))) # Cycle the good number of time\n markers = longlist[:nb] # Truncate\n # Make unique colors\n colors = sns.hls_palette(nb + 1)[:nb]\n\n fig = plt.figure()\n # plt.hold(True)\n\n i_s = np.arange(i_min, i_max)\n # now for each function f\n for num_f, (f, la) in enumerate(zip(list_of_f, label_of_f)):\n print(\"\\n\\nThe {}th function is referred to as {} and is {}\".format(num_f, la, f)) # DEBUG\n\n Ti = Ti_from_f(f)\n values_of_Ti = np.array([ Ti(i) for i in i_s ])\n plt.plot(i_s, values_of_Ti, label=la, lw=3, ms=3, color=colors[num_f], marker=markers[num_f])\n plt.legend()\n plt.xlabel(r\"Value of the time horizon $i = {},...,{}$\".format(i_min, 
i_max))\n plt.title(r\"Comparison of the values of $T_i$\")\n plt.show()\n return fig\n\n\ndef plot_quality_first_upper_bound(\n Tmin=10, Tmax=int(1e8), nbTs=100,\n gamma=0.0, delta=1.0, # XXX bound in RT <= log(T)\n # gamma=0.5, delta=0.0, # XXX bound in RT <= sqrt(T)\n # gamma=0.5, delta=0.5, # XXX bound in RT <= sqrt(T * log(T))\n # gamma=0.66667, delta=1.0, # XXX another weird bound in RT <= T^2/3 * log(T)\n list_of_f=(\n function_f__for_geometric_sequences,\n function_f__for_intermediate_sequences,\n function_f__for_intermediate2_sequences,\n function_f__for_intermediate3_sequences,\n function_f__for_intermediate4_sequences,\n function_f__for_exponential_sequences,\n ),\n label_of_f=(\n \"Geometric doubling (d=0, e=1)\",\n \"Intermediate doubling (d=1/2, e=0)\",\n \"Intermediate doubling (d=1/3, e=0)\",\n \"Intermediate doubling (d=2/3, e=0)\",\n \"Intermediate doubling (d=1/2, e=1/2)\",\n \"Exponential doubling (d=1, e=0)\",\n ),\n show_Ti_m_Tim1=True,\n # show_Ti_m_Tim1=False, # DEBUG\n *args, **kwargs\n ):\n r\"\"\" Display a plot to compare numerically between the following sum :math:`S` and the upper-bound we hope to have, :math:`T^{\\gamma} (\\log T)^{\\delta}`, as a function of :math:`T` for some values between :math:`T_{\\min}` and :math:`T_{\\max}`:\n\n .. math:: S := \\sum_{i=0}^{L_T} (T_i - T_{i-1})^{\\gamma} (\\log (T_i - T_{i-1}))^{\\delta}.\n\n - Can accept many functions f (and labels).\n - Can use :math:`T_i` instead of :math:`T_i - T_{i-1}` if ``show_Ti_m_Tim1=False`` (default is to use the smaller possible bound, with difference of sequence lengths, :math:`T_i - T_{i-1}`).\n\n .. warning:: This is still ON GOING WORK.\n \"\"\"\n # Make unique markers\n nb = len(list_of_f)\n allmarkers = ['o', 'D', 'v', 'p', '<', 's', '^', '*', 'h', '>']\n longlist = allmarkers * (1 + int(nb / float(len(allmarkers)))) # Cycle the good number of time\n markers = longlist[:nb] # Truncate\n # Make unique colors\n colors = sns.hls_palette(nb + 1)[:nb]\n\n fig = plt.figure()\n # plt.hold(True)\n\n Ts = np.floor(np.linspace(Tmin, Tmax, num=nbTs))\n the_bound_we_want = (Ts ** gamma) * (np.log(Ts) ** delta)\n\n # plt.plot(Ts, the_bound_we_want, label=r\"$T^{\\gamma} (\\log T)^{\\delta}$\", lw=3, ms=3, color=colors[0], marker=markers[0])\n # compute the sequence lengths to use, either T_i or T_i - T_{i-1}\n Ts_for_f = np.copy(Ts)\n if show_Ti_m_Tim1: Ts_for_f[1:] = np.diff(Ts)\n\n # now for each function f\n for num_f, (f, la) in enumerate(zip(list_of_f, label_of_f)):\n print(\"\\n\\nThe {}th function is referred to as {} and is {}\".format(num_f, la, f)) # DEBUG\n Ti = Ti_from_f(f)\n LT = last_term_operator_LT(Ti)\n the_sum_we_have = np.zeros_like(Ts_for_f)\n for j, (Tj, dTj) in enumerate(zip(Ts, Ts_for_f)):\n LTj = LT(Tj)\n the_sum_we_have[j] = sum(\n (dTj ** gamma) * (np.log(dTj) ** delta)\n for i in range(0, LTj + 1)\n )\n print(\"For j = {}, Tj = {}, dTj = {}, gives LTj = {}, and the value of the sum from i=0 to LTj is = {}.\".format(j, Tj, dTj, LTj, the_sum_we_have[j])) # DEBUG\n print(\"the_sum_we_have =\", the_sum_we_have) # DEBUG\n plt.plot(Ts, the_sum_we_have / the_bound_we_want, label=la, lw=3, ms=3, color=colors[num_f], marker=markers[num_f])\n\n plt.legend()\n plt.xlabel(r\"Value of the time horizon $T = {},...,{}$\".format(Tmin, Tmax))\n str_of_Tj_or_dTj = \"T_i - T_{i-1}\" if show_Ti_m_Tim1 else \"T_i\"\n plt.title(r\"Ratio of the sum $\\sum_{i=0}^{L_T} (%s)^{\\gamma} (\\log(%s))^{\\delta}$ and the upper-bound $T^{\\gamma} \\log(T)^{\\delta}$, for $\\gamma=%.3g$, 
$\\delta=%.3g$.\" % (str_of_Tj_or_dTj, str_of_Tj_or_dTj, gamma, delta)) # DEBUG\n plt.show()\n return fig\n\n\n# --- The interesting class\n\n#: If the sequence Ti does not grow enough, artificially increase i until T_inext > T_i\nMAX_NB_OF_TRIALS = 500\n\n\nclass DoublingTrickWrapper(BaseWrapperPolicy):\n r\"\"\" A policy that acts as a wrapper on another policy `P`, assumed to be *horizon dependent* (has to known :math:`T`), by implementing a \"doubling trick\".\n\n - Reference: [[What the Doubling Trick Can or Can't Do for Multi-Armed Bandits, Lilian Besson and Emilie Kaufmann, 2018]](https://hal.inria.fr/hal-01736357), to be presented soon.\n \"\"\"\n\n def __init__(self, nbArms,\n full_restart=FULL_RESTART,\n policy=default_horizonDependent_policy,\n next_horizon=default_next_horizon,\n first_horizon=DEFAULT_FIRST_HORIZON,\n *args, **kwargs):\n super(DoublingTrickWrapper, self).__init__(nbArms, policy=policy, *args, **kwargs)\n self.full_restart = full_restart #: Constant to know how to refresh the underlying policy.\n # --- Horizon\n self._i = 0\n self._next_horizon = next_horizon # Function for the growing horizon\n self.next_horizon_name = getattr(next_horizon, '__latex_name__', '?') #: Pretty string of the name of this growing function\n self._first_horizon = max(2, first_horizon) # First guess for the horizon\n self.horizon = max(2, first_horizon) #: Last guess for the horizon\n # XXX Force it, just for pretty printing...\n self.startGame()\n\n # --- pretty printing\n\n def __str__(self):\n # remove the T0 part from string representation of the policy\n str_policy = str(self.policy)\n str_policy = str_policy.replace(r\"($T={}$)\".format(self._first_horizon), \"\")\n str_policy = str_policy.replace(r\"$T={}$, \".format(self._first_horizon), \"\")\n return r\"{}({})[{}]\".format(\"DT\" if self.full_restart else \"DTnr\", self.next_horizon_name, str_policy)\n\n # --- Start game by creating new underlying policy\n\n def startGame(self):\n \"\"\" Initialize the policy for a new game.\"\"\"\n super(BaseWrapperPolicy, self).startGame()\n # super(DoublingTrickWrapper, self).startGame() # WARNING no\n self._i = 0 # reinitialize this\n self.horizon = self._first_horizon #: Last guess for the horizon\n try:\n self.policy = self._policy(self.nbArms, horizon=self.horizon, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)\n except Exception as e:\n print(\"WARNING: Received exception {} when trying to create the underlying policy... maybe the 'horizon={}' keyword argument was not understood correctly? 
Retrying without it...\".format(e, self.horizon)) # DEBUG\n self.policy = self._policy(self.nbArms, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)\n # now also start game for the underlying policy\n self.policy.startGame()\n\n # --- Pass the call to the subpolicy\n\n def getReward(self, arm, reward):\n \"\"\" Pass the reward, as usual, update t and sometimes restart the underlying policy.\"\"\"\n # print(\" - At time t = {}, got a reward = {} from arm {} ...\".format(self.t, arm, reward)) # DEBUG\n # super(DoublingTrickWrapper, self).getReward(arm, reward)\n self.t += 1\n self.policy.getReward(arm, reward)\n\n # Maybe we have to update the horizon?\n if self.t > self.horizon:\n self._i += 1\n new_horizon = self._next_horizon(self._i, self.horizon)\n # XXX <!-- small hack if the sequence is not growing fast enough\n nb_of_trials = 1\n while nb_of_trials < MAX_NB_OF_TRIALS and new_horizon <= self.horizon:\n self._i += 1\n nb_of_trials += 1\n new_horizon = self._next_horizon(self._i, self.horizon)\n # XXX end of small hack -->\n assert new_horizon > self.horizon, \"Error: the new_horizon = {} is not > the current horizon = {} ...\".format(new_horizon, self.horizon) # DEBUG\n # print(\" - At time t = {}, a DoublingTrickWrapper class was running with current horizon T_i = {} and decided to use {} as a new horizon...\".format(self.t, self.horizon, new_horizon)) # DEBUG\n self.horizon = new_horizon\n # now we have to update or restart the underlying policy\n if self.full_restart:\n try:\n self.policy = self._policy(self.nbArms, horizon=self.horizon, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)\n except Exception as e:\n # print(\"Received exception {} when trying to create the underlying policy... maybe the 'horizon={}' keyword argument was not understood correctly? Retrying without it...\".format(e, self.horizon)) # DEBUG\n self.policy = self._policy(self.nbArms, lower=self.lower, amplitude=self.amplitude, *self._args, **self._kwargs)\n # now also start game for the underlying policy\n self.policy.startGame()\n # print(\" ==> Fully restarting the underlying policy by creating a new object... Now it is = {} ...\".format(self.policy)) # DEBUG\n else:\n if hasattr(self.policy, 'horizon'):\n try:\n self.policy.horizon = self.horizon\n except AttributeError:\n pass\n # print(\"Warning: unable to update the parameter 'horizon' of the underlying policy {}... Trying '_horizon' ...\".format(self.policy)) # DEBUG\n # print(\" ==> Just updating the horizon parameter of the underlying policy... Now it is = {} ...\".format(self.policy)) # DEBUG\n # else:\n # print(\" ==> Nothing to do, as the underlying policy DOES NOT have a 'horizon' or '_horizon' parameter that could have been updated... Maybe you are not using a good policy? 
I suggest UCBH or ApproximatedFHGittins.\") # DEBUG\n\n\n# # --- Debugging\n\nif __name__ == \"__main__\":\n import sys\n if \"plot\" in sys.argv[1:]:\n plt.ion()\n # plot_doubling_sequences()\n for gamma, delta in [\n (0.0, 1.0), # XXX bound in RT <= log(T)\n (0.5, 0.0), # XXX bound in RT <= sqrt(T)\n (0.5, 0.5), # XXX bound in RT <= sqrt(T * log(T))\n (0.66667, 1.0), # XXX another weird bound in RT <= T^2/3 * log(T)\n ]:\n plot_quality_first_upper_bound(gamma=gamma, delta=delta, show_Ti_m_Tim1=True)\n plot_quality_first_upper_bound(gamma=gamma, delta=delta, show_Ti_m_Tim1=False)\n sys.exit(0)\n\n # Code for debugging purposes.\n from doctest import testmod\n print(\"\\nTesting automatically all the docstring written in each functions of this module :\")\n testmod(verbose=True)\n" ]
[ [ "numpy.isinf", "numpy.zeros_like", "matplotlib.pyplot.ion", "numpy.isnan", "numpy.log", "numpy.copy", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "numpy.diff", "numpy.arange", "matplotlib.pyplot.show", "numpy.linspace", "numpy.floor" ] ]
wuxinwang1997/wheatdetection
[ "93b928d12ab7f75883c85bf93c4738981fb6af79" ]
[ "evaluate/evaluate.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: wuxin.wang\n@contact: [email protected]\n\"\"\"\nfrom tqdm import tqdm\nimport numpy as np\nfrom .calculate_score import calculate_final_score\n\ndef evaluate(all_predictions):\n best_final_score, best_score_threshold = 0, 0\n for score_threshold in tqdm(np.arange(0, 1, 0.01), total=np.arange(0, 1, 0.01).shape[0], desc=\"OOF\"):\n final_score = calculate_final_score(all_predictions, score_threshold)\n if final_score > best_final_score:\n best_final_score = final_score\n best_score_threshold = score_threshold\n\n for i in range(len(all_predictions)):\n gt_boxes = all_predictions[i]['gt_boxes'].copy()\n pred_boxes = all_predictions[i]['pred_boxes'].copy()\n scores = all_predictions[i]['scores'].copy()\n indexes = np.where(scores>best_score_threshold)\n pred_boxes = pred_boxes[indexes]\n all_predictions[i]['final_missed_boxes_nums'] = len(gt_boxes)-len(pred_boxes)\n\n return best_score_threshold, best_final_score" ]
[ [ "numpy.where", "numpy.arange" ] ]
FFroehlich/pysb
[ "d1afd8bed83cc09476ea871ffcc106b18498dc7f" ]
[ "pysb/examples/run_earm_stochkit.py" ]
[ "\"\"\" Run the Extrinsic Apoptosis Reaction Model (EARM) using StochKit's\nstochastic simulation algorithm (SSA) implementation.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom earm_1_0 import model\nfrom pysb.simulator import StochKitSimulator\n\n\ndef plot_mean_min_max(name, title=None):\n x = np.array([tr[:][name] for tr in trajectories]).T\n if not title:\n title = name\n plt.figure(title)\n plt.plot(tout.T, x, '0.5', lw=2, alpha=0.25) # individual trajectories\n plt.plot(tout[0], x.mean(1), 'k--', lw=3, label=\"Mean\")\n plt.plot(tout[0], x.min(1), 'b--', lw=3, label=\"Minimum\")\n plt.plot(tout[0], x.max(1), 'r--', lw=3, label=\"Maximum\")\n plt.legend(loc=0)\n plt.xlabel('Time')\n plt.ylabel('Population of %s' % name)\n\ntspan = np.linspace(0, 20000, 1000)\nsim = StochKitSimulator(model, tspan)\nsimres = sim.run(n_runs=20, seed=None, algorithm=\"ssa\")\n\ntrajectories = simres.all\ntout = simres.tout\n\nplot_mean_min_max('Bid_unbound')\nplot_mean_min_max('PARP_unbound')\nplot_mean_min_max('mSmac_unbound')\nplot_mean_min_max('tBid_total')\nplot_mean_min_max('CPARP_total')\nplot_mean_min_max('cSmac_total')\n\nplt.show()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.linspace" ] ]
TymonXie/tymon
[ "1939d2f43e0c75b6fcc3805f37bdb2227bf3f594" ]
[ "tymon/model_hub/time_series/lstm.py" ]
[ "import torch.nn as nn\n\n\n# model\nclass LSTM(nn.Module):\n def __init__(self, in_dim=12, hidden_dim=10, output_dim=12, n_layer=1):\n super(LSTM, self).__init__()\n self.in_dim = in_dim\n self.hidden_dim = hidden_dim\n self.output_dim = output_dim\n self.n_layer = n_layer\n self.lstm = nn.LSTM(input_size=in_dim, hidden_size=hidden_dim, num_layers=n_layer, batch_first=True)\n self.linear = nn.Linear(hidden_dim, output_dim)\n\n def forward(self, x):\n _, (h_out, _) = self.lstm(x) # h_out是序列最后一个元素的hidden state\n h_out = h_out.view(h_out.shape[0], -1)\n h_out = self.linear(h_out)\n return h_out\n\nlstm_paras = {'epoch': 700,\n 'learning_rate': 0.001,\n 'seq_length': 4,\n 'n_feature': 12,\n 'divide_ratio': 0.7\n }\n" ]
[ [ "torch.nn.Linear", "torch.nn.LSTM" ] ]
Tanveer81/barlow_twins_video
[ "cb0b092971bdd56cc6c58168faf9eb01e0f8c832" ]
[ "yvos_dataset.py" ]
[ "import time\r\nfrom glob import glob\r\nimport os\r\nimport random\r\nimport json\r\nimport numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom torch.utils.data import Dataset\r\nfrom PIL import Image\r\nimport cv2\r\n\r\n\r\ndef generate_barlow_twin_annotations(img_path, meta_path, out_path, frame_dist):\r\n f = open(meta_path, )\r\n data = json.load(f)\r\n f.close()\r\n video_id_names = list(data.keys())\r\n video_id_names.sort()\r\n frame_pairs = []\r\n start_time = time.time()\r\n for i, video_id in enumerate(video_id_names[:3]):\r\n img_paths = np.sort(glob(os.path.join(img_path, video_id, '*.jpg'))).tolist()\r\n if len(img_paths) <= frame_dist:\r\n continue\r\n categories = list(data[video_id]['objects'].values())\r\n frames = [a.split('/')[-1].split('.')[0] for a in img_paths]\r\n frame_categories = dict((a, []) for a in frames)\r\n\r\n for j, category in enumerate(categories):\r\n for frame in category['frames']:\r\n frame_categories[frame].append(j)\r\n\r\n number_of_pairs = int(len(img_paths)/frame_dist) * 100\r\n attempts = 3 * number_of_pairs\r\n while True:\r\n start = random.randint(0, len(img_paths) - frame_dist-1)\r\n end = random.randint(start + frame_dist, len(img_paths)-1)\r\n key = f'{video_id}/{frames[start]}.jpg'\r\n if key not in frame_pairs and set(frame_categories[frames[start]]) == set(frame_categories[frames[end]]):\r\n frame_pairs.append((key, f'{video_id}/{frames[end]}.jpg'))\r\n number_of_pairs -= 1\r\n if number_of_pairs == 0:\r\n break\r\n attempts -= 1\r\n if attempts == 0:\r\n break\r\n\r\n print(f'{i + 1}/{len(video_id_names)}: {video_id} : {(time.time() - start_time)} seconds')\r\n\r\n print(f'Total Time : {(time.time() - start_time)} seconds')\r\n print(f'Saving pairs as {out_path}barlow_twins_pairs.txt')\r\n '''Total Time : 49643.86153244972 seconds ~ 13.78h\r\n Saving pairs as /nfs/data3/koner/data/youtubeVOS/train/barlow_twins_pairs.txt'''\r\n with open(f'{out_path}barlow_twins_pairs.txt', 'w') as fp:\r\n fp.write('\\n'.join('%s %s' % x for x in frame_pairs))\r\n\r\n\r\ndef debug_barlow_twin_annotations(pair_meta_path, img_path):\r\n start_time = time.time()\r\n file = open(f'{pair_meta_path}barlow_twins_pairs.txt', 'r')\r\n pairs = []\r\n for line in file:\r\n frames = line.split(' ')\r\n pairs.append(frames)\r\n print(f'Total Time for loading Pairs : {(time.time() - start_time)} seconds')\r\n for i in range(10):\r\n i = random.randint(0, len(pairs)-1)\r\n image = Image.open(os.path.join(img_path, pairs[i][0]))\r\n image2 = Image.open(os.path.join(img_path, pairs[i][1].rstrip()))\r\n visualize(image)\r\n time.sleep(0.5)\r\n visualize(image2)\r\n time.sleep(1)\r\n\r\n\r\ndef save_bounding_boxes_for_barlow_twins(path):\r\n start_time = time.time()\r\n bboxes_frame = {}\r\n with open(f'{path}detectron2-annotations-train-balanced.json') as json_file:\r\n detectron_data = json.load(json_file)\r\n length = len(detectron_data['annos'])\r\n for i, frame in enumerate(detectron_data['annos']):\r\n frame_key = frame['video_id'] + '_' + frame['frame_id']\r\n bboxes_anno = {}\r\n for anno in frame['annotations']:\r\n anno_key = str(anno['category_id'])+'_'+anno['object_id']\r\n bboxes_anno[anno_key] = anno['bbox']\r\n bboxes_frame[frame_key] = bboxes_anno\r\n print(f'{i + 1}/{length} : {(time.time() - start_time)} seconds')\r\n\r\n print(f'Total Time : {(time.time() - start_time)} seconds')\r\n print(f'Saving bboxes as {path}/barlow_twins_bboxes.json')\r\n with open(f'{path}/barlow_twins_bboxes.json', 'w') as outfile:\r\n 
json.dump(bboxes_frame, outfile)\r\n\r\n\r\ndef is_empty(bboxes_data, pair):\r\n # Returns true if a pir of frame does not have bounding box for common objects\r\n pair = pair.split(' ')\r\n box1 = bboxes_data[pair[0].replace('/', '_').split('.')[0]]\r\n box2 = bboxes_data[pair[1].rstrip().replace('/', '_').split('.')[0]]\r\n common_bbox_list = list(set(list(box1.keys())) & set(list(box2.keys())))\r\n return not common_bbox_list\r\n\r\n\r\ndef refine_barlow_pairs_boxes(path):\r\n # remove frame pair with empty bboxes\r\n start_time = time.time()\r\n with open(f'{path}barlow_twins_bboxes.json') as json_file:\r\n bboxes_data = json.load(json_file)\r\n file = open(f'{path}barlow_twins_pairs.txt', 'r')\r\n pairs = [line for line in file]\r\n refined_pairs = [pair for pair in pairs if not is_empty(bboxes_data, pair)]\r\n print(f'Total Time : {(time.time() - start_time)} seconds')\r\n print(f'Saving pairs as {path}barlow_twins_pairs_refined.txt')\r\n with open(f'{path}barlow_twins_pairs_refined.txt', 'w') as fp:\r\n for row in refined_pairs:\r\n fp.write(str(row))\r\n\r\n\r\ndef debug_refined_barlow_pairs_boxes(path):\r\n # remove frame pair with empty bboxes\r\n start_time = time.time()\r\n with open(f'{path}barlow_twins_bboxes.json') as json_file:\r\n bboxes_data = json.load(json_file)\r\n file = open(f'{path}barlow_twins_pairs_refined.txt', 'r')\r\n pairs = [line for line in file]\r\n refined_pairs = [pair for pair in pairs if not is_empty(bboxes_data, pair)]\r\n print(f'Total Time : {(time.time() - start_time)} seconds')\r\n # print(f'Saving pairs as {path}barlow_twins_pairs_refined.txt')\r\n # with open(f'{path}barlow_twins_pairs_refined.txt', 'w') as fp:\r\n # for row in refined_pairs:\r\n # fp.write(str(row))\r\n\r\n\r\ndef visualize(img, cmap='binary'):\r\n plt.imshow(img, cmap=cmap)\r\n plt.show(block=True)\r\n\r\n\r\ndef visualize_bbox(image, bbox):\r\n # image = copy.deepcopy(image)\r\n image = np.ascontiguousarray(image)\r\n cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)\r\n visualize(image)\r\n\r\nclass YvosDateset(Dataset):\r\n def __init__(self, meta_path, img_path, transform, crop=False, increase_small_area=False):\r\n super().__init__()\r\n self.increase_small_area = increase_small_area\r\n self.crop = crop\r\n if crop:\r\n with open(f'{meta_path}barlow_twins_bboxes.json') as json_file:\r\n self.bboxes_data = json.load(json_file)\r\n file = open(f'{meta_path}barlow_twins_pairs_refined.txt', 'r')\r\n self.transform = transform\r\n self.img_path = img_path\r\n start_time = time.time()\r\n self.pairs = []\r\n for line in file:\r\n frames = line.split(' ')\r\n self.pairs.append(frames)\r\n print(f'Total Time for loading Pairs : {(time.time() - start_time)} seconds')\r\n\r\n def is_small_object(self, box):\r\n return ((box[2] - box[0]) * (box[3] - box[1])) < 1024\r\n\r\n def increase_area(self, box, img_size):\r\n if self.is_small_object(box):\r\n dist_x = (box[2] - box[0]) / 2\r\n dist_y = (box[3] - box[1]) / 2\r\n\r\n if box[0] - dist_x < 0:\r\n box[0] = 0\r\n box[2] = dist_x * 4\r\n elif box[2] + dist_x > img_size[0]:\r\n box[2] = img_size[0]\r\n box[0] = img_size[0] - 4 * dist_x\r\n else:\r\n box[0] = box[0] - dist_x\r\n box[2] = box[2] + dist_x\r\n\r\n if box[1] - dist_y < 0:\r\n box[1] = 0\r\n box[3] = dist_y * 4\r\n elif box[3] + dist_y > img_size[1]:\r\n box[3] = img_size[1]\r\n box[1] = img_size[1] - 4 * dist_y\r\n else:\r\n box[2] = box[2] - dist_y\r\n box[3] = box[3] + dist_y\r\n\r\n def __getitem__(self, index: int):\r\n pair = 
self.pairs[index]\r\n image1 = Image.open(os.path.join(self.img_path, pair[0]))\r\n image2 = Image.open(os.path.join(self.img_path, pair[1].rstrip()))\r\n image1 = image1.convert('RGB')\r\n image2 = image2.convert('RGB')\r\n # visualize(image1)\r\n # visualize(image2)\r\n if self.crop:\r\n box1 = self.bboxes_data[pair[0].replace('/', '_').split('.')[0]]\r\n box2 = self.bboxes_data[pair[1].rstrip().replace('/', '_').split('.')[0]]\r\n bbox_indx = random.choice(list(set(list(box1.keys())) & set(list(box2.keys()))))\r\n box1 = box1[bbox_indx] #xyxy format\r\n box2 = box2[bbox_indx]\r\n if self.increase_small_area:\r\n self.increase_area(box1, image1.size)\r\n self.increase_area(box2, image2.size)\r\n\r\n image1 = image1.crop(tuple(box1))# (left, upper, right, lower) im.crop((x0, y0, x1, y1))\r\n image2 = image2.crop(tuple(box2))\r\n image1, image2 = self.transform(image1, image2)\r\n # visualize(image1.permute(1, 2, 0))\r\n # visualize(image2.permute(1, 2, 0))\r\n return image1, image2\r\n\r\n def __len__(self) -> int:\r\n return len(self.pairs)\r\n\r\n\r\ndef main():\r\n root = '/nfs/data3/koner/data'\r\n img_path = f'{root}/youtubeVOS/train/JPEGImages/'\r\n pair_meta_path = f'{root}/youtubeVOS/train/'\r\n meta_path = f'{root}/youtubeVOS/train/train-train-meta-balanced.json'\r\n frame_dist = 5\r\n # generate_barlow_twin_annotations(img_path, meta_path, pair_meta_path, frame_dist)\r\n # debug_barlow_twin_annotations(pair_meta_path, img_path)\r\n # save_bounding_boxes_for_barlow_twins(pair_meta_path)\r\n # refine_barlow_pairs_boxes(pair_meta_path)\r\n debug_refined_barlow_pairs_boxes\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "numpy.ascontiguousarray", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
getian107/PRScs
[ "36827146c32e1b45972a290b965e621d990142c3" ]
[ "mcmc_gtb.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nMarkov Chain Monte Carlo (MCMC) sampler for polygenic prediction with continuous shrinkage (CS) priors.\n\n\"\"\"\n\n\nimport scipy as sp\nfrom scipy import linalg \nfrom scipy import random\nimport gigrnd\n\n\ndef mcmc(a, b, phi, sst_dict, n, ld_blk, blk_size, n_iter, n_burnin, thin, chrom, out_dir, beta_std, seed):\n print('... MCMC ...')\n\n # seed\n if seed != None:\n random.seed(seed)\n\n # derived stats\n beta_mrg = sp.array(sst_dict['BETA'], ndmin=2).T\n maf = sp.array(sst_dict['MAF'], ndmin=2).T\n n_pst = (n_iter-n_burnin)/thin\n p = len(sst_dict['SNP'])\n n_blk = len(ld_blk)\n\n # initialization\n beta = sp.zeros((p,1))\n psi = sp.ones((p,1))\n sigma = 1.0\n if phi == None:\n phi = 1.0; phi_updt = True\n else:\n phi_updt = False\n\n beta_est = sp.zeros((p,1))\n psi_est = sp.zeros((p,1))\n sigma_est = 0.0\n phi_est = 0.0\n\n # MCMC\n for itr in range(1,n_iter+1):\n if itr % 100 == 0:\n print('--- iter-' + str(itr) + ' ---')\n\n mm = 0; quad = 0.0\n for kk in range(n_blk):\n if blk_size[kk] == 0:\n continue\n else:\n idx_blk = range(mm,mm+blk_size[kk])\n dinvt = ld_blk[kk]+sp.diag(1.0/psi[idx_blk].T[0])\n dinvt_chol = linalg.cholesky(dinvt)\n beta_tmp = linalg.solve_triangular(dinvt_chol, beta_mrg[idx_blk], trans='T') + sp.sqrt(sigma/n)*random.randn(len(idx_blk),1)\n beta[idx_blk] = linalg.solve_triangular(dinvt_chol, beta_tmp, trans='N')\n quad += sp.dot(sp.dot(beta[idx_blk].T, dinvt), beta[idx_blk])\n mm += blk_size[kk]\n\n err = max(n/2.0*(1.0-2.0*sum(beta*beta_mrg)+quad), n/2.0*sum(beta**2/psi))\n sigma = 1.0/random.gamma((n+p)/2.0, 1.0/err)\n\n delta = random.gamma(a+b, 1.0/(psi+phi))\n\n for jj in range(p):\n psi[jj] = gigrnd.gigrnd(a-0.5, 2.0*delta[jj], n*beta[jj]**2/sigma)\n psi[psi>1] = 1.0\n\n if phi_updt == True:\n w = random.gamma(1.0, 1.0/(phi+1.0))\n phi = random.gamma(p*b+0.5, 1.0/(sum(delta)+w))\n\n # posterior\n if (itr>n_burnin) and (itr % thin == 0):\n beta_est = beta_est + beta/n_pst\n psi_est = psi_est + psi/n_pst\n sigma_est = sigma_est + sigma/n_pst\n phi_est = phi_est + phi/n_pst\n\n # convert standardized beta to per-allele beta\n if beta_std == 'False':\n beta_est /= sp.sqrt(2.0*maf*(1.0-maf))\n\n # write posterior effect sizes\n if phi_updt == True:\n eff_file = out_dir + '_pst_eff_a%d_b%.1f_phiauto_chr%d.txt' % (a, b, chrom)\n else:\n eff_file = out_dir + '_pst_eff_a%d_b%.1f_phi%1.0e_chr%d.txt' % (a, b, phi, chrom)\n\n with open(eff_file, 'w') as ff:\n for snp, bp, a1, a2, beta in zip(sst_dict['SNP'], sst_dict['BP'], sst_dict['A1'], sst_dict['A2'], beta_est):\n ff.write('%d\\t%s\\t%d\\t%s\\t%s\\t%.6e\\n' % (chrom, snp, bp, a1, a2, beta))\n\n # print estimated phi\n if phi_updt == True:\n print('... Estimated global shrinkage parameter: %1.2e ...' % phi_est )\n\n print('... Done ...')\n\n\n" ]
[ [ "scipy.array", "scipy.zeros", "scipy.random.seed", "scipy.ones", "scipy.linalg.cholesky", "scipy.dot", "scipy.random.gamma", "scipy.diag", "scipy.linalg.solve_triangular", "scipy.sqrt" ] ]
bjfar/JMCtools
[ "f134d2db3bb9c38a61e492a4e776e937519b0eeb" ]
[ "tests/docs_examples/quickstart.py" ]
[ "# make_joint\nimport JMCtools.distributions as jtd\nimport scipy.stats as sps\nimport numpy as np\n\njoint = jtd.JointModel([sps.norm,sps.norm])\n# sample_pdf\nnull_parameters = [{'loc': 3, 'scale': 1}, \n {'loc': 1, 'scale': 2}]\nsamples = joint.rvs((10000,),null_parameters)\n# check_pdf\n# Compute 2D PDF over grid\nnxbins=100\nnybins=100\nx = np.linspace(-2,8,nxbins)\ny = np.linspace(-6,10,nybins)\nX, Y = np.meshgrid(x, y)\ndxdy = (x[1]-x[0]) * (y[1]-y[0])\nPDF = joint.pdf([X,Y],null_parameters)\n\n# Construct smallest intervals containing certain amount of probability\noutarray = np.ones((nxbins,nybins))\nsb = np.argsort(PDF.flat)[::-1]\noutarray.flat[sb] = np.cumsum(PDF.flat[sb] * dxdy)\n\n# Make plot!\nimport matplotlib.pyplot as plt\nfig= plt.figure(figsize=(5,4))\nax = fig.add_subplot(111)\nax.contourf(X, Y, outarray, alpha=0.3, levels=[0,0.68,0.95,0.997])\nax.scatter(*samples,lw=0,s=1)\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nfig.savefig(\"example_2D_joint.svg\")\n# build_model \nimport JMCtools.models as jtm\n\ndef pars1(a):\n return {'loc': a, 'scale':1}\n\ndef pars2(b):\n return {'loc': b, 'scale':2}\n\nmodel = jtm.ParameterModel(joint,[pars1,pars2])\n# block_structure\nprint(model.blocks)\n# alt_model\ndef pars3(a,b):\n return {'loc': a+b, 'scale':1}\n\nmodel2 = jtm.ParameterModel(joint,[pars3,pars2])\nprint(model2.blocks)\n# sim_data\nnull_parameters = {'a':3, 'b':1}\nNtrials = 10000\nNdraws = 1\ndata = model.simulate((Ntrials,Ndraws),null_parameters)\n# find_MLEs\n# Set starting values and step sizes for Minuit search\noptions = {'a':3, 'error_a':1, 'b': 1, 'error_b': 2}\nLmax, pmax = model.find_MLE_parallel(options,data,method='minuit',Nprocesses=3)\n# Note that Lmax are the log-likelihoods of the MLEs, \n# and pmax are the parameter values.\n# compute_stats\nLnull = model.logpdf(null_parameters)\nLLR = -2*(Lnull - Lmax) # log-likelihood ratio\n\n# Plot!\nn, bins = np.histogram(LLR, bins=100, normed=True)\nq = np.arange(0,9,0.01)\nfig = plt.figure(figsize=(5,4))\nax = fig.add_subplot(111)\nax.plot(bins[:-1],n,drawstyle='steps-post',label=\"Minuit\",c='r')\nax.plot(q,sps.chi2.pdf(q, 2),c='k',label=\"Asymptotic\") \nax.set_xlabel(\"LLR\")\nax.set_ylabel(\"pdf(LLR)\")\nax.set_ylim(0.001,2)\nax.set_xlim(0,9)\nax.set_yscale(\"log\")\nax.legend(loc=1, frameon=False, framealpha=0,prop={'size':14})\n\nfig.savefig('quickstart_LLR.svg')\n" ]
[ [ "numpy.histogram", "numpy.ones", "matplotlib.pyplot.figure", "numpy.arange", "numpy.argsort", "numpy.cumsum", "numpy.linspace", "scipy.stats.chi2.pdf", "numpy.meshgrid" ] ]
jzalger/serial-scope
[ "bf1859df8987356eae3aba0f2a352f86cd4ded47" ]
[ "serialscope.py" ]
[ "# This module defines functionality for the deepsix interface device\nimport getopt\nfrom numpy import ceil, mod\nimport serial\nimport pandas as pd\nimport pylab as plt\nfrom sys import argv, exit\nfrom numpy import float64\nfrom serial.serialutil import SerialException\n\n\nclass SerialScope:\n\n def __init__(self, serial_port=None, serial_baud=None, headers=None, serial_log_limit=1000) -> None:\n parsed_headers = headers.split(\",\")\n self.indicies = None\n self.serial_port = serial_port\n self.serial_baud = serial_baud\n self.serial_log_limit = serial_log_limit\n self._monitoring_serial = False\n self._serial_thread = None\n self._x_axis_header = parsed_headers[0]\n self._headers = parsed_headers\n self._data_headers = parsed_headers[1:]\n self._buffer = pd.DataFrame(columns=parsed_headers, dtype=float64)\n \n plt.ion()\n if len(self._data_headers) <= 3:\n rows, cols = 1, len(self._data_headers)\n else:\n rows, cols = int(ceil(len(self._data_headers)/3)), 3\n self.fig, self.axes = plt.subplots(rows, cols)\n plots = [p.plot([],[])[0] for p in self.axes.flat]\n self._header_plot_map = {h:i for h,i in zip(self._data_headers, plots)}\n plt.show()\n\n def start_monitoring_serial(self):\n self._monitor_serial()\n self._monitoring_serial = True\n \n def stop_monitoring_serial(self):\n self.serial_thread.join()\n self._monitoring_serial = False\n\n def flush_serial_log(self):\n pass\n\n def _monitor_serial(self) -> None:\n try:\n with serial.Serial(self.serial_port, self.serial_baud, timeout=1) as hardware_serial:\n while True:\n msg = hardware_serial.readline()\n data = self._parse_msg(msg.decode('utf-8'))\n self.add_data(data)\n self.update_plot()\n except SerialException:\n print(\"Could not open that serial port (%s) or baud (%s)\", self.serial_port, self.serial_baud)\n exit(2)\n \n def _parse_msg(self, msg) -> list:\n # FIXME: manage this more generically\n parts = msg.split(\":\")\n data = parts[1].replace(\"\\r\\n\",\"\").split(\",\")\n return data\n\n def add_data(self, data):\n new_df = pd.DataFrame([data], columns=self._headers, dtype=float64)\n self._buffer = pd.concat([self._buffer, new_df])\n\n def update_plot(self):\n X = self._buffer[self._x_axis_header]\n for header, values in self._buffer[self._data_headers].iteritems():\n axis = self._header_plot_map[header]\n axis.set_data(X, values)\n plt.draw()\n plt.pause(0.01)\n plt.show()\n\n\ndef parse_opts(args):\n try:\n opts, args = getopt.getopt(args, \"p:b:\", [\"headers=\",\"indicies=\"])\n parsed = dict()\n for opt, arg in opts:\n if opt == '-p':\n parsed[\"port\"] = arg\n elif opt == '-b':\n parsed[\"baud\"] = arg\n elif opt == '--headers':\n parsed[\"headers\"] = arg \n # TODO: Add verification for parameters\n return parsed\n except getopt.GetoptError:\n print_help()\n exit(2)\n\ndef print_help():\n print('serialscope.py -p <serial_port> -b <serial_baud> --headers <header1,header2,..,headern> Assumes value in first index is X axis.')\n\ndef main(args):\n parsed = parse_opts(args)\n scope = SerialScope(serial_port=parsed[\"port\"], serial_baud=parsed[\"baud\"], headers=parsed[\"headers\"])\n scope.start_monitoring_serial()\n\nif __name__ == \"__main__\":\n main(argv[1:])\n\n \n " ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
pgmoka/checkout-simulator
[ "bce7e68ba47b9309f19514a9199d43bdbbbc4ffc" ]
[ "fullday_model.py" ]
[ "'''\n-----------------------------------------------------------------------\n Additional Documentation\n\nMade by Zachary A Brader, Kieran Coito, Pedro Goncalves Mokarzel\nwhile attending University of Washington Bothell\nMade in 03/09/2020\nBased on instruction in CSS 458, \ntaught by professor Johnny Lin\nNotes:\n- Written for Python 3.7.3.\n- No executable\n- Modules necessary: numpy, matplotlib.pyplot\n- External necessities: variables.py, customer_selection_line.py,\nequal_distribution_line.py, cashier_selector_line.py, and cashier.py.\n- Runs model with the population of a day trickleling into the line\n- Holds relevant information\n\n=======================================================================\n'''\n\n# =======================================================================\n# ============================= Imports==================================\n# =======================================================================\n\n# Python modules\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Create simulation code\nfrom visualization import visual\nimport variables as v\nfrom customer_selection_line import customer_selection_line\nfrom equal_distribution_line import equal_distribution_line\nfrom cashier_selector_line import cashier_selector_line\nfrom cashier import cashier\n\n\n# =======================================================================\n# ================================= Class ===============================\n# =======================================================================\n\n#Variables that can be changed to change model behavior\nHIGH_VALUE = 500\nNORMAL_VALUE = 300\nLOW_VALUE = 100\nHOURS_OF_OPERATION = 3\n\nclass Fullday:\n \"\"\"\n This model\n \"\"\"\n line = 0\n\n #constant\n hours_open = HOURS_OF_OPERATION\n\n def __init__(self,\n model_being_ran,\n number_of_cashiers,\n number_of_selfcheckouts,\n population = 'normal',\n day_type = 'normal',\n minimum_wage=17,\n self_checkout_maintenance_cost=4,\n model_name=\"Default Model\"):\n '''\n Initializes method\n '''\n\n # Analysis variables:\n self.list_of_customers_out_of_system = []\n self.list_of_customers_in_line = []\n self.list_of_customers_on_cashier_queue = []\n self.list_of_items_checked = []\n\n # Maintenance cost variables:\n self.minimum_wage = minimum_wage\n self.self_checkout_maintenance_cost = self_checkout_maintenance_cost\n\n # Environment tested selection:\n self.line = self.create_line(model_being_ran,\n 0,\n number_of_cashiers,\n number_of_selfcheckouts)\n\n # Name:\n self.name = model_name\n\n # set the population and configure how many customers will come in\n # over the course of the day\n self.population = self.setPopulation(population)\n self.day = day_type # save type of day for analysis sake\n self.hourly_population = self.choose_day_type(day_type)\n\n def execute_simulation(self, show=False, showAnim=False):\n \"\"\"\n This method will execute the full day simulation\n\n precondition:\n The model has been initialized\n\n postcondition:\n all major statistics are saved in arrays that represent the entire\n simulation\n \"\"\"\n #set counters for when to add customers\n currentHour = 0\n currentSegment = 0\n\n for i in range( self.hours_open * 60 * v.TIME_STEP ):\n\n #only add customers every 5 minutes of simulation time\n if i == currentSegment:\n self.execute_phase_zero(self.hourly_population[currentHour])\n currentHour += 1\n currentSegment += v.TIME_STEP * 2\n\n self.execute_phase_one()\n self.execute_phase_two()\n self.execute_phase_three()\n\n # Add 
list\n self.list_of_customers_out_of_system.append(\n self.line.customers_that_left)\n # print(\"Customers left\", self.list_of_customers_out_of_system[-1])\n\n self.list_of_customers_in_line.append(\n self.line.customers_waiting_to_queue)\n # print(\"Customers in line\", self.list_of_customers_in_line[-1])\n\n self.list_of_customers_on_cashier_queue.append(\n self.line.customers_being_served)\n # print(\"Customers in queue\", self.list_of_customers_on_cashier_queue[-1])\n\n self.list_of_items_checked.append(\n self.line.total_number_of_items_in_system -\n self.line.total_number_of_checked_items)\n # print(\"Items checked\", self.list_of_items_checked[-1])\n\n if showAnim:\n showAnim = visual().print_env(self, update_time=.001, start_time=9)\n\n if show:\n plt.figure(1)\n plt.title(\"Customer out of system over time\")\n plt.plot(self.list_of_customers_out_of_system)\n\n plt.figure(2)\n plt.title(\"Customers in line over time\")\n plt.plot(self.list_of_customers_in_line)\n\n plt.figure(3)\n plt.title(\"Customers at cashier queues over time\")\n plt.plot(self.list_of_customers_on_cashier_queue)\n\n plt.figure(4)\n plt.title(\"Items checked over time\")\n plt.plot(self.list_of_items_checked)\n\n plt.show()\n\n print(\"SIMULATION COMPLETE\")\n\n def choose_day_type(self, day_type):\n \"\"\"\n This method will set the total population for the entire days model\n and set it to the specified density that is specified during creation.\n\n precondition: model has began initilization\n\n postcondition: an array that represents population density over time\n is created.\n \"\"\"\n\n if day_type == 'busy':\n hourly_array = self.busyDay()\n\n elif day_type == 'slow':\n hourly_array = self.slowDay()\n\n elif day_type == 'front':\n hourly_array = self.frontLoaded()\n\n elif day_type == 'back':\n hourly_array = self.backLoaded()\n\n else:\n hourly_array = self.normalDay()\n\n return hourly_array*self.population\n\n def setPopulation(self, populationLevel):\n \"\"\"\n This method will set the total population for the entire days model\n\n precondition: model has a set time of hours open\n\n postcondition: the number of customers\n \"\"\"\n if populationLevel == 'low':\n #this is the number of customers per hour on a slow day\n return self.hours_open * LOW_VALUE\n\n elif populationLevel == 'high':\n # this is the number of customers per hour on a busy day\n return self.hours_open * HIGH_VALUE\n\n else:\n # this is the number of customers per hour on a regular day\n return self.hours_open * NORMAL_VALUE\n\n def busyDay(self):\n \"\"\"\n This will generate the population density curve for a day where\n the customer traffic is higher than normal\n\n precondition: model has specified type of day\n\n postcondition: array of how the days population density will change\n over the hours open\n \"\"\"\n #set array to be 12 entries evenly distributed from 0 to pi\n hourly_array = np.arange(start=0,\n stop=np.pi,\n step=1/ (self.hours_open*30))\n\n #fit array to sin(x) * 1.5\n hourly_array = np.sin(hourly_array) * 1.5\n\n #normalize array to have a sum of 1\n hourly_array = hourly_array/np.sum(hourly_array)\n\n return hourly_array\n\n def normalDay(self):\n \"\"\"\n This will generate the population density curve for a day where\n the customer traffic is normal\n\n precondition: model has specified type of day\n\n postcondition: array of how the days population density will change\n over the hours open\n \"\"\"\n hourly_array = np.arange(start=0,\n stop=np.pi,\n step=np.pi/ (self.hours_open*30))\n\n hourly_array = 
np.sin(hourly_array)\n\n #normalize array to have a sum of 1\n hourly_array = hourly_array/np.sum(hourly_array)\n\n return hourly_array\n\n def slowDay(self):\n \"\"\"\n This will generate the population density curve for a day where\n the customer traffic is lower than normal\n\n precondition: model has specified type of day\n\n postcondition: array of how the days population density will change\n over the hours open\n \"\"\"\n hourly_array = np.arange(start=0,\n stop=np.pi,\n step=np.pi/ (self.hours_open*30))\n\n hourly_array = np.sin(hourly_array) * .5\n\n #normalize array to have a sum of 1\n hourly_array = hourly_array/np.sum(hourly_array)\n\n return hourly_array\n\n def frontLoaded(self):\n \"\"\"\n This will generate the population density curve for a day where\n the customer traffic happens in the earlier part of the open hours\n\n precondition: model has specified type of day\n\n postcondition: array of how the days population density will change\n over the hours open\n \"\"\"\n hourly_array = np.arange(start=0,\n stop=np.pi,\n step=np.pi / (self.hours_open*30))\n\n hourly_array = np.cos(hourly_array - .75)\n hourly_array = np.where(hourly_array > 0, hourly_array, .1)\n\n #normalize array to have a sum of 1\n hourly_array = hourly_array/np.sum(hourly_array)\n\n return hourly_array\n\n def backLoaded(self):\n \"\"\"\n This will generate the population density curve for a day where\n the customer traffic happens in the latter part of the open hours\n\n precondition: model has specified type of day\n\n postcondition: array of how the days population density will change\n over the hours open\n \"\"\"\n hourly_array = np.arange(start=0,\n stop=np.pi,\n step=np.pi / (self.hours_open*30))\n\n hourly_array = np.cos(hourly_array - 2.25)\n hourly_array = np.where(hourly_array > 0, hourly_array, .1)\n\n #normalize array to have a sum of 1\n hourly_array = hourly_array/np.sum(hourly_array)\n\n return hourly_array\n\n def execute_phase_zero(self, number_of_customers):\n ''' Applies math related to adding customers to the running model\n\n Precondition:\n - Creation of model\n '''\n self.line.add_customers(int(number_of_customers))\n\n def execute_phase_one(self):\n ''' Applies math related to the rotation of customers\n\n Precondition:\n - Creation of model\n '''\n self.line.rotate_customers()\n\n def execute_phase_two(self):\n ''' Applies math related to the checkouts\n\n Precondition:\n - Execution of execute_phase_one\n '''\n self.line.apply_checkouts()\n\n def execute_phase_three(self):\n ''' Applies math on updating system\n\n Precondition:\n - Execution of execute_phase_two\n '''\n self.line.update_customers_out_of_system()\n self.line.update_checkedout_items()\n\n def create_line(self, model_being_ran, number_of_customers,\n number_of_cashiers, number_of_selfcheckouts):\n \"\"\"\n Helper method for the creation of lines\n\n Precondition:\n - model_being_ran: String name of the type of the environment to be created\n and tested\n - number_of_customers: int number of customers to inialize list of customers\n - number_of_cashiers: int number of cashiers to inialize in the total\n list of cashiers, and self serving cashiers\n - number_of_selfcheckouts: int number of self-checkout cashier to inialize\n in the total list of cashiers, and self serving cashiers\n - cashier_IPM_p_influence: addition to the p variable for the creation of\n the IPM for cashiers. Used for sensitivity testing. 
Default = 0\n - customer_IPM_p_influence: addition to the p variable for the creation of\n the IPM for customers. Used for sensitivity testingDefault = 0\n - item_creation_sensitivity_test: variables added to the randomness creation of items\n such that increasing this will increase the probability of smallers items. Used\n for sensitivity testing. Default = 0\n - chitchatness_influence: variables added to the randomness creation of the chitchatness\n variable. Used for sensitivity testing. Default = 0\n\n Postcondition:\n - Return line environment to be used by the model\n \"\"\"\n if (model_being_ran == \"customer\"):\n return customer_selection_line(number_of_cashiers,\n number_of_customers,\n number_of_selfcheckouts,\n self.minimum_wage,\n self.self_checkout_maintenance_cost,\n 0,\n 0)\n\n elif (model_being_ran == \"equal\"):\n return equal_distribution_line(number_of_cashiers,\n number_of_customers,\n number_of_selfcheckouts,\n self.minimum_wage,\n self.self_checkout_maintenance_cost,\n 0,\n 0)\n\n else:\n return cashier_selector_line(number_of_cashiers,\n number_of_customers,\n number_of_selfcheckouts,\n self.minimum_wage,\n self.self_checkout_maintenance_cost,\n 0,\n 0)\n\n" ]
[ [ "numpy.sin", "numpy.sum", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "numpy.where", "numpy.arange", "numpy.cos", "matplotlib.pyplot.show" ] ]
clausia/qiskit-terra
[ "f4e3d086cf22c40adc14e9313af7b7714cb645d0" ]
[ "test/python/transpiler/test_preset_passmanagers.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Tests preset pass manager API\"\"\"\nfrom test import combine\nfrom ddt import ddt, data\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister\nfrom qiskit.circuit import Qubit\nfrom qiskit.compiler import transpile, assemble\nfrom qiskit.transpiler import CouplingMap, Layout\nfrom qiskit.circuit.library import U2Gate, U3Gate\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.test.mock import (\n FakeTenerife,\n FakeMelbourne,\n FakeJohannesburg,\n FakeRueschlikon,\n FakeTokyo,\n FakePoughkeepsie,\n)\nfrom qiskit.converters import circuit_to_dag\nfrom qiskit.circuit.library import GraphState\nfrom qiskit.quantum_info import random_unitary\n\n\ndef emptycircuit():\n \"\"\"Empty circuit\"\"\"\n return QuantumCircuit()\n\n\ndef circuit_2532():\n \"\"\"See https://github.com/Qiskit/qiskit-terra/issues/2532\"\"\"\n circuit = QuantumCircuit(5)\n circuit.cx(2, 4)\n return circuit\n\n\n@ddt\nclass TestPresetPassManager(QiskitTestCase):\n \"\"\"Test preset passmanagers work as expected.\"\"\"\n\n @combine(level=[0, 1, 2, 3], name=\"level{level}\")\n def test_no_coupling_map(self, level):\n \"\"\"Test that coupling_map can be None (level={level})\"\"\"\n q = QuantumRegister(2, name=\"q\")\n circuit = QuantumCircuit(q)\n circuit.cz(q[0], q[1])\n result = transpile(circuit, basis_gates=[\"u1\", \"u2\", \"u3\", \"cx\"], optimization_level=level)\n self.assertIsInstance(result, QuantumCircuit)\n\n def test_layout_3239(self, level=3):\n \"\"\"Test final layout after preset level3 passmanager does not include diagonal gates\n See: https://github.com/Qiskit/qiskit-terra/issues/3239\n \"\"\"\n qc = QuantumCircuit(5, 5)\n qc.h(0)\n qc.cx(range(3), range(1, 4))\n qc.z(range(4))\n qc.measure(range(4), range(4))\n result = transpile(\n qc,\n basis_gates=[\"u1\", \"u2\", \"u3\", \"cx\"],\n layout_method=\"trivial\",\n optimization_level=level,\n )\n\n dag = circuit_to_dag(result)\n op_nodes = [node.name for node in dag.topological_op_nodes()]\n self.assertNotIn(\"u1\", op_nodes) # Check if the diagonal Z-Gates (u1) were removed\n\n @combine(level=[0, 1, 2, 3], name=\"level{level}\")\n def test_no_basis_gates(self, level):\n \"\"\"Test that basis_gates can be None (level={level})\"\"\"\n q = QuantumRegister(2, name=\"q\")\n circuit = QuantumCircuit(q)\n circuit.h(q[0])\n circuit.cz(q[0], q[1])\n result = transpile(circuit, basis_gates=None, optimization_level=level)\n self.assertEqual(result, circuit)\n\n def test_level0_keeps_reset(self):\n \"\"\"Test level 0 should keep the reset instructions\"\"\"\n q = QuantumRegister(2, name=\"q\")\n circuit = QuantumCircuit(q)\n circuit.reset(q[0])\n circuit.reset(q[0])\n result = transpile(circuit, basis_gates=None, optimization_level=0)\n self.assertEqual(result, circuit)\n\n @combine(level=[0, 1, 2, 3], name=\"level{level}\")\n def test_unitary_is_preserved_if_in_basis(self, level):\n \"\"\"Test that a unitary is not synthesized if in the basis.\"\"\"\n qc = QuantumCircuit(2)\n qc.unitary(random_unitary(4, seed=42), [0, 1])\n 
qc.measure_all()\n result = transpile(qc, basis_gates=[\"cx\", \"u\", \"unitary\"], optimization_level=level)\n self.assertEqual(result, qc)\n\n @combine(level=[0, 1, 2, 3], name=\"level{level}\")\n def test_unitary_is_preserved_if_basis_is_None(self, level):\n \"\"\"Test that a unitary is not synthesized if basis is None.\"\"\"\n qc = QuantumCircuit(2)\n qc.unitary(random_unitary(4, seed=4242), [0, 1])\n qc.measure_all()\n result = transpile(qc, basis_gates=None, optimization_level=level)\n self.assertEqual(result, qc)\n\n @combine(level=[0, 1, 2, 3], name=\"level{level}\")\n def test_unitary_is_preserved_if_in_basis_synthesis_translation(self, level):\n \"\"\"Test that a unitary is not synthesized if in the basis with synthesis translation.\"\"\"\n qc = QuantumCircuit(2)\n qc.unitary(random_unitary(4, seed=424242), [0, 1])\n qc.measure_all()\n result = transpile(\n qc,\n basis_gates=[\"cx\", \"u\", \"unitary\"],\n optimization_level=level,\n translation_method=\"synthesis\",\n )\n self.assertEqual(result, qc)\n\n @combine(level=[0, 1, 2, 3], name=\"level{level}\")\n def test_unitary_is_preserved_if_basis_is_None_synthesis_transltion(self, level):\n \"\"\"Test that a unitary is not synthesized if basis is None with synthesis translation.\"\"\"\n qc = QuantumCircuit(2)\n qc.unitary(random_unitary(4, seed=42424242), [0, 1])\n qc.measure_all()\n result = transpile(\n qc, basis_gates=None, optimization_level=level, translation_method=\"synthesis\"\n )\n self.assertEqual(result, qc)\n\n @combine(level=[0, 1, 2, 3], name=\"level{level}\")\n def test_respect_basis(self, level):\n \"\"\"Test that all levels respect basis\"\"\"\n qc = QuantumCircuit(3)\n qc.h(0)\n qc.h(1)\n qc.cp(np.pi / 8, 0, 1)\n qc.cp(np.pi / 4, 0, 2)\n basis_gates = [\"id\", \"rz\", \"sx\", \"x\", \"cx\"]\n result = transpile(\n qc, basis_gates=basis_gates, coupling_map=[[0, 1], [2, 1]], optimization_level=level\n )\n\n dag = circuit_to_dag(result)\n circuit_ops = {node.name for node in dag.topological_op_nodes()}\n self.assertEqual(circuit_ops.union(set(basis_gates)), set(basis_gates))\n\n\n@ddt\nclass TestTranspileLevels(QiskitTestCase):\n \"\"\"Test transpiler on fake backend\"\"\"\n\n @combine(\n circuit=[emptycircuit, circuit_2532],\n level=[0, 1, 2, 3],\n backend=[\n FakeTenerife(),\n FakeMelbourne(),\n FakeRueschlikon(),\n FakeTokyo(),\n FakePoughkeepsie(),\n None,\n ],\n dsc=\"Transpiler {circuit.__name__} on {backend} backend at level {level}\",\n name=\"{circuit.__name__}_{backend}_level{level}\",\n )\n def test(self, circuit, level, backend):\n \"\"\"All the levels with all the backends\"\"\"\n result = transpile(circuit(), backend=backend, optimization_level=level, seed_transpiler=42)\n self.assertIsInstance(result, QuantumCircuit)\n\n\n@ddt\nclass TestPassesInspection(QiskitTestCase):\n \"\"\"Test run passes under different conditions\"\"\"\n\n def setUp(self):\n \"\"\"Sets self.callback to set self.passes with the passes that have been executed\"\"\"\n super().setUp()\n self.passes = []\n\n def callback(**kwargs):\n self.passes.append(kwargs[\"pass_\"].__class__.__name__)\n\n self.callback = callback\n\n @data(0, 1, 2, 3)\n def test_no_coupling_map(self, level):\n \"\"\"Without coupling map, no layout selection nor swapper\"\"\"\n qr = QuantumRegister(3, \"q\")\n qc = QuantumCircuit(qr)\n qc.cx(qr[2], qr[1])\n qc.cx(qr[2], qr[0])\n\n _ = transpile(qc, optimization_level=level, callback=self.callback)\n\n self.assertNotIn(\"SetLayout\", self.passes)\n self.assertNotIn(\"TrivialLayout\", self.passes)\n 
self.assertNotIn(\"ApplyLayout\", self.passes)\n self.assertNotIn(\"StochasticSwap\", self.passes)\n self.assertNotIn(\"CheckGateDirection\", self.passes)\n\n @data(0, 1, 2, 3)\n def test_backend(self, level):\n \"\"\"With backend a layout and a swapper is run\"\"\"\n qr = QuantumRegister(5, \"q\")\n qc = QuantumCircuit(qr)\n qc.cx(qr[2], qr[4])\n backend = FakeMelbourne()\n\n _ = transpile(qc, backend, optimization_level=level, callback=self.callback)\n\n self.assertIn(\"SetLayout\", self.passes)\n self.assertIn(\"ApplyLayout\", self.passes)\n self.assertIn(\"CheckGateDirection\", self.passes)\n\n @data(0, 1, 2, 3)\n def test_5409(self, level):\n \"\"\"The parameter layout_method='noise_adaptive' should be honored\n See: https://github.com/Qiskit/qiskit-terra/issues/5409\n \"\"\"\n qr = QuantumRegister(5, \"q\")\n qc = QuantumCircuit(qr)\n qc.cx(qr[2], qr[4])\n backend = FakeMelbourne()\n\n _ = transpile(\n qc,\n backend,\n layout_method=\"noise_adaptive\",\n optimization_level=level,\n callback=self.callback,\n )\n\n self.assertIn(\"SetLayout\", self.passes)\n self.assertIn(\"ApplyLayout\", self.passes)\n self.assertIn(\"NoiseAdaptiveLayout\", self.passes)\n\n @data(0, 1, 2, 3)\n def test_symmetric_coupling_map(self, level):\n \"\"\"Symmetric coupling map does not run CheckGateDirection\"\"\"\n qr = QuantumRegister(2, \"q\")\n qc = QuantumCircuit(qr)\n qc.cx(qr[0], qr[1])\n\n coupling_map = [[0, 1], [1, 0]]\n\n _ = transpile(\n qc,\n coupling_map=coupling_map,\n initial_layout=[0, 1],\n optimization_level=level,\n callback=self.callback,\n )\n\n self.assertIn(\"SetLayout\", self.passes)\n self.assertIn(\"ApplyLayout\", self.passes)\n self.assertNotIn(\"CheckGateDirection\", self.passes)\n\n @data(0, 1, 2, 3)\n def test_initial_layout_fully_connected_cm(self, level):\n \"\"\"Honor initial_layout when coupling_map=None\n See: https://github.com/Qiskit/qiskit-terra/issues/5345\n \"\"\"\n qr = QuantumRegister(2, \"q\")\n qc = QuantumCircuit(qr)\n qc.h(qr[0])\n qc.cx(qr[0], qr[1])\n\n transpiled = transpile(\n qc, initial_layout=[0, 1], optimization_level=level, callback=self.callback\n )\n\n self.assertIn(\"SetLayout\", self.passes)\n self.assertIn(\"ApplyLayout\", self.passes)\n self.assertEqual(transpiled._layout, Layout.from_qubit_list([qr[0], qr[1]]))\n\n @data(0, 1, 2, 3)\n def test_partial_layout_fully_connected_cm(self, level):\n \"\"\"Honor initial_layout (partially defined) when coupling_map=None\n See: https://github.com/Qiskit/qiskit-terra/issues/5345\n \"\"\"\n qr = QuantumRegister(2, \"q\")\n qc = QuantumCircuit(qr)\n qc.h(qr[0])\n qc.cx(qr[0], qr[1])\n\n transpiled = transpile(\n qc, initial_layout=[4, 2], optimization_level=level, callback=self.callback\n )\n\n self.assertIn(\"SetLayout\", self.passes)\n self.assertIn(\"ApplyLayout\", self.passes)\n ancilla = QuantumRegister(3, \"ancilla\")\n self.assertEqual(\n transpiled._layout,\n Layout.from_qubit_list([ancilla[0], ancilla[1], qr[1], ancilla[2], qr[0]]),\n )\n\n\n@ddt\nclass TestInitialLayouts(QiskitTestCase):\n \"\"\"Test transpiling with different layouts\"\"\"\n\n @data(0, 1, 2, 3)\n def test_layout_1711(self, level):\n \"\"\"Test that a user-given initial layout is respected,\n in the qobj.\n\n See: https://github.com/Qiskit/qiskit-terra/issues/1711\n \"\"\"\n # build a circuit which works as-is on the coupling map, using the initial layout\n qr = QuantumRegister(3, \"q\")\n cr = ClassicalRegister(3)\n ancilla = QuantumRegister(13, \"ancilla\")\n qc = QuantumCircuit(qr, cr)\n qc.cx(qr[2], qr[1])\n qc.cx(qr[2], qr[0])\n 
initial_layout = {0: qr[1], 2: qr[0], 15: qr[2]}\n final_layout = {\n 0: qr[1],\n 1: ancilla[0],\n 2: qr[0],\n 3: ancilla[1],\n 4: ancilla[2],\n 5: ancilla[3],\n 6: ancilla[4],\n 7: ancilla[5],\n 8: ancilla[6],\n 9: ancilla[7],\n 10: ancilla[8],\n 11: ancilla[9],\n 12: ancilla[10],\n 13: ancilla[11],\n 14: ancilla[12],\n 15: qr[2],\n }\n\n backend = FakeRueschlikon()\n\n qc_b = transpile(qc, backend, initial_layout=initial_layout, optimization_level=level)\n qobj = assemble(qc_b)\n\n self.assertEqual(qc_b._layout._p2v, final_layout)\n\n compiled_ops = qobj.experiments[0].instructions\n for operation in compiled_ops:\n if operation.name == \"cx\":\n self.assertIn(operation.qubits, backend.configuration().coupling_map)\n self.assertIn(operation.qubits, [[15, 0], [15, 2]])\n\n @data(0, 1, 2, 3)\n def test_layout_2532(self, level):\n \"\"\"Test that a user-given initial layout is respected,\n in the transpiled circuit.\n\n See: https://github.com/Qiskit/qiskit-terra/issues/2532\n \"\"\"\n # build a circuit which works as-is on the coupling map, using the initial layout\n qr = QuantumRegister(5, \"q\")\n cr = ClassicalRegister(2)\n ancilla = QuantumRegister(9, \"ancilla\")\n qc = QuantumCircuit(qr, cr)\n qc.cx(qr[2], qr[4])\n initial_layout = {\n qr[2]: 11,\n qr[4]: 3, # map to [11, 3] connection\n qr[0]: 1,\n qr[1]: 5,\n qr[3]: 9,\n }\n final_layout = {\n 0: ancilla[0],\n 1: qr[0],\n 2: ancilla[1],\n 3: qr[4],\n 4: ancilla[2],\n 5: qr[1],\n 6: ancilla[3],\n 7: ancilla[4],\n 8: ancilla[5],\n 9: qr[3],\n 10: ancilla[6],\n 11: qr[2],\n 12: ancilla[7],\n 13: ancilla[8],\n }\n backend = FakeMelbourne()\n\n qc_b = transpile(qc, backend, initial_layout=initial_layout, optimization_level=level)\n\n self.assertEqual(qc_b._layout._p2v, final_layout)\n\n output_qr = qc_b.qregs[0]\n for gate, qubits, _ in qc_b:\n if gate.name == \"cx\":\n for qubit in qubits:\n self.assertIn(qubit, [output_qr[11], output_qr[3]])\n\n @data(0, 1, 2, 3)\n def test_layout_2503(self, level):\n \"\"\"Test that a user-given initial layout is respected,\n even if cnots are not in the coupling map.\n\n See: https://github.com/Qiskit/qiskit-terra/issues/2503\n \"\"\"\n # build a circuit which works as-is on the coupling map, using the initial layout\n qr = QuantumRegister(3, \"q\")\n cr = ClassicalRegister(2)\n ancilla = QuantumRegister(17, \"ancilla\")\n\n qc = QuantumCircuit(qr, cr)\n qc.append(U3Gate(0.1, 0.2, 0.3), [qr[0]])\n qc.append(U2Gate(0.4, 0.5), [qr[2]])\n qc.barrier()\n qc.cx(qr[0], qr[2])\n initial_layout = [6, 7, 12]\n\n final_layout = {\n 0: ancilla[0],\n 1: ancilla[1],\n 2: ancilla[2],\n 3: ancilla[3],\n 4: ancilla[4],\n 5: ancilla[5],\n 6: qr[0],\n 7: qr[1],\n 8: ancilla[6],\n 9: ancilla[7],\n 10: ancilla[8],\n 11: ancilla[9],\n 12: qr[2],\n 13: ancilla[10],\n 14: ancilla[11],\n 15: ancilla[12],\n 16: ancilla[13],\n 17: ancilla[14],\n 18: ancilla[15],\n 19: ancilla[16],\n }\n\n backend = FakePoughkeepsie()\n\n qc_b = transpile(qc, backend, initial_layout=initial_layout, optimization_level=level)\n\n self.assertEqual(qc_b._layout._p2v, final_layout)\n\n gate_0, qubits_0, _ = qc_b[0]\n gate_1, qubits_1, _ = qc_b[1]\n\n output_qr = qc_b.qregs[0]\n self.assertIsInstance(gate_0, U3Gate)\n self.assertEqual(qubits_0[0], output_qr[6])\n self.assertIsInstance(gate_1, U2Gate)\n self.assertEqual(qubits_1[0], output_qr[12])\n\n\n@ddt\nclass TestFinalLayouts(QiskitTestCase):\n \"\"\"Test final layouts after preset transpilation\"\"\"\n\n @data(0, 1, 2, 3)\n def test_layout_tokyo_2845(self, level):\n \"\"\"Test that final 
layout in tokyo #2845\n See: https://github.com/Qiskit/qiskit-terra/issues/2845\n \"\"\"\n qr1 = QuantumRegister(3, \"qr1\")\n qr2 = QuantumRegister(2, \"qr2\")\n qc = QuantumCircuit(qr1, qr2)\n qc.cx(qr1[0], qr1[1])\n qc.cx(qr1[1], qr1[2])\n qc.cx(qr1[2], qr2[0])\n qc.cx(qr2[0], qr2[1])\n\n trivial_layout = {\n 0: Qubit(QuantumRegister(3, \"qr1\"), 0),\n 1: Qubit(QuantumRegister(3, \"qr1\"), 1),\n 2: Qubit(QuantumRegister(3, \"qr1\"), 2),\n 3: Qubit(QuantumRegister(2, \"qr2\"), 0),\n 4: Qubit(QuantumRegister(2, \"qr2\"), 1),\n 5: Qubit(QuantumRegister(15, \"ancilla\"), 0),\n 6: Qubit(QuantumRegister(15, \"ancilla\"), 1),\n 7: Qubit(QuantumRegister(15, \"ancilla\"), 2),\n 8: Qubit(QuantumRegister(15, \"ancilla\"), 3),\n 9: Qubit(QuantumRegister(15, \"ancilla\"), 4),\n 10: Qubit(QuantumRegister(15, \"ancilla\"), 5),\n 11: Qubit(QuantumRegister(15, \"ancilla\"), 6),\n 12: Qubit(QuantumRegister(15, \"ancilla\"), 7),\n 13: Qubit(QuantumRegister(15, \"ancilla\"), 8),\n 14: Qubit(QuantumRegister(15, \"ancilla\"), 9),\n 15: Qubit(QuantumRegister(15, \"ancilla\"), 10),\n 16: Qubit(QuantumRegister(15, \"ancilla\"), 11),\n 17: Qubit(QuantumRegister(15, \"ancilla\"), 12),\n 18: Qubit(QuantumRegister(15, \"ancilla\"), 13),\n 19: Qubit(QuantumRegister(15, \"ancilla\"), 14),\n }\n\n dense_layout = {\n 2: Qubit(QuantumRegister(3, \"qr1\"), 0),\n 6: Qubit(QuantumRegister(3, \"qr1\"), 1),\n 1: Qubit(QuantumRegister(3, \"qr1\"), 2),\n 5: Qubit(QuantumRegister(2, \"qr2\"), 0),\n 0: Qubit(QuantumRegister(2, \"qr2\"), 1),\n 3: Qubit(QuantumRegister(15, \"ancilla\"), 0),\n 4: Qubit(QuantumRegister(15, \"ancilla\"), 1),\n 7: Qubit(QuantumRegister(15, \"ancilla\"), 2),\n 8: Qubit(QuantumRegister(15, \"ancilla\"), 3),\n 9: Qubit(QuantumRegister(15, \"ancilla\"), 4),\n 10: Qubit(QuantumRegister(15, \"ancilla\"), 5),\n 11: Qubit(QuantumRegister(15, \"ancilla\"), 6),\n 12: Qubit(QuantumRegister(15, \"ancilla\"), 7),\n 13: Qubit(QuantumRegister(15, \"ancilla\"), 8),\n 14: Qubit(QuantumRegister(15, \"ancilla\"), 9),\n 15: Qubit(QuantumRegister(15, \"ancilla\"), 10),\n 16: Qubit(QuantumRegister(15, \"ancilla\"), 11),\n 17: Qubit(QuantumRegister(15, \"ancilla\"), 12),\n 18: Qubit(QuantumRegister(15, \"ancilla\"), 13),\n 19: Qubit(QuantumRegister(15, \"ancilla\"), 14),\n }\n\n csp_layout = {\n 13: Qubit(QuantumRegister(3, \"qr1\"), 0),\n 19: Qubit(QuantumRegister(3, \"qr1\"), 1),\n 14: Qubit(QuantumRegister(3, \"qr1\"), 2),\n 18: Qubit(QuantumRegister(2, \"qr2\"), 0),\n 17: Qubit(QuantumRegister(2, \"qr2\"), 1),\n 0: Qubit(QuantumRegister(15, \"ancilla\"), 0),\n 1: Qubit(QuantumRegister(15, \"ancilla\"), 1),\n 2: Qubit(QuantumRegister(15, \"ancilla\"), 2),\n 3: Qubit(QuantumRegister(15, \"ancilla\"), 3),\n 4: Qubit(QuantumRegister(15, \"ancilla\"), 4),\n 5: Qubit(QuantumRegister(15, \"ancilla\"), 5),\n 6: Qubit(QuantumRegister(15, \"ancilla\"), 6),\n 7: Qubit(QuantumRegister(15, \"ancilla\"), 7),\n 8: Qubit(QuantumRegister(15, \"ancilla\"), 8),\n 9: Qubit(QuantumRegister(15, \"ancilla\"), 9),\n 10: Qubit(QuantumRegister(15, \"ancilla\"), 10),\n 11: Qubit(QuantumRegister(15, \"ancilla\"), 11),\n 12: Qubit(QuantumRegister(15, \"ancilla\"), 12),\n 15: Qubit(QuantumRegister(15, \"ancilla\"), 13),\n 16: Qubit(QuantumRegister(15, \"ancilla\"), 14),\n }\n\n # Trivial layout\n expected_layout_level0 = trivial_layout\n # Dense layout\n expected_layout_level1 = dense_layout\n # CSP layout\n expected_layout_level2 = csp_layout\n expected_layout_level3 = csp_layout\n\n expected_layouts = [\n expected_layout_level0,\n 
expected_layout_level1,\n expected_layout_level2,\n expected_layout_level3,\n ]\n backend = FakeTokyo()\n result = transpile(qc, backend, optimization_level=level, seed_transpiler=42)\n self.assertEqual(result._layout._p2v, expected_layouts[level])\n\n @data(0, 1, 2, 3)\n def test_layout_tokyo_fully_connected_cx(self, level):\n \"\"\"Test that final layout in tokyo in a fully connected circuit\"\"\"\n qr = QuantumRegister(5, \"qr\")\n qc = QuantumCircuit(qr)\n for qubit_target in qr:\n for qubit_control in qr:\n if qubit_control != qubit_target:\n qc.cx(qubit_control, qubit_target)\n\n ancilla = QuantumRegister(15, \"ancilla\")\n\n trivial_layout = {\n 0: qr[0],\n 1: qr[1],\n 2: qr[2],\n 3: qr[3],\n 4: qr[4],\n 5: ancilla[0],\n 6: ancilla[1],\n 7: ancilla[2],\n 8: ancilla[3],\n 9: ancilla[4],\n 10: ancilla[5],\n 11: ancilla[6],\n 12: ancilla[7],\n 13: ancilla[8],\n 14: ancilla[9],\n 15: ancilla[10],\n 16: ancilla[11],\n 17: ancilla[12],\n 18: ancilla[13],\n 19: ancilla[14],\n }\n\n dense_layout = {\n 2: qr[0],\n 6: qr[1],\n 1: qr[2],\n 5: qr[3],\n 0: qr[4],\n 3: ancilla[0],\n 4: ancilla[1],\n 7: ancilla[2],\n 8: ancilla[3],\n 9: ancilla[4],\n 10: ancilla[5],\n 11: ancilla[6],\n 12: ancilla[7],\n 13: ancilla[8],\n 14: ancilla[9],\n 15: ancilla[10],\n 16: ancilla[11],\n 17: ancilla[12],\n 18: ancilla[13],\n 19: ancilla[14],\n }\n\n expected_layout_level0 = trivial_layout\n expected_layout_level1 = dense_layout\n expected_layout_level2 = dense_layout\n expected_layout_level3 = dense_layout\n\n expected_layouts = [\n expected_layout_level0,\n expected_layout_level1,\n expected_layout_level2,\n expected_layout_level3,\n ]\n backend = FakeTokyo()\n result = transpile(qc, backend, optimization_level=level, seed_transpiler=42)\n self.assertEqual(result._layout._p2v, expected_layouts[level])\n\n @data(0, 1, 2, 3)\n def test_all_levels_use_trivial_if_perfect(self, level):\n \"\"\"Test that we always use trivial if it's a perfect match.\n\n See: https://github.com/Qiskit/qiskit-terra/issues/5694 for more\n details\n \"\"\"\n backend = FakeTokyo()\n config = backend.configuration()\n\n rows = [x[0] for x in config.coupling_map]\n cols = [x[1] for x in config.coupling_map]\n\n adjacency_matrix = np.zeros((20, 20))\n adjacency_matrix[rows, cols] = 1\n qc = GraphState(adjacency_matrix)\n qc.measure_all()\n expected = {\n 0: Qubit(QuantumRegister(20, \"q\"), 0),\n 1: Qubit(QuantumRegister(20, \"q\"), 1),\n 2: Qubit(QuantumRegister(20, \"q\"), 2),\n 3: Qubit(QuantumRegister(20, \"q\"), 3),\n 4: Qubit(QuantumRegister(20, \"q\"), 4),\n 5: Qubit(QuantumRegister(20, \"q\"), 5),\n 6: Qubit(QuantumRegister(20, \"q\"), 6),\n 7: Qubit(QuantumRegister(20, \"q\"), 7),\n 8: Qubit(QuantumRegister(20, \"q\"), 8),\n 9: Qubit(QuantumRegister(20, \"q\"), 9),\n 10: Qubit(QuantumRegister(20, \"q\"), 10),\n 11: Qubit(QuantumRegister(20, \"q\"), 11),\n 12: Qubit(QuantumRegister(20, \"q\"), 12),\n 13: Qubit(QuantumRegister(20, \"q\"), 13),\n 14: Qubit(QuantumRegister(20, \"q\"), 14),\n 15: Qubit(QuantumRegister(20, \"q\"), 15),\n 16: Qubit(QuantumRegister(20, \"q\"), 16),\n 17: Qubit(QuantumRegister(20, \"q\"), 17),\n 18: Qubit(QuantumRegister(20, \"q\"), 18),\n 19: Qubit(QuantumRegister(20, \"q\"), 19),\n }\n trans_qc = transpile(qc, backend, optimization_level=level)\n self.assertEqual(trans_qc._layout._p2v, expected)\n\n @data(0, 1)\n def test_trivial_layout(self, level):\n \"\"\"Test that trivial layout is preferred in level 0 and 1\n See: https://github.com/Qiskit/qiskit-terra/pull/3657#pullrequestreview-342012465\n 
\"\"\"\n qr = QuantumRegister(10, \"qr\")\n qc = QuantumCircuit(qr)\n qc.cx(qr[0], qr[1])\n qc.cx(qr[1], qr[2])\n qc.cx(qr[2], qr[6])\n qc.cx(qr[3], qr[8])\n qc.cx(qr[4], qr[9])\n qc.cx(qr[9], qr[8])\n qc.cx(qr[8], qr[7])\n qc.cx(qr[7], qr[6])\n qc.cx(qr[6], qr[5])\n qc.cx(qr[5], qr[0])\n\n ancilla = QuantumRegister(10, \"ancilla\")\n trivial_layout = {\n 0: qr[0],\n 1: qr[1],\n 2: qr[2],\n 3: qr[3],\n 4: qr[4],\n 5: qr[5],\n 6: qr[6],\n 7: qr[7],\n 8: qr[8],\n 9: qr[9],\n 10: ancilla[0],\n 11: ancilla[1],\n 12: ancilla[2],\n 13: ancilla[3],\n 14: ancilla[4],\n 15: ancilla[5],\n 16: ancilla[6],\n 17: ancilla[7],\n 18: ancilla[8],\n 19: ancilla[9],\n }\n\n expected_layouts = [trivial_layout, trivial_layout]\n\n backend = FakeTokyo()\n result = transpile(qc, backend, optimization_level=level, seed_transpiler=42)\n self.assertEqual(result._layout._p2v, expected_layouts[level])\n\n @data(0, 1, 2, 3)\n def test_initial_layout(self, level):\n \"\"\"When a user provides a layout (initial_layout), it should be used.\"\"\"\n qr = QuantumRegister(10, \"qr\")\n qc = QuantumCircuit(qr)\n qc.cx(qr[0], qr[1])\n qc.cx(qr[1], qr[2])\n qc.cx(qr[2], qr[3])\n qc.cx(qr[3], qr[9])\n qc.cx(qr[4], qr[9])\n qc.cx(qr[9], qr[8])\n qc.cx(qr[8], qr[7])\n qc.cx(qr[7], qr[6])\n qc.cx(qr[6], qr[5])\n qc.cx(qr[5], qr[0])\n\n initial_layout = {\n 0: qr[0],\n 2: qr[1],\n 4: qr[2],\n 6: qr[3],\n 8: qr[4],\n 10: qr[5],\n 12: qr[6],\n 14: qr[7],\n 16: qr[8],\n 18: qr[9],\n }\n\n backend = FakeTokyo()\n result = transpile(\n qc, backend, optimization_level=level, initial_layout=initial_layout, seed_transpiler=42\n )\n\n for physical, virtual in initial_layout.items():\n self.assertEqual(result._layout._p2v[physical], virtual)\n\n\n@ddt\nclass TestTranspileLevelsSwap(QiskitTestCase):\n \"\"\"Test if swap is in the basis, do not unroll\n See https://github.com/Qiskit/qiskit-terra/pull/3963\n The circuit in combine should require a swap and that swap should exit at the end\n for the transpilation\"\"\"\n\n @combine(\n circuit=[circuit_2532],\n level=[0, 1, 2, 3],\n dsc=\"circuit: {circuit.__name__}, level: {level}\",\n name=\"{circuit.__name__}_level{level}\",\n )\n def test_1(self, circuit, level):\n \"\"\"Simple coupling map (linear 5 qubits).\"\"\"\n basis = [\"u1\", \"u2\", \"cx\", \"swap\"]\n coupling_map = CouplingMap([(0, 1), (1, 2), (2, 3), (3, 4)])\n result = transpile(\n circuit(),\n optimization_level=level,\n basis_gates=basis,\n coupling_map=coupling_map,\n seed_transpiler=42,\n initial_layout=[0, 1, 2, 3, 4],\n )\n self.assertIsInstance(result, QuantumCircuit)\n resulting_basis = {node.name for node in circuit_to_dag(result).op_nodes()}\n self.assertIn(\"swap\", resulting_basis)\n\n @combine(\n level=[0, 1, 2, 3],\n dsc=\"If swap in basis, do not decompose it. 
level: {level}\",\n name=\"level{level}\",\n )\n def test_2(self, level):\n \"\"\"Simple coupling map (linear 5 qubits).\n The circuit requires a swap and that swap should exit at the end\n for the transpilation\"\"\"\n basis = [\"u1\", \"u2\", \"cx\", \"swap\"]\n circuit = QuantumCircuit(5)\n circuit.cx(0, 4)\n circuit.cx(1, 4)\n circuit.cx(2, 4)\n circuit.cx(3, 4)\n coupling_map = CouplingMap([(0, 1), (1, 2), (2, 3), (3, 4)])\n result = transpile(\n circuit,\n optimization_level=level,\n basis_gates=basis,\n coupling_map=coupling_map,\n seed_transpiler=42,\n )\n self.assertIsInstance(result, QuantumCircuit)\n resulting_basis = {node.name for node in circuit_to_dag(result).op_nodes()}\n self.assertIn(\"swap\", resulting_basis)\n\n\n@ddt\nclass TestOptimizationWithCondition(QiskitTestCase):\n \"\"\"Test optimization levels with condition in the circuit\"\"\"\n\n @data(0, 1, 2, 3)\n def test_optimization_condition(self, level):\n \"\"\"Test optimization levels with condition in the circuit\"\"\"\n qr = QuantumRegister(2)\n cr = ClassicalRegister(1)\n qc = QuantumCircuit(qr, cr)\n qc.cx(0, 1).c_if(cr, 1)\n backend = FakeJohannesburg()\n circ = transpile(qc, backend, optimization_level=level)\n self.assertIsInstance(circ, QuantumCircuit)\n\n def test_input_dag_copy(self):\n \"\"\"Test substitute_node_with_dag input_dag copy on condition\"\"\"\n qc = QuantumCircuit(2, 1)\n qc.cx(0, 1).c_if(qc.cregs[0], 1)\n qc.cx(1, 0)\n circ = transpile(qc, basis_gates=[\"u3\", \"cz\"])\n self.assertIsInstance(circ, QuantumCircuit)\n" ]
[ [ "numpy.zeros" ] ]
EN1AC13/analytics-zoo
[ "169dac5400f341135b7bf38bb87e3fca89ac09d8", "169dac5400f341135b7bf38bb87e3fca89ac09d8" ]
[ "pyzoo/zoo/examples/orca/learn/horovod/simple_horovod_pytorch.py", "pyzoo/zoo/orca/learn/tf/utils.py" ]
[ "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Some portions of this file Copyright 2018 Uber Technologies, Inc\n# and licensed under the Apache License, Version 2.0\n#\n\n# This file is adapted from https://github.com/horovod/horovod/blob/master/examples/pytorch_mnist.py\n\nfrom __future__ import print_function\n\nimport argparse\n\nimport horovod.torch as hvd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data.distributed\nfrom torchvision import datasets, transforms\nfrom zoo.ray import RayContext\n\nfrom zoo import init_spark_on_yarn, init_spark_on_local\nfrom zoo.orca.learn.horovod import HorovodRayRunner\n\n\ndef run_horovod():\n # Temporary patch this script until the MNIST dataset download issue get resolved\n # https://github.com/pytorch/vision/issues/1938\n import urllib\n try:\n # For python 2\n class AppURLopener(urllib.FancyURLopener):\n version = \"Mozilla/5.0\"\n\n urllib._urlopener = AppURLopener()\n except AttributeError:\n # For python 3\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n\n batch_size = 64\n test_batch_size = 1000\n epochs = 10\n lr = 0.01\n momentum = 0.5\n seed = 43\n log_interval = 10\n fp16_allreduce = False\n use_adasum = False\n\n # Horovod: initialize library.\n hvd.init()\n torch.manual_seed(seed)\n\n # Horovod: limit # of CPU threads to be used per worker.\n torch.set_num_threads(4)\n\n kwargs = {}\n train_dataset = \\\n datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n # Horovod: use DistributedSampler to partition the training data.\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset, num_replicas=hvd.size(), rank=hvd.rank())\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs)\n\n test_dataset = \\\n datasets.MNIST('data-%d' % hvd.rank(), train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n # Horovod: use DistributedSampler to partition the test data.\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, num_replicas=hvd.size(), rank=hvd.rank())\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=test_batch_size,\n sampler=test_sampler, **kwargs)\n\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, 
training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x)\n\n model = Net()\n\n # By default, Adasum doesn't need scaling up learning rate.\n lr_scaler = hvd.size() if not use_adasum else 1\n\n # Horovod: scale learning rate by lr_scaler.\n optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler,\n momentum=momentum)\n\n # Horovod: broadcast parameters & optimizer state.\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n # Horovod: (optional) compression algorithm.\n compression = hvd.Compression.fp16 if fp16_allreduce else hvd.Compression.none\n\n # Horovod: wrap optimizer with DistributedOptimizer.\n optimizer = hvd.DistributedOptimizer(optimizer,\n named_parameters=model.named_parameters(),\n compression=compression,\n op=hvd.Adasum if use_adasum else hvd.Average)\n\n def train(epoch):\n model.train()\n # Horovod: set epoch to sampler for shuffling.\n train_sampler.set_epoch(epoch)\n for batch_idx, (data, target) in enumerate(train_loader):\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % log_interval == 0:\n # Horovod: use train_sampler to determine the number of examples in\n # this worker's partition.\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_sampler),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n def metric_average(val, name):\n tensor = torch.tensor(val)\n avg_tensor = hvd.allreduce(tensor, name=name)\n return avg_tensor.item()\n\n def test():\n model.eval()\n test_loss = 0.\n test_accuracy = 0.\n for data, target in test_loader:\n output = model(data)\n # sum up batch loss\n test_loss += F.nll_loss(output, target, size_average=False).item()\n # get the index of the max log-probability\n pred = output.data.max(1, keepdim=True)[1]\n test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()\n\n # Horovod: use test_sampler to determine the number of examples in\n # this worker's partition.\n test_loss /= len(test_sampler)\n test_accuracy /= len(test_sampler)\n\n # Horovod: average metric values across workers.\n test_loss = metric_average(test_loss, 'avg_loss')\n test_accuracy = metric_average(test_accuracy, 'avg_accuracy')\n\n # Horovod: print output only on first rank.\n if hvd.rank() == 0:\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\\n'.format(\n test_loss, 100. * test_accuracy))\n\n for epoch in range(1, epochs + 1):\n train(epoch)\n test()\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--hadoop_conf\", type=str,\n help=\"turn on yarn mode by passing the path to the hadoop\"\n \" configuration folder. 
Otherwise, turn on local mode.\")\nparser.add_argument(\"--slave_num\", type=int, default=2,\n help=\"The number of slave nodes\")\nparser.add_argument(\"--conda_name\", type=str,\n help=\"The name of conda environment.\")\nparser.add_argument(\"--executor_cores\", type=int, default=8,\n help=\"The number of driver's cpu cores you want to use.\"\n \"You can change it depending on your own cluster setting.\")\nparser.add_argument(\"--executor_memory\", type=str, default=\"10g\",\n help=\"The size of slave(executor)'s memory you want to use.\"\n \"You can change it depending on your own cluster setting.\")\nparser.add_argument(\"--driver_memory\", type=str, default=\"2g\",\n help=\"The size of driver's memory you want to use.\"\n \"You can change it depending on your own cluster setting.\")\nparser.add_argument(\"--driver_cores\", type=int, default=8,\n help=\"The number of driver's cpu cores you want to use.\"\n \"You can change it depending on your own cluster setting.\")\nparser.add_argument(\"--extra_executor_memory_for_ray\", type=str, default=\"20g\",\n help=\"The extra executor memory to store some data.\"\n \"You can change it depending on your own cluster setting.\")\nparser.add_argument(\"--object_store_memory\", type=str, default=\"4g\",\n help=\"The memory to store data on local.\"\n \"You can change it depending on your own cluster setting.\")\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n if args.hadoop_conf:\n sc = init_spark_on_yarn(\n hadoop_conf=args.hadoop_conf,\n conda_name=args.conda_name,\n num_executor=args.slave_num,\n executor_cores=args.executor_cores,\n executor_memory=args.executor_memory,\n driver_memory=args.driver_memory,\n driver_cores=args.driver_cores,\n extra_executor_memory_for_ray=args.extra_executor_memory_for_ray)\n ray_ctx = RayContext(\n sc=sc,\n object_store_memory=args.object_store_memory)\n ray_ctx.init()\n else:\n sc = init_spark_on_local()\n ray_ctx = RayContext(\n sc=sc,\n object_store_memory=args.object_store_memory)\n ray_ctx.init()\n\n runner = HorovodRayRunner(ray_ctx)\n runner.run(func=run_horovod)\n", "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom pyspark.sql.dataframe import DataFrame\nimport tensorflow as tf\n\nfrom zoo.tfpark.tf_dataset import TFDataset\nfrom zoo.orca.data import SparkXShards\nfrom zoo.orca.data.tf.data import Dataset, TFDataDataset2\n\n\ndef xshards_to_tf_dataset(data_shard,\n batch_size=-1, batch_per_thread=-1,\n validation_data_shard=None,\n hard_code_batch_size=False,\n sequential_order=False,\n shuffle=True):\n # todo data_shard.head ?\n import numpy as np\n\n def check_data_type_and_to_list(data):\n result = {}\n assert isinstance(data, dict), \"each shard should be an dict\"\n assert \"x\" in data, \"key x should in each shard\"\n x = data[\"x\"]\n if isinstance(x, np.ndarray):\n new_x = [x]\n elif isinstance(x, tuple) and all([isinstance(xi, np.ndarray) for xi in x]):\n new_x = x\n else:\n raise ValueError(\"value of x should be a ndarray or 
a tuple of ndarrays\")\n result[\"x\"] = new_x\n if \"y\" in data:\n y = data[\"y\"]\n if isinstance(y, np.ndarray):\n new_y = [y]\n elif isinstance(y, tuple) and all([isinstance(yi, np.ndarray) for yi in y]):\n new_y = y\n else:\n raise ValueError(\"value of x should be a ndarray or a tuple of ndarrays\")\n result[\"y\"] = new_y\n return result\n\n def get_spec(data):\n data = check_data_type_and_to_list(data)\n feature_spec = [(feat.dtype, feat.shape[1:])\n for feat in data[\"x\"]]\n if \"y\" in data:\n label_spec = [(label.dtype, label.shape[1:])\n for label in data[\"y\"]]\n else:\n label_spec = None\n return (feature_spec, label_spec)\n\n (feature_spec, label_spec) = data_shard.rdd.map(get_spec).first()\n\n feature_spec = [(tf.dtypes.as_dtype(spec[0]), spec[1]) for spec in feature_spec]\n label_spec = [(tf.dtypes.as_dtype(spec[0]), spec[1]) for spec in label_spec] \\\n if label_spec is not None else None\n\n assert batch_size != -1 or batch_per_thread != -1, \\\n \"one of batch_size and batch_per_thread should be specified\"\n\n # todo this might be very slow\n def flatten(data):\n data = check_data_type_and_to_list(data)\n features = data[\"x\"]\n\n has_label = \"y\" in data\n labels = data[\"y\"] if has_label else None\n length = features[0].shape[0]\n\n for i in range(length):\n fs = [feat[i] for feat in features]\n if has_label:\n ls = [l[i] for l in labels]\n yield (fs, ls)\n else:\n yield (fs,)\n\n val_rdd = None if validation_data_shard is None \\\n else validation_data_shard.rdd.flatMap(flatten)\n\n dataset = TFDataset.from_rdd(data_shard.rdd.flatMap(flatten),\n features=feature_spec,\n labels=label_spec,\n batch_size=batch_size,\n batch_per_thread=batch_per_thread,\n val_rdd=val_rdd,\n hard_code_batch_size=hard_code_batch_size,\n sequential_order=sequential_order,\n shuffle=shuffle)\n\n return dataset\n\n\ndef to_dataset(data, batch_size, batch_per_thread, validation_data,\n feature_cols, labels_cols, hard_code_batch_size,\n sequential_order, shuffle):\n if validation_data:\n if isinstance(data, SparkXShards):\n assert isinstance(validation_data, SparkXShards), \\\n \"train data and validation data should be both SparkXShards\"\n if isinstance(data, Dataset):\n assert isinstance(validation_data, Dataset), \\\n \"train data and validation data should be both orca.data.tf.Dataset\"\n if isinstance(data, DataFrame):\n assert isinstance(validation_data, DataFrame), \\\n \"train data and validation data should be both Spark DataFrame\"\n\n if isinstance(data, SparkXShards):\n dataset = xshards_to_tf_dataset(data,\n batch_size,\n batch_per_thread,\n validation_data,\n hard_code_batch_size=hard_code_batch_size,\n sequential_order=sequential_order,\n shuffle=shuffle)\n elif isinstance(data, Dataset):\n dataset = TFDataDataset2(data, batch_size=batch_size,\n batch_per_thread=batch_per_thread,\n validation_dataset=validation_data)\n elif isinstance(data, DataFrame):\n dataset = TFDataset.from_dataframe(data, feature_cols, labels_cols,\n batch_size,\n batch_per_thread,\n hard_code_batch_size,\n validation_data,\n sequential_order,\n shuffle\n )\n else:\n raise ValueError(\"data must be SparkXShards or orca.data.tf.Dataset or Spark DataFrame\")\n\n return dataset\n\n\ndef convert_predict_to_dataframe(df, prediction_rdd):\n from pyspark.sql import Row\n from pyspark.sql.types import StructType, StructField, FloatType\n from pyspark.ml.linalg import VectorUDT, Vectors\n\n def combine(pair):\n # scalar\n if len(pair[1].shape) == 0:\n row = Row(*([pair[0][col] for col in pair[0].__fields__] 
+ [float(pair[1].item(0))]))\n return row, FloatType()\n else:\n row = Row(*([pair[0][col] for col in pair[0].__fields__] + [Vectors.dense(pair[1])]))\n return row, VectorUDT()\n\n combined_rdd = df.rdd.zip(prediction_rdd).map(combine)\n type = combined_rdd.map(lambda data: data[1]).first()\n result_rdd = combined_rdd.map(lambda data: data[0])\n schema = StructType(df.schema.fields + [StructField('prediction', type)])\n result_df = result_rdd.toDF(schema)\n return result_df\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.dropout", "torch.nn.functional.log_softmax", "torch.nn.Conv2d", "torch.nn.functional.nll_loss", "torch.nn.Dropout2d" ], [ "tensorflow.dtypes.as_dtype" ] ]
yuhonghong66/defensegan
[ "7e3feaebf7b9bbf08b1364e400119ef596cd78fd" ]
[ "utils/config.py" ]
[ "# Copyright 2018 The Defense-GAN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains the configuration handling code and default experiment\nparameters.\"\"\"\n\nimport os\n\nimport tensorflow as tf\nimport yaml\n\nFLAGS = tf.app.flags.FLAGS\n\ntype_to_define_fn = {int: tf.app.flags.DEFINE_integer,\n float: tf.app.flags.DEFINE_float,\n bool: tf.app.flags.DEFINE_boolean,\n basestring: tf.app.flags.DEFINE_string,\n str: tf.app.flags.DEFINE_string,\n type(None): tf.app.flags.DEFINE_integer,\n tuple: tf.app.flags.DEFINE_list,\n list: tf.app.flags.DEFINE_list}\n\n\ndef load_config(cfg_path, set_flag=False, verbose=False):\n \"\"\"Loads the configuration files into the global flags.\n\n Args:\n cfg_path: The path to the config yaml file.\n set_flag: If True, does not create new flag attributes, only sets\n existing ones.\n verbose: Verbose mode.\n\n Returns:\n The loaded configuration dictionary.\n\n Raises:\n RuntimeError: If the configuration path does not exist.\n \"\"\"\n flags = tf.app.flags.FLAGS\n\n if not os.path.exists(cfg_path):\n raise RuntimeError(\n \"[!] Configuration path {} does not exist.\".format(cfg_path))\n if os.path.isdir(cfg_path):\n cfg_path = os.path.join(cfg_path, 'cfg.yml')\n with open(cfg_path, 'r') as f:\n cfg = yaml.load(f)\n else:\n with open(cfg_path, 'r') as f:\n loaded_cfg = yaml.load(f)\n base_dir = os.path.dirname(cfg_path)\n with open(os.path.join(base_dir, 'default.yml'), 'r') as f:\n cfg = yaml.load(f)\n\n cfg.update(loaded_cfg)\n\n with open(os.path.join('experiments/cfgs', 'key_doc.yml')) as f:\n docs = yaml.load(f)\n\n tf.app.flags.DEFINE_string('cfg_path', cfg_path, 'config path.')\n for (k, v) in cfg.items():\n if set_flag:\n setattr(flags, k.lower(), v)\n else:\n if hasattr(flags, k.lower()):\n setattr(flags, k.lower(), v)\n else:\n def_func = type_to_define_fn[type(v)]\n\n try:\n def_func(k.lower(), v, docs[k])\n except KeyError:\n 'Doc for the key {} is not found in the ' \\\n 'experimets/cfgs/key_doc.yml'.format(\n k)\n def_func(k.lower(), v, 'No doc')\n if verbose:\n print('[#] set {} to {} type: {}'.format(k.lower(), v['val'],\n str(type(\n v['val']))))\n cfg['cfg_path'] = cfg_path\n return cfg\n" ]
[ [ "tensorflow.app.flags.DEFINE_string" ] ]
h-mayorquin/attractor_sequences
[ "885271f30d73a58a7aad83b55949e4e32ba0b45a" ]
[ "plots/bernstein_capacity_extension_overload_01.py" ]
[ "import sys\nsys.path.append('../')\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport seaborn as sns\n\nfrom network import Protocol, BCPNNFast, NetworkManager\nfrom analysis_functions import calculate_recall_success_sequences\nfrom connectivity_functions import create_artificial_manager\n\nsns.set(font_scale=2.5)\n\n# Patterns parameters\nhypercolumns = 4\nminicolumns = 50\n\ndt = 0.001\n\n# Recall\nn = 10\nT_cue = 0.100\nT_recall = 10.0\n\n# Artificial matrix\nbeta = False\nvalue = 3\ninhibition = -1\nextension = 4\ndecay_factor = 0.3\nsequence_decay = 0.0\ntau_z_pre = 0.150\n\n# Sequence structure\noverlap = 2\nnumber_of_sequences = 2\nhalf_width = 3\nextension_vector = np.arange(1, 7, 1)\n\ntotal_success_list_extension = []\noverloads = [2, 4, 6]\nfor number_of_sequences in overloads:\n total_success_extension = np.zeros(extension_vector.size)\n print('overloads', number_of_sequences)\n for extension_index, extension in enumerate(extension_vector):\n\n # Build chain protocol\n chain_protocol = Protocol()\n units_to_overload = [i for i in range(overlap)]\n sequences = chain_protocol.create_overload_chain(number_of_sequences, half_width, units_to_overload)\n\n manager = create_artificial_manager(hypercolumns, minicolumns, sequences, value=value,\n inhibition=inhibition,\n extension=extension, decay_factor=decay_factor,\n sequence_decay=sequence_decay,\n dt=dt, BCPNNFast=BCPNNFast, NetworkManager=NetworkManager, ampa=True,\n beta=beta)\n\n manager.nn.tau_z_pre = tau_z_pre\n\n successes = calculate_recall_success_sequences(manager, T_recall=T_recall, T_cue=T_cue, n=n,\n sequences=sequences)\n total_success_extension[extension_index] = np.min(successes)\n total_success_list_extension.append(total_success_extension)\n\nfig = plt.figure(figsize=(16, 12))\nax = fig.add_subplot(111)\nfor overload, total_success_extension in zip(overloads, total_success_list_extension):\n ax.plot(extension_vector, total_success_extension, '*-', markersize=15, label='overload = ' + str(overload))\n\nax.axhline(0, color='black')\nax.set_ylim([-5, 115])\n\nax.set_xlabel('Extension')\nax.set_ylabel('Success')\n\nax.legend()\n\n# Save the figure\nfname = './plots/capacity_extension_overload.pdf'\nplt.savefig(fname, format='pdf', dpi=90, bbox_inches='tight', frameon=True, transparent=False)\n\n\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.savefig", "numpy.min", "matplotlib.pyplot.figure", "numpy.arange" ] ]
equinaut/statsmodels
[ "6fe8d4e351416727641db4c3d3552f4ec4f46d0e" ]
[ "statsmodels/stats/multitest.py" ]
[ "'''Multiple Testing and P-Value Correction\n\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\n'''\n\nfrom statsmodels.compat.python import range\nfrom collections import OrderedDict\nfrom ._knockoff import RegressionFDR\nimport numpy as np\n\n\n#==============================================\n#\n# Part 1: Multiple Tests and P-Value Correction\n#\n#==============================================\n\ndef _ecdf(x):\n '''no frills empirical cdf used in fdrcorrection\n '''\n nobs = len(x)\n return np.arange(1,nobs+1)/float(nobs)\n\nmultitest_methods_names = {'b': 'Bonferroni',\n 's': 'Sidak',\n 'h': 'Holm',\n 'hs': 'Holm-Sidak',\n 'sh': 'Simes-Hochberg',\n 'ho': 'Hommel',\n 'fdr_bh': 'FDR Benjamini-Hochberg',\n 'fdr_by': 'FDR Benjamini-Yekutieli',\n 'fdr_tsbh': 'FDR 2-stage Benjamini-Hochberg',\n 'fdr_tsbky': 'FDR 2-stage Benjamini-Krieger-Yekutieli',\n 'fdr_gbs': 'FDR adaptive Gavrilov-Benjamini-Sarkar'\n }\n\n_alias_list = [['b', 'bonf', 'bonferroni'],\n ['s', 'sidak'],\n ['h', 'holm'],\n ['hs', 'holm-sidak'],\n ['sh', 'simes-hochberg'],\n ['ho', 'hommel'],\n ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp'],\n ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr'],\n ['fdr_tsbh', 'fdr_2sbh'],\n ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage'],\n ['fdr_gbs']\n ]\n\n\nmultitest_alias = OrderedDict()\nfor m in _alias_list:\n multitest_alias[m[0]] = m[0]\n for a in m[1:]:\n multitest_alias[a] = m[0]\n\ndef multipletests(pvals, alpha=0.05, method='hs', is_sorted=False,\n returnsorted=False):\n \"\"\"\n Test results and p-value correction for multiple tests\n\n Parameters\n ----------\n pvals : array_like, 1-d\n uncorrected p-values. Must be 1-dimensional.\n alpha : float\n FWER, family-wise error rate, e.g. 0.1\n method : string\n Method used for testing and adjustment of pvalues. Can be either the\n full name or initial letters. Available methods are:\n\n - `bonferroni` : one-step correction\n - `sidak` : one-step correction\n - `holm-sidak` : step down method using Sidak adjustments\n - `holm` : step-down method using Bonferroni adjustments\n - `simes-hochberg` : step-up method (independent)\n - `hommel` : closed method based on Simes tests (non-negative)\n - `fdr_bh` : Benjamini/Hochberg (non-negative)\n - `fdr_by` : Benjamini/Yekutieli (negative)\n - `fdr_tsbh` : two stage fdr correction (non-negative)\n - `fdr_tsbky` : two stage fdr correction (non-negative)\n\n is_sorted : bool\n If False (default), the p_values will be sorted, but the corrected\n pvalues are in the original order. If True, then it assumed that the\n pvalues are already sorted in ascending order.\n returnsorted : bool\n not tested, return sorted p-values instead of original sequence\n\n Returns\n -------\n reject : array, boolean\n true for hypothesis that can be rejected for given alpha\n pvals_corrected : array\n p-values corrected for multiple tests\n alphacSidak: float\n corrected alpha for Sidak method\n alphacBonf: float\n corrected alpha for Bonferroni method\n\n Notes\n -----\n There may be API changes for this function in the future.\n\n Except for 'fdr_twostage', the p-value correction is independent of the\n alpha specified as argument. In these cases the corrected p-values\n can also be compared with a different alpha. In the case of 'fdr_twostage',\n the corrected p-values are specific to the given alpha, see\n ``fdrcorrection_twostage``.\n\n The 'fdr_gbs' procedure is not verified against another package, p-values\n are derived from scratch and are not derived in the reference. 
In Monte\n Carlo experiments the method worked correctly and maintained the false\n discovery rate.\n\n All procedures that are included, control FWER or FDR in the independent\n case, and most are robust in the positively correlated case.\n\n `fdr_gbs`: high power, fdr control for independent case and only small\n violation in positively correlated case\n\n **Timing**:\n\n Most of the time with large arrays is spent in `argsort`. When\n we want to calculate the p-value for several methods, then it is more\n efficient to presort the pvalues, and put the results back into the\n original order outside of the function.\n\n Method='hommel' is very slow for large arrays, since it requires the\n evaluation of n partitions, where n is the number of p-values.\n \"\"\"\n import gc\n pvals = np.asarray(pvals)\n alphaf = alpha # Notation ?\n\n if not is_sorted:\n sortind = np.argsort(pvals)\n pvals = np.take(pvals, sortind)\n\n ntests = len(pvals)\n alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)\n alphacBonf = alphaf / float(ntests)\n if method.lower() in ['b', 'bonf', 'bonferroni']:\n reject = pvals <= alphacBonf\n pvals_corrected = pvals * float(ntests)\n\n elif method.lower() in ['s', 'sidak']:\n reject = pvals <= alphacSidak\n pvals_corrected = 1 - np.power((1. - pvals), ntests)\n\n elif method.lower() in ['hs', 'holm-sidak']:\n alphacSidak_all = 1 - np.power((1. - alphaf),\n 1./np.arange(ntests, 0, -1))\n notreject = pvals > alphacSidak_all\n del alphacSidak_all\n\n nr_index = np.nonzero(notreject)[0]\n if nr_index.size == 0:\n # nonreject is empty, all rejected\n notrejectmin = len(pvals)\n else:\n notrejectmin = np.min(nr_index)\n notreject[notrejectmin:] = True\n reject = ~notreject\n del notreject\n\n pvals_corrected_raw = 1 - np.power((1. - pvals),\n np.arange(ntests, 0, -1))\n pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)\n del pvals_corrected_raw\n\n elif method.lower() in ['h', 'holm']:\n notreject = pvals > alphaf / np.arange(ntests, 0, -1)\n nr_index = np.nonzero(notreject)[0]\n if nr_index.size == 0:\n # nonreject is empty, all rejected\n notrejectmin = len(pvals)\n else:\n notrejectmin = np.min(nr_index)\n notreject[notrejectmin:] = True\n reject = ~notreject\n pvals_corrected_raw = pvals * np.arange(ntests, 0, -1)\n pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)\n del pvals_corrected_raw\n gc.collect()\n\n elif method.lower() in ['sh', 'simes-hochberg']:\n alphash = alphaf / np.arange(ntests, 0, -1)\n reject = pvals <= alphash\n rejind = np.nonzero(reject)\n if rejind[0].size > 0:\n rejectmax = np.max(np.nonzero(reject))\n reject[:rejectmax] = True\n pvals_corrected_raw = np.arange(ntests, 0, -1) * pvals\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n del pvals_corrected_raw\n\n elif method.lower() in ['ho', 'hommel']:\n # we need a copy because we overwrite it in a loop\n a = pvals.copy()\n for m in range(ntests, 1, -1):\n cim = np.min(m * pvals[-m:] / np.arange(1,m+1.))\n a[-m:] = np.maximum(a[-m:], cim)\n a[:-m] = np.maximum(a[:-m], np.minimum(m * pvals[:-m], cim))\n pvals_corrected = a\n reject = a <= alphaf\n\n elif method.lower() in ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp']:\n # delegate, call with sorted pvals\n reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,\n method='indep',\n is_sorted=True)\n elif method.lower() in ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr']:\n # delegate, call with sorted pvals\n reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,\n method='n',\n is_sorted=True)\n 
elif method.lower() in ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage']:\n # delegate, call with sorted pvals\n reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,\n method='bky',\n is_sorted=True)[:2]\n elif method.lower() in ['fdr_tsbh', 'fdr_2sbh']:\n # delegate, call with sorted pvals\n reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,\n method='bh',\n is_sorted=True)[:2]\n\n elif method.lower() in ['fdr_gbs']:\n #adaptive stepdown in Gavrilov, Benjamini, Sarkar, Annals of Statistics 2009\n## notreject = pvals > alphaf / np.arange(ntests, 0, -1) #alphacSidak\n## notrejectmin = np.min(np.nonzero(notreject))\n## notreject[notrejectmin:] = True\n## reject = ~notreject\n\n ii = np.arange(1, ntests + 1)\n q = (ntests + 1. - ii)/ii * pvals / (1. - pvals)\n pvals_corrected_raw = np.maximum.accumulate(q) #up requirementd\n\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n del pvals_corrected_raw\n reject = pvals_corrected <= alpha\n\n else:\n raise ValueError('method not recognized')\n\n if pvals_corrected is not None: #not necessary anymore\n pvals_corrected[pvals_corrected>1] = 1\n if is_sorted or returnsorted:\n return reject, pvals_corrected, alphacSidak, alphacBonf\n else:\n pvals_corrected_ = np.empty_like(pvals_corrected)\n pvals_corrected_[sortind] = pvals_corrected\n del pvals_corrected\n reject_ = np.empty_like(reject)\n reject_[sortind] = reject\n return reject_, pvals_corrected_, alphacSidak, alphacBonf\n\n\ndef fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False):\n '''pvalue correction for false discovery rate\n\n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests. Both are\n available in the function multipletests, as method=`fdr_bh`, resp. 
`fdr_by`.\n\n Parameters\n ----------\n pvals : array_like\n set of p-values of the individual tests.\n alpha : float\n error rate\n method : {'indep', 'negcorr'}\n\n Returns\n -------\n rejected : array, bool\n True if a hypothesis is rejected, False if not\n pvalue-corrected : array\n pvalues adjusted for multiple hypothesis testing to limit FDR\n\n Notes\n -----\n\n If there is prior information on the fraction of true hypotheses, then alpha\n should be set to alpha * m/m_0 where m is the number of tests,\n given by the p-values, and m_0 is an estimate of the number of true hypotheses.\n (see Benjamini, Krieger and Yekutieli)\n\n The two-step method of Benjamini, Krieger and Yekutieli that estimates the number\n of false hypotheses is available as ``fdrcorrection_twostage``.\n\n Method names can be abbreviated to first letter, 'i' or 'p' for fdr_bh and 'n' for\n fdr_by.\n\n\n\n '''\n pvals = np.asarray(pvals)\n\n if not is_sorted:\n pvals_sortind = np.argsort(pvals)\n pvals_sorted = np.take(pvals, pvals_sortind)\n else:\n pvals_sorted = pvals # alias\n\n if method in ['i', 'indep', 'p', 'poscorr']:\n ecdffactor = _ecdf(pvals_sorted)\n elif method in ['n', 'negcorr']:\n cm = np.sum(1./np.arange(1, len(pvals_sorted)+1)) #corrected this\n ecdffactor = _ecdf(pvals_sorted) / cm\n## elif method in ['n', 'negcorr']:\n## cm = np.sum(np.arange(len(pvals)))\n## ecdffactor = ecdf(pvals_sorted)/cm\n else:\n raise ValueError('only indep and negcorr implemented')\n reject = pvals_sorted <= ecdffactor*alpha\n if reject.any():\n rejectmax = max(np.nonzero(reject)[0])\n reject[:rejectmax] = True\n\n pvals_corrected_raw = pvals_sorted / ecdffactor\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n del pvals_corrected_raw\n pvals_corrected[pvals_corrected>1] = 1\n if not is_sorted:\n pvals_corrected_ = np.empty_like(pvals_corrected)\n pvals_corrected_[pvals_sortind] = pvals_corrected\n del pvals_corrected\n reject_ = np.empty_like(reject)\n reject_[pvals_sortind] = reject\n return reject_, pvals_corrected_\n else:\n return reject, pvals_corrected\n\n\ndef fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False,\n is_sorted=False):\n '''(iterated) two stage linear step-up procedure with estimation of number of true\n hypotheses\n\n Benjamini, Krieger and Yekutieli, procedure in Definition 6\n\n Parameters\n ----------\n pvals : array_like\n set of p-values of the individual tests.\n alpha : float\n error rate\n method : {'bky', 'bh'}\n see Notes for details\n\n * 'bky' - implements the procedure in Definition 6 of Benjamini, Krieger\n and Yekutieli 2006\n * 'bh' - the two stage method of Benjamini and Hochberg\n\n iter : bool\n\n Returns\n -------\n rejected : array, bool\n True if a hypothesis is rejected, False if not\n pvalue-corrected : array\n pvalues adjusted for multiple hypotheses testing to limit FDR\n m0 : int\n ntest - rej, estimated number of true hypotheses\n alpha_stages : list of floats\n A list of alphas that have been used at each stage\n\n Notes\n -----\n The returned corrected p-values are specific to the given alpha, they\n cannot be used for a different alpha.\n\n The returned corrected p-values are from the last stage of the fdr_bh\n linear step-up procedure (fdrcorrection0 with method='indep') corrected\n for the estimated fraction of true hypotheses.\n This means that the rejection decision can be obtained with\n ``pval_corrected <= alpha``, where ``alpha`` is the original significance\n level.\n (Note: This has changed from earlier versions (<0.5.0) of statsmodels.)\n\n BKY 
described several other multi-stage methods, which would be easy to implement.\n However, in their simulation the simple two-stage method (with iter=False) was the\n most robust to the presence of positive correlation\n\n TODO: What should be returned?\n\n '''\n pvals = np.asarray(pvals)\n\n if not is_sorted:\n pvals_sortind = np.argsort(pvals)\n pvals = np.take(pvals, pvals_sortind)\n\n ntests = len(pvals)\n if method == 'bky':\n fact = (1.+alpha)\n alpha_prime = alpha / fact\n elif method == 'bh':\n fact = 1.\n alpha_prime = alpha\n else:\n raise ValueError(\"only 'bky' and 'bh' are available as method\")\n\n alpha_stages = [alpha_prime]\n rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_prime, method='indep',\n is_sorted=True)\n r1 = rej.sum()\n if (r1 == 0) or (r1 == ntests):\n return rej, pvalscorr * fact, ntests - r1, alpha_stages\n ri_old = r1\n\n while True:\n ntests0 = 1.0 * ntests - ri_old\n alpha_star = alpha_prime * ntests / ntests0\n alpha_stages.append(alpha_star)\n #print ntests0, alpha_star\n rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_star, method='indep',\n is_sorted=True)\n ri = rej.sum()\n if (not iter) or ri == ri_old:\n break\n elif ri < ri_old:\n # prevent cycles and endless loops\n raise RuntimeError(\" oops - shouldn't be here\")\n ri_old = ri\n\n # make adjustment to pvalscorr to reflect estimated number of Non-Null cases\n # decision is then pvalscorr < alpha (or <=)\n pvalscorr *= ntests0 * 1.0 / ntests\n if method == 'bky':\n pvalscorr *= (1. + alpha)\n\n if not is_sorted:\n pvalscorr_ = np.empty_like(pvalscorr)\n pvalscorr_[pvals_sortind] = pvalscorr\n del pvalscorr\n reject = np.empty_like(rej)\n reject[pvals_sortind] = rej\n return reject, pvalscorr_, ntests - ri, alpha_stages\n else:\n return rej, pvalscorr, ntests - ri, alpha_stages\n\n\ndef local_fdr(zscores, null_proportion=1.0, null_pdf=None, deg=7,\n nbins=30):\n \"\"\"\n Calculate local FDR values for a list of Z-scores.\n\n Parameters\n ----------\n zscores : array-like\n A vector of Z-scores\n null_proportion : float\n The assumed proportion of true null hypotheses\n null_pdf : function mapping reals to positive reals\n The density of null Z-scores; if None, use standard normal\n deg : integer\n The maximum exponent in the polynomial expansion of the\n density of non-null Z-scores\n nbins : integer\n The number of bins for estimating the marginal density\n of Z-scores.\n\n Returns\n -------\n fdr : array-like\n A vector of FDR values\n\n References\n ----------\n B Efron (2008). Microarrays, Empirical Bayes, and the Two-Groups\n Model. 
Statistical Science 23:1, 1-22.\n\n Examples\n --------\n Basic use (the null Z-scores are taken to be standard normal):\n\n >>> from statsmodels.stats.multitest import local_fdr\n >>> import numpy as np\n >>> zscores = np.random.randn(30)\n >>> fdr = local_fdr(zscores)\n\n Use a Gaussian null distribution estimated from the data:\n\n >>> null = EmpiricalNull(zscores)\n >>> fdr = local_fdr(zscores, null_pdf=null.pdf)\n \"\"\"\n\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.generalized_linear_model import families\n from statsmodels.regression.linear_model import OLS\n\n # Bins for Poisson modeling of the marginal Z-score density\n minz = min(zscores)\n maxz = max(zscores)\n bins = np.linspace(minz, maxz, nbins)\n\n # Bin counts\n zhist = np.histogram(zscores, bins)[0]\n\n # Bin centers\n zbins = (bins[:-1] + bins[1:]) / 2\n\n # The design matrix at bin centers\n dmat = np.vander(zbins, deg + 1)\n\n # Use this to get starting values for Poisson regression\n md = OLS(np.log(1 + zhist), dmat).fit()\n\n # Poisson regression\n md = GLM(zhist, dmat, family=families.Poisson()).fit(start_params=md.params)\n\n # The design matrix for all Z-scores\n dmat_full = np.vander(zscores, deg + 1)\n\n # The height of the estimated marginal density of Z-scores,\n # evaluated at every observed Z-score.\n fz = md.predict(dmat_full) / (len(zscores) * (bins[1] - bins[0]))\n\n # The null density.\n if null_pdf is None:\n f0 = np.exp(-0.5 * zscores**2) / np.sqrt(2 * np.pi)\n else:\n f0 = null_pdf(zscores)\n\n # The local FDR values\n fdr = null_proportion * f0 / fz\n\n fdr = np.clip(fdr, 0, 1)\n\n return fdr\n\n\nclass NullDistribution(object):\n \"\"\"\n Estimate a Gaussian distribution for the null Z-scores.\n\n The observed Z-scores consist of both null and non-null values.\n The fitted distribution of null Z-scores is Gaussian, but may have\n non-zero mean and/or non-unit scale.\n\n Parameters\n ----------\n zscores : array-like\n The observed Z-scores.\n null_lb : float\n Z-scores between `null_lb` and `null_ub` are all considered to be\n true null hypotheses.\n null_ub : float\n See `null_lb`.\n estimate_mean : bool\n If True, estimate the mean of the distribution. If False, the\n mean is fixed at zero.\n estimate_scale : bool\n If True, estimate the scale of the distribution. If False, the\n scale parameter is fixed at 1.\n estimate_null_proportion : bool\n If True, estimate the proportion of true null hypotheses (i.e.\n the proportion of z-scores with expected value zero). If False,\n this parameter is fixed at 1.\n\n Attributes\n ----------\n mean : float\n The estimated mean of the empirical null distribution\n sd : float\n The estimated standard deviation of the empirical null distribution\n null_proportion : float\n The estimated proportion of true null hypotheses among all hypotheses\n\n References\n ----------\n B Efron (2008). Microarrays, Empirical Bayes, and the Two-Groups\n Model. 
Statistical Science 23:1, 1-22.\n\n Notes\n -----\n See also:\n\n http://nipy.org/nipy/labs/enn.html#nipy.algorithms.statistics.empirical_pvalue.NormalEmpiricalNull.fdr\n \"\"\"\n\n def __init__(self, zscores, null_lb=-1, null_ub=1, estimate_mean=True,\n estimate_scale=True, estimate_null_proportion=False):\n\n # Extract the null z-scores\n ii = np.flatnonzero((zscores >= null_lb) & (zscores <= null_ub))\n if len(ii) == 0:\n raise RuntimeError(\"No Z-scores fall between null_lb and null_ub\")\n zscores0 = zscores[ii]\n\n # Number of Z-scores, and null Z-scores\n n_zs, n_zs0 = len(zscores), len(zscores0)\n\n # Unpack and transform the parameters to the natural scale, hold\n # parameters fixed as specified.\n def xform(params):\n\n mean = 0.\n sd = 1.\n prob = 1.\n\n ii = 0\n if estimate_mean:\n mean = params[ii]\n ii += 1\n if estimate_scale:\n sd = np.exp(params[ii])\n ii += 1\n if estimate_null_proportion:\n prob = 1 / (1 + np.exp(-params[ii]))\n\n return mean, sd, prob\n\n\n from scipy.stats.distributions import norm\n\n\n def fun(params):\n \"\"\"\n Negative log-likelihood of z-scores.\n\n The function has three arguments, packed into a vector:\n\n mean : location parameter\n logscale : log of the scale parameter\n logitprop : logit of the proportion of true nulls\n\n The implementation follows section 4 from Efron 2008.\n \"\"\"\n\n d, s, p = xform(params)\n\n # Mass within the central region\n central_mass = (norm.cdf((null_ub - d) / s) -\n norm.cdf((null_lb - d) / s))\n\n # Probability that a Z-score is null and is in the central region\n cp = p * central_mass\n\n # Binomial term\n rval = n_zs0 * np.log(cp) + (n_zs - n_zs0) * np.log(1 - cp)\n\n # Truncated Gaussian term for null Z-scores\n zv = (zscores0 - d) / s\n rval += np.sum(-zv**2 / 2) - n_zs0 * np.log(s)\n rval -= n_zs0 * np.log(central_mass)\n\n return -rval\n\n\n # Estimate the parameters\n from scipy.optimize import minimize\n # starting values are mean = 0, scale = 1, p0 ~ 1\n mz = minimize(fun, np.r_[0., 0, 3], method=\"Nelder-Mead\")\n mean, sd, prob = xform(mz['x'])\n\n self.mean = mean\n self.sd = sd\n self.null_proportion = prob\n\n\n # The fitted null density function\n def pdf(self, zscores):\n \"\"\"\n Evaluates the fitted emirical null Z-score density.\n\n Parameters\n ----------\n zscores : scalar or array-like\n The point or points at which the density is to be\n evaluated.\n\n Returns\n -------\n The empirical null Z-score density evaluated at the given\n points.\n \"\"\"\n\n zval = (zscores - self.mean) / self.sd\n return np.exp(-0.5*zval**2 - np.log(self.sd) - 0.5*np.log(2*np.pi))\n" ]
[ [ "numpy.vander", "numpy.minimum", "scipy.stats.distributions.norm.cdf", "numpy.exp", "numpy.min", "numpy.histogram", "numpy.log", "numpy.nonzero", "numpy.take", "numpy.arange", "numpy.sqrt", "scipy.optimize.minimize", "numpy.empty_like", "numpy.flatnonzero", "numpy.power", "numpy.argsort", "numpy.clip", "numpy.asarray", "numpy.minimum.accumulate", "numpy.sum", "numpy.maximum.accumulate", "numpy.linspace", "numpy.maximum" ] ]
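The code field above lists the full set of FWER/FDR corrections (Bonferroni, Sidak, Holm, Simes-Hochberg, Hommel, Benjamini-Hochberg, Benjamini-Yekutieli, the two-stage variants, and fdr_gbs). A minimal usage sketch follows, assuming the functions are importable from statsmodels.stats.multitest (the import path used in the local_fdr docstring in the listing); the p-values are made-up illustration data.

# Sketch: applying the listed corrections to a handful of p-values.
import numpy as np
from statsmodels.stats.multitest import multipletests, fdrcorrection

pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205])

# Generic front end: returns the reject mask, adjusted p-values, and the
# per-test Sidak/Bonferroni alphas computed in the listed function body.
reject, pvals_adj, alpha_sidak, alpha_bonf = multipletests(
    pvals, alpha=0.05, method='fdr_bh')

# Dedicated Benjamini-Hochberg helper (method='indep'), which multipletests
# delegates to for method='fdr_bh'.
reject_bh, pvals_bh = fdrcorrection(pvals, alpha=0.05, method='indep')

print(reject)     # boolean mask of rejected hypotheses
print(pvals_adj)  # adjusted p-values, clipped at 1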
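The local FDR and empirical-null parts of the same listing can be exercised as sketched below. The local_fdr docstring mentions an EmpiricalNull class, but the class defined in the listing is NullDistribution, so that name is used here; the z-scores are simulated, so exact values will vary run to run.

# Sketch: local FDR with a standard-normal null and with a fitted empirical null.
import numpy as np
from statsmodels.stats.multitest import local_fdr, NullDistribution

rng = np.random.RandomState(0)
# 950 null z-scores plus 50 shifted "signal" z-scores
zscores = np.concatenate([rng.standard_normal(950),
                          rng.standard_normal(50) + 3.0])

# Null taken to be standard normal
fdr0 = local_fdr(zscores)

# Empirical null fitted to the central z-scores (Efron 2008, section 4)
null = NullDistribution(zscores, null_lb=-1, null_ub=1)
fdr_emp = local_fdr(zscores, null_pdf=null.pdf)

print(fdr0[:5])
print(fdr_emp[:5])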