Dataset columns (with value / string-length ranges):

    index      int64            0 – 731k
    package    string, length   2 – 98
    name       string, length   1 – 76
    docstring  string, length   0 – 281k
    code       string, length   4 – 1.07M
    signature  string, length   2 – 42.8k
index: 43,135
package: ltxpdflinks._lplxexporter
name: __init__
docstring: null
code:
    def __init__(self, *, include_comments_catcode=False):
        super().__init__()
        self.include_comments_catcode = include_comments_catcode
signature: (self, *, include_comments_catcode=False)
index: 43,136
package: ltxpdflinks._lplxexporter
name: export
docstring: null
code:
    def export(self, extractedgraphiclinks):
        e = extractedgraphiclinks  # shorthand
        graphic_basefname, graphic_ext = os.path.splitext(e.graphic_fname)

        s = ""
        if self.include_comments_catcode:
            s += r"""\catcode`\%=14\relax""" + "\n"
        s += (
            r"""% Automatically generated by ltxpdflinks """ + version_str
            + r""" on """ + datetime.datetime.now().isoformat() + r"""
%
% LPLX - """ + _makeltxsafe(e.graphic_fname) + r"""
%
\LPLX{version=0,ltxpdflinksversion={""" + version_str + r"""},features={bbox}}{%
\lplxGraphic{""" + _makeltxsafe(graphic_basefname) + r"""}{""" + _makeltxsafe(graphic_ext) + r"""}%
\lplxUserSpaceUnitLength{""" + e.unitlength + r"""}%
\lplxSetBbox{0}{0}""" + "{{{:.6g}}}{{{:.6g}}}".format(e.size[0], e.size[1]) + r"""%
%%BoundingBox: 0 0 """ + "{:d} {:d}".format(int(e.size[0]+0.5), int(e.size[1]+0.5)) + r"""
%%HiResBoundingBox: 0 0 """ + "{:.6g} {:.6g}".format(e.size[0], e.size[1]) + r"""
\lplxPicture{%
"""
        )
        for el in e.links:
            x, y, w, h = el.link_bbox
            lplxcmd = r'\lplxPutLink'
            lplxtailargs = ''
            if el.link_type == 'URI':
                hrstart = r"""\href{{{tgt}}}""".format(tgt=_makeltxsafe(el.link_target))
                lplxtailargs = '{{{hrstart}}}{{}}'.format(hrstart=hrstart)
            elif el.link_type == 'latex-ref':
                hrstart = r"""\hyperref[{{{tgt}}}]""".format(tgt=_makeltxsafe(el.link_target))
                lplxtailargs = '{{{hrstart}}}{{}}'.format(hrstart=hrstart)
            elif el.link_type == 'latex-cite':
                hrstart = r"""\hyperlink{{cite.{tgt}}}""".format(tgt=_makeltxsafe(el.link_target))
                lplxtailargs = '{{{hrstart}}}{{}}'.format(hrstart=hrstart)
            elif el.link_type == 'latex-box':
                lplxcmd, lplxtailargs = _make_latexbox_from_url(el.link_target, el)
            else:
                logger.warning("Ignoring link with unsupported link_type: %r", el)
                continue
            # s += (
            #     r"\put({x},{y})".format(x=el.link_bbox[0], y=el.link_bbox[1]) +
            #     "{" + s2 + "}\n"
            # )
            s += (
                r"{lplxcmd}{{{x:.8g}}}{{{y:.8g}}}{{{w:.8g}}}{{{h:.8g}}}{lplxtailargs}"
                .format(lplxcmd=lplxcmd, x=x, y=y, w=w, h=h, lplxtailargs=lplxtailargs)
                + r"%" + "\n"
            )
        s += r"""}}%""" + "\n"
        return s
signature: (self, extractedgraphiclinks)
index: 43,137
package: ltxpdflinks._extractor
name: PdfGraphicLinksExtractor
docstring: null
code:
    class PdfGraphicLinksExtractor:
        def __init__(self, fname):
            super().__init__()
            self.fname = fname

        def extractGraphicLinks(self, pageno=None):
            if pageno is None:
                pageno = 0
            with open(self.fname, 'rb') as f:
                pdf = PyPDF2.PdfFileReader(f)
                page = pdf.getPage(pageno).getObject()
                page_size = (page.mediaBox.getWidth(), page.mediaBox.getHeight())
                page_bottomleft = page.mediaBox.lowerLeft
                extracted_list = []
                if '/Annots' in page:
                    for annot in page['/Annots']:
                        annot = annot.getObject()
                        extracted = self._extract_annot_link(annot, shift_rect_origin=page_bottomleft)
                        if extracted is not None:
                            logger.debug("Extracted link: %r", extracted)
                            extracted_list.append(extracted)
            return ExtractedGraphicLinks(self.fname, page_size, extracted_list)

        def _extract_annot_link(self, annot, *, shift_rect_origin=(0, 0)):
            if '/Subtype' not in annot or annot['/Subtype'].getObject() != '/Link':
                logger.debug("Found annotation, not a link: %r", annot)
                return None
            if '/A' not in annot:
                return None
            annot_A = annot['/A'].getObject()
            if '/S' not in annot_A or annot_A['/S'].getObject() != '/URI':
                logger.warning("Link action %r has unsupported type (/S != /URI)", annot_A)
                return None
            if '/URI' not in annot_A:
                logger.warning("Link action %r does not have URI", annot_A)
                return None
            URI = annot_A['/URI'].getObject()
            if '/Rect' not in annot:
                logger.warning("Can't get annotation's bounding box (/Rect): %r", annot)
                return None
            else:
                (x0, y0, x1, y1) = annot['/Rect'].getObject()
                x = x0 - shift_rect_origin[0]
                y = y0 - shift_rect_origin[1]
                w = x1 - x0
                h = y1 - y0
            return ExtractedLink(link_bbox=(x, y, w, h), link_type='URI', link_target=URI)
signature: (fname)
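
A minimal driver sketch for the ltxpdflinks records above may be useful; the import path is taken from the package field, the attribute names on the result (links, link_type, link_target, link_bbox) are read off the extractor/exporter records, and running it requires PyPDF2 plus a real PDF file (the filename here is a placeholder):

    # Hedged sketch: extract URI links from page 0 of a PDF figure.
    from ltxpdflinks._extractor import PdfGraphicLinksExtractor

    extracted = PdfGraphicLinksExtractor('figure.pdf').extractGraphicLinks(pageno=0)
    for link in extracted.links:
        # Each ExtractedLink carries (x, y, w, h) relative to the page's lower left.
        print(link.link_type, link.link_target, link.link_bbox)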
index: 43,138
package: ltxpdflinks._extractor
name: __init__
docstring: null
code:
    def __init__(self, fname):
        super().__init__()
        self.fname = fname
signature: (self, fname)
index: 43,139
package: ltxpdflinks._extractor
name: _extract_annot_link
docstring: null
code:
    def _extract_annot_link(self, annot, *, shift_rect_origin=(0, 0)):
        if '/Subtype' not in annot or annot['/Subtype'].getObject() != '/Link':
            logger.debug("Found annotation, not a link: %r", annot)
            return None
        if '/A' not in annot:
            return None
        annot_A = annot['/A'].getObject()
        if '/S' not in annot_A or annot_A['/S'].getObject() != '/URI':
            logger.warning("Link action %r has unsupported type (/S != /URI)", annot_A)
            return None
        if '/URI' not in annot_A:
            logger.warning("Link action %r does not have URI", annot_A)
            return None
        URI = annot_A['/URI'].getObject()
        if '/Rect' not in annot:
            logger.warning("Can't get annotation's bounding box (/Rect): %r", annot)
            return None
        else:
            (x0, y0, x1, y1) = annot['/Rect'].getObject()
            x = x0 - shift_rect_origin[0]
            y = y0 - shift_rect_origin[1]
            w = x1 - x0
            h = y1 - y0
        return ExtractedLink(link_bbox=(x, y, w, h), link_type='URI', link_target=URI)
signature: (self, annot, *, shift_rect_origin=(0, 0))
index: 43,140
package: ltxpdflinks._extractor
name: extractGraphicLinks
docstring: null
code:
    def extractGraphicLinks(self, pageno=None):
        if pageno is None:
            pageno = 0
        with open(self.fname, 'rb') as f:
            pdf = PyPDF2.PdfFileReader(f)
            page = pdf.getPage(pageno).getObject()
            page_size = (page.mediaBox.getWidth(), page.mediaBox.getHeight())
            page_bottomleft = page.mediaBox.lowerLeft
            extracted_list = []
            if '/Annots' in page:
                for annot in page['/Annots']:
                    annot = annot.getObject()
                    extracted = self._extract_annot_link(annot, shift_rect_origin=page_bottomleft)
                    if extracted is not None:
                        logger.debug("Extracted link: %r", extracted)
                        extracted_list.append(extracted)
        return ExtractedGraphicLinks(self.fname, page_size, extracted_list)
signature: (self, pageno=None)
index: 43,145
package: salesforce_api.client
name: Client
docstring: null
code:
    class Client:
        def __init__(self, connection: Connection = None, domain: str = None,
                     username: str = None, password: str = None,
                     security_token: str = None, password_and_security_token: str = None,
                     client_id: str = None, client_secret: str = None,
                     access_token: str = None, session: requests.Session = None,
                     is_sandbox=False, api_version: str = None):
            self.connection = connection if connection else login.magic(
                domain=domain,
                username=username,
                password=password,
                security_token=security_token,
                password_and_security_token=password_and_security_token,
                client_id=client_id,
                client_secret=client_secret,
                access_token=access_token,
                session=misc_utils.get_session(session),
                is_sandbox=is_sandbox,
                api_version=api_version
            )
            self._setup_services()

        def _setup_services(self):
            self.basic = basic.Basic(self.connection)
            self.sobjects = sobjects.SObjects(self.connection)
            self.tooling = tooling.Tooling(self.connection)
            self.deploy = deploy.Deploy(self.connection)
            self.retrieve = retrieve.Retrieve(self.connection)
            self.bulk = bulk.Client(self.connection)
            self.bulk_v1 = bulk.v1.Client(self.connection)
            self.bulk_v2 = bulk.v2.Client(self.connection)
signature: (connection: salesforce_api.core.Connection = None, domain: str = None, username: str = None, password: str = None, security_token: str = None, password_and_security_token: str = None, client_id: str = None, client_secret: str = None, access_token: str = None, session: requests.sessions.Session = None, is_sandbox=False, api_version: str = None)
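A minimal connection sketch based on the Client record above; the credential strings are placeholders, the top-level import is an assumption (only the module path salesforce_api.client is shown), and only attributes created in _setup_services are referenced:

    from salesforce_api import Client  # assumes the package re-exports Client

    client = Client(
        username='user@example.com',   # placeholder credentials
        password='password',
        security_token='token',
        is_sandbox=False,
    )
    # Service facades wired up by _setup_services():
    print(client.basic, client.sobjects, client.deploy, client.bulk_v2)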
index: 43,146
package: salesforce_api.client
name: __init__
docstring: null
code:
    def __init__(self, connection: Connection = None, domain: str = None,
                 username: str = None, password: str = None,
                 security_token: str = None, password_and_security_token: str = None,
                 client_id: str = None, client_secret: str = None,
                 access_token: str = None, session: requests.Session = None,
                 is_sandbox=False, api_version: str = None):
        self.connection = connection if connection else login.magic(
            domain=domain,
            username=username,
            password=password,
            security_token=security_token,
            password_and_security_token=password_and_security_token,
            client_id=client_id,
            client_secret=client_secret,
            access_token=access_token,
            session=misc_utils.get_session(session),
            is_sandbox=is_sandbox,
            api_version=api_version
        )
        self._setup_services()
signature: (self, connection: Optional[salesforce_api.core.Connection] = None, domain: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, security_token: Optional[str] = None, password_and_security_token: Optional[str] = None, client_id: Optional[str] = None, client_secret: Optional[str] = None, access_token: Optional[str] = None, session: Optional[requests.sessions.Session] = None, is_sandbox=False, api_version: Optional[str] = None)
index: 43,147
package: salesforce_api.client
name: _setup_services
docstring: null
code:
    def _setup_services(self):
        self.basic = basic.Basic(self.connection)
        self.sobjects = sobjects.SObjects(self.connection)
        self.tooling = tooling.Tooling(self.connection)
        self.deploy = deploy.Deploy(self.connection)
        self.retrieve = retrieve.Retrieve(self.connection)
        self.bulk = bulk.Client(self.connection)
        self.bulk_v1 = bulk.v1.Client(self.connection)
        self.bulk_v2 = bulk.v2.Client(self.connection)
signature: (self)
index: 43,157
package: fast_poisson_solver.data
name: Data
docstring: null
code:
    class Data:
        def __init__(self, cases, domain_x=None, domain_y=None, grid_num=32, noise_std=0,
                     shuffle=False, initial_shuffle=False, batchsize=-1, batchsize_bc=-1,
                     use_torch=False, device='cpu', precision=torch.float32,
                     random_coords=False, seed=0):
            if domain_y is None:
                domain_y = [0, 1]
            if domain_x is None:
                domain_x = [0, 1]
            self.y = None
            self.x = None
            self.y_grid = None
            self.x_grid = None
            self.domain_x = domain_x
            self.domain_y = domain_y
            self.grid_num = grid_num
            self.cases = cases
            self.noise_std = noise_std
            self.num_cases = len(cases)
            self.shuffle = shuffle
            self.initial_shuffle = initial_shuffle
            self.batchsize = batchsize if batchsize > 0 else self.grid_num ** 2
            self.batchsize_bc = batchsize_bc if batchsize_bc > 0 else 4 * self.grid_num + 4
            self.batch_number = 0
            self.number_of_batches = np.ceil(self.grid_num ** 2 / self.batchsize).astype(int)
            self.use_torch = use_torch
            self.device = device
            self.random_coords = random_coords
            self.seed = seed
            self.precision = precision

            assert 0 <= self.domain_y[0] <= 1 and 0 <= self.domain_y[1] <= 1, \
                "Both elements of domain_y should lie within the interval [0, 1] inclusive"
            assert self.domain_y[1] > self.domain_y[0], \
                "The second element of domain_y should be larger than the first element"
            assert 0 <= self.domain_x[0] <= 1 and 0 <= self.domain_x[1] <= 1, \
                "Both elements of domain_x should lie within the interval [0, 1] inclusive"
            assert self.domain_x[1] > self.domain_x[0], \
                "The second element of domain_x should be larger than the first element"

            x_domain_length = self.domain_x[1] - self.domain_x[0]
            y_domain_length = self.domain_y[1] - self.domain_y[0]
            if x_domain_length > y_domain_length:
                self.x_grid_num = self.grid_num
                self.y_grid_num = int(self.grid_num * y_domain_length / x_domain_length)
            elif x_domain_length < y_domain_length:
                self.x_grid_num = int(self.grid_num * x_domain_length / y_domain_length)
                self.y_grid_num = self.grid_num
            else:
                self.x_grid_num = self.grid_num
                self.y_grid_num = self.grid_num

            np.random.seed(self.seed)
            torch.manual_seed(self.seed)

            # self.indices = [np.random.choice(self.grid_num**2, self.batchsize, replace=False)
            #                 for _ in range(self.num_cases)]
            # self.indices = np.arange(self.grid_num ** 2)
            self.indices = np.arange(self.x_grid_num * self.y_grid_num)
            # self.indices_bc = np.arange(4 * self.grid_num + 4)
            self.indices_bc = np.arange(2 * self.x_grid_num + 2 * self.y_grid_num + 4)

            if self.initial_shuffle:
                self.indices = np.random.permutation(self.indices)
                self.indices_bc = np.random.permutation(self.indices_bc)

            self.generate_boundary_coords()
            if not random_coords:
                self.generate_grid()
            else:
                self.generate_random_coords()
            self.calculate_cases()

            if self.use_torch:
                self.to_torch_tensors()
            else:
                # Reshape each coordinate array to a column vector.
                self.x_grid = self.x_grid.reshape(-1, 1)
                self.y_grid = self.y_grid.reshape(-1, 1)
                self.x_bc = self.x_bc.reshape(-1, 1)
                self.y_bc = self.y_bc.reshape(-1, 1)

        def get_infos(self):
            data = {
                'domain_x': self.domain_x,
                'domain_y': self.domain_y,
                'grid_num': self.grid_num,
                'num_cases': self.num_cases,
                'shuffle': self.shuffle,
                'initial_shuffle': self.initial_shuffle,
                'batchsize': self.batchsize,
                'batchsize_bc': self.batchsize_bc,
                'use_torch': self.use_torch,
                'device': self.device,
                'seed': self.seed,
                'precision': str(self.precision),
                'cases': self.cases,
            }
            return data

        def to_torch_tensors(self):
            self.out = torch.from_numpy(self.out).to(self.device).to(self.precision)
            self.x_grid = torch.from_numpy(self.x_grid).unsqueeze(1).to(self.device).to(self.precision)
            self.y_grid = torch.from_numpy(self.y_grid).unsqueeze(1).to(self.device).to(self.precision)
            self.bc = torch.from_numpy(self.bc).to(self.device).to(self.precision)
            self.x_bc = torch.from_numpy(self.x_bc).unsqueeze(1).to(self.device).to(self.precision)
            self.y_bc = torch.from_numpy(self.y_bc).unsqueeze(1).to(self.device).to(self.precision)
            self.indices = torch.from_numpy(self.indices).long().to(self.device)
            self.indices_bc = torch.from_numpy(self.indices_bc).long().to(self.device)

        def __iter__(self):
            return self

        def __next__(self):
            if self.batch_number < self.number_of_batches:
                out_call, x_call, y_call, bc_call, x_bc_call, y_bc_call = self.__call__(self.batch_number)
                self.batch_number += 1
                return out_call, x_call, y_call, bc_call, x_bc_call, y_bc_call
            else:
                self.batch_number = 0
                raise StopIteration

        def __call__(self, batch_number):
            if batch_number == 0 and self.shuffle:
                self.shuffle_epoch()
            min_batch = batch_number * self.batchsize
            # max_batch = min((min_batch + self.batchsize, self.grid_num ** 2))
            max_batch = min((min_batch + self.batchsize, self.x_grid_num * self.y_grid_num))
            batchsize_i = max_batch - min_batch
            min_batch_bc = batch_number * self.batchsize_bc
            # max_batch_bc = min((min_batch_bc + self.batchsize_bc, 4 * self.grid_num + 4))
            max_batch_bc = min((min_batch_bc + self.batchsize_bc,
                                2 * self.x_grid_num + 2 * self.y_grid_num + 4))
            batchsize_i_bc = max_batch_bc - min_batch_bc

            indices_call = self.indices[min_batch: max_batch]
            if self.use_torch:
                out_call = torch.zeros(size=(batchsize_i, self.num_cases),
                                       device=self.device, dtype=self.precision)
            else:
                out_call = np.zeros((batchsize_i, self.num_cases))
            x_call = self.x_grid[indices_call]
            y_call = self.y_grid[indices_call]
            for i, out_i in enumerate(self.out.T):
                out_call[:, i] = out_i[indices_call]

            indices_call_bc = self.indices_bc[min_batch_bc: max_batch_bc]
            if self.use_torch:
                bc_call = torch.zeros(size=(batchsize_i_bc, self.num_cases),
                                      device=self.device, dtype=self.precision)
            else:
                bc_call = np.zeros((batchsize_i_bc, self.num_cases))
            x_bc_call = self.x_bc[indices_call_bc]
            y_bc_call = self.y_bc[indices_call_bc]
            for i, bc_i in enumerate(self.bc.T):
                bc_call[:, i] = bc_i[indices_call_bc]

            return out_call, x_call, y_call, bc_call, x_bc_call, y_bc_call

        def shuffle_epoch(self):
            if self.use_torch:
                self.indices = torch.randperm(self.indices.shape[0])
                self.indices_bc = torch.randperm(self.indices_bc.shape[0])
            else:
                self.indices = np.random.permutation(self.indices)
                self.indices_bc = np.random.permutation(self.indices_bc)

        def generate_grid(self):
            # x = np.linspace(self.domain_x[0], self.domain_x[1], self.grid_num + 2)[1:-1]
            x = np.linspace(self.domain_x[0], self.domain_x[1], self.x_grid_num + 2)[1:-1]
            y = np.linspace(self.domain_y[0], self.domain_y[1], self.y_grid_num + 2)[1:-1]
            x, y = np.meshgrid(x, y)
            self.x_grid = x.flatten()
            self.y_grid = y.flatten()

        def generate_random_coords(self):
            engine = qmc.Sobol(d=2, scramble=True, seed=self.seed)  # d=2 for 2D points
            sample = engine.random(int(self.grid_num ** 2))
            self.x_grid, self.y_grid = np.hsplit(sample, 2)
            self.y_grid = self.y_grid.flatten()
            self.x_grid = self.x_grid.flatten()

        def generate_boundary_coords(self):
            # x_grid = np.linspace(self.domain_x[0], self.domain_x[1], self.grid_num + 2)
            x_grid = np.linspace(self.domain_x[0], self.domain_x[1], self.x_grid_num + 2)
            y_grid = np.linspace(self.domain_y[0], self.domain_y[1], self.y_grid_num + 2)[1:-1]
            lower_x = np.ones_like(y_grid) * self.domain_x[0]
            lower_y = np.ones_like(x_grid) * self.domain_y[0]
            upper_x = np.ones_like(y_grid) * self.domain_x[1]
            upper_y = np.ones_like(x_grid) * self.domain_y[1]
            self.x_bc = np.concatenate([x_grid, x_grid, lower_x, upper_x]).flatten()
            self.y_bc = np.concatenate([lower_y, upper_y, y_grid, y_grid]).flatten()

        def calculate_cases(self):
            self.out = np.zeros((len(self.x_grid), len(self.cases)))
            self.bc = np.zeros((len(self.x_bc), len(self.cases)))
            for i, case in enumerate(self.cases):
                if case['b_val'] == 'random':
                    self.bc[:, i] = np.ones_like(self.x_bc) * np.random.uniform(-10, 10)
                else:
                    np.random.uniform(-10, 10)  # to make sure the random seed is not affected
                    self.bc[:, i] = np.ones_like(self.x_bc) * case['b_val']
                # if case['name'] == 'sinsin':
                #     self.out[:, i] = sf.sinsin(self.x_grid, self.y_grid, case['param'])
                if case['name'] == 'sin':
                    self.out[:, i] = sf.sincos(self.x_grid, self.y_grid, case['param'])
                elif case['name'] == 'exp':
                    self.out[:, i] = sf.exp(self.x_grid, self.y_grid, case['param'])
                elif case['name'] == 'perlin':
                    self.out[:, i] = sf.perlin(self.x_grid, self.y_grid, case['param'])
                # elif case['name'] == 'rectangle':
                #     self.out[:, i] = sf.rectangle(self.x_grid, self.y_grid, case['param'])
                # elif case['name'] == 'circle':
                #     self.out[:, i] = sf.circle(self.x_grid, self.y_grid, case['param'])
                elif case['name'] == 'geo':
                    self.out[:, i] = sf.rectangle_circle(self.x_grid, self.y_grid, case['param'],
                                                         grid_num=self.grid_num)
                else:
                    raise ValueError('Unknown case')
                if self.noise_std > 0:
                    self.out[:, i] *= np.random.normal(1, self.noise_std, len(self.x_grid))

        def plot_functions(self):
            f, x, y, *_ = self.__call__(0)
            if self.use_torch:
                f = f.float().cpu().detach().numpy()
                x = x.float().cpu().detach().numpy()
                y = y.float().cpu().detach().numpy()
            xy = np.array([x, y]).T
            ind = np.lexsort((xy[:, 1], xy[:, 0]))
            f = f[ind]
            x = x[ind]
            y = y[ind]

            def plot(fig, ax, x, y, v, title):
                x = x.reshape(self.grid_num, self.grid_num)
                y = y.reshape(self.grid_num, self.grid_num)
                v = v.reshape(self.grid_num, self.grid_num)
                c = ax.contourf(x, y, v, 100, cmap='jet')
                # c = ax.scatter(x, y, c=v, s=1)
                ax.set_title(title)
                ax.set_xlabel('x')
                ax.set_ylabel('y')
                fig.colorbar(c, ax=ax)

            plots_x = np.min((self.num_cases, 4))
            plots_y = int(np.ceil(self.num_cases / plots_x))
            fig, axs = plt.subplots(plots_y, plots_x, figsize=(plots_x * 4, plots_y * 4),
                                    dpi=100, tight_layout=True)
            if self.num_cases == 1:
                plot(fig, axs, x, y, f[:, 0], self.cases[0]['name'])
            else:
                i = 0
                for ax in axs.flatten():
                    if i < self.num_cases:
                        plot(fig, ax, x, y, f[:, i], self.cases[i]['name'])
                    else:
                        ax.axis('off')
                    i += 1
            plt.savefig(os.path.join('..', 'images', 'functions.png'))
            plt.close()
signature: (cases, domain_x=None, domain_y=None, grid_num=32, noise_std=0, shuffle=False, initial_shuffle=False, batchsize=-1, batchsize_bc=-1, use_torch=False, device='cpu', precision=torch.float32, random_coords=False, seed=0)
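A minimal usage sketch for the Data record above, assuming the module path from the package field is importable; the case keys ('name', 'param', 'b_val') come from calculate_cases, but the concrete 'param' value is a placeholder:

    from fast_poisson_solver.data import Data  # import path assumed from the record

    # One 'sin' source term with a constant boundary value of 0.
    cases = [{'name': 'sin', 'param': 1, 'b_val': 0.0}]
    data = Data(cases, grid_num=32, batchsize=256, use_torch=True, device='cpu')

    # Data is its own iterator; each batch mirrors __call__'s return order.
    for out, x, y, bc, x_bc, y_bc in data:
        print(out.shape, x.shape, bc.shape)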
index: 43,158
package: fast_poisson_solver.data
name: __call__
docstring: null
code:
    def __call__(self, batch_number):
        if batch_number == 0 and self.shuffle:
            self.shuffle_epoch()
        min_batch = batch_number * self.batchsize
        # max_batch = min((min_batch + self.batchsize, self.grid_num ** 2))
        max_batch = min((min_batch + self.batchsize, self.x_grid_num * self.y_grid_num))
        batchsize_i = max_batch - min_batch
        min_batch_bc = batch_number * self.batchsize_bc
        # max_batch_bc = min((min_batch_bc + self.batchsize_bc, 4 * self.grid_num + 4))
        max_batch_bc = min((min_batch_bc + self.batchsize_bc,
                            2 * self.x_grid_num + 2 * self.y_grid_num + 4))
        batchsize_i_bc = max_batch_bc - min_batch_bc

        indices_call = self.indices[min_batch: max_batch]
        if self.use_torch:
            out_call = torch.zeros(size=(batchsize_i, self.num_cases),
                                   device=self.device, dtype=self.precision)
        else:
            out_call = np.zeros((batchsize_i, self.num_cases))
        x_call = self.x_grid[indices_call]
        y_call = self.y_grid[indices_call]
        for i, out_i in enumerate(self.out.T):
            out_call[:, i] = out_i[indices_call]

        indices_call_bc = self.indices_bc[min_batch_bc: max_batch_bc]
        if self.use_torch:
            bc_call = torch.zeros(size=(batchsize_i_bc, self.num_cases),
                                  device=self.device, dtype=self.precision)
        else:
            bc_call = np.zeros((batchsize_i_bc, self.num_cases))
        x_bc_call = self.x_bc[indices_call_bc]
        y_bc_call = self.y_bc[indices_call_bc]
        for i, bc_i in enumerate(self.bc.T):
            bc_call[:, i] = bc_i[indices_call_bc]

        return out_call, x_call, y_call, bc_call, x_bc_call, y_bc_call
signature: (self, batch_number)
index: 43,159
package: fast_poisson_solver.data
name: __init__
docstring: null
code:
    def __init__(self, cases, domain_x=None, domain_y=None, grid_num=32, noise_std=0,
                 shuffle=False, initial_shuffle=False, batchsize=-1, batchsize_bc=-1,
                 use_torch=False, device='cpu', precision=torch.float32,
                 random_coords=False, seed=0):
        if domain_y is None:
            domain_y = [0, 1]
        if domain_x is None:
            domain_x = [0, 1]
        self.y = None
        self.x = None
        self.y_grid = None
        self.x_grid = None
        self.domain_x = domain_x
        self.domain_y = domain_y
        self.grid_num = grid_num
        self.cases = cases
        self.noise_std = noise_std
        self.num_cases = len(cases)
        self.shuffle = shuffle
        self.initial_shuffle = initial_shuffle
        self.batchsize = batchsize if batchsize > 0 else self.grid_num ** 2
        self.batchsize_bc = batchsize_bc if batchsize_bc > 0 else 4 * self.grid_num + 4
        self.batch_number = 0
        self.number_of_batches = np.ceil(self.grid_num ** 2 / self.batchsize).astype(int)
        self.use_torch = use_torch
        self.device = device
        self.random_coords = random_coords
        self.seed = seed
        self.precision = precision

        assert 0 <= self.domain_y[0] <= 1 and 0 <= self.domain_y[1] <= 1, \
            "Both elements of domain_y should lie within the interval [0, 1] inclusive"
        assert self.domain_y[1] > self.domain_y[0], \
            "The second element of domain_y should be larger than the first element"
        assert 0 <= self.domain_x[0] <= 1 and 0 <= self.domain_x[1] <= 1, \
            "Both elements of domain_x should lie within the interval [0, 1] inclusive"
        assert self.domain_x[1] > self.domain_x[0], \
            "The second element of domain_x should be larger than the first element"

        x_domain_length = self.domain_x[1] - self.domain_x[0]
        y_domain_length = self.domain_y[1] - self.domain_y[0]
        if x_domain_length > y_domain_length:
            self.x_grid_num = self.grid_num
            self.y_grid_num = int(self.grid_num * y_domain_length / x_domain_length)
        elif x_domain_length < y_domain_length:
            self.x_grid_num = int(self.grid_num * x_domain_length / y_domain_length)
            self.y_grid_num = self.grid_num
        else:
            self.x_grid_num = self.grid_num
            self.y_grid_num = self.grid_num

        np.random.seed(self.seed)
        torch.manual_seed(self.seed)

        # self.indices = [np.random.choice(self.grid_num**2, self.batchsize, replace=False)
        #                 for _ in range(self.num_cases)]
        # self.indices = np.arange(self.grid_num ** 2)
        self.indices = np.arange(self.x_grid_num * self.y_grid_num)
        # self.indices_bc = np.arange(4 * self.grid_num + 4)
        self.indices_bc = np.arange(2 * self.x_grid_num + 2 * self.y_grid_num + 4)

        if self.initial_shuffle:
            self.indices = np.random.permutation(self.indices)
            self.indices_bc = np.random.permutation(self.indices_bc)

        self.generate_boundary_coords()
        if not random_coords:
            self.generate_grid()
        else:
            self.generate_random_coords()
        self.calculate_cases()

        if self.use_torch:
            self.to_torch_tensors()
        else:
            # Reshape each coordinate array to a column vector.
            self.x_grid = self.x_grid.reshape(-1, 1)
            self.y_grid = self.y_grid.reshape(-1, 1)
            self.x_bc = self.x_bc.reshape(-1, 1)
            self.y_bc = self.y_bc.reshape(-1, 1)
signature: (self, cases, domain_x=None, domain_y=None, grid_num=32, noise_std=0, shuffle=False, initial_shuffle=False, batchsize=-1, batchsize_bc=-1, use_torch=False, device='cpu', precision=torch.float32, random_coords=False, seed=0)
index: 43,161
package: fast_poisson_solver.data
name: __next__
docstring: null
code:
    def __next__(self):
        if self.batch_number < self.number_of_batches:
            out_call, x_call, y_call, bc_call, x_bc_call, y_bc_call = self.__call__(self.batch_number)
            self.batch_number += 1
            return out_call, x_call, y_call, bc_call, x_bc_call, y_bc_call
        else:
            self.batch_number = 0
            raise StopIteration
signature: (self)
index: 43,162
package: fast_poisson_solver.data
name: calculate_cases
docstring: null
code:
    def calculate_cases(self):
        self.out = np.zeros((len(self.x_grid), len(self.cases)))
        self.bc = np.zeros((len(self.x_bc), len(self.cases)))
        for i, case in enumerate(self.cases):
            if case['b_val'] == 'random':
                self.bc[:, i] = np.ones_like(self.x_bc) * np.random.uniform(-10, 10)
            else:
                np.random.uniform(-10, 10)  # to make sure the random seed is not affected
                self.bc[:, i] = np.ones_like(self.x_bc) * case['b_val']
            # if case['name'] == 'sinsin':
            #     self.out[:, i] = sf.sinsin(self.x_grid, self.y_grid, case['param'])
            if case['name'] == 'sin':
                self.out[:, i] = sf.sincos(self.x_grid, self.y_grid, case['param'])
            elif case['name'] == 'exp':
                self.out[:, i] = sf.exp(self.x_grid, self.y_grid, case['param'])
            elif case['name'] == 'perlin':
                self.out[:, i] = sf.perlin(self.x_grid, self.y_grid, case['param'])
            # elif case['name'] == 'rectangle':
            #     self.out[:, i] = sf.rectangle(self.x_grid, self.y_grid, case['param'])
            # elif case['name'] == 'circle':
            #     self.out[:, i] = sf.circle(self.x_grid, self.y_grid, case['param'])
            elif case['name'] == 'geo':
                self.out[:, i] = sf.rectangle_circle(self.x_grid, self.y_grid, case['param'],
                                                     grid_num=self.grid_num)
            else:
                raise ValueError('Unknown case')
            if self.noise_std > 0:
                self.out[:, i] *= np.random.normal(1, self.noise_std, len(self.x_grid))
signature: (self)
index: 43,163
package: fast_poisson_solver.data
name: generate_boundary_coords
docstring: null
code:
    def generate_boundary_coords(self):
        # x_grid = np.linspace(self.domain_x[0], self.domain_x[1], self.grid_num + 2)
        x_grid = np.linspace(self.domain_x[0], self.domain_x[1], self.x_grid_num + 2)
        y_grid = np.linspace(self.domain_y[0], self.domain_y[1], self.y_grid_num + 2)[1:-1]
        lower_x = np.ones_like(y_grid) * self.domain_x[0]
        lower_y = np.ones_like(x_grid) * self.domain_y[0]
        upper_x = np.ones_like(y_grid) * self.domain_x[1]
        upper_y = np.ones_like(x_grid) * self.domain_y[1]
        self.x_bc = np.concatenate([x_grid, x_grid, lower_x, upper_x]).flatten()
        self.y_bc = np.concatenate([lower_y, upper_y, y_grid, y_grid]).flatten()
signature: (self)
index: 43,164
package: fast_poisson_solver.data
name: generate_grid
docstring: null
code:
    def generate_grid(self):
        # x = np.linspace(self.domain_x[0], self.domain_x[1], self.grid_num + 2)[1:-1]
        x = np.linspace(self.domain_x[0], self.domain_x[1], self.x_grid_num + 2)[1:-1]
        y = np.linspace(self.domain_y[0], self.domain_y[1], self.y_grid_num + 2)[1:-1]
        x, y = np.meshgrid(x, y)
        self.x_grid = x.flatten()
        self.y_grid = y.flatten()
signature: (self)
index: 43,165
package: fast_poisson_solver.data
name: generate_random_coords
docstring: null
code:
    def generate_random_coords(self):
        engine = qmc.Sobol(d=2, scramble=True, seed=self.seed)  # d=2 for 2D points
        sample = engine.random(int(self.grid_num ** 2))
        self.x_grid, self.y_grid = np.hsplit(sample, 2)
        self.y_grid = self.y_grid.flatten()
        self.x_grid = self.x_grid.flatten()
signature: (self)
index: 43,166
package: fast_poisson_solver.data
name: get_infos
docstring: null
code:
    def get_infos(self):
        data = {
            'domain_x': self.domain_x,
            'domain_y': self.domain_y,
            'grid_num': self.grid_num,
            'num_cases': self.num_cases,
            'shuffle': self.shuffle,
            'initial_shuffle': self.initial_shuffle,
            'batchsize': self.batchsize,
            'batchsize_bc': self.batchsize_bc,
            'use_torch': self.use_torch,
            'device': self.device,
            'seed': self.seed,
            'precision': str(self.precision),
            'cases': self.cases,
        }
        return data
signature: (self)
index: 43,167
package: fast_poisson_solver.data
name: plot_functions
docstring: null
code:
    def plot_functions(self):
        f, x, y, *_ = self.__call__(0)
        if self.use_torch:
            f = f.float().cpu().detach().numpy()
            x = x.float().cpu().detach().numpy()
            y = y.float().cpu().detach().numpy()
        xy = np.array([x, y]).T
        ind = np.lexsort((xy[:, 1], xy[:, 0]))
        f = f[ind]
        x = x[ind]
        y = y[ind]

        def plot(fig, ax, x, y, v, title):
            x = x.reshape(self.grid_num, self.grid_num)
            y = y.reshape(self.grid_num, self.grid_num)
            v = v.reshape(self.grid_num, self.grid_num)
            c = ax.contourf(x, y, v, 100, cmap='jet')
            # c = ax.scatter(x, y, c=v, s=1)
            ax.set_title(title)
            ax.set_xlabel('x')
            ax.set_ylabel('y')
            fig.colorbar(c, ax=ax)

        plots_x = np.min((self.num_cases, 4))
        plots_y = int(np.ceil(self.num_cases / plots_x))
        fig, axs = plt.subplots(plots_y, plots_x, figsize=(plots_x * 4, plots_y * 4),
                                dpi=100, tight_layout=True)
        if self.num_cases == 1:
            plot(fig, axs, x, y, f[:, 0], self.cases[0]['name'])
        else:
            i = 0
            for ax in axs.flatten():
                if i < self.num_cases:
                    plot(fig, ax, x, y, f[:, i], self.cases[i]['name'])
                else:
                    ax.axis('off')
                i += 1
        plt.savefig(os.path.join('..', 'images', 'functions.png'))
        plt.close()
signature: (self)
index: 43,168
package: fast_poisson_solver.data
name: shuffle_epoch
docstring: null
code:
    def shuffle_epoch(self):
        if self.use_torch:
            self.indices = torch.randperm(self.indices.shape[0])
            self.indices_bc = torch.randperm(self.indices_bc.shape[0])
        else:
            self.indices = np.random.permutation(self.indices)
            self.indices_bc = np.random.permutation(self.indices_bc)
signature: (self)
index: 43,169
package: fast_poisson_solver.data
name: to_torch_tensors
docstring: null
code:
    def to_torch_tensors(self):
        self.out = torch.from_numpy(self.out).to(self.device).to(self.precision)
        self.x_grid = torch.from_numpy(self.x_grid).unsqueeze(1).to(self.device).to(self.precision)
        self.y_grid = torch.from_numpy(self.y_grid).unsqueeze(1).to(self.device).to(self.precision)
        self.bc = torch.from_numpy(self.bc).to(self.device).to(self.precision)
        self.x_bc = torch.from_numpy(self.x_bc).unsqueeze(1).to(self.device).to(self.precision)
        self.y_bc = torch.from_numpy(self.y_bc).unsqueeze(1).to(self.device).to(self.precision)
        self.indices = torch.from_numpy(self.indices).long().to(self.device)
        self.indices_bc = torch.from_numpy(self.indices_bc).long().to(self.device)
signature: (self)
index: 43,170
package: matplotlib.cm
name: ScalarMappable
docstring: A mixin class to map scalar data to RGBA. The ScalarMappable applies data normalization before returning RGBA colors from the given colormap.
code:
    class ScalarMappable:
        """
        A mixin class to map scalar data to RGBA.

        The ScalarMappable applies data normalization before returning RGBA
        colors from the given colormap.
        """

        def __init__(self, norm=None, cmap=None):
            """
            Parameters
            ----------
            norm : `.Normalize` (or subclass thereof) or str or None
                The normalizing object which scales data, typically into the
                interval ``[0, 1]``.
                If a `str`, a `.Normalize` subclass is dynamically generated based
                on the scale with the corresponding name.
                If *None*, *norm* defaults to a *colors.Normalize* object which
                initializes its scaling based on the first data processed.
            cmap : str or `~matplotlib.colors.Colormap`
                The colormap used to map normalized data values to RGBA colors.
            """
            self._A = None
            self._norm = None  # So that the setter knows we're initializing.
            self.set_norm(norm)  # The Normalize instance of this ScalarMappable.
            self.cmap = None  # So that the setter knows we're initializing.
            self.set_cmap(cmap)  # The Colormap instance of this ScalarMappable.
            #: The last colorbar associated with this ScalarMappable. May be None.
            self.colorbar = None
            self.callbacks = cbook.CallbackRegistry(signals=["changed"])

        def _scale_norm(self, norm, vmin, vmax):
            """
            Helper for initial scaling.

            Used by public functions that create a ScalarMappable and support
            parameters *vmin*, *vmax* and *norm*. This makes sure that a *norm*
            will take precedence over *vmin*, *vmax*.

            Note that this method does not set the norm.
            """
            if vmin is not None or vmax is not None:
                self.set_clim(vmin, vmax)
                if isinstance(norm, colors.Normalize):
                    raise ValueError(
                        "Passing a Normalize instance simultaneously with "
                        "vmin/vmax is not supported. Please pass vmin/vmax "
                        "directly to the norm when creating it.")
            # always resolve the autoscaling so we have concrete limits
            # rather than deferring to draw time.
            self.autoscale_None()

        def to_rgba(self, x, alpha=None, bytes=False, norm=True):
            """
            Return a normalized RGBA array corresponding to *x*.

            In the normal case, *x* is a 1D or 2D sequence of scalars, and
            the corresponding `~numpy.ndarray` of RGBA values will be returned,
            based on the norm and colormap set for this ScalarMappable.

            There is one special case, for handling images that are already
            RGB or RGBA, such as might have been read from an image file.
            If *x* is an `~numpy.ndarray` with 3 dimensions,
            and the last dimension is either 3 or 4, then it will be
            treated as an RGB or RGBA array, and no mapping will be done.
            The array can be `~numpy.uint8`, or it can be floats with
            values in the 0-1 range; otherwise a ValueError will be raised.
            If it is a masked array, any masked elements will be set to 0 alpha.
            If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
            will be used to fill in the transparency. If the last dimension
            is 4, the *alpha* kwarg is ignored; it does not
            replace the preexisting alpha. A ValueError will be raised
            if the third dimension is other than 3 or 4.

            In either case, if *bytes* is *False* (default), the RGBA
            array will be floats in the 0-1 range; if it is *True*,
            the returned RGBA array will be `~numpy.uint8` in the 0 to 255 range.

            If norm is False, no normalization of the input data is
            performed, and it is assumed to be in the range (0-1).
            """
            # First check for special case, image input:
            try:
                if x.ndim == 3:
                    if x.shape[2] == 3:
                        if alpha is None:
                            alpha = 1
                        if x.dtype == np.uint8:
                            alpha = np.uint8(alpha * 255)
                        m, n = x.shape[:2]
                        xx = np.empty(shape=(m, n, 4), dtype=x.dtype)
                        xx[:, :, :3] = x
                        xx[:, :, 3] = alpha
                    elif x.shape[2] == 4:
                        xx = x
                    else:
                        raise ValueError("Third dimension must be 3 or 4")
                    if xx.dtype.kind == 'f':
                        if norm and (xx.max() > 1 or xx.min() < 0):
                            raise ValueError("Floating point image RGB values "
                                             "must be in the 0..1 range.")
                        if bytes:
                            xx = (xx * 255).astype(np.uint8)
                    elif xx.dtype == np.uint8:
                        if not bytes:
                            xx = xx.astype(np.float32) / 255
                    else:
                        raise ValueError("Image RGB array must be uint8 or "
                                         "floating point; found %s" % xx.dtype)
                    # Account for any masked entries in the original array
                    # If any of R, G, B, or A are masked for an entry, we set alpha to 0
                    if np.ma.is_masked(x):
                        xx[np.any(np.ma.getmaskarray(x), axis=2), 3] = 0
                    return xx
            except AttributeError:
                # e.g., x is not an ndarray; so try mapping it
                pass

            # This is the normal case, mapping a scalar array:
            x = ma.asarray(x)
            if norm:
                x = self.norm(x)
            rgba = self.cmap(x, alpha=alpha, bytes=bytes)
            return rgba

        def set_array(self, A):
            """
            Set the value array from array-like *A*.

            Parameters
            ----------
            A : array-like or None
                The values that are mapped to colors.

                The base class `.ScalarMappable` does not make any assumptions on
                the dimensionality and shape of the value array *A*.
            """
            if A is None:
                self._A = None
                return
            A = cbook.safe_masked_invalid(A, copy=True)
            if not np.can_cast(A.dtype, float, "same_kind"):
                raise TypeError(f"Image data of dtype {A.dtype} cannot be "
                                "converted to float")
            self._A = A

        def get_array(self):
            """
            Return the array of values, that are mapped to colors.

            The base class `.ScalarMappable` does not make any assumptions on
            the dimensionality and shape of the array.
            """
            return self._A

        def get_cmap(self):
            """Return the `.Colormap` instance."""
            return self.cmap

        def get_clim(self):
            """
            Return the values (min, max) that are mapped to the colormap limits.
            """
            return self.norm.vmin, self.norm.vmax

        def set_clim(self, vmin=None, vmax=None):
            """
            Set the norm limits for image scaling.

            Parameters
            ----------
            vmin, vmax : float
                 The limits.

                 The limits may also be passed as a tuple (*vmin*, *vmax*) as a
                 single positional argument.

                 .. ACCEPTS: (vmin: float, vmax: float)
            """
            # If the norm's limits are updated self.changed() will be called
            # through the callbacks attached to the norm
            if vmax is None:
                try:
                    vmin, vmax = vmin
                except (TypeError, ValueError):
                    pass
            if vmin is not None:
                self.norm.vmin = colors._sanitize_extrema(vmin)
            if vmax is not None:
                self.norm.vmax = colors._sanitize_extrema(vmax)

        def get_alpha(self):
            """
            Returns
            -------
            float
                Always returns 1.
            """
            # This method is intended to be overridden by Artist sub-classes
            return 1.

        def set_cmap(self, cmap):
            """
            Set the colormap for luminance data.

            Parameters
            ----------
            cmap : `.Colormap` or str or None
            """
            in_init = self.cmap is None

            self.cmap = _ensure_cmap(cmap)
            if not in_init:
                self.changed()  # Things are not set up properly yet.

        @property
        def norm(self):
            return self._norm

        @norm.setter
        def norm(self, norm):
            _api.check_isinstance((colors.Normalize, str, None), norm=norm)
            if norm is None:
                norm = colors.Normalize()
            elif isinstance(norm, str):
                try:
                    scale_cls = scale._scale_mapping[norm]
                except KeyError:
                    raise ValueError(
                        "Invalid norm str name; the following values are "
                        f"supported: {', '.join(scale._scale_mapping)}"
                    ) from None
                norm = _auto_norm_from_scale(scale_cls)()

            if norm is self.norm:
                # We aren't updating anything
                return

            in_init = self.norm is None
            # Remove the current callback and connect to the new one
            if not in_init:
                self.norm.callbacks.disconnect(self._id_norm)
            self._norm = norm
            self._id_norm = self.norm.callbacks.connect('changed', self.changed)
            if not in_init:
                self.changed()

        def set_norm(self, norm):
            """
            Set the normalization instance.

            Parameters
            ----------
            norm : `.Normalize` or str or None

            Notes
            -----
            If there are any colorbars using the mappable for this norm, setting
            the norm of the mappable will reset the norm, locator, and formatters
            on the colorbar to default.
            """
            self.norm = norm

        def autoscale(self):
            """
            Autoscale the scalar limits on the norm instance using the
            current array
            """
            if self._A is None:
                raise TypeError('You must first set_array for mappable')
            # If the norm's limits are updated self.changed() will be called
            # through the callbacks attached to the norm
            self.norm.autoscale(self._A)

        def autoscale_None(self):
            """
            Autoscale the scalar limits on the norm instance using the
            current array, changing only limits that are None
            """
            if self._A is None:
                raise TypeError('You must first set_array for mappable')
            # If the norm's limits are updated self.changed() will be called
            # through the callbacks attached to the norm
            self.norm.autoscale_None(self._A)

        def changed(self):
            """
            Call this whenever the mappable is changed to notify all the
            callbackSM listeners to the 'changed' signal.
            """
            self.callbacks.process('changed', self)
            self.stale = True
signature: (norm=None, cmap=None)
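The records above describe matplotlib's public API, so a concrete, runnable example of the norm-then-colormap workflow is straightforward:

    import numpy as np
    from matplotlib.cm import ScalarMappable
    from matplotlib.colors import Normalize

    sm = ScalarMappable(norm=Normalize(vmin=0.0, vmax=10.0), cmap='viridis')
    sm.set_array(np.linspace(0.0, 10.0, 5))  # values the mappable represents
    print(sm.get_clim())                     # (0.0, 10.0)
    rgba = sm.to_rgba([0.0, 5.0, 10.0])      # normalize, then map through the colormap
    print(rgba.shape)                        # (3, 4)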
index: 43,171
package: matplotlib.cm
name: __init__
docstring: Parameters ---------- norm : `.Normalize` (or subclass thereof) or str or None The normalizing object which scales data, typically into the interval ``[0, 1]``. If a `str`, a `.Normalize` subclass is dynamically generated based on the scale with the corresponding name. If *None*, *norm* defaults to a *colors.Normalize* object which initializes its scaling based on the first data processed. cmap : str or `~matplotlib.colors.Colormap` The colormap used to map normalized data values to RGBA colors.
code:
    def __init__(self, norm=None, cmap=None):
        """
        Parameters
        ----------
        norm : `.Normalize` (or subclass thereof) or str or None
            The normalizing object which scales data, typically into the
            interval ``[0, 1]``.
            If a `str`, a `.Normalize` subclass is dynamically generated based
            on the scale with the corresponding name.
            If *None*, *norm* defaults to a *colors.Normalize* object which
            initializes its scaling based on the first data processed.
        cmap : str or `~matplotlib.colors.Colormap`
            The colormap used to map normalized data values to RGBA colors.
        """
        self._A = None
        self._norm = None  # So that the setter knows we're initializing.
        self.set_norm(norm)  # The Normalize instance of this ScalarMappable.
        self.cmap = None  # So that the setter knows we're initializing.
        self.set_cmap(cmap)  # The Colormap instance of this ScalarMappable.
        #: The last colorbar associated with this ScalarMappable. May be None.
        self.colorbar = None
        self.callbacks = cbook.CallbackRegistry(signals=["changed"])
signature: (self, norm=None, cmap=None)
index: 43,172
package: matplotlib.cm
name: _scale_norm
docstring: Helper for initial scaling. Used by public functions that create a ScalarMappable and support parameters *vmin*, *vmax* and *norm*. This makes sure that a *norm* will take precedence over *vmin*, *vmax*. Note that this method does not set the norm.
code:
    def _scale_norm(self, norm, vmin, vmax):
        """
        Helper for initial scaling.

        Used by public functions that create a ScalarMappable and support
        parameters *vmin*, *vmax* and *norm*. This makes sure that a *norm*
        will take precedence over *vmin*, *vmax*.

        Note that this method does not set the norm.
        """
        if vmin is not None or vmax is not None:
            self.set_clim(vmin, vmax)
            if isinstance(norm, colors.Normalize):
                raise ValueError(
                    "Passing a Normalize instance simultaneously with "
                    "vmin/vmax is not supported. Please pass vmin/vmax "
                    "directly to the norm when creating it.")
        # always resolve the autoscaling so we have concrete limits
        # rather than deferring to draw time.
        self.autoscale_None()
signature: (self, norm, vmin, vmax)
index: 43,173
package: matplotlib.cm
name: autoscale
docstring: Autoscale the scalar limits on the norm instance using the current array
code:
    def autoscale(self):
        """
        Autoscale the scalar limits on the norm instance using the
        current array
        """
        if self._A is None:
            raise TypeError('You must first set_array for mappable')
        # If the norm's limits are updated self.changed() will be called
        # through the callbacks attached to the norm
        self.norm.autoscale(self._A)
signature: (self)
index: 43,174
package: matplotlib.cm
name: autoscale_None
docstring: Autoscale the scalar limits on the norm instance using the current array, changing only limits that are None
code:
    def autoscale_None(self):
        """
        Autoscale the scalar limits on the norm instance using the
        current array, changing only limits that are None
        """
        if self._A is None:
            raise TypeError('You must first set_array for mappable')
        # If the norm's limits are updated self.changed() will be called
        # through the callbacks attached to the norm
        self.norm.autoscale_None(self._A)
signature: (self)
index: 43,175
package: matplotlib.cm
name: changed
docstring: Call this whenever the mappable is changed to notify all the callbackSM listeners to the 'changed' signal.
code:
    def changed(self):
        """
        Call this whenever the mappable is changed to notify all the
        callbackSM listeners to the 'changed' signal.
        """
        self.callbacks.process('changed', self)
        self.stale = True
signature: (self)
index: 43,176
package: matplotlib.cm
name: get_alpha
docstring: Returns ------- float Always returns 1.
code:
    def get_alpha(self):
        """
        Returns
        -------
        float
            Always returns 1.
        """
        # This method is intended to be overridden by Artist sub-classes
        return 1.
signature: (self)
index: 43,177
package: matplotlib.cm
name: get_array
docstring: Return the array of values, that are mapped to colors. The base class `.ScalarMappable` does not make any assumptions on the dimensionality and shape of the array.
code:
    def get_array(self):
        """
        Return the array of values, that are mapped to colors.

        The base class `.ScalarMappable` does not make any assumptions on
        the dimensionality and shape of the array.
        """
        return self._A
signature: (self)
index: 43,178
package: matplotlib.cm
name: get_clim
docstring: Return the values (min, max) that are mapped to the colormap limits.
code:
    def get_clim(self):
        """
        Return the values (min, max) that are mapped to the colormap limits.
        """
        return self.norm.vmin, self.norm.vmax
signature: (self)
index: 43,179
package: matplotlib.cm
name: get_cmap
docstring: Return the `.Colormap` instance.
code:
    def get_cmap(self):
        """Return the `.Colormap` instance."""
        return self.cmap
signature: (self)
index: 43,180
package: matplotlib.cm
name: set_array
docstring: Set the value array from array-like *A*. Parameters ---------- A : array-like or None The values that are mapped to colors. The base class `.ScalarMappable` does not make any assumptions on the dimensionality and shape of the value array *A*.
code:
    def set_array(self, A):
        """
        Set the value array from array-like *A*.

        Parameters
        ----------
        A : array-like or None
            The values that are mapped to colors.

            The base class `.ScalarMappable` does not make any assumptions on
            the dimensionality and shape of the value array *A*.
        """
        if A is None:
            self._A = None
            return
        A = cbook.safe_masked_invalid(A, copy=True)
        if not np.can_cast(A.dtype, float, "same_kind"):
            raise TypeError(f"Image data of dtype {A.dtype} cannot be "
                            "converted to float")
        self._A = A
signature: (self, A)
index: 43,181
package: matplotlib.cm
name: set_clim
docstring: Set the norm limits for image scaling. Parameters ---------- vmin, vmax : float The limits. The limits may also be passed as a tuple (*vmin*, *vmax*) as a single positional argument. .. ACCEPTS: (vmin: float, vmax: float)
code:
    def set_clim(self, vmin=None, vmax=None):
        """
        Set the norm limits for image scaling.

        Parameters
        ----------
        vmin, vmax : float
             The limits.

             The limits may also be passed as a tuple (*vmin*, *vmax*) as a
             single positional argument.

             .. ACCEPTS: (vmin: float, vmax: float)
        """
        # If the norm's limits are updated self.changed() will be called
        # through the callbacks attached to the norm
        if vmax is None:
            try:
                vmin, vmax = vmin
            except (TypeError, ValueError):
                pass
        if vmin is not None:
            self.norm.vmin = colors._sanitize_extrema(vmin)
        if vmax is not None:
            self.norm.vmax = colors._sanitize_extrema(vmax)
signature: (self, vmin=None, vmax=None)
index: 43,182
package: matplotlib.cm
name: set_cmap
docstring: Set the colormap for luminance data. Parameters ---------- cmap : `.Colormap` or str or None
code:
    def set_cmap(self, cmap):
        """
        Set the colormap for luminance data.

        Parameters
        ----------
        cmap : `.Colormap` or str or None
        """
        in_init = self.cmap is None

        self.cmap = _ensure_cmap(cmap)
        if not in_init:
            self.changed()  # Things are not set up properly yet.
signature: (self, cmap)
index: 43,183
package: matplotlib.cm
name: set_norm
docstring: Set the normalization instance. Parameters ---------- norm : `.Normalize` or str or None Notes ----- If there are any colorbars using the mappable for this norm, setting the norm of the mappable will reset the norm, locator, and formatters on the colorbar to default.
code:
    def set_norm(self, norm):
        """
        Set the normalization instance.

        Parameters
        ----------
        norm : `.Normalize` or str or None

        Notes
        -----
        If there are any colorbars using the mappable for this norm, setting
        the norm of the mappable will reset the norm, locator, and formatters
        on the colorbar to default.
        """
        self.norm = norm
signature: (self, norm)
index: 43,184
package: matplotlib.cm
name: to_rgba
docstring: Return a normalized RGBA array corresponding to *x*. In the normal case, *x* is a 1D or 2D sequence of scalars, and the corresponding `~numpy.ndarray` of RGBA values will be returned, based on the norm and colormap set for this ScalarMappable. There is one special case, for handling images that are already RGB or RGBA, such as might have been read from an image file. If *x* is an `~numpy.ndarray` with 3 dimensions, and the last dimension is either 3 or 4, then it will be treated as an RGB or RGBA array, and no mapping will be done. The array can be `~numpy.uint8`, or it can be floats with values in the 0-1 range; otherwise a ValueError will be raised. If it is a masked array, any masked elements will be set to 0 alpha. If the last dimension is 3, the *alpha* kwarg (defaulting to 1) will be used to fill in the transparency. If the last dimension is 4, the *alpha* kwarg is ignored; it does not replace the preexisting alpha. A ValueError will be raised if the third dimension is other than 3 or 4. In either case, if *bytes* is *False* (default), the RGBA array will be floats in the 0-1 range; if it is *True*, the returned RGBA array will be `~numpy.uint8` in the 0 to 255 range. If norm is False, no normalization of the input data is performed, and it is assumed to be in the range (0-1).
code:
    def to_rgba(self, x, alpha=None, bytes=False, norm=True):
        """
        Return a normalized RGBA array corresponding to *x*.

        In the normal case, *x* is a 1D or 2D sequence of scalars, and
        the corresponding `~numpy.ndarray` of RGBA values will be returned,
        based on the norm and colormap set for this ScalarMappable.

        There is one special case, for handling images that are already
        RGB or RGBA, such as might have been read from an image file.
        If *x* is an `~numpy.ndarray` with 3 dimensions,
        and the last dimension is either 3 or 4, then it will be
        treated as an RGB or RGBA array, and no mapping will be done.
        The array can be `~numpy.uint8`, or it can be floats with
        values in the 0-1 range; otherwise a ValueError will be raised.
        If it is a masked array, any masked elements will be set to 0 alpha.
        If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
        will be used to fill in the transparency. If the last dimension
        is 4, the *alpha* kwarg is ignored; it does not
        replace the preexisting alpha. A ValueError will be raised
        if the third dimension is other than 3 or 4.

        In either case, if *bytes* is *False* (default), the RGBA
        array will be floats in the 0-1 range; if it is *True*,
        the returned RGBA array will be `~numpy.uint8` in the 0 to 255 range.

        If norm is False, no normalization of the input data is
        performed, and it is assumed to be in the range (0-1).
        """
        # First check for special case, image input:
        try:
            if x.ndim == 3:
                if x.shape[2] == 3:
                    if alpha is None:
                        alpha = 1
                    if x.dtype == np.uint8:
                        alpha = np.uint8(alpha * 255)
                    m, n = x.shape[:2]
                    xx = np.empty(shape=(m, n, 4), dtype=x.dtype)
                    xx[:, :, :3] = x
                    xx[:, :, 3] = alpha
                elif x.shape[2] == 4:
                    xx = x
                else:
                    raise ValueError("Third dimension must be 3 or 4")
                if xx.dtype.kind == 'f':
                    if norm and (xx.max() > 1 or xx.min() < 0):
                        raise ValueError("Floating point image RGB values "
                                         "must be in the 0..1 range.")
                    if bytes:
                        xx = (xx * 255).astype(np.uint8)
                elif xx.dtype == np.uint8:
                    if not bytes:
                        xx = xx.astype(np.float32) / 255
                else:
                    raise ValueError("Image RGB array must be uint8 or "
                                     "floating point; found %s" % xx.dtype)
                # Account for any masked entries in the original array
                # If any of R, G, B, or A are masked for an entry, we set alpha to 0
                if np.ma.is_masked(x):
                    xx[np.any(np.ma.getmaskarray(x), axis=2), 3] = 0
                return xx
        except AttributeError:
            # e.g., x is not an ndarray; so try mapping it
            pass

        # This is the normal case, mapping a scalar array:
        x = ma.asarray(x)
        if norm:
            x = self.norm(x)
        rgba = self.cmap(x, alpha=alpha, bytes=bytes)
        return rgba
signature: (self, x, alpha=None, bytes=False, norm=True)
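The RGB passthrough special case from the to_rgba docstring, sketched with a uint8 image (no norm or colormap mapping is applied; *alpha* fills the missing channel):

    import numpy as np
    from matplotlib.cm import ScalarMappable

    img = np.zeros((2, 2, 3), dtype=np.uint8)  # RGB image: last dimension is 3
    rgba = ScalarMappable().to_rgba(img, alpha=0.5, bytes=True)
    print(rgba.shape, rgba.dtype)              # (2, 2, 4) uint8; alpha becomes 127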
index: 43,185
package: fast_poisson_solver.solver
name: Solver
docstring: This class represents the main solver used for fast Poisson equation solving. It is a key component of the `fast_poisson_solver` package. Parameters ---------- device : str, optional Specifies the device where the computations will be performed. This should be a valid PyTorch device string such as 'cuda' for GPU processing or 'cpu' for CPU processing. Default is 'cuda'. precision : torch.dtype, optional This determines the precision of the computation. This should be a valid PyTorch data type such as torch.float32 or torch.float64. Default is torch.float32. verbose : bool, optional When set to True, enables the printing of detailed logs during computation. Default is False. use_weights : bool, optional Determines whether the network uses pre-trained weights or random weights. If True, the network uses pre-trained weights. Default is True. compile_model : bool, optional Specifies whether the network model is compiled for faster inference. If False, the model won't be compiled. Default is True. lambdas_pde : list of float, optional A list that weights the influence of the PDE part in the loss term. If None, the default weight 2**-12 will be used. Default is None. seed : int, optional This parameter sets the seed for generating random numbers, which helps in achieving deterministic results. Default is 0.
code:
    class Solver:
        """
        This class represents the main solver used for fast Poisson equation
        solving. It is a key component of the `fast_poisson_solver` package.

        Parameters
        ----------
        device : str, optional
            Specifies the device where the computations will be performed.
            This should be a valid PyTorch device string such as 'cuda' for GPU
            processing or 'cpu' for CPU processing. Default is 'cuda'.
        precision : torch.dtype, optional
            This determines the precision of the computation. This should be a
            valid PyTorch data type such as torch.float32 or torch.float64.
            Default is torch.float32.
        verbose : bool, optional
            When set to True, enables the printing of detailed logs during
            computation. Default is False.
        use_weights : bool, optional
            Determines whether the network uses pre-trained weights or random
            weights. If True, the network uses pre-trained weights.
            Default is True.
        compile_model : bool, optional
            Specifies whether the network model is compiled for faster
            inference. If False, the model won't be compiled. Default is True.
        lambdas_pde : list of float, optional
            A list that weights the influence of the PDE part in the loss term.
            If None, the default weight 2**-12 will be used. Default is None.
        seed : int, optional
            This parameter sets the seed for generating random numbers, which
            helps in achieving deterministic results. Default is 0.
        """

        def __init__(self, device='cuda', precision=torch.float32, verbose=False,
                     use_weights=True, compile_model=True, lambdas_pde=None, seed=0):
            if torch.cuda.is_available():
                torch.backends.cuda.matmul.allow_tf32 = False
                torch.backends.cudnn.allow_tf32 = False
            if lambdas_pde is None:
                lambdas_pde = [2 ** -12]
            self.path = os.path.join('resources', 'final.pt')
            self.verbose = verbose
            self.precision = precision
            self.device = device if torch.cuda.is_available() else 'cpu'
            self.use_weights = use_weights
            self.compile_model = compile_model
            self.lambdas_pde = lambdas_pde
            self.seed = seed

            random.seed(self.seed)
            np.random.seed(self.seed)
            torch.manual_seed(self.seed)

            self.n_lambdas = len(self.lambdas_pde)
            # The losses for all the different values of lambda are put in those arrays
            self.Ls = np.zeros(self.n_lambdas)
            self.Ls_pde = np.zeros(self.n_lambdas)
            self.Ls_bc = np.zeros(self.n_lambdas)

            if self.path.endswith('.pt'):
                self.weights_input = True
                self.weights_path = pkg_resources.resource_stream(__name__, self.path)
                self.path = os.path.join(*os.path.split(self.path)[:1])
            else:
                self.weights_input = False

            # Path to where the pre-computed data_utils will be saved.
            self.precompute_path = 'precomputed-resources'
            if not os.path.isdir(self.precompute_path):
                os.makedirs(self.precompute_path)
            self.precompute_file = os.path.join(self.precompute_path, 'default.pkl')

            self.load_data()
            self.build_model()

        def load_data(self):
            def tuple_constructor(loader, node):
                return tuple(loader.construct_sequence(node))

            yaml.SafeLoader.add_constructor('tag:yaml.org,2002:python/tuple', tuple_constructor)
            path = pkg_resources.resource_stream(__name__, os.path.join(self.path, 'infos.yaml'))
            self.infos = yaml.safe_load(path)
            if 'out' not in self.infos['model']:
                self.infos['model']['out'] = self.infos['data_utils']['num_cases']
            if 'width' not in self.infos['model']:
                if 'network_width' in self.infos['model']:
                    self.infos['model']['width'] = self.infos['model']['network_width']
            if 'depth' not in self.infos['model']:
                if 'network_depth' in self.infos['model']:
                    self.infos['model']['depth'] = self.infos['model']['network_depth'] - 1
            # self.infos['model']['width'] = 800
            # self.infos['model']['depth'] = 8

        def build_model(self):
            self.model = PINN(self.infos['model']).to(self.device)
            if self.use_weights:
                state_dict = torch.load(self.weights_path, map_location=self.device)
                is_data_parallel = any('module' in key for key in state_dict.keys())
                is_compiled = any('_orig_mod' in key for key in state_dict.keys())
                if is_data_parallel:
                    state_dict = {key.replace("module.", ""): value
                                  for key, value in state_dict.items()}
                if is_compiled:
                    state_dict = {key.replace("_orig_mod.", ""): value
                                  for key, value in state_dict.items()}
                self.model.load_state_dict(state_dict)
            self.model.to(self.precision)
            if self.compile_model:
                try:
                    self.model = torch.compile(self.model)
                except Exception as e:
                    print(e)
                    print('Compiling did not work but do not worry, it will work without it.')
                    print('You need pytorch 2.0 for compiling and Linux.')

        def evaluate_network_pde(self):
            self.x_pde.requires_grad = True
            self.y_pde.requires_grad = True
            self.H = self.model.h(self.x_pde, self.y_pde)
            self.DH = calculate_laplace(self.H, self.x_pde, self.y_pde).detach().to(self.precision)
            self.DHtDH = torch.matmul(self.DH.t(), self.DH)
            self.Dht = self.DH.t()
            self.x_pde.requires_grad = False
            self.y_pde.requires_grad = False
            self.H = self.H.detach()

        def evaluate_network_bc(self):
            with torch.no_grad():
                self.H_bc = self.model.h(self.x_bc, self.y_bc).to(self.precision)
                self.Ht_bc = self.H_bc.t()
                ones_Hbc = torch.ones(self.H_bc.shape[0], 1, device=self.device).to(self.precision)
                self.Ht_bc_ones = (self.Ht_bc @ ones_Hbc).t()

        def load_precomputed_data(self):
            with open(self.precompute_file, 'rb') as f:
                (self.LHSs, self.RHSs, self.H, self.DH, self.Dht, self.DHtDH,
                 self.H_bc, self.Ht_bc, self.Ht_bc_ones, self.NdO, self.NO) = format_input(
                    pickle.load(f), self.precision, self.device, reshape=False)
            if self.verbose > 0:
                print('Pre-Computed data_utils loaded from storage.')

        def precompute_LHS_RHS(self):
            self.NO = self.DH.shape[0]
            self.NdO = self.H_bc.shape[0]
            M = torch.eye(self.NdO, device=self.device, dtype=self.precision) - \
                torch.ones(self.NdO, device=self.device, dtype=self.precision) / self.NdO
            lambdas = torch.tensor(self.lambdas_pde, device=self.device,
                                   dtype=self.precision).view(-1, 1, 1)
            self.LHSs = lambdas / self.NO * self.DHtDH + \
                1 / self.NdO * torch.matmul(self.Ht_bc, torch.matmul(M, self.H_bc))
            self.RHSs = lambdas / self.NO * self.Dht
            if self.verbose > 0:
                print('Pre-Computed data_utils calculated.')

        def save_precomputed_data(self):
            with open(self.precompute_file, 'wb') as file:
                pickle.dump(
                    [self.LHSs, self.RHSs, self.H, self.DH, self.Dht, self.DHtDH,
                     self.H_bc, self.Ht_bc, self.Ht_bc_ones, self.NdO, self.NO], file)
            if self.verbose > 0:
                print('Pre-Computed stored.')

        def precompute(self, x_pde, y_pde, x_bc, y_bc, name=None, save=False, load=False):
            """
            This method precomputes the network data based on the given coordinates.

            Parameters
            ----------
            x_pde : tensor/array/list
                Coordinates that lie inside the domain and define the behavior
                of the partial differential equation (PDE).
            y_pde : tensor/array/list
                Coordinates that lie inside the domain and define the behavior
                of the partial differential equation (PDE).
            x_bc : tensor/array/list
                Coordinates of the boundary condition.
            y_bc : tensor/array/list
                Coordinates of the boundary condition.
            name : str, optional
                Name used for saving or loading the precomputed data. If no
                name is provided, the default name will be used. Default is None.
            save : bool, optional
                Specifies whether the precomputed data should be saved. If True,
                the data will be saved using the provided `name`.
                Default is False.
            load : bool, optional
                Specifies whether the precomputed data with the provided `name`
                should be loaded. If True, the method will attempt to load the
                data with the given name. Default is False.
            """
            t0 = time.perf_counter()
            if name is not None:
                self.precompute_file = os.path.join(self.precompute_path, name + '.pkl')
            self.x_pde, self.y_pde, self.x_bc, self.y_bc = format_input(
                [x_pde, y_pde, x_bc, y_bc], self.precision, self.device)
            self.x_tot = torch.cat([self.x_pde, self.x_bc], dim=0)
            self.y_tot = torch.cat([self.y_pde, self.y_bc], dim=0)
            assert torch.min(self.x_tot) >= 0 and torch.max(self.x_tot) <= 1, \
                'x coordinates should be in [0, 1]. Please rescale.'
            assert torch.min(self.y_tot) >= 0 and torch.max(self.y_tot) <= 1, \
                'y coordinates should be in [0, 1]. Please rescale.'
            if load and os.path.isfile(self.precompute_file):
                self.load_precomputed_data()
            else:
                self.evaluate_network_pde()
                self.evaluate_network_bc()
                self.precompute_LHS_RHS()
                if save:
                    self.save_precomputed_data()
            # First time running torch.linalg.solve() is very slow, so we run it
            # once here to get rid of the delay
            torch.linalg.solve(
                torch.rand(self.LHSs[0].shape).to(self.device).to(self.precision),
                torch.rand(self.LHSs[0].shape[0], 1).to(self.device).to(self.precision))
            t3 = time.perf_counter()
            if self.verbose > 0:
                print('\nPre-Computing Successful:', t3 - t0, 'seconds')

        def solve(self, f, bc):
            """
            This method solves the PDE with the provided source function and
            boundary condition.

            Parameters
            ----------
            f : tensor/array/list
                The source function for the PDE.
            bc : tensor/array/list
                The boundary condition for the PDE.

            Returns
            -------
            tuple
                The tuple contains the following elements:

                u_pred : tensor
                    The complete solution of the PDE.
                u_pde_pred : tensor
                    The solution of the PDE inside the domain.
                u_bc_pred : tensor
                    The solution for the boundary condition.
                f_pred : tensor
                    The predicted source function.
                runtime : float
                    The time it took the method to run in seconds.
            """
            self.f, self.bc = format_input([f, bc], self.precision, self.device)
            w_outs = []
            biases = np.empty(len(self.lambdas_pde))
            t0 = time.perf_counter()
            RHSs = torch.matmul(self.RHSs, self.f)
            self.bct_ones = torch.sum(self.bc).reshape(1, 1)
            for i, l in enumerate(self.lambdas_pde):
                self.w_out = torch.linalg.solve(self.LHSs[i], RHSs[i])
                self.bias = -1 / self.NdO * (self.Ht_bc_ones @ self.w_out - self.bct_ones)
                # Just calculates the loss if multiple lambdas are provided to find the best one.
                if self.n_lambdas > 1:
                    u_pred_bc = torch.matmul(self.H_bc, self.w_out) + self.bias
                    f_pred = torch.matmul(self.DH, self.w_out)
                    self.Ls_pde[i] = torch.mean((f_pred - self.f) ** 2).item()
                    self.Ls_bc[i] = torch.mean((u_pred_bc - self.bc) ** 2).item()
                    self.Ls[i] = self.Ls_pde[i] + self.Ls_bc[i]
                w_outs.append(self.w_out)
                biases[i] = self.bias
            if self.n_lambdas > 1:
                minimum = np.argmin(self.Ls)
                self.lambda_pde = self.lambdas_pde[minimum]
                self.w_out = w_outs[minimum]
                self.bias = biases[minimum]
            else:
                self.lambda_pde = self.lambdas_pde[0]
            self.u_pde_pred = torch.add(torch.matmul(self.H, self.w_out), self.bias)
            self.f_pred = torch.matmul(self.DH, self.w_out)
            self.u_bc_pred = torch.matmul(self.H_bc, self.w_out) + self.bias
            self.u_pred = torch.cat([self.u_pde_pred, self.u_bc_pred])
            t1 = time.perf_counter()
            if self.verbose > 0:
                print('\nRun Successful:', t1 - t0, 'seconds\n')
            return self.u_pred, self.u_pde_pred, self.u_bc_pred, self.f_pred, t1 - t0
signature: (device='cuda', precision=torch.float32, verbose=False, use_weights=True, compile_model=True, lambdas_pde=None, seed=0)
43,186
fast_poisson_solver.solver
__init__
null
def __init__(self, device='cuda', precision=torch.float32, verbose=False, use_weights=True,
             compile_model=True, lambdas_pde=None, seed=0):
        if torch.cuda.is_available():
            torch.backends.cuda.matmul.allow_tf32 = False
            torch.backends.cudnn.allow_tf32 = False

        if lambdas_pde is None:
            lambdas_pde = [2 ** -12]

        self.path = os.path.join('resources', 'final.pt')
        self.verbose = verbose
        self.precision = precision
        self.device = device if torch.cuda.is_available() else 'cpu'
        self.use_weights = use_weights
        self.compile_model = compile_model
        self.lambdas_pde = lambdas_pde
        self.seed = seed

        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)

        self.n_lambdas = len(self.lambdas_pde)

        # The losses for all the different values of lambda are put in those arrays
        self.Ls = np.zeros(self.n_lambdas)
        self.Ls_pde = np.zeros(self.n_lambdas)
        self.Ls_bc = np.zeros(self.n_lambdas)

        if self.path.endswith('.pt'):
            self.weights_input = True
            self.weights_path = pkg_resources.resource_stream(__name__, self.path)
            self.path = os.path.join(*os.path.split(self.path)[:1])
        else:
            self.weights_input = False

        # Path to the where the pre-computed data_utils will be saved.
        self.precompute_path = 'precomputed-resources'
        if not os.path.isdir(self.precompute_path):
            os.makedirs(self.precompute_path)
        self.precompute_file = os.path.join(self.precompute_path, 'default.pkl')

        self.load_data()
        self.build_model()
(self, device='cuda', precision=torch.float32, verbose=False, use_weights=True, compile_model=True, lambdas_pde=None, seed=0)
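A minimal instantiation sketch for the constructor above. The import path is an assumption: the class is presumed to be exported as `Solver` from the package root, while this record only shows the module `fast_poisson_solver.solver`.

import torch
from fast_poisson_solver import Solver  # assumed export; adjust to the actual class location

# Falls back to CPU automatically when CUDA is unavailable (see the constructor body above).
solver = Solver(device='cuda', precision=torch.float32, verbose=1,
                use_weights=True, compile_model=False, lambdas_pde=[2 ** -12])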
43,187
fast_poisson_solver.solver
build_model
null
def build_model(self):
        self.model = PINN(self.infos['model']).to(self.device)
        if self.use_weights:
            state_dict = torch.load(self.weights_path, map_location=self.device)
            is_data_parallel = any('module' in key for key in state_dict.keys())
            is_compiled = any('_orig_mod' in key for key in state_dict.keys())
            if is_data_parallel:
                state_dict = {key.replace("module.", ""): value for key, value in state_dict.items()}
            if is_compiled:
                state_dict = {key.replace("_orig_mod.", ""): value for key, value in state_dict.items()}
            self.model.load_state_dict(state_dict)
        self.model.to(self.precision)
        if self.compile_model:
            try:
                self.model = torch.compile(self.model)
            except Exception as e:
                print(e)
                print('Compiling did not work but do not worry, it will work without it.')
                print('You need pytorch 2.0 for compiling and Linux.')
(self)
43,188
fast_poisson_solver.solver
evaluate_network_bc
null
def evaluate_network_bc(self):
        with torch.no_grad():
            self.H_bc = self.model.h(self.x_bc, self.y_bc).to(self.precision)
            self.Ht_bc = self.H_bc.t()
            ones_Hbc = torch.ones(self.H_bc.shape[0], 1, device=self.device).to(self.precision)
            self.Ht_bc_ones = (self.Ht_bc @ ones_Hbc).t()
(self)
43,189
fast_poisson_solver.solver
evaluate_network_pde
null
def evaluate_network_pde(self):
        self.x_pde.requires_grad = True
        self.y_pde.requires_grad = True
        self.H = self.model.h(self.x_pde, self.y_pde)
        self.DH = calculate_laplace(self.H, self.x_pde, self.y_pde).detach().to(self.precision)
        self.DHtDH = torch.matmul(self.DH.t(), self.DH)
        self.Dht = self.DH.t()
        self.x_pde.requires_grad = False
        self.y_pde.requires_grad = False
        self.H = self.H.detach()
(self)
43,190
fast_poisson_solver.solver
load_data
null
def load_data(self):

        def tuple_constructor(loader, node):
            return tuple(loader.construct_sequence(node))

        yaml.SafeLoader.add_constructor('tag:yaml.org,2002:python/tuple', tuple_constructor)

        path = pkg_resources.resource_stream(__name__, os.path.join(self.path, 'infos.yaml'))
        self.infos = yaml.safe_load(path)

        if 'out' not in self.infos['model']:
            self.infos['model']['out'] = self.infos['data_utils']['num_cases']
        if 'width' not in self.infos['model']:
            if 'network_width' in self.infos['model']:
                self.infos['model']['width'] = self.infos['model']['network_width']
        if 'depth' not in self.infos['model']:
            if 'network_depth' in self.infos['model']:
                self.infos['model']['depth'] = self.infos['model']['network_depth'] - 1
        # self.infos['model']['width'] = 800
        # self.infos['model']['depth'] = 8
(self)
43,191
fast_poisson_solver.solver
load_precomputed_data
null
def load_precomputed_data(self):
        with open(self.precompute_file, 'rb') as f:
            self.LHSs, self.RHSs, self.H, self.DH, self.Dht, self.DHtDH, self.H_bc, \
                self.Ht_bc, self.Ht_bc_ones, self.NdO, self.NO = format_input(
                    pickle.load(f), self.precision, self.device, reshape=False)
        if self.verbose > 0:
            print('Pre-Computed data_utils loaded from storage.')
(self)
43,192
fast_poisson_solver.solver
precompute
This method is used for precomputing of the data based on the given coordinates. Parameters ---------- x_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the partial differential equation (PDE). y_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the partial differential equation (PDE). x_bc : tensor/array/list Coordinates of the boundary condition. y_bc : tensor/array/list Coordinates of the boundary condition. name : str, optional Name used for saving or loading the precomputed data. If no name is provided, the default name will be used. Default is None. save : bool, optional Specifies whether the precomputed data should be saved. If True, the data will be saved using the provided `name`. Default is True. load : bool, optional Specifies whether the precomputed data with the provided `name` should be loaded. If True, the method will attempt to load the data with the given name. Default is True.
def precompute(self, x_pde, y_pde, x_bc, y_bc, name=None, save=False, load=False):
        """
        This method is used for precomputing of the data based on the given coordinates.

        Parameters
        ----------
        x_pde : tensor/array/list
            Coordinates that lie inside the domain and define the behavior
            of the partial differential equation (PDE).
        y_pde : tensor/array/list
            Coordinates that lie inside the domain and define the behavior
            of the partial differential equation (PDE).
        x_bc : tensor/array/list
            Coordinates of the boundary condition.
        y_bc : tensor/array/list
            Coordinates of the boundary condition.
        name : str, optional
            Name used for saving or loading the precomputed data.
            If no name is provided, the default name will be used. Default is None.
        save : bool, optional
            Specifies whether the precomputed data should be saved.
            If True, the data will be saved using the provided `name`. Default is True.
        load : bool, optional
            Specifies whether the precomputed data with the provided `name` should be loaded.
            If True, the method will attempt to load the data with the given name. Default is True.
        """
        t0 = time.perf_counter()

        if name is not None:
            self.precompute_file = os.path.join(self.precompute_path, name + f'.pkl')

        self.x_pde, self.y_pde, self.x_bc, self.y_bc = format_input(
            [x_pde, y_pde, x_bc, y_bc], self.precision, self.device)

        self.x_tot = torch.cat([self.x_pde, self.x_bc], dim=0)
        self.y_tot = torch.cat([self.y_pde, self.y_bc], dim=0)

        assert torch.min(self.x_tot) >= 0 and torch.max(
            self.x_tot) <= 1, 'x coordinates should be in [0, 1]. Please rescale.'
        assert torch.min(self.y_tot) >= 0 and torch.max(
            self.y_tot) <= 1, 'y coordinates should be in [0, 1]. Please rescale.'

        if load and os.path.isfile(self.precompute_file):
            self.load_precomputed_data()
        else:
            self.evaluate_network_pde()
            self.evaluate_network_bc()
            self.precompute_LHS_RHS()
            if save:
                self.save_precomputed_data()

        # First time running torch.linalg.solve() is very slow, so we run it once here to get rid of the delay
        torch.linalg.solve(
            torch.rand(self.LHSs[0].shape).to(self.device).to(self.precision),
            torch.rand(self.LHSs[0].shape[0], 1).to(self.device).to(self.precision))

        t3 = time.perf_counter()
        if self.verbose > 0:
            print('\nPre-Computing Successful:', t3 - t0, 'seconds')
(self, x_pde, y_pde, x_bc, y_bc, name=None, save=False, load=False)
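A hedged usage sketch for `precompute`, reusing the `solver` instance from the constructor example above; the grid construction is illustrative. Note the assertions in the method body: all coordinates must lie in [0, 1].

import numpy as np

# Uniform grid on [0, 1]^2, split into interior (PDE) points and boundary points.
n = 32
xx, yy = np.meshgrid(np.linspace(0, 1, n), np.linspace(0, 1, n))
interior = (xx > 0) & (xx < 1) & (yy > 0) & (yy < 1)
x_pde, y_pde = xx[interior], yy[interior]
x_bc, y_bc = xx[~interior], yy[~interior]

# Network evaluation and LHS/RHS assembly happen once here; with save/load the result
# is cached as 'precomputed-resources/grid32.pkl' and reused on later runs.
solver.precompute(x_pde, y_pde, x_bc, y_bc, name='grid32', save=True, load=True)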
43,193
fast_poisson_solver.solver
precompute_LHS_RHS
null
def precompute_LHS_RHS(self):
        self.NO = self.DH.shape[0]
        self.NdO = self.H_bc.shape[0]
        M = torch.eye(self.NdO, device=self.device, dtype=self.precision) - \
            torch.ones(self.NdO, device=self.device, dtype=self.precision) / self.NdO
        lambdas = torch.tensor(self.lambdas_pde, device=self.device, dtype=self.precision).view(-1, 1, 1)
        self.LHSs = lambdas / self.NO * self.DHtDH + \
            1 / self.NdO * torch.matmul(self.Ht_bc, torch.matmul(M, self.H_bc))
        self.RHSs = lambdas / self.NO * self.Dht
        if self.verbose > 0:
            print('Pre-Computed data_utils calculated.')
(self)
43,194
fast_poisson_solver.solver
save_precomputed_data
null
def save_precomputed_data(self):
        with open(self.precompute_file, 'wb') as file:
            pickle.dump(
                [self.LHSs, self.RHSs, self.H, self.DH, self.Dht,
                 self.DHtDH, self.H_bc, self.Ht_bc, self.Ht_bc_ones, self.NdO, self.NO], file)
        if self.verbose > 0:
            print('Pre-Computed stored.')
(self)
43,195
fast_poisson_solver.solver
solve
This method is used to solve the PDE with the provided source function and boundary condition. Parameters ---------- f : tensor/array/list The source function for the PDE. bc : tensor/array/list The boundary condition for the PDE. Returns ------- tuple The tuple contains the following elements: u_pred : tensor The complete solution of the PDE. u_pde_pred : tensor The solution of the PDE inside the domain. u_bc_pred : tensor The solution for the boundary condition. f_pred : tensor The predicted source function. runtime : float The time it took the method to run in seconds.
def solve(self, f, bc):
        """
        This method is used to solve the PDE with the provided source function and boundary condition.

        Parameters
        ----------
        f : tensor/array/list
            The source function for the PDE.
        bc : tensor/array/list
            The boundary condition for the PDE.

        Returns
        -------
        tuple
            The tuple contains the following elements:

            u_pred : tensor
                The complete solution of the PDE.
            u_pde_pred : tensor
                The solution of the PDE inside the domain.
            u_bc_pred : tensor
                The solution for the boundary condition.
            f_pred : tensor
                The predicted source function.
            runtime : float
                The time it took the method to run in seconds.
        """
        self.f, self.bc = format_input([f, bc], self.precision, self.device)

        w_outs = []
        biases = np.empty(len(self.lambdas_pde))

        t0 = time.perf_counter()

        RHSs = torch.matmul(self.RHSs, self.f)
        self.bct_ones = torch.sum(self.bc).reshape(1, 1)

        for i, l in enumerate(self.lambdas_pde):
            self.w_out = torch.linalg.solve(self.LHSs[i], RHSs[i])
            self.bias = - 1 / self.NdO * (self.Ht_bc_ones @ self.w_out - self.bct_ones)

            # Just calculates the loss if multiple lambdas are provided to find the best one.
            if self.n_lambdas > 1:
                u_pred_bc = torch.matmul(self.H_bc, self.w_out) + self.bias
                f_pred = torch.matmul(self.DH, self.w_out)
                self.Ls_pde[i] = torch.mean((f_pred - self.f) ** 2).item()
                self.Ls_bc[i] = torch.mean((u_pred_bc - self.bc) ** 2).item()
                self.Ls[i] = self.Ls_pde[i] + self.Ls_bc[i]
                w_outs.append(self.w_out)
                biases[i] = self.bias

        if self.n_lambdas > 1:
            minimum = np.argmin(self.Ls)
            self.lambda_pde = self.lambdas_pde[minimum]
            self.w_out = w_outs[minimum]
            self.bias = biases[minimum]
        else:
            self.lambda_pde = self.lambdas_pde[0]

        self.u_pde_pred = torch.add(torch.matmul(self.H, self.w_out), self.bias)
        self.f_pred = torch.matmul(self.DH, self.w_out)
        self.u_bc_pred = torch.matmul(self.H_bc, self.w_out) + self.bias
        self.u_pred = torch.cat([self.u_pde_pred, self.u_bc_pred])

        t1 = time.perf_counter()
        if self.verbose > 0:
            print('\nRun Successful:', t1 - t0, 'seconds\n')

        return self.u_pred, self.u_pde_pred, self.u_bc_pred, self.f_pred, t1 - t0
(self, f, bc)
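Continuing the sketch above: `solve` reuses the precomputed operators, so only the source term and boundary values change between calls.

# Source term at the interior points and a homogeneous Dirichlet boundary.
f = np.sin(np.pi * x_pde) * np.sin(np.pi * y_pde)
bc = np.zeros_like(x_bc)

u_pred, u_pde_pred, u_bc_pred, f_pred, runtime = solver.solve(f, bc)
print(f'solved in {runtime:.4f} s, selected lambda = {solver.lambda_pde}')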
43,196
fast_poisson_solver.analyze
analyze
Analyze the performance of a Poisson equation solver by comparing predictions with true or numerical values. This function calculates various error metrics including Mean Squared Error (MSE), Root Mean Squared Error (RMSE), Mean Absolute Error (MAE), Relative Mean Absolute Error (rMSE), Structural Similarity Index (SSIM), Peak Signal-to-Noise Ratio (PSNR), and R-Squared (R2) for each of the source term (f), solution (u), and boundary condition (bc) predictions. The predicted source term 'f_pred' and boundary condition 'u_bc_pred' are compared with the true source term 'f' and true boundary condition 'u_bc'. If provided, the predicted solution 'u_pred' is compared with the numerical solution 'u_num'. Parameters ---------- f_pred : array-like The predicted source term by the solver. Can be a list, numpy array or PyTorch tensor. f : array-like The true source term of the Poisson equation. Can be a list, numpy array or PyTorch tensor. u_bc_pred : array-like The predicted solution for the boundary condition. Can be a list, numpy array or PyTorch tensor. u_bc : array-like The true boundary condition. Can be a list, numpy array or PyTorch tensor. u_pred : array-like, optional The predicted solution of the Poisson equation. Can be a list, numpy array or PyTorch tensor (default is None). u_num : array-like, optional The numerical solution of the Poisson equation. Can be a list, numpy array or PyTorch tensor (default is None). normalize : bool, optional If True, normalize the input arrays before calculating the error metrics (default is True). Returns ------- dict A dictionary containing the calculated error metrics for the corresponding part of the Poisson equation. 'u' A dictionary containing the error metrics for the predicted solution 'u_pred' compared to the numerical solution 'u_num' (only if 'u_num' and 'u_pred' are provided). 'f' A dictionary containing the error metrics for the predicted source term 'f_pred' compared to the true source term 'f'. 'bc' A dictionary containing the error metrics for the predicted boundary condition 'u_bc_pred' compared to the true boundary condition 'u_bc'.
def analyze(f_pred, f, u_bc_pred, u_bc, u_pred=None, u_num=None, normalize=True):
    """
    Analyze the performance of a Poisson equation solver by comparing predictions
    with true or numerical values.

    This function calculates various error metrics including Mean Squared Error (MSE),
    Root Mean Squared Error (RMSE), Mean Absolute Error (MAE), Relative Mean Absolute
    Error (rMSE), Structural Similarity Index (SSIM), Peak Signal-to-Noise Ratio (PSNR),
    and R-Squared (R2) for each of the source term (f), solution (u), and boundary
    condition (bc) predictions.

    The predicted source term 'f_pred' and boundary condition 'u_bc_pred' are compared
    with the true source term 'f' and true boundary condition 'u_bc'. If provided, the
    predicted solution 'u_pred' is compared with the numerical solution 'u_num'.

    Parameters
    ----------
    f_pred : array-like
        The predicted source term by the solver. Can be a list, numpy array or PyTorch tensor.
    f : array-like
        The true source term of the Poisson equation. Can be a list, numpy array or PyTorch tensor.
    u_bc_pred : array-like
        The predicted solution for the boundary condition. Can be a list, numpy array or PyTorch tensor.
    u_bc : array-like
        The true boundary condition. Can be a list, numpy array or PyTorch tensor.
    u_pred : array-like, optional
        The predicted solution of the Poisson equation. Can be a list, numpy array
        or PyTorch tensor (default is None).
    u_num : array-like, optional
        The numerical solution of the Poisson equation. Can be a list, numpy array
        or PyTorch tensor (default is None).
    normalize : bool, optional
        If True, normalize the input arrays before calculating the error metrics (default is True).

    Returns
    -------
    dict
        A dictionary containing the calculated error metrics for the corresponding
        part of the Poisson equation.

        'u'
            A dictionary containing the error metrics for the predicted solution 'u_pred'
            compared to the numerical solution 'u_num' (only if 'u_num' and 'u_pred' are provided).
        'f'
            A dictionary containing the error metrics for the predicted source term 'f_pred'
            compared to the true source term 'f'.
        'bc'
            A dictionary containing the error metrics for the predicted boundary condition
            'u_bc_pred' compared to the true boundary condition 'u_bc'.
    """
    if u_num is None or u_pred is None:
        u_num = [0]
        u_pred = [0]
        u_comparison = False
    else:
        u_comparison = True

    f, f_pred, u_pred, u_bc_pred, u_num, u_bc = format_input(
        [f, f_pred, u_pred, u_bc_pred, u_num, u_bc], as_array=True)
    f, f_pred, u_pred, u_bc_pred, u_num, u_bc = [
        v.reshape(-1) for v in [f, f_pred, u_pred, u_bc_pred, u_num, u_bc]]

    if normalize:
        bv = u_bc[0]
    else:
        bv = 0

    def analyze_struct(image1, image2, bv):
        image1_ = image1 - bv
        image2_ = image2 - bv
        maximum = np.max(image1_)
        minimum = np.min(image1_)
        if len(np.unique(image1_)) != 1 and normalize:
            image1_ = (image1_ - minimum) / (maximum - minimum)
            image2_ = (image2_ - minimum) / (maximum - minimum)
            range = 1
        else:
            range = np.min((maximum - minimum, 1e-8))
        ssim_value = ssim(image1_, image2_, data_range=range + 1e-8, multichannel=False,
                          gaussian_weights=True)
        psnr_value = psnr(image1_, image2_, data_range=range)
        r2 = r2_score(image1_.reshape(-1), image2_.reshape(-1))
        return ssim_value, psnr_value, r2

    def analyze_standard(image1, image2, bv):
        image1_ = image1 - bv
        image2_ = image2 - bv
        maximum = np.max(image1_)
        minimum = np.min(image1_)
        if len(np.unique(image1_)) != 1 and normalize:
            image1_ = (image1_ - minimum) / (maximum - minimum)
            image2_ = (image2_ - minimum) / (maximum - minimum)
        mse = np.mean((image1_ - image2_) ** 2)
        rmse = np.sqrt(mse)
        mae = np.mean(np.abs(image1_ - image2_))
        mae_r = np.mean(np.abs(image1_ - image2_)) / np.max((np.mean(np.abs(image1_)), 1e-6))
        return mse, rmse, mae, mae_r

    res = {}

    if u_comparison:
        ssim_value_u, psnr_value_u, r2_u = analyze_struct(u_num, u_pred, bv)
        mse_u, rmse_u, mae_u, mae_r_u = analyze_standard(u_num, u_pred, bv)
        res['u'] = {
            'MSE': mse_u,
            'RMSE': rmse_u,
            'MAE': mae_u,
            'rMAE': mae_r_u,
            'SSIM': ssim_value_u,
            'PSNR': psnr_value_u,
            'R2': r2_u
        }

    ssim_value_f, psnr_valuie_f, r2_f = analyze_struct(f, f_pred, 0)
    mse_f, rmse_f, mae_f, mae_r_f = analyze_standard(f, f_pred, 0)
    res['f'] = {
        'MSE': mse_f,
        'RMSE': rmse_f,
        'MAE': mae_f,
        'rMAE': mae_r_f,
        'SSIM': ssim_value_f,
        'PSNR': psnr_valuie_f,
        'R2': r2_f
    }

    mse_u_bc, rmse_u_bc, mae_u_bc, mae_r_bc = analyze_standard(u_bc, u_bc_pred, bv)
    res['bc'] = {
        'MSE': mse_u_bc,
        'RMSE': rmse_u_bc,
        'MAE': mae_u_bc,
        'rMAE': mae_r_bc
    }

    return res
(f_pred, f, u_bc_pred, u_bc, u_pred=None, u_num=None, normalize=True)
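A sketch of `analyze` on the outputs of the `solve` example above (the top-level export is again an assumption). Without `u_pred` and `u_num`, only the 'f' and 'bc' metric groups are returned.

from fast_poisson_solver import analyze  # assumed export

metrics = analyze(f_pred, f, u_bc_pred, bc)
print(metrics['f']['RMSE'], metrics['f']['R2'], metrics['bc']['MAE'])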
43,197
fast_poisson_solver.utils
bicubic_interpolate
Performs bicubic interpolation of a two-dimensional field. The function performs bicubic interpolation of a field, which is defined by its values `v_base` at points `(x_pde_base, y_pde_base)` for the domain and `(x_bc_base, y_bc_base)` for the boundary. Interpolation is done at new points `(x_pde_new, y_pde_new)` for the domain and `(x_bc_new, y_bc_new)` for the boundary. Interpolation can be done only inside the domain, excluding the boundary, depending on the `domain` parameter. Parameters ---------- x_pde_base : tensor/array/list x-coordinates of the PDE base points. y_pde_base : tensor/array/list y-coordinates of the PDE base points. x_bc_base : tensor/array/list x-coordinates of the boundary base points. y_bc_base : tensor/array/list y-coordinates of the boundary base points. v_base : tensor/array/list Values of the field at the base points. x_pde_new : tensor/array/list x-coordinates of the new PDE points for interpolation. y_pde_new : tensor/array/list y-coordinates of the new PDE points for interpolation. x_bc_new : tensor/array/list x-coordinates of the new boundary points for interpolation. y_bc_new : tensor/array/list y-coordinates of the new boundary points for interpolation. domain : bool, optional If True, the interpolation is done only inside the domain, not on the boundary. Defaults to False. Returns ------- tensor/array/list The interpolated field values at the new points.
def bicubic_interpolate(x_pde_base, y_pde_base, x_bc_base, y_bc_base, v_base,
                        x_pde_new, y_pde_new, x_bc_new, y_bc_new, domain=False):
    """
    Performs bicubic interpolation of a two-dimensional field.

    The function performs bicubic interpolation of a field, which is defined by its values
    `v_base` at points `(x_pde_base, y_pde_base)` for the domain and `(x_bc_base, y_bc_base)`
    for the boundary. Interpolation is done at new points `(x_pde_new, y_pde_new)` for the
    domain and `(x_bc_new, y_bc_new)` for the boundary. Interpolation can be done only inside
    the domain, excluding the boundary, depending on the `domain` parameter.

    Parameters
    ----------
    x_pde_base : tensor/array/list
        x-coordinates of the PDE base points.
    y_pde_base : tensor/array/list
        y-coordinates of the PDE base points.
    x_bc_base : tensor/array/list
        x-coordinates of the boundary base points.
    y_bc_base : tensor/array/list
        y-coordinates of the boundary base points.
    v_base : tensor/array/list
        Values of the field at the base points.
    x_pde_new : tensor/array/list
        x-coordinates of the new PDE points for interpolation.
    y_pde_new : tensor/array/list
        y-coordinates of the new PDE points for interpolation.
    x_bc_new : tensor/array/list
        x-coordinates of the new boundary points for interpolation.
    y_bc_new : tensor/array/list
        y-coordinates of the new boundary points for interpolation.
    domain : bool, optional
        If True, the interpolation is done only inside the domain, not on the boundary.
        Defaults to False.

    Returns
    -------
    tensor/array/list
        The interpolated field values at the new points.
    """
    x_pde_base, y_pde_base, x_bc_base, y_bc_base, v_base, x_pde_new, y_pde_new, x_bc_new, y_bc_new = format_input(
        [x_pde_base, y_pde_base, x_bc_base, y_bc_base, v_base, x_pde_new, y_pde_new, x_bc_new, y_bc_new],
        precision=v_base.dtype, device='cpu', as_array=True, reshape=True)

    if domain:
        x_base = x_pde_base.reshape(-1)
        y_base = y_pde_base.reshape(-1)
        x_new = x_pde_new.reshape(-1)
        y_new = y_pde_new.reshape(-1)
    else:
        x_base = np.concatenate([x_pde_base, x_bc_base]).reshape(-1)
        y_base = np.concatenate([y_pde_base, y_bc_base]).reshape(-1)
        x_new = np.concatenate([x_pde_new, x_bc_new]).reshape(-1)
        y_new = np.concatenate([y_pde_new, y_bc_new]).reshape(-1)

    coordinates_base = np.array([x_base, y_base]).T
    coordinates_new = np.array([x_new, y_new]).T

    v_new = griddata(coordinates_base, v_base.reshape(-1), coordinates_new, method='cubic')

    return v_new
(x_pde_base, y_pde_base, x_bc_base, y_bc_base, v_base, x_pde_new, y_pde_new, x_bc_new, y_bc_new, domain=False)
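An illustrative call that transfers the solver output from the examples above onto a finer grid built the same way; the names with suffix 2 are new here.

# Finer 64x64 grid; interpolate the coarse solution onto it.
n2 = 64
xx2, yy2 = np.meshgrid(np.linspace(0, 1, n2), np.linspace(0, 1, n2))
interior2 = (xx2 > 0) & (xx2 < 1) & (yy2 > 0) & (yy2 < 1)
u_fine = bicubic_interpolate(x_pde, y_pde, x_bc, y_bc, u_pred,
                             xx2[interior2], yy2[interior2],
                             xx2[~interior2], yy2[~interior2])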
43,202
fast_poisson_solver.utils
format_input
null
def format_input(v, precision=torch.float32, device='cpu', as_array=False, reshape=True):
    if isinstance(device, str):
        device = torch.device(device)
    for i in range(len(v)):
        if isinstance(v[i], list):
            v[i] = torch.tensor(v[i], dtype=precision, device=device)
        elif isinstance(v[i], np.ndarray):
            v[i] = torch.tensor(v[i], dtype=precision, device=device)
        elif isinstance(v[i], int) or isinstance(v[i], float):
            continue
        else:
            v[i] = v[i].to(precision).to(device)
    if reshape:
        v = [v_i.reshape(-1, 1) for v_i in v]
    if as_array:
        v = [v_i.detach().cpu().numpy() for v_i in v]
    return v
(v, precision=torch.float32, device='cpu', as_array=False, reshape=True)
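A small demonstration of the conversion behavior, assuming `format_input` is imported from `fast_poisson_solver.utils` as this record indicates: lists and NumPy arrays become column tensors of the requested dtype, and `as_array=True` would round-trip them back to NumPy.

import numpy as np
import torch

a, b = format_input([[0.0, 1.0, 2.0], np.ones(3)], precision=torch.float64)
print(a.shape, a.dtype)  # torch.Size([3, 1]) torch.float64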
43,203
fast_poisson_solver.utils
minmax
null
def minmax(v1, v2=None):
    if v2 is None:
        v2 = v1
    vmin = min((np.min(v1), np.min(v2)))
    vmax = max((np.max(v1), np.max(v2)))
    return vmin, vmax
(v1, v2=None)
43,206
fast_poisson_solver.numeric_solver
numeric_solve
This function numerically solves the partial differential equation (PDE) using the provided source function, PDE coordinates, boundary condition, and boundary condition coordinates. It uses the precision specified, measures the time taken for the operation if requested, and controls the verbosity of the output. Parameters ---------- f : tensor/array/list The source function for the PDE. x_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. y_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. u_bc : tensor/array/list The boundary condition for the PDE. x_bc : tensor/array/list Coordinates of the boundary condition. y_bc : tensor/array/list Coordinates of the boundary condition. precision : torch.dtype, optional The precision to be used for the numeric solver. Default is torch.float32. verbose : int, optional Controls the verbosity of the output. If 0, only the solution 'u' is returned. If greater than 0, both the solution 'u' and runtime 'delta t' are returned. Default is 1. Returns ------- tuple u : tensor The complete numeric solution of the PDE. t : float The runtime, i.e., the time it took the method to run in seconds. References ---------- Zaman, M.A. "Numerical Solution of the Poisson Equation Using Finite Difference Matrix Operators", Electronics 2022, 11, 2365. https://doi.org/10.3390/electronics11152365 See also: https://github.com/zaman13/Poisson-solver-2D
def numeric_solve(f, x_pde, y_pde, u_bc, x_bc, y_bc, precision=torch.float32, verbose=0):
    """
    This function numerically solves the partial differential equation (PDE) using the
    provided source function, PDE coordinates, boundary condition, and boundary condition
    coordinates. It uses the precision specified, measures the time taken for the operation
    if requested, and controls the verbosity of the output.

    Parameters
    ----------
    f : tensor/array/list
        The source function for the PDE.
    x_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    y_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    u_bc : tensor/array/list
        The boundary condition for the PDE.
    x_bc : tensor/array/list
        Coordinates of the boundary condition.
    y_bc : tensor/array/list
        Coordinates of the boundary condition.
    precision : torch.dtype, optional
        The precision to be used for the numeric solver. Default is torch.float32.
    verbose : int, optional
        Controls the verbosity of the output. If 0, only the solution 'u' is returned.
        If greater than 0, both the solution 'u' and runtime 'delta t' are returned.
        Default is 1.

    Returns
    -------
    tuple
        u : tensor
            The complete numeric solution of the PDE.
        t : float
            The runtime, i.e., the time it took the method to run in seconds.

    References
    ----------
    Zaman, M.A. "Numerical Solution of the Poisson Equation Using Finite Difference
    Matrix Operators", Electronics 2022, 11, 2365.
    https://doi.org/10.3390/electronics11152365
    See also: https://github.com/zaman13/Poisson-solver-2D
    """
    f, x_pde, y_pde, u_bc, x_bc, y_bc = format_input(
        [f, x_pde, y_pde, u_bc, x_bc, y_bc], precision=precision, device="cpu", as_array=True)

    dtype_map = {
        torch.float32: np.float32,
        torch.float64: np.float64,
        # add more mappings as needed
    }
    precision = dtype_map.get(precision, None)

    x = np.concatenate([x_pde, x_bc], axis=0)
    y = np.concatenate([y_pde, y_bc], axis=0)

    x_unique = np.unique(x)
    y_unique = np.unique(y)

    Nx = len(x_unique)
    Ny = len(y_unique)

    dx = x_unique[1] - x_unique[0]
    dy = y_unique[1] - y_unique[0]

    Dx_2d, Dy_2d, D2x_2d, D2y_2d = Diff_mat_2D(Nx, Ny)
    Dx_2d = Dx_2d.astype(precision)
    Dy_2d = Dy_2d.astype(precision)
    D2x_2d = D2x_2d.astype(precision)
    D2y_2d = D2y_2d.astype(precision)

    # Construction of the system matrix and adjust the right hand vector for boundary conditions
    I_sp = sp.eye(Nx * Ny).tocsr().astype(precision)
    L_sys = D2x_2d / dx ** 2 + D2y_2d / dy ** 2

    BD = I_sp  # .tolil()  # Dirichlet boundary operator
    BNx = Dx_2d  # .tolil()  # Neumann boundary operator for x component
    BNy = Dy_2d  # .tolil()  # Neumann boundary operator for y component

    t0_run = time.perf_counter()

    xy = np.concatenate([x.reshape(-1, 1), y.reshape(-1, 1)], 1)
    ind = np.lexsort((xy[:, 0], xy[:, 1]))
    reverse_ind = np.argsort(ind)

    b_ind = [np.argwhere(ind >= len(x_pde)).reshape(-1)]

    f = np.concatenate([f, u_bc], axis=0)
    f = f[ind]

    b_type = [0]
    b_val = [u_bc[0]]

    N_B = len(b_val)

    # Selectively replace the rows of the system matrix that correspond to
    # boundary value points. We replace these rows with
    # those of the boundary operator
    for m in range(N_B):
        # print(f[b_ind[m]], b_val[m] )
        # f[b_ind[m]] = b_val[m]  # Insert boundary values at the outer boundary points
        if b_type[m] == 0:
            L_sys[b_ind[m], :] = BD[b_ind[m], :]
        elif b_type[m] == 1:
            L_sys[b_ind[m], :] = BNx[b_ind[m], :]
        elif b_type[m] == 2:
            L_sys[b_ind[m], :] = BNy[b_ind[m], :]

    u = spsolve(L_sys, f)
    u = u[reverse_ind]  # reorder the solution to the original order
    u = u.astype(precision)

    end = time.perf_counter()
    dt = end - t0_run

    if verbose > 0:
        print(f"Time Numeric Solver: {dt:.6f} s")

    return u, dt
(f, x_pde, y_pde, u_bc, x_bc, y_bc, precision=torch.float32, verbose=0)
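A reference-solution sketch reusing the regular-grid arrays from the solver examples above (and assuming `numeric_solve` is imported from the package); the returned `u_num` follows the same point ordering, interior first, then boundary.

u_num, dt = numeric_solve(f, x_pde, y_pde, bc, x_bc, y_bc,
                          precision=torch.float32, verbose=1)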
43,209
fast_poisson_solver.plotter
plot
This function is used to plot the predicted Machine Learning solution of the PDE, the true source function, and the predicted source function. Parameters ---------- x_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. y_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. x_bc : tensor/array/list Coordinates of the boundary condition. y_bc : tensor/array/list Coordinates of the boundary condition. u_pred : tensor/array/list The predicted solution of the PDE using Machine Learning. f : tensor/array/list The true source function for the PDE. f_pred : tensor/array/list The predicted source function for the PDE. grid : bool, optional If True, the data is arranged into a grid and plotted as an image. If False, tricontourf is used to create a contour plot. Default is False. save : bool, optional Whether to save the image. The image is saved in both .pdf and .png formats. Default is False. save_path : str, optional Path where the image will be saved. Used only if `save` is True. Default is None. name : str, optional Name of the image file. Used only if `save` is True. Default is None. show : bool, optional Whether to display the plot. Default is False. show_points: bool, optional Whether to show the points used to train the model. Default is False.
def plot(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, grid=False, save=False, save_path=None,
         name=None, show=True, show_points=False):
    """
    This function is used to plot the predicted Machine Learning solution of the PDE,
    the true source function, and the predicted source function.

    Parameters
    ----------
    x_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    y_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    x_bc : tensor/array/list
        Coordinates of the boundary condition.
    y_bc : tensor/array/list
        Coordinates of the boundary condition.
    u_pred : tensor/array/list
        The predicted solution of the PDE using Machine Learning.
    f : tensor/array/list
        The true source function for the PDE.
    f_pred : tensor/array/list
        The predicted source function for the PDE.
    grid : bool, optional
        If True, the data is arranged into a grid and plotted as an image.
        If False, tricontourf is used to create a contour plot. Default is False.
    save : bool, optional
        Whether to save the image. The image is saved in both .pdf and .png formats.
        Default is False.
    save_path : str, optional
        Path where the image will be saved. Used only if `save` is True. Default is None.
    name : str, optional
        Name of the image file. Used only if `save` is True. Default is None.
    show : bool, optional
        Whether to display the plot. Default is False.
    show_points: bool, optional
        Whether to show the points used to train the model. Default is False.
    """
    u_pred, f, f_pred, x, y, x_bc, y_bc = format_input(
        [u_pred, f, f_pred, x_pde, y_pde, x_bc, y_bc],
        precision=torch.float64, device="cpu", as_array=True)

    x_tot = np.concatenate([x, x_bc])
    y_tot = np.concatenate([y, y_bc])

    if grid:
        x_tot, y_tot, u_pred = process_grid_data(x_tot, y_tot, u_pred)
        x, y, f, f_pred = process_grid_data(x, y, f, f_pred)

    vmin_u, vmax_u = minmax(u_pred)
    vmin_f, vmax_f = minmax(f_pred, f)

    fig, axs = plt.subplots(2, 3, figsize=(10, 8), dpi=400, tight_layout=True,
                            sharey='row', sharex='col')

    axs[0][0].text(-0.15, 0.5, 'Potential', ha='center', va='center', rotation='vertical',
                   fontsize=16, fontweight='bold')
    axs[1][0].text(-0.15, 0.5, 'Source Function', ha='center', va='center', rotation='vertical',
                   fontsize=16, fontweight='bold')

    plot_subplot(axs[0][1], x_tot, y_tot, u_pred, 'Machine Learning', vmin_u, vmax_u,
                 cb_pad=0.03, grid=grid)
    plot_subplot(axs[1][0], x, y, f, '', vmin_f, vmax_f, cb_pad=0.08, grid=grid,
                 show_points=show_points)
    plot_subplot(axs[1][1], x, y, f_pred, '', vmin_f, vmax_f, cb_pad=0.08, grid=grid)
    plot_subplot(axs[1][2], x, y, (f_pred - f), '', cb_pad=0.08, cb_ztick=True, grid=grid)

    axs[0][0].set_ylabel('y', labelpad=-10, fontsize=14)
    axs[1][0].set_ylabel('y', labelpad=-10, fontsize=14)
    axs[1][0].set_xlabel('x', labelpad=-15, fontsize=14)
    axs[1][1].set_xlabel('x', labelpad=-15, fontsize=14)
    axs[1][2].set_xlabel('x', labelpad=-15, fontsize=14)

    if save:
        if not os.path.isdir(save_path):
            os.makedirs(save_path, exist_ok=True)
        plt.savefig(os.path.join(save_path, name + '.pdf'), bbox_inches="tight")
        plt.savefig(os.path.join(save_path, name + '.png'), bbox_inches="tight")
    if show:
        plt.show()
    plt.close()
(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, grid=False, save=False, save_path=None, name=None, show=True, show_points=False)
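A plotting sketch with the example data from above (assuming `plot` is imported from the package); `grid=True` is only valid here because the points come from a regular grid, since the values are reshaped to an image.

plot(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred,
     grid=True, save=False, show=True)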
43,210
fast_poisson_solver.plotter
plot_comparison
This function is used to plot and compare the numeric solution, the predicted Machine Learning solution, and the residual between the two. It also shows the true source function, the predicted source function, and the residual between these two. Parameters ---------- x_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. y_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. x_bc : tensor/array/list Coordinates of the boundary condition. y_bc : tensor/array/list Coordinates of the boundary condition. u_pred : tensor/array/list The predicted solution of the PDE using Machine Learning. f : tensor/array/list The true source function for the PDE. f_pred : tensor/array/list The predicted source function for the PDE. u_num : tensor/array/list The numeric solution of the PDE. grid : bool, optional If True, the data is arranged into a grid and plotted as an image. If False, tricontourf is used to create a contour plot. Default is False. save : bool, optional Whether to save the image. The image is saved in both .pdf and .png formats. Default is False. save_path : str, optional Path where the image will be saved. Used only if `save` is True. Default is None. name : str, optional Name of the image file. Used only if `save` is True. Default is None. show : bool, optional Whether to display the plot. Default is False. show_points: bool, optional Whether to show the points of the data. Default is False.
def plot_comparison(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, u_num, grid=False, save=False,
                    save_path=None, name=None, show=True, show_points=False):
    """
    This function is used to plot and compare the numeric solution, the predicted
    Machine Learning solution, and the residual between the two. It also shows the true
    source function, the predicted source function, and the residual between these two.

    Parameters
    ----------
    x_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    y_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    x_bc : tensor/array/list
        Coordinates of the boundary condition.
    y_bc : tensor/array/list
        Coordinates of the boundary condition.
    u_pred : tensor/array/list
        The predicted solution of the PDE using Machine Learning.
    f : tensor/array/list
        The true source function for the PDE.
    f_pred : tensor/array/list
        The predicted source function for the PDE.
    u_num : tensor/array/list
        The numeric solution of the PDE.
    grid : bool, optional
        If True, the data is arranged into a grid and plotted as an image.
        If False, tricontourf is used to create a contour plot. Default is False.
    save : bool, optional
        Whether to save the image. The image is saved in both .pdf and .png formats.
        Default is False.
    save_path : str, optional
        Path where the image will be saved. Used only if `save` is True. Default is None.
    name : str, optional
        Name of the image file. Used only if `save` is True. Default is None.
    show : bool, optional
        Whether to display the plot. Default is False.
    show_points: bool, optional
        Whether to show the points of the data. Default is False.
    """
    u_pred, u_num, f, f_pred, x, y, x_bc, y_bc = format_input(
        [u_pred, u_num, f, f_pred, x_pde, y_pde, x_bc, y_bc],
        precision=torch.float64, device="cpu", as_array=True)

    x_tot = np.concatenate([x, x_bc])
    y_tot = np.concatenate([y, y_bc])

    if grid:
        x_tot, y_tot, u_num, u_pred = process_grid_data(x_tot, y_tot, u_num, u_pred)
        x, y, f, f_pred = process_grid_data(x, y, f, f_pred)

    vmin_u, vmax_u = minmax(u_pred, u_num)
    vmin_f, vmax_f = minmax(f_pred, f)

    fig, axs = plt.subplots(2, 3, figsize=(10, 8), dpi=400, tight_layout=True,
                            sharey='row', sharex='col')

    axs[0][0].text(-0.15, 0.5, 'Potential', ha='center', va='center', rotation='vertical',
                   fontsize=16, fontweight='bold')
    axs[1][0].text(-0.15, 0.5, 'Source Function', ha='center', va='center', rotation='vertical',
                   fontsize=16, fontweight='bold')

    plot_subplot(axs[0][0], x_tot, y_tot, u_num, 'Numeric', vmin_u, vmax_u, cb_pad=0.03, grid=grid)
    plot_subplot(axs[0][1], x_tot, y_tot, u_pred, 'Machine Learning', vmin_u, vmax_u,
                 cb_pad=0.03, grid=grid)
    plot_subplot(axs[0][2], x_tot, y_tot, (u_pred - u_num), 'Residual', cb_pad=0.03,
                 cb_ztick=True, grid=grid)
    plot_subplot(axs[1][0], x, y, f, '', vmin_f, vmax_f, cb_pad=0.08, grid=grid,
                 show_points=show_points)
    plot_subplot(axs[1][1], x, y, f_pred, '', vmin_f, vmax_f, cb_pad=0.08, grid=grid)
    plot_subplot(axs[1][2], x, y, (f_pred - f), '', cb_pad=0.08, cb_ztick=True, grid=grid)

    axs[0][0].set_ylabel('y', labelpad=-10, fontsize=14)
    axs[1][0].set_ylabel('y', labelpad=-10, fontsize=14)
    axs[1][0].set_xlabel('x', labelpad=-15, fontsize=14)
    axs[1][1].set_xlabel('x', labelpad=-15, fontsize=14)
    axs[1][2].set_xlabel('x', labelpad=-15, fontsize=14)

    if save:
        if not os.path.isdir(save_path):
            os.makedirs(save_path, exist_ok=True)
        plt.savefig(os.path.join(save_path, name + '.pdf'), bbox_inches="tight")
        plt.savefig(os.path.join(save_path, name + '.png'), bbox_inches="tight")
    if show:
        plt.show()
    plt.close()
    # np.save(os.path.join(save_path, name + '_residual.npy'),
    #         (u_pred - u_num).reshape(solver.grid_size, solver.grid_size))
(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, u_num, grid=False, save=False, save_path=None, name=None, show=True, show_points=False)
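The same data plus the numeric reference from the `numeric_solve` sketch above:

plot_comparison(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, u_num,
                grid=True, save=False, show=True)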
43,211
fast_poisson_solver.plotter
plot_side_by_side
This function is used to plot and compare the numeric solution, the predicted Machine Learning solution, and the residual between the two. It also shows the true source function, the predicted source function, and the residual between these two. Parameters ---------- x_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. y_pde : tensor/array/list Coordinates that lie inside the domain and define the behavior of the PDE. x_bc : tensor/array/list Coordinates of the boundary condition. y_bc : tensor/array/list Coordinates of the boundary condition. u_pred : tensor/array/list The predicted solution of the PDE using Machine Learning. f : tensor/array/list The true source function for the PDE. f_pred : tensor/array/list The predicted source function for the PDE. u_num : tensor/array/list The numeric solution of the PDE. grid : bool, optional If True, the data is arranged into a grid and plotted as an image. If False, tricontourf is used to create a contour plot. Default is False. save : bool, optional Whether to save the image. The image is saved in both .pdf and .png formats. Default is False. save_path : str, optional Path where the image will be saved. Used only if `save` is True. Default is None. name : str, optional Name of the image file. Used only if `save` is True. Default is None. show : bool, optional Whether to display the plot. Default is False.
def plot_side_by_side(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, u_num, grid=False,
                      save=False, save_path=None, name=None, show=True):
    """
    This function is used to plot and compare the numeric solution, the predicted
    Machine Learning solution, and the residual between the two. It also shows the true
    source function, the predicted source function, and the residual between these two.

    Parameters
    ----------
    x_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    y_pde : tensor/array/list
        Coordinates that lie inside the domain and define the behavior of the PDE.
    x_bc : tensor/array/list
        Coordinates of the boundary condition.
    y_bc : tensor/array/list
        Coordinates of the boundary condition.
    u_pred : tensor/array/list
        The predicted solution of the PDE using Machine Learning.
    f : tensor/array/list
        The true source function for the PDE.
    f_pred : tensor/array/list
        The predicted source function for the PDE.
    u_num : tensor/array/list
        The numeric solution of the PDE.
    grid : bool, optional
        If True, the data is arranged into a grid and plotted as an image.
        If False, tricontourf is used to create a contour plot. Default is False.
    save : bool, optional
        Whether to save the image. The image is saved in both .pdf and .png formats.
        Default is False.
    save_path : str, optional
        Path where the image will be saved. Used only if `save` is True. Default is None.
    name : str, optional
        Name of the image file. Used only if `save` is True. Default is None.
    show : bool, optional
        Whether to display the plot. Default is False.
    """
    u_pred, u_num, f, f_pred, x, y, x_bc, y_bc = format_input(
        [u_pred, u_num, f, f_pred, x_pde, y_pde, x_bc, y_bc],
        precision=torch.float64, device="cpu", as_array=True)

    x_tot = np.concatenate([x, x_bc])
    y_tot = np.concatenate([y, y_bc])

    if grid:
        x_tot, y_tot, u_num, u_pred = process_grid_data(x_tot, y_tot, u_num, u_pred)
        x, y, f, f_pred = process_grid_data(x, y, f, f_pred)

    vmin_u, vmax_u = minmax(u_pred, u_num)
    vmin_f, vmax_f = minmax(f_pred, f)

    fig, ax = plt.subplots(1, 2, figsize=(10, 8), dpi=400, tight_layout=True,
                           sharey='row', sharex='col')

    ax[0].tricontourf(x_tot.reshape(-1), y_tot.reshape(-1), u_num.reshape(-1), 200,
                      cmap='jet', vmin=vmin_u, vmax=vmax_u)
    ax[1].tricontourf(x_tot.reshape(-1), y_tot.reshape(-1), u_pred.reshape(-1), 200,
                      cmap='jet', vmin=vmin_u, vmax=vmax_u)

    font_name = "Calibri" if "Calibri" in fm.findSystemFonts(fontpaths=None, fontext='ttf') else None

    ax[0].set_title('Numeric (5s)', fontsize=30, fontweight='bold', fontname=font_name, pad=20)
    ax[1].set_title('Ours (0.003s)', fontsize=30, fontweight='bold', fontname=font_name, pad=20)

    ax[0].set_aspect('equal', adjustable='box')
    ax[1].set_aspect('equal', adjustable='box')
    ax[0].axis('off')
    ax[1].axis('off')

    plt.text(1, -0.06, '400x400 Grid', ha='right', va='bottom', transform=plt.gca().transAxes)

    if save:
        if not os.path.isdir(save_path):
            os.makedirs(save_path, exist_ok=True)
        plt.savefig(os.path.join(save_path, name + '.pdf'), bbox_inches="tight")
        plt.savefig(os.path.join(save_path, name + '.png'), bbox_inches="tight")
    if show:
        plt.show()
    plt.close()
    # np.save(os.path.join(save_path, name + '_residual.npy'),
    #         (u_pred - u_num).reshape(solver.grid_size, solver.grid_size))
(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, u_num, grid=False, save=False, save_path=None, name=None, show=True)
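An analogous call; note that the panel titles and the grid-size caption are hardcoded inside this function, so they will not reflect the actual inputs.

plot_side_by_side(x_pde, y_pde, x_bc, y_bc, u_pred, f, f_pred, u_num,
                  grid=True, save=False, show=True)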
43,212
fast_poisson_solver.plotter
plot_subplot
null
def plot_subplot(ax, x, y, v, title, vmin=None, vmax=None, cb_pad=0.018, cb_ztick=False,
                 grid=False, show_points=False):
    if vmin is None:
        vmin = min((np.min(v), 0))
    if vmax is None:
        vmax = max((np.max(v), 0))
    if grid:
        c = ax.imshow(v, cmap='jet', vmin=vmin, vmax=vmax, extent=(0, 1, 0, 1), origin='lower')
    else:
        c = ax.tricontourf(x.reshape(-1), y.reshape(-1), v.reshape(-1), 100, cmap='jet',
                           vmin=vmin, vmax=vmax)  # , extent=(0, 1, 0, 1))
    if show_points:
        ax.scatter(x, y, s=1, c='black', marker='.', alpha=0.5)
    if title != '':
        ax.set_title(title, fontsize=16, pad=12, fontweight='bold')
    # ax.set_xlim(0, 1)
    # ax.set_ylim(0, 1)
    ax.set_aspect('equal', adjustable='box')
    ax.set_xticks((0, 1), labels=('0', '1'), fontsize=14)
    ax.set_yticks((0, 1), labels=('0', '1'), fontsize=14)
    for spine in ax.spines.values():
        spine.set_visible(False)
    cb = plt.colorbar(ScalarMappable(norm=c.norm, cmap=c.cmap), ax=ax, extendrect=True,
                      pad=cb_pad, location='bottom', shrink=0.8)
    cb.outline.set_visible(False)
    if cb_ztick:
        cb.set_ticks([vmin, 0, vmax], labels=[f'{vmin:.4f}', '0', f'{vmax:.4f}'], fontsize=15)
    else:
        cb.set_ticks([vmin, vmax], labels=[f'{vmin:.4f}', f'{vmax:.4f}'], fontsize=15)
(ax, x, y, v, title, vmin=None, vmax=None, cb_pad=0.018, cb_ztick=False, grid=False, show_points=False)
43,215
fast_poisson_solver.utils
process_grid_data
null
def process_grid_data(x, y, z1, z2=None):
    x_num = len(np.unique(x))
    y_num = len(np.unique(y))
    if z2 is not None:
        x, y, [z1, z2] = sort_ascend(x, y, [z1, z2])
        z1 = z1.reshape(x_num, y_num)
        z2 = z2.reshape(x_num, y_num)
        return x, y, z1, z2
    else:
        x, y, [z1] = sort_ascend(x, y, [z1])
        z1 = z1.reshape(x_num, y_num)
        return x, y, z1
(x, y, z1, z2=None)
43,216
skimage.metrics.simple_metrics
peak_signal_noise_ratio
Compute the peak signal to noise ratio (PSNR) for an image. Parameters ---------- image_true : ndarray Ground-truth image, same shape as im_test. image_test : ndarray Test image. data_range : int, optional The data range of the input image (distance between minimum and maximum possible values). By default, this is estimated from the image data-type. Returns ------- psnr : float The PSNR metric. Notes ----- .. versionchanged:: 0.16 This function was renamed from ``skimage.measure.compare_psnr`` to ``skimage.metrics.peak_signal_noise_ratio``. References ---------- .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
def peak_signal_noise_ratio(image_true, image_test, *, data_range=None):
    """
    Compute the peak signal to noise ratio (PSNR) for an image.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as im_test.
    image_test : ndarray
        Test image.
    data_range : int, optional
        The data range of the input image (distance between minimum and
        maximum possible values). By default, this is estimated from the
        image data-type.

    Returns
    -------
    psnr : float
        The PSNR metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_psnr`` to
        ``skimage.metrics.peak_signal_noise_ratio``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    """
    check_shape_equality(image_true, image_test)

    if data_range is None:
        if image_true.dtype != image_test.dtype:
            warn(
                "Inputs have mismatched dtype. Setting data_range based on "
                "image_true."
            )
        dmin, dmax = dtype_range[image_true.dtype.type]
        true_min, true_max = np.min(image_true), np.max(image_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "image_true has intensity values outside the range expected "
                "for its data type. Please manually specify the data_range."
            )
        if true_min >= 0:
            # most common case (255 for uint8, 1 for float)
            data_range = dmax
        else:
            data_range = dmax - dmin

    image_true, image_test = _as_floats(image_true, image_test)

    err = mean_squared_error(image_true, image_test)
    return 10 * np.log10((data_range**2) / err)
(image_true, image_test, *, data_range=None)
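A standalone sketch of the skimage function, passing `data_range` explicitly as recommended for floating-point images:

import numpy as np
from skimage.metrics import peak_signal_noise_ratio

rng = np.random.default_rng(0)
clean = rng.random((64, 64))
noisy = np.clip(clean + 0.05 * rng.standard_normal((64, 64)), 0.0, 1.0)
print(peak_signal_noise_ratio(clean, noisy, data_range=1.0))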
43,217
sklearn.metrics._regression
r2_score
:math:`R^2` (coefficient of determination) regression score function. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). In the general case when the true y is non-constant, a constant model that always predicts the average y disregarding the input features would get a :math:`R^2` score of 0.0. In the particular case when ``y_true`` is constant, the :math:`R^2` score is not finite: it is either ``NaN`` (perfect predictions) or ``-Inf`` (imperfect predictions). To prevent such non-finite numbers to pollute higher-level experiments such as a grid search cross-validation, by default these cases are replaced with 1.0 (perfect predictions) or 0.0 (imperfect predictions) respectively. You can set ``force_finite`` to ``False`` to prevent this fix from happening. Note: when the prediction residuals have zero mean, the :math:`R^2` score is identical to the :func:`Explained Variance score <explained_variance_score>`. Read more in the :ref:`User Guide <r2_score>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, array-like of shape (n_outputs,) or None, default='uniform_average' Defines aggregating of multiple output scores. Array-like value defines weights used to average scores. Default is "uniform_average". 'raw_values' : Returns a full set of scores in case of multioutput input. 'uniform_average' : Scores of all outputs are averaged with uniform weight. 'variance_weighted' : Scores of all outputs are averaged, weighted by the variances of each individual output. .. versionchanged:: 0.19 Default value of multioutput is 'uniform_average'. force_finite : bool, default=True Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant data should be replaced with real numbers (``1.0`` if prediction is perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting for hyperparameters' search procedures (e.g. grid search cross-validation). .. versionadded:: 1.1 Returns ------- z : float or ndarray of floats The :math:`R^2` score or ndarray of scores if 'multioutput' is 'raw_values'. Notes ----- This is not a symmetric function. Unlike most other scores, :math:`R^2` score may be negative (it need not actually be the square of a quantity R). This metric is not well-defined for single samples and will return a NaN value if n_samples is less than two. References ---------- .. [1] `Wikipedia entry on the Coefficient of determination <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_ Examples -------- >>> from sklearn.metrics import r2_score >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> r2_score(y_true, y_pred) 0.948... >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> r2_score(y_true, y_pred, ... multioutput='variance_weighted') 0.938... 
>>> y_true = [1, 2, 3] >>> y_pred = [1, 2, 3] >>> r2_score(y_true, y_pred) 1.0 >>> y_true = [1, 2, 3] >>> y_pred = [2, 2, 2] >>> r2_score(y_true, y_pred) 0.0 >>> y_true = [1, 2, 3] >>> y_pred = [3, 2, 1] >>> r2_score(y_true, y_pred) -3.0 >>> y_true = [-2, -2, -2] >>> y_pred = [-2, -2, -2] >>> r2_score(y_true, y_pred) 1.0 >>> r2_score(y_true, y_pred, force_finite=False) nan >>> y_true = [-2, -2, -2] >>> y_pred = [-2, -2, -2 + 1e-8] >>> r2_score(y_true, y_pred) 0.0 >>> r2_score(y_true, y_pred, force_finite=False) -inf
def mean_absolute_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute error regression loss.

    Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAE output is non-negative floating point. The best value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.85...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)
(y_true, y_pred, *, sample_weight=None, multioutput='uniform_average', force_finite=True)
43,219
skimage.metrics._structural_similarity
structural_similarity
Compute the mean structural similarity index between two images. Please pay attention to the `data_range` parameter with floating-point images. Parameters ---------- im1, im2 : ndarray Images. Any dimensionality with same shape. win_size : int or None, optional The side-length of the sliding window used in comparison. Must be an odd value. If `gaussian_weights` is True, this is ignored and the window size will depend on `sigma`. gradient : bool, optional If True, also return the gradient with respect to im2. data_range : float, optional The data range of the input image (difference between maximum and minimum possible values). By default, this is estimated from the image data type. This estimate may be wrong for floating-point image data. Therefore it is recommended to always pass this scalar value explicitly (see note below). channel_axis : int or None, optional If None, the image is assumed to be a grayscale (single channel) image. Otherwise, this parameter indicates which axis of the array corresponds to channels. .. versionadded:: 0.19 ``channel_axis`` was added in 0.19. gaussian_weights : bool, optional If True, each patch has its mean and variance spatially weighted by a normalized Gaussian kernel of width sigma=1.5. full : bool, optional If True, also return the full structural similarity image. Other Parameters ---------------- use_sample_covariance : bool If True, normalize covariances by N-1 rather than, N where N is the number of pixels within the sliding window. K1 : float Algorithm parameter, K1 (small constant, see [1]_). K2 : float Algorithm parameter, K2 (small constant, see [1]_). sigma : float Standard deviation for the Gaussian when `gaussian_weights` is True. Returns ------- mssim : float The mean structural similarity index over the image. grad : ndarray The gradient of the structural similarity between im1 and im2 [2]_. This is only returned if `gradient` is set to True. S : ndarray The full SSIM image. This is only returned if `full` is set to True. Notes ----- If `data_range` is not specified, the range is automatically guessed based on the image data type. However for floating-point image data, this estimate yields a result double the value of the desired range, as the `dtype_range` in `skimage.util.dtype.py` has defined intervals from -1 to +1. This yields an estimate of 2, instead of 1, which is most often required when working with image data (as negative light intensities are nonsensical). In case of working with YCbCr-like color data, note that these ranges are different per channel (Cb and Cr have double the range of Y), so one cannot calculate a channel-averaged SSIM with a single call to this function, as identical ranges are assumed for each channel. To match the implementation of Wang et al. [1]_, set `gaussian_weights` to True, `sigma` to 1.5, `use_sample_covariance` to False, and specify the `data_range` argument. .. versionchanged:: 0.16 This function was renamed from ``skimage.measure.compare_ssim`` to ``skimage.metrics.structural_similarity``. References ---------- .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: From error visibility to structural similarity. IEEE Transactions on Image Processing, 13, 600-612. https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf, :DOI:`10.1109/TIP.2003.819861` .. [2] Avanaki, A. N. (2009). Exact global histogram specification optimized for structural similarity. Optical Review, 16, 613-621. :arxiv:`0901.0065` :DOI:`10.1007/s10043-009-0119-z`
def structural_similarity( im1, im2, *, win_size=None, gradient=False, data_range=None, channel_axis=None, gaussian_weights=False, full=False, **kwargs, ): """ Compute the mean structural similarity index between two images. Please pay attention to the `data_range` parameter with floating-point images. Parameters ---------- im1, im2 : ndarray Images. Any dimensionality with the same shape. win_size : int or None, optional The side-length of the sliding window used in comparison. Must be an odd value. If `gaussian_weights` is True, this is ignored and the window size will depend on `sigma`. gradient : bool, optional If True, also return the gradient with respect to im2. data_range : float, optional The data range of the input image (difference between maximum and minimum possible values). By default, this is estimated from the image data type. This estimate may be wrong for floating-point image data. Therefore it is recommended to always pass this scalar value explicitly (see note below). channel_axis : int or None, optional If None, the image is assumed to be a grayscale (single channel) image. Otherwise, this parameter indicates which axis of the array corresponds to channels. .. versionadded:: 0.19 ``channel_axis`` was added in 0.19. gaussian_weights : bool, optional If True, each patch has its mean and variance spatially weighted by a normalized Gaussian kernel of width sigma=1.5. full : bool, optional If True, also return the full structural similarity image. Other Parameters ---------------- use_sample_covariance : bool If True, normalize covariances by N-1 rather than N, where N is the number of pixels within the sliding window. K1 : float Algorithm parameter, K1 (small constant, see [1]_). K2 : float Algorithm parameter, K2 (small constant, see [1]_). sigma : float Standard deviation for the Gaussian when `gaussian_weights` is True. Returns ------- mssim : float The mean structural similarity index over the image. grad : ndarray The gradient of the structural similarity between im1 and im2 [2]_. This is only returned if `gradient` is set to True. S : ndarray The full SSIM image. This is only returned if `full` is set to True. Notes ----- If `data_range` is not specified, the range is automatically guessed based on the image data type. However, for floating-point image data, this estimate yields a result double the value of the desired range, as the `dtype_range` in `skimage.util.dtype.py` has defined intervals from -1 to +1. This yields an estimate of 2, instead of 1, which is most often required when working with image data (as negative light intensities are nonsensical). In case of working with YCbCr-like color data, note that these ranges are different per channel (Cb and Cr have double the range of Y), so one cannot calculate a channel-averaged SSIM with a single call to this function, as identical ranges are assumed for each channel. To match the implementation of Wang et al. [1]_, set `gaussian_weights` to True, `sigma` to 1.5, `use_sample_covariance` to False, and specify the `data_range` argument. .. versionchanged:: 0.16 This function was renamed from ``skimage.measure.compare_ssim`` to ``skimage.metrics.structural_similarity``. References ---------- .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: From error visibility to structural similarity. IEEE Transactions on Image Processing, 13, 600-612. https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf, :DOI:`10.1109/TIP.2003.819861` .. [2] Avanaki, A. N. (2009). 
Exact global histogram specification optimized for structural similarity. Optical Review, 16, 613-621. :arxiv:`0901.0065` :DOI:`10.1007/s10043-009-0119-z` """ check_shape_equality(im1, im2) float_type = _supported_float_type(im1.dtype) if channel_axis is not None: # loop over channels args = dict( win_size=win_size, gradient=gradient, data_range=data_range, channel_axis=None, gaussian_weights=gaussian_weights, full=full, ) args.update(kwargs) nch = im1.shape[channel_axis] mssim = np.empty(nch, dtype=float_type) if gradient: G = np.empty(im1.shape, dtype=float_type) if full: S = np.empty(im1.shape, dtype=float_type) channel_axis = channel_axis % im1.ndim _at = functools.partial(utils.slice_at_axis, axis=channel_axis) for ch in range(nch): ch_result = structural_similarity(im1[_at(ch)], im2[_at(ch)], **args) if gradient and full: mssim[ch], G[_at(ch)], S[_at(ch)] = ch_result elif gradient: mssim[ch], G[_at(ch)] = ch_result elif full: mssim[ch], S[_at(ch)] = ch_result else: mssim[ch] = ch_result mssim = mssim.mean() if gradient and full: return mssim, G, S elif gradient: return mssim, G elif full: return mssim, S else: return mssim K1 = kwargs.pop('K1', 0.01) K2 = kwargs.pop('K2', 0.03) sigma = kwargs.pop('sigma', 1.5) if K1 < 0: raise ValueError("K1 must be positive") if K2 < 0: raise ValueError("K2 must be positive") if sigma < 0: raise ValueError("sigma must be positive") use_sample_covariance = kwargs.pop('use_sample_covariance', True) if gaussian_weights: # Set to give an 11-tap filter with the default sigma of 1.5 to match # Wang et al. 2004. truncate = 3.5 if win_size is None: if gaussian_weights: # set win_size used by crop to match the filter size r = int(truncate * sigma + 0.5) # radius as in ndimage win_size = 2 * r + 1 else: win_size = 7 # backwards compatibility if np.any((np.asarray(im1.shape) - win_size) < 0): raise ValueError( 'win_size exceeds image extent. ' 'Either ensure that your images are ' 'at least 7x7; or pass win_size explicitly ' 'in the function call, with an odd value ' 'less than or equal to the smaller side of your ' 'images. If your images are multichannel ' '(with color channels), set channel_axis to ' 'the axis number corresponding to the channels.' ) if not (win_size % 2 == 1): raise ValueError('Window size must be odd.') if data_range is None: if np.issubdtype(im1.dtype, np.floating) or np.issubdtype( im2.dtype, np.floating ): raise ValueError( 'Since image dtype is floating point, you must specify ' 'the data_range parameter. Please read the documentation ' 'carefully (including the note). It is recommended that ' 'you always specify the data_range anyway.' ) if im1.dtype != im2.dtype: warn( "Inputs have mismatched dtypes. Setting data_range based on im1.dtype.", stacklevel=2, ) dmin, dmax = dtype_range[im1.dtype.type] data_range = dmax - dmin if np.issubdtype(im1.dtype, np.integer) and (im1.dtype != np.uint8): warn( "Setting data_range based on im1.dtype. " + f"data_range = {data_range:.0f}. 
" + "Please specify data_range explicitly to avoid mistakes.", stacklevel=2, ) ndim = im1.ndim if gaussian_weights: filter_func = gaussian filter_args = {'sigma': sigma, 'truncate': truncate, 'mode': 'reflect'} else: filter_func = uniform_filter filter_args = {'size': win_size} # ndimage filters need floating point data im1 = im1.astype(float_type, copy=False) im2 = im2.astype(float_type, copy=False) NP = win_size**ndim # filter has already normalized by NP if use_sample_covariance: cov_norm = NP / (NP - 1) # sample covariance else: cov_norm = 1.0 # population covariance to match Wang et. al. 2004 # compute (weighted) means ux = filter_func(im1, **filter_args) uy = filter_func(im2, **filter_args) # compute (weighted) variances and covariances uxx = filter_func(im1 * im1, **filter_args) uyy = filter_func(im2 * im2, **filter_args) uxy = filter_func(im1 * im2, **filter_args) vx = cov_norm * (uxx - ux * ux) vy = cov_norm * (uyy - uy * uy) vxy = cov_norm * (uxy - ux * uy) R = data_range C1 = (K1 * R) ** 2 C2 = (K2 * R) ** 2 A1, A2, B1, B2 = ( 2 * ux * uy + C1, 2 * vxy + C2, ux**2 + uy**2 + C1, vx + vy + C2, ) D = B1 * B2 S = (A1 * A2) / D # to avoid edge effects will ignore filter radius strip around edges pad = (win_size - 1) // 2 # compute (weighted) mean of ssim. Use float64 for accuracy. mssim = crop(S, pad).mean(dtype=np.float64) if gradient: # The following is Eqs. 7-8 of Avanaki 2009. grad = filter_func(A1 / D, **filter_args) * im1 grad += filter_func(-S / B2, **filter_args) * im2 grad += filter_func((ux * (A2 - A1) - uy * (B2 - B1) * S) / D, **filter_args) grad *= 2 / im1.size if full: return mssim, grad, S else: return mssim, grad else: if full: return mssim, S else: return mssim
(im1, im2, *, win_size=None, gradient=False, data_range=None, channel_axis=None, gaussian_weights=False, full=False, **kwargs)
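A minimal usage sketch for the `data_range` caveat documented above; the array values are illustrative, and only the public scikit-image API shown in this record is used:

import numpy as np
from skimage.metrics import structural_similarity

# Two float images in [0, 1]; for floating-point inputs the function
# refuses to guess data_range, so pass it explicitly.
rng = np.random.default_rng(0)
im1 = rng.random((64, 64))
im2 = np.clip(im1 + 0.05 * rng.standard_normal((64, 64)), 0.0, 1.0)

mssim = structural_similarity(im1, im2, data_range=1.0)

# Settings that match the Wang et al. (2004) reference implementation;
# gradient=True additionally returns d(SSIM)/d(im2).
mssim_wang, grad = structural_similarity(
    im1, im2, gaussian_weights=True, sigma=1.5,
    use_sample_covariance=False, data_range=1.0, gradient=True,
)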
43,292
plumage
get_path
Returns the installation path of the theme. Used in ``pelicanconf.py`` to dynamically fetch the theme location on the system.
def get_path() -> str: """Returns the installation path of the theme. Used in ``pelicanconf.py`` to dynamically fetch the theme location on the system. """ return str(PLUMAGE_ROOT)
() -> str
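As a usage sketch, a Pelican ``pelicanconf.py`` points ``THEME`` at the returned path rather than hard-coding where the theme is installed; this mirrors the use the docstring describes (``THEME`` is Pelican's setting name, not part of plumage):

# pelicanconf.py (excerpt)
import plumage

# Resolve the theme directory dynamically instead of hard-coding a path.
THEME = plumage.get_path()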
43,294
plumage.config
register_signals
null
def register_signals() -> None: signals.initialized.connect(check_config) signals.static_generator_finalized.connect(add_favicon_assets) signals.content_written.connect(transform)
() -> NoneType
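For context, this follows Pelican's plugin convention: handlers are connected to named signals, and Pelican invokes them at the matching lifecycle points. A minimal sketch of the same pattern with a hypothetical handler (``log_site_name`` is illustrative and not part of plumage):

from pelican import signals

def log_site_name(pelican_object):
    # Hypothetical handler: runs once Pelican has finished initializing.
    print(pelican_object.settings.get("SITENAME"))

def register():
    # Same signal.connect() pattern used by register_signals above.
    signals.initialized.connect(log_site_name)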
43,296
mo_dots.datas
Data
Please see https://github.com/klahnakoski/mo-dots/tree/dev/docs#data-replaces-pythons-dict
class Data: """ Please see https://github.com/klahnakoski/mo-dots/tree/dev/docs#data-replaces-pythons-dict """ __slots__ = [SLOT] def __init__(self, *args, **kwargs): """ CONSTRUCT DATA WITH GIVEN PROPERTY VALUES """ if args: raise Exception("only keywords are allowed, not " + args[0].__class__.__name__) _set(self, SLOT, kwargs) def __bool__(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: return True else: return d != None __nonzero__ = __bool__ def __contains__(self, item): value = Data.__getitem__(self, item) if is_data(value) or value: return True return False def __iter__(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: yield from d.items() else: yield from d.__iter__() def __getitem__(self, key): if is_null(key): return Null if key == ".": output = _get(self, SLOT) if is_data(output): return self else: return output key = str(key) d = _get(self, SLOT) if key.find(".") >= 0: seq = _split_field(key) for n in seq: if _get(d, CLASS) is NullType: d = NullType(d, n) # OH DEAR, Null TREATS n AS PATH, NOT LITERAL elif is_many(d): d = [_getdefault(dd, n) for dd in d] else: d = _getdefault(d, n) # EVERYTHING ELSE TREATS n AS LITERAL return to_data(d) else: o = d.get(key) if is_null(o): return NullType(d, key) return to_data(o) def __setitem__(self, key, value): if key == "": get_logger().error("key is empty string. Probably a bad idea") if is_null(key): return Null if key == ".": # SOMETHING TERRIBLE HAPPENS WHEN value IS NOT A Mapping; # HOPEFULLY THE ONLY OTHER METHOD RUN ON self IS from_data() v = from_data(value) if is_many(v): _set(self, CLASS, FlatList) _set(self, SLOT, v) return self try: d = _get(self, SLOT) value = from_data(value) if "." not in key: if value is None: d.pop(key, None) else: d[key] = value return self seq = _split_field(key) for k in seq[:-1]: d = _getdefault(d, k) if is_null(value): try: d.pop(seq[-1], None) except Exception as _: pass elif is_null(d): d[literal_field(seq[-1])] = value elif is_sequence(d): for dd in d: from_data(dd)[seq[-1]] = value else: d[seq[-1]] = value return self except Exception as e: from mo_logs import Log Log.error("can not set key={{key}}", key=key, cause=e) def __getattr__(self, key): d = _get(self, SLOT) v = d.get(key) t = _get(v, CLASS) # OPTIMIZED to_data() if t is dict: return dict_to_data(v) elif t in utils._null_types: return NullType(d, key) elif t is list: return list_to_data(v) elif t in generator_types: return FlatList(list(from_data(vv) for vv in v)) else: return v def __setattr__(self, key, value): d = _get(self, SLOT) value = from_data(value) if value is None: d = _get(self, SLOT) d.pop(key, None) else: d[key] = value return self def __add__(self, other): return _iadd(_iadd({}, self), other) def __radd__(self, other): return _iadd(_iadd({}, other), self) def __iadd__(self, other): return _iadd(self, other) def __or__(self, other): """ RECURSIVE COALESCE OF DATA PROPERTIES """ if not is_data(other): get_logger().error("Expecting Data") d = _get(self, SLOT) output = Data(**d) # COPY output.__ior__(other) return output def __ror__(self, other): """ RECURSIVE COALESCE OF DATA PROPERTIES """ if not is_data(other): get_logger().error("Expecting Data") return to_data(other).__or__(self) def __ior__(self, other): """ RECURSIVE COALESCE OF DATA PROPERTIES """ d = _get(self, SLOT) if not is_data(other): if is_missing(d) or (isinstance(d, dict) and not d): _set(self, SLOT, other) return self for ok, ov in other.items(): if is_null(ov): continue sv = to_data(d.get(ok)) if is_null(sv): d[ok] = ov elif is_data(sv): d[ok] = sv | 
ov return self def __hash__(self): d = _get(self, SLOT) return hash_value(d) def __eq__(self, other): if self is other: return True d = _get(self, SLOT) if _get(d, CLASS) is not dict: return d == other if not d and is_null(other): return False if not is_data(other): return False e = other for k, v in d.items(): if e.get(k) != v: return False for k, v in e.items(): if d.get(k) != v: return False return True def __ne__(self, other): return not self.__eq__(other) def get(self, key, default=Null): v = self[key] if _get(v, CLASS) == NullType: if default is Null: return NullType(self, key) return default return v def items(self): d = _get(self, SLOT) return [(k, to_data(v)) for k, v in d.items() if v != None or is_data(v)] def leaves(self, prefix=None): """ LIKE items() BUT RECURSIVE, AND ONLY FOR THE LEAVES (non dict) VALUES """ return leaves(self, prefix) def iteritems(self): # LOW LEVEL ITERATION, NO WRAPPING d = _get(self, SLOT) return ((k, to_data(v)) for k, v in d.items()) def pop(self, key, default=Null): if is_null(key): return Null if key == ".": raise NotImplementedError() key = str(key) d = _get(self, SLOT) if key.find(".") >= 0: seq = _split_field(key) for n in seq[:-1]: if _get(d, CLASS) is NullType: d = NullType(d, n) # OH DEAR, Null TREATS n AS PATH, NOT LITERAL elif is_many(d): d = [_getdefault(dd, n) for dd in d] else: d = _getdefault(d, n) # EVERYTHING ELSE TREATS n AS LITERAL key = seq[-1] o = d.get(key) if is_null(o): if default is Null: return NullType(d, key) return default d[key] = None return to_data(o) def keys(self): d = _get(self, SLOT) return set(d.keys()) def values(self): d = _get(self, SLOT) return listwrap(list(d.values())) def clear(self): get_logger().error("clear() not supported") def __len__(self): d = _get(self, SLOT) return dict.__len__(d) def copy(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: return Data(**d) else: return copy(d) def __copy__(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: return Data(**self) else: return copy(d) def __deepcopy__(self, memo): d = _get(self, SLOT) return to_data(deepcopy(d, memo)) def __delitem__(self, key): if "." not in key: d = _get(self, SLOT) d.pop(key, None) return d = _get(self, SLOT) seq = _split_field(key) for k in seq[:-1]: d = d[k] d.pop(seq[-1], None) def __delattr__(self, key): key = str(key) d = _get(self, SLOT) d.pop(key, None) def setdefault(self, k, d=None): v = self[k] if is_null(v): self[k] = d return d return v def __str__(self): return str(_get(self, SLOT)) def __dir__(self): d = _get(self, SLOT) return d.keys() def __repr__(self): try: return f"to_data({repr(_get(self, SLOT))})" except Exception as e: return "Data(?)"
(*args, **kwargs)
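A short sketch of the behaviors implemented above, following the mo-dots documentation linked in the docstring: dotted keys act as paths, attribute access mirrors item access, and missing paths yield a chainable, falsey null instead of raising:

from mo_dots import Data

d = Data(a=1)
d["b.c"] = 2        # dotted key is a path: stores {"b": {"c": 2}}
assert d.b.c == 2   # attribute access walks the same path
assert not d.x.y.z  # missing paths return Null-like objects, no KeyError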
43,297
mo_dots.datas
__add__
null
def __add__(self, other): return _iadd(_iadd({}, self), other)
(self, other)
43,298
mo_dots.datas
__bool__
null
def __bool__(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: return True else: return d != None
(self)
43,299
mo_dots.datas
__contains__
null
def __contains__(self, item): value = Data.__getitem__(self, item) if is_data(value) or value: return True return False
(self, item)
43,300
mo_dots.datas
__copy__
null
def __copy__(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: return Data(**self) else: return copy(d)
(self)
43,301
mo_dots.datas
__deepcopy__
null
def __deepcopy__(self, memo): d = _get(self, SLOT) return to_data(deepcopy(d, memo))
(self, memo)
43,302
mo_dots.datas
__delattr__
null
def __delattr__(self, key): key = str(key) d = _get(self, SLOT) d.pop(key, None)
(self, key)
43,303
mo_dots.datas
__delitem__
null
def __delitem__(self, key): if "." not in key: d = _get(self, SLOT) d.pop(key, None) return d = _get(self, SLOT) seq = _split_field(key) for k in seq[:-1]: d = d[k] d.pop(seq[-1], None)
(self, key)
43,304
mo_dots.datas
__dir__
null
def __dir__(self): d = _get(self, SLOT) return d.keys()
(self)
43,305
mo_dots.datas
__eq__
null
def __eq__(self, other): if self is other: return True d = _get(self, SLOT) if _get(d, CLASS) is not dict: return d == other if not d and is_null(other): return False if not is_data(other): return False e = other for k, v in d.items(): if e.get(k) != v: return False for k, v in e.items(): if d.get(k) != v: return False return True
(self, other)
43,306
mo_dots.datas
__getattr__
null
def __getattr__(self, key): d = _get(self, SLOT) v = d.get(key) t = _get(v, CLASS) # OPTIMIZED to_data() if t is dict: return dict_to_data(v) elif t in utils._null_types: return NullType(d, key) elif t is list: return list_to_data(v) elif t in generator_types: return FlatList(list(from_data(vv) for vv in v)) else: return v
(self, key)
43,307
mo_dots.datas
__getitem__
null
def __getitem__(self, key): if is_null(key): return Null if key == ".": output = _get(self, SLOT) if is_data(output): return self else: return output key = str(key) d = _get(self, SLOT) if key.find(".") >= 0: seq = _split_field(key) for n in seq: if _get(d, CLASS) is NullType: d = NullType(d, n) # OH DEAR, Null TREATS n AS PATH, NOT LITERAL elif is_many(d): d = [_getdefault(dd, n) for dd in d] else: d = _getdefault(d, n) # EVERYTHING ELSE TREATS n AS LITERAL return to_data(d) else: o = d.get(key) if is_null(o): return NullType(d, key) return to_data(o)
(self, key)
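The ``key.find(".") >= 0`` branch above is what makes bracket access path-aware; a small sketch:

from mo_dots import Data

d = Data()
d["a.b"] = {"c": 3}
assert d["a.b.c"] == 3     # the dotted key walks the nested structure
assert d["a.b"]["c"] == 3  # equivalent stepwise lookup
assert not d["a.z"]        # a missing step yields a falsey NullType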
43,308
mo_dots.datas
__hash__
null
def __hash__(self): d = _get(self, SLOT) return hash_value(d)
(self)
43,309
mo_dots.datas
__iadd__
null
def __iadd__(self, other): return _iadd(self, other)
(self, other)
43,310
mo_dots.datas
__init__
CONSTRUCT DATA WITH GIVEN PROPERTY VALUES
def __init__(self, *args, **kwargs): """ CONSTRUCT DATA WITH GIVEN PROPERTY VALUES """ if args: raise Exception("only keywords are allowed, not " + args[0].__class__.__name__) _set(self, SLOT, kwargs)
(self, *args, **kwargs)
43,311
mo_dots.datas
__ior__
RECURSIVE COALESCE OF DATA PROPERTIES
def __ior__(self, other): """ RECURSIVE COALESCE OF DATA PROPERTIES """ d = _get(self, SLOT) if not is_data(other): if is_missing(d) or (isinstance(d, dict) and not d): _set(self, SLOT, other) return self for ok, ov in other.items(): if is_null(ov): continue sv = to_data(d.get(ok)) if is_null(sv): d[ok] = ov elif is_data(sv): d[ok] = sv | ov return self
(self, other)
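"Coalesce" here means existing values win: only null or missing slots are filled from the right-hand side, recursing into nested data. A sketch:

from mo_dots import Data

a = Data(x=1, sub={"p": 1})
a |= {"x": 99, "y": 2, "sub": {"q": 2}}
assert a.x == 1                       # existing value is kept
assert a.y == 2                       # missing key is filled in
assert a.sub.p == 1 and a.sub.q == 2  # the merge recurses into sub-objects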
43,312
mo_dots.datas
__iter__
null
def __iter__(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: yield from d.items() else: yield from d.__iter__()
(self)
43,313
mo_dots.datas
__len__
null
def __len__(self): d = _get(self, SLOT) return dict.__len__(d)
(self)
43,316
mo_dots.datas
__or__
RECURSIVE COALESCE OF DATA PROPERTIES
def __or__(self, other): """ RECURSIVE COALESCE OF DATA PROPERTIES """ if not is_data(other): get_logger().error("Expecting Data") d = _get(self, SLOT) output = Data(**d) # COPY output.__ior__(other) return output
(self, other)
43,317
mo_dots.datas
__radd__
null
def __radd__(self, other): return _iadd(_iadd({}, other), self)
(self, other)
43,318
mo_dots.datas
__repr__
null
def __repr__(self): try: return f"to_data({repr(_get(self, SLOT))})" except Exception as e: return "Data(?)"
(self)
43,319
mo_dots.datas
__ror__
RECURSIVE COALESCE OF DATA PROPERTIES
def __ror__(self, other): """ RECURSIVE COALESCE OF DATA PROPERTIES """ if not is_data(other): get_logger().error("Expecting Data") return to_data(other).__or__(self)
(self, other)
43,320
mo_dots.datas
__setattr__
null
def __setattr__(self, key, value): d = _get(self, SLOT) value = from_data(value) if value is None: d = _get(self, SLOT) d.pop(key, None) else: d[key] = value return self
(self, key, value)
43,321
mo_dots.datas
__setitem__
null
def __setitem__(self, key, value): if key == "": get_logger().error("key is empty string. Probably a bad idea") if is_null(key): return Null if key == ".": # SOMETHING TERRIBLE HAPPENS WHEN value IS NOT A Mapping; # HOPEFULLY THE ONLY OTHER METHOD RUN ON self IS from_data() v = from_data(value) if is_many(v): _set(self, CLASS, FlatList) _set(self, SLOT, v) return self try: d = _get(self, SLOT) value = from_data(value) if "." not in key: if value is None: d.pop(key, None) else: d[key] = value return self seq = _split_field(key) for k in seq[:-1]: d = _getdefault(d, k) if is_null(value): try: d.pop(seq[-1], None) except Exception as _: pass elif is_null(d): d[literal_field(seq[-1])] = value elif is_sequence(d): for dd in d: from_data(dd)[seq[-1]] = value else: d[seq[-1]] = value return self except Exception as e: from mo_logs import Log Log.error("can not set key={{key}}", key=key, cause=e)
(self, key, value)
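Two behaviors above are easy to miss: intermediate objects are created on demand for dotted keys, and assigning ``None`` deletes rather than stores. A sketch:

from mo_dots import Data

d = Data()
d["a.b"] = 1     # parent objects are materialized as needed
assert d.a.b == 1
d["a.b"] = None  # assigning None removes the slot
assert "a.b" not in d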
43,322
mo_dots.datas
__str__
null
def __str__(self): return str(_get(self, SLOT))
(self)
43,323
mo_dots.datas
clear
null
def clear(self): get_logger().error("clear() not supported")
(self)
43,324
mo_dots.datas
copy
null
def copy(self): d = _get(self, SLOT) if _get(d, CLASS) is dict: return Data(**d) else: return copy(d)
(self)
43,325
mo_dots.datas
get
null
def get(self, key, default=Null): v = self[key] if _get(v, CLASS) == NullType: if default is Null: return NullType(self, key) return default return v
(self, key, default=Null)
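Unlike ``dict.get``, the fallback default is ``Null`` rather than ``None``, so a missing key still comes back as a chainable null object; a sketch:

from mo_dots import Data

d = Data(a=1)
assert d.get("a") == 1
assert d.get("missing", 0) == 0  # an explicit default works like dict.get
assert not d.get("missing")      # otherwise a falsey NullType is returned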
43,326
mo_dots.datas
items
null
def items(self): d = _get(self, SLOT) return [(k, to_data(v)) for k, v in d.items() if v != None or is_data(v)]
(self)
43,327
mo_dots.datas
iteritems
null
def iteritems(self): # LOW LEVEL ITERATION, NO WRAPPING d = _get(self, SLOT) return ((k, to_data(v)) for k, v in d.items())
(self)
43,328
mo_dots.datas
keys
null
def keys(self): d = _get(self, SLOT) return set(d.keys())
(self)
43,329
mo_dots.datas
leaves
LIKE items() BUT RECURSIVE, AND ONLY FOR THE LEAVES (non dict) VALUES
def leaves(self, prefix=None): """ LIKE items() BUT RECURSIVE, AND ONLY FOR THE LEAVES (non dict) VALUES """ return leaves(self, prefix)
(self, prefix=None)
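A sketch of ``leaves()``, assuming it yields (dotted-path, value) pairs for the non-dict leaf values as the docstring's analogy to ``items()`` suggests (ordering is an implementation detail, hence the dict comparison):

from mo_dots import Data

d = Data(a={"b": 1, "c": {"d": 2}})
assert dict(d.leaves()) == {"a.b": 1, "a.c.d": 2}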
43,330
mo_dots.datas
pop
null
def pop(self, key, default=Null): if is_null(key): return Null if key == ".": raise NotImplementedError() key = str(key) d = _get(self, SLOT) if key.find(".") >= 0: seq = _split_field(key) for n in seq[:-1]: if _get(d, CLASS) is NullType: d = NullType(d, n) # OH DEAR, Null TREATS n AS PATH, NOT LITERAL elif is_many(d): d = [_getdefault(dd, n) for dd in d] else: d = _getdefault(d, n) # EVERYTHING ELSE TREATS n AS LITERAL key = seq[-1] o = d.get(key) if is_null(o): if default is Null: return NullType(d, key) return default d[key] = None return to_data(o)
(self, key, default=Null)
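``pop`` accepts the same dotted paths as item access and nulls the slot instead of raising on missing keys; a sketch:

from mo_dots import Data

d = Data()
d["a.b"] = 1
assert d.pop("a.b") == 1      # returns the old value
assert not d["a.b"]           # the slot is now null
assert d.pop("gone", 0) == 0  # a missing key falls back to the default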
43,331
mo_dots.datas
setdefault
null
def setdefault(self, k, d=None): v = self[k] if is_null(v): self[k] = d return d return v
(self, k, d=None)
43,332
mo_dots.datas
values
null
def values(self): d = _get(self, SLOT) return listwrap(list(d.values()))
(self)
43,333
mo_dots.objects
DataClass
ALLOW INSTANCES OF class_ TO ACT LIKE dicts; ALLOW CONSTRUCTOR TO ACCEPT @override
class DataClass(object): """ ALLOW INSTANCES OF class_ TO ACT LIKE dicts; ALLOW CONSTRUCTOR TO ACCEPT @override """ def __init__(self, _class): register_type(_class) self.class_ = _class self.constructor = _class.__init__ def __call__(self, *args, **kwargs): settings = to_data(kwargs).settings params = get_function_arguments(self.constructor)[1:] func_defaults = get_function_defaults(self.constructor) if not func_defaults: defaults = {} else: defaults = {k: v for k, v in zip(reversed(params), reversed(func_defaults))} ordered_params = dict(zip(params, args)) output = self.class_(**params_pack(params, ordered_params, kwargs, settings, defaults)) return DataObject(output)
(_class)
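A sketch of the wrapper in use, with an illustrative class (``Point`` is not part of mo-dots). Per the code above, positional arguments are zipped against the constructor's parameter names, merged with keyword arguments, any ``settings`` bundle, and the declared defaults, and the constructed instance is wrapped in a ``DataObject``; the exact merge semantics of ``params_pack`` are assumed here:

from mo_dots.objects import DataClass

class Point:
    def __init__(self, x, y=0):
        self.x = x
        self.y = y

WrappedPoint = DataClass(Point)
p = WrappedPoint(1, y=2)  # builds Point(x=1, y=2), wrapped as a DataObject
assert p.x == 1 and p.y == 2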
43,334
mo_dots.objects
__call__
null
def __call__(self, *args, **kwargs): settings = to_data(kwargs).settings params = get_function_arguments(self.constructor)[1:] func_defaults = get_function_defaults(self.constructor) if not func_defaults: defaults = {} else: defaults = {k: v for k, v in zip(reversed(params), reversed(func_defaults))} ordered_params = dict(zip(params, args)) output = self.class_(**params_pack(params, ordered_params, kwargs, settings, defaults)) return DataObject(output)
(self, *args, **kwargs)
43,335
mo_dots.objects
__init__
null
def __init__(self, _class): register_type(_class) self.class_ = _class self.constructor = _class.__init__
(self, _class)