code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
if plot:
    pylab.clf()
    self.hist()
    self.plot_pdf(Nbest=Nbest, lw=lw)
    pylab.grid(True)

Nbest = min(Nbest, len(self.distributions))
try:
    names = self.df_errors.sort_values(by="sumsquare_error").index[0:Nbest]
except AttributeError:  # pandas < 0.17 has no sort_values()
    names = self.df_errors.sort("sumsquare_error").index[0:Nbest]
return self.df_errors.loc[names]
def summary(self, Nbest=5, lw=2, plot=True)
Plot the distribution of the data and the Nbest best-fitting distributions.
3.599942
3.617224
0.995222
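A minimal usage sketch for the method above, assuming the enclosing class is fitter.Fitter (the method reads self.df_errors and self.distributions, which Fitter.fit() populates); the gamma sample is illustrative only:

from scipy import stats
from fitter import Fitter

data = stats.gamma.rvs(2, loc=1.5, scale=2, size=10000)
f = Fitter(data, distributions=['gamma', 'rayleigh', 'uniform'])
f.fit()                    # fit every candidate distribution
best = f.summary(Nbest=3)  # plot the data and the 3 best fits, return their errors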
class InterruptableThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.result = default
        self.exc_info = (None, None, None)

    def run(self):
        try:
            self.result = func(*args, **kwargs)
        except Exception:
            self.exc_info = sys.exc_info()

    def suicide(self):
        raise RuntimeError('Stop has been called')

it = InterruptableThread()
it.start()
started_at = datetime.now()
it.join(self.timeout)
ended_at = datetime.now()
diff = ended_at - started_at

if it.exc_info[0] is not None:
    # If the thread raised, communicate the exception to the caller.
    a, b, c = it.exc_info
    raise Exception(a, b, c)

if it.is_alive():
    it.suicide()
    raise RuntimeError
else:
    return it.result
def _timed_run(self, func, distribution, args=(), kwargs={}, default=None)
Spawn a thread to run the given function with the args and kwargs, and return the given default value if the timeout is exceeded. See http://stackoverflow.com/questions/492519/timeout-on-a-python-function-call
2.723419
2.643816
1.030109
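The same timeout pattern, reduced to a self-contained sketch (run_with_timeout and slow_add are illustrative names, not part of the original class):

import sys
import threading

def run_with_timeout(func, timeout, *args, **kwargs):
    result = {}

    def target():
        try:
            result['value'] = func(*args, **kwargs)
        except Exception:
            result['exc_info'] = sys.exc_info()

    thread = threading.Thread(target=target)
    thread.start()
    thread.join(timeout)
    if 'exc_info' in result:
        raise result['exc_info'][1]  # re-raise the thread's exception
    if thread.is_alive():
        raise RuntimeError('timeout exceeded')
    return result.get('value')

def slow_add(a, b):
    return a + b

print(run_with_timeout(slow_add, 1.0, 2, 3))  # 5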
return np.sum(np.dot(G.A, G.A), axis=1) / (np.sum(G.A, axis=1) + 1.)
def compute_avg_adj_deg(G)
r""" Compute the average adjacency degree for each node. The average adjacency degree is the average of the degrees of a node and its neighbors. Parameters ---------- G: Graph Graph on which the statistic is extracted
4.452445
5.703854
0.780603
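A small self-contained check of the formula above, with a dense adjacency matrix standing in for G.A (illustrative only):

import numpy as np

# Path graph on 3 nodes: 0 - 1 - 2.
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
avg = np.sum(np.dot(A, A), axis=1) / (np.sum(A, axis=1) + 1.)
print(avg)  # approximately [1., 0.667, 1.]: 2-hop walk counts normalized by degree + 1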
tig = compute_tig(g, **kwargs)
return np.linalg.norm(tig, axis=1, ord=2)
def compute_norm_tig(g, **kwargs)
r""" Compute the :math:`\ell_2` norm of the Tig. See :func:`compute_tig`. Parameters ---------- g: Filter The filter or filter bank. kwargs: dict Additional parameters to be passed to the :func:`pygsp.filters.Filter.filter` method.
5.201986
5.579448
0.932348
if not atom:
    def atom(x):
        return np.exp(-M * (x / G.lmax)**2)

scale = np.linspace(0, G.lmax, M)
spectr = np.empty((G.N, M))

for shift_idx in range(M):
    shift_filter = filters.Filter(G, lambda x: atom(x - scale[shift_idx]))
    tig = compute_norm_tig(shift_filter, **kwargs).squeeze()**2
    spectr[:, shift_idx] = tig

G.spectr = spectr
return spectr
def compute_spectrogram(G, atom=None, M=100, **kwargs)
r""" Compute the norm of the Tig for all nodes with a kernel shifted along the spectral axis. Parameters ---------- G : Graph Graph on which to compute the spectrogram. atom : func Kernel to use in the spectrogram (default = exp(-M*(x/lmax)²)). M : int (optional) Number of samples on the spectral scale. (default = 100) kwargs: dict Additional parameters to be passed to the :func:`pygsp.filters.Filter.filter` method.
4.959955
3.410822
1.454182
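A plausible usage sketch, assuming compute_spectrogram is exposed as pygsp.features.compute_spectrogram (M=50 is an arbitrary choice):

from pygsp import graphs, features

G = graphs.Ring(15)
G.estimate_lmax()  # the kernel and its shifts depend on G.lmax
spectr = features.compute_spectrogram(G, M=50)
print(spectr.shape)  # (G.N, M) = (15, 50)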
x = np.asanyarray(x)
# Avoid copying the data, as np.array([g(x) for g in self._kernels]) would.
y = np.empty([self.Nf] + list(x.shape))
for i, kernel in enumerate(self._kernels):
    y[i] = kernel(x)
return y
def evaluate(self, x)
r"""Evaluate the kernels at given frequencies. Parameters ---------- x : array_like Graph frequencies at which to evaluate the filter. Returns ------- y : ndarray Frequency response of the filters. Shape ``(g.Nf, len(x))``. Examples -------- Frequency response of a low-pass filter: >>> import matplotlib.pyplot as plt >>> G = graphs.Logo() >>> G.compute_fourier_basis() >>> f = filters.Expwin(G) >>> G.compute_fourier_basis() >>> y = f.evaluate(G.e) >>> plt.plot(G.e, y[0]) # doctest: +ELLIPSIS [<matplotlib.lines.Line2D object at ...>]
5.852732
6.990191
0.837278
if s.ndim == 3 and s.shape[-1] != 1:
    raise ValueError('Last dimension (#features) should be '
                     '1, got {}.'.format(s.shape))
return self.filter(s, method, order)
def analyze(self, s, method='chebyshev', order=30)
r"""Convenience alias to :meth:`filter`.
6.03383
5.193557
1.161791
if s.shape[-1] != self.Nf:
    raise ValueError('Last dimension (#features) should be the number '
                     'of filters Nf = {}, got {}.'.format(self.Nf, s.shape))
return self.filter(s, method, order)
def synthesize(self, s, method='chebyshev', order=30)
r"""Convenience wrapper around :meth:`filter`. Will be an alias to `adjoint().filter()` in the future.
7.416657
6.423833
1.154553
s = np.zeros(self.G.N)
s[i] = 1
return np.sqrt(self.G.N) * self.filter(s, **kwargs)
def localize(self, i, **kwargs)
r"""Localize the kernels at a node (to visualize them). That is particularly useful to visualize a filter in the vertex domain. A kernel is localized on vertex :math:`v_i` by filtering a Kronecker delta :math:`\delta_i` as .. math:: (g(L) \delta_i)(j) = g(L)(i,j), \text{ where } \delta_i(j) = \begin{cases} 0 \text{ if } i \neq j, \\ 1 \text{ if } i = j. \end{cases} Parameters ---------- i : int Index of the node where to localize the kernel. kwargs: dict Parameters to be passed to the :meth:`analyze` method. Returns ------- s : ndarray Kernel localized at vertex i. Examples -------- Visualize heat diffusion on a grid by localizing the heat kernel. >>> import matplotlib >>> N = 20 >>> DELTA = N//2 * (N+1) >>> G = graphs.Grid2d(N) >>> G.estimate_lmax() >>> g = filters.Heat(G, 100) >>> s = g.localize(DELTA) >>> _ = G.plot(s, highlight=DELTA)
6.63866
8.991295
0.738343
if self.G.N > 2000:
    _logger.warning('Creating a big matrix. '
                    'You should prefer the filter method.')
# Filter one delta per vertex.
s = np.identity(self.G.N)
return self.filter(s, **kwargs).T.reshape(-1, self.G.N)
def compute_frame(self, **kwargs)
r"""Compute the associated frame. A filter bank defines a frame, which is a generalization of a basis to sets of vectors that may be linearly dependent. See `Wikipedia <https://en.wikipedia.org/wiki/Frame_(linear_algebra)>`_. The frame of a filter bank is the union of the frames of its constituent filters. The vectors forming the frame are the rows of the *analysis operator* .. math:: g(L) = \begin{pmatrix} g_1(L) \\ \vdots \\ g_F(L) \end{pmatrix} \in \mathbb{R}^{NF \times N}, \quad g_i(L) = U g_i(\Lambda) U^\top, where :math:`g_i` are the filter kernels, :math:`N` is the number of nodes, :math:`F` is the number of filters, :math:`L` is the graph Laplacian, :math:`\Lambda` is a diagonal matrix of the Laplacian's eigenvalues, and :math:`U` is the Fourier basis, i.e., its columns are the eigenvectors of the Laplacian. The matrix :math:`g(L)` represents the *analysis operator* of the frame. Its adjoint :math:`g(L)^\top` represents the *synthesis operator*. A signal :math:`x` is thus analyzed with the frame by :math:`y = g(L) x`, and synthesized from its frame coefficients by :math:`z = g(L)^\top y`. Computing this matrix is however a rather inefficient way of doing those operations. If :math:`F > 1`, the frame is said to be over-complete and the representation :math:`g(L) x` of the signal :math:`x` is said to be redundant. If the frame is tight, the *frame operator* :math:`g(L)^\top g(L)` is diagonal, with entries equal to the frame bound :math:`A = B`. Parameters ---------- kwargs: dict Parameters to be passed to the :meth:`analyze` method. Returns ------- frame : ndarray Array of size (#nodes x #filters) x #nodes. See Also -------- estimate_frame_bounds: estimate the frame bounds filter: more efficient way to filter signals Examples -------- >>> G = graphs.Sensor(100, seed=42) >>> G.compute_fourier_basis() Filtering as a multiplication with the matrix representation of the frame analysis operator: >>> g = filters.MexicanHat(G, Nf=6) >>> s = np.random.uniform(size=G.N) >>> >>> gL = g.compute_frame() >>> gL.shape (600, 100) >>> s1 = gL.dot(s) >>> s1 = s1.reshape(G.N, -1, order='F') >>> >>> s2 = g.filter(s) >>> np.all(np.abs(s1 - s2) < 1e-10) True The frame operator of a tight frame is the identity matrix times the frame bound: >>> g = filters.Itersine(G) >>> A, B = g.estimate_frame_bounds() >>> print('A={:.3f}, B={:.3f}'.format(A, B)) A=1.000, B=1.000 >>> gL = g.compute_frame(method='exact') >>> gL.shape (600, 100) >>> np.all(gL.T.dot(gL) - np.identity(G.N) < 1e-10) True
11.266474
12.114826
0.929974
def kernel(x, *args, **kwargs):
    y = self.evaluate(x)
    np.power(y, 2, out=y)
    y = np.sum(y, axis=0)
    if frame_bound is None:
        bound = y.max()
    elif y.max() > frame_bound:
        raise ValueError('The chosen bound is not feasible. '
                         'Choose at least {}.'.format(y.max()))
    else:
        bound = frame_bound
    return np.sqrt(bound - y)

return Filter(self.G, kernel)
def complement(self, frame_bound=None)
r"""Return the filter that makes the frame tight. The complementary filter is designed such that the union of a filter bank and its complementary filter forms a tight frame. Parameters ---------- frame_bound : float or None The desired frame bound :math:`A = B` of the resulting tight frame. The chosen bound should be larger than the sum of squared evaluations of all filters in the filter bank. If None (the default), the method chooses the smallest feasible bound. Returns ------- complement: Filter The complementary filter. See Also -------- estimate_frame_bounds: estimate the frame bounds Examples -------- >>> from matplotlib import pyplot as plt >>> G = graphs.Sensor(100, seed=42) >>> G.estimate_lmax() >>> g = filters.Abspline(G, 4) >>> A, B = g.estimate_frame_bounds() >>> print('A={:.3f}, B={:.3f}'.format(A, B)) A=0.200, B=1.971 >>> fig, axes = plt.subplots(1, 2) >>> fig, ax = g.plot(ax=axes[0]) >>> g += g.complement() >>> A, B = g.estimate_frame_bounds() >>> print('A={:.3f}, B={:.3f}'.format(A, B)) A=1.971, B=1.971 >>> fig, ax = g.plot(ax=axes[1])
5.028481
4.25148
1.18276
from pygsp.plotting import _plot_filter
return _plot_filter(self, n=n, eigenvalues=eigenvalues, sum=sum,
                    title=title, ax=ax, **kwargs)
def plot(self, n=500, eigenvalues=None, sum=None, title=None, ax=None, **kwargs)
r"""Docstring overloaded at import time.
3.658793
3.421882
1.069234
return super(Gabor, self).filter(s, method='exact')
def filter(self, s, method='exact', order=None)
TODO: indirection will be removed when poly filtering is merged.
13.624628
9.546903
1.427125
if self._D is None:
    self.logger.warning('The differential operator G.D is not '
                        'available, we need to compute it. Explicitly '
                        'call G.compute_differential_operator() '
                        'once beforehand to suppress the warning.')
    self.compute_differential_operator()
return self._D
def D(self)
r"""Differential operator (for gradient and divergence). Is computed by :func:`compute_differential_operator`.
7.39989
6.803161
1.087713
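A short sanity check of the property above, relying on the identity L = D Dᵀ for the combinatorial Laplacian (a sketch, not part of the original entry):

import numpy as np
from pygsp import graphs

graph = graphs.Path(4)
graph.compute_differential_operator()
L = graph.D.dot(graph.D.T)  # rebuild the Laplacian from the differential operator
print(np.allclose(L.toarray(), graph.L.toarray()))  # True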
x = self._check_signal(x)
return self.D.T.dot(x)
def grad(self, x)
r"""Compute the gradient of a signal defined on the vertices. The gradient :math:`y` of a signal :math:`x` is defined as .. math:: y = \nabla_\mathcal{G} x = D^\top x, where :math:`D` is the differential operator :attr:`D`. The value of the gradient on the edge :math:`e_k = (v_i, v_j)` from :math:`v_i` to :math:`v_j` with weight :math:`W[i, j]` is .. math:: y[k] = D[i, k] x[i] + D[j, k] x[j] = \sqrt{\frac{W[i, j]}{2}} (x[j] - x[i]) for the combinatorial Laplacian, and .. math:: y[k] = \sqrt{\frac{W[i, j]}{2}} \left( \frac{x[j]}{\sqrt{d[j]}} - \frac{x[i]}{\sqrt{d[i]}} \right) for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters ---------- x : array_like Signal of length :attr:`n_vertices` living on the vertices. Returns ------- y : ndarray Gradient signal of length :attr:`n_edges` living on the edges. See Also -------- compute_differential_operator div : compute the divergence of an edge signal dirichlet_energy : compute the norm of the gradient Examples -------- Non-directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.grad([0, 2, 4, 2]) array([ 2., 2., -2.]) Directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.grad([0, 2, 4, 2]) array([ 1.41421356, 1.41421356, -1.41421356]) Non-directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.grad([0, 2, 4, 2]) array([ 1.41421356, 1.41421356, -0.82842712]) Directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.grad([0, 2, 4, 2]) array([ 1.41421356, 1.41421356, -0.82842712])
13.311055
20.817278
0.639423
y = np.asanyarray(y)
if y.shape[0] != self.Ne:
    raise ValueError('First dimension must be the number of edges '
                     'G.Ne = {}, got {}.'.format(self.Ne, y.shape))
return self.D.dot(y)
def div(self, y)
r"""Compute the divergence of a signal defined on the edges. The divergence :math:`z` of a signal :math:`y` is defined as .. math:: z = \operatorname{div}_\mathcal{G} y = D y, where :math:`D` is the differential operator :attr:`D`. The value of the divergence on the vertex :math:`v_i` is .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2}} y[k] for the combinatorial Laplacian, and .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2 d[i]}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2 d[i]}} y[k] for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters ---------- y : array_like Signal of length :attr:`n_edges` living on the edges. Returns ------- z : ndarray Divergence signal of length :attr:`n_vertices` living on the vertices. See Also -------- compute_differential_operator grad : compute the gradient of a vertex signal Examples -------- Non-directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2., 4., -2., 0.]) Directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-1.41421356, 2.82842712, -1.41421356, 0. ]) Non-directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ]) Directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ])
6.571594
9.101349
0.722046
if n_eigenvectors is None:
    n_eigenvectors = self.n_vertices

if (self._U is not None and n_eigenvectors <= len(self._e)):
    return

assert self.L.shape == (self.n_vertices, self.n_vertices)
if self.n_vertices**2 * n_eigenvectors > 3000**3:
    self.logger.warning(
        'Computing the {0} eigendecomposition of a large matrix ({1} x'
        ' {1}) is expensive. Consider decreasing n_eigenvectors '
        'or, if using the Fourier basis to filter, using a '
        'polynomial filter instead.'.format(
            'full' if n_eigenvectors == self.N else 'partial', self.N))

# TODO: handle non-symmetric Laplacians. Test lap_type?
if n_eigenvectors == self.n_vertices:
    self._e, self._U = np.linalg.eigh(self.L.toarray())
else:
    # Fast partial eigendecomposition of Hermitian matrices.
    self._e, self._U = sparse.linalg.eigsh(self.L, n_eigenvectors,
                                           which='SM')
# Columns are eigenvectors, sorted in ascending eigenvalue order.

# The smallest eigenvalue should be zero: correct numerical errors.
# The eigensolver might sometimes return small negative values, which
# the filters' implementations may not anticipate. Better for plotting too.
assert -1e-5 < self._e[0] < 1e-5
self._e[0] = 0

# Bounded spectrum.
assert self._e[-1] <= self._get_upper_bound() + 1e-5
assert np.max(self._e) == self._e[-1]

if n_eigenvectors == self.N:
    self._lmax = self._e[-1]
    self._lmax_method = 'fourier'

self._coherence = np.max(np.abs(self._U))
def compute_fourier_basis(self, n_eigenvectors=None)
r"""Compute the (partial) Fourier basis of the graph (cached). The result is cached and accessible by the :attr:`U`, :attr:`e`, :attr:`lmax`, and :attr:`coherence` properties. Parameters ---------- n_eigenvectors : int or `None` Number of eigenvectors to compute. If `None`, all eigenvectors are computed. (default: None) Notes ----- 'G.compute_fourier_basis()' computes a full eigendecomposition of the graph Laplacian :math:`L` such that: .. math:: L = U \Lambda U^*, or a partial eigendecomposition of the graph Laplacian :math:`L` such that: .. math:: L \approx U \Lambda U^*, where :math:`\Lambda` is a diagonal matrix of eigenvalues and the columns of :math:`U` are the eigenvectors. *G.e* is a vector of length `n_eigenvectors` :math:`\le` *G.N* containing the Laplacian eigenvalues. The largest eigenvalue is stored in *G.lmax*. The eigenvectors are stored as column vectors of *G.U* in the same order that the eigenvalues. Finally, the coherence of the Fourier basis is found in *G.coherence*. References ---------- See :cite:`chung1997spectral`. Examples -------- >>> G = graphs.Torus() >>> G.compute_fourier_basis(n_eigenvectors=64) >>> G.U.shape (256, 64) >>> G.e.shape (64,) >>> G.compute_fourier_basis() >>> G.U.shape (256, 256) >>> G.e.shape (256,) >>> G.lmax == G.e[-1] True >>> G.coherence < 1 True
4.670615
4.479486
1.042668
s = self._check_signal(s)
U = np.conjugate(self.U)  # True Hermitian. (Although U is often real.)
return np.tensordot(U, s, ([0], [0]))
def gft(self, s)
r"""Compute the graph Fourier transform. The graph Fourier transform of a signal :math:`s` is defined as .. math:: \hat{s} = U^* s, where :math:`U` is the Fourier basis attr:`U` and :math:`U^*` denotes the conjugate transpose or Hermitian transpose of :math:`U`. Parameters ---------- s : array_like Graph signal in the vertex domain. Returns ------- s_hat : ndarray Representation of s in the Fourier domain. Examples -------- >>> G = graphs.Logo() >>> G.compute_fourier_basis() >>> s = np.random.normal(size=(G.N, 5, 1)) >>> s_hat = G.gft(s) >>> s_star = G.igft(s_hat) >>> np.all((s - s_star) < 1e-10) True
10.411597
13.932919
0.747266
s_hat = self._check_signal(s_hat)
return np.tensordot(self.U, s_hat, ([1], [0]))
def igft(self, s_hat)
r"""Compute the inverse graph Fourier transform. The inverse graph Fourier transform of a Fourier domain signal :math:`\hat{s}` is defined as .. math:: s = U \hat{s}, where :math:`U` is the Fourier basis :attr:`U`. Parameters ---------- s_hat : array_like Graph signal in the Fourier domain. Returns ------- s : ndarray Representation of s_hat in the vertex domain. Examples -------- >>> G = graphs.Logo() >>> G.compute_fourier_basis() >>> s_hat = np.random.normal(size=(G.N, 5, 1)) >>> s = G.igft(s_hat) >>> s_hat_star = G.gft(s) >>> np.all((s_hat - s_hat_star) < 1e-10) True
5.601054
8.731914
0.641446
G = f.G
i = kwargs.pop('i', 0)

if not N:
    N = m + 1

a_arange = [0, G.lmax]

a1 = (a_arange[1] - a_arange[0]) / 2
a2 = (a_arange[1] + a_arange[0]) / 2
c = np.zeros(m + 1)

tmpN = np.arange(N)
num = np.cos(np.pi * (tmpN + 0.5) / N)
for o in range(m + 1):
    c[o] = 2. / N * np.dot(f._kernels[i](a1 * num + a2),
                           np.cos(np.pi * o * (tmpN + 0.5) / N))

return c
def compute_cheby_coeff(f, m=30, N=None, *args, **kwargs)
r""" Compute Chebyshev coefficients for a Filterbank. Parameters ---------- f : Filter Filterbank with at least 1 filter m : int Maximum order of Chebyshev coeff to compute (default = 30) N : int Grid order used to compute quadrature (default = m + 1) i : int Index of the Filterbank element to compute (default = 0) Returns ------- c : ndarray Matrix of Chebyshev coefficients
3.776926
3.759774
1.004562
# Handle the case of a single filter rather than a list of filters.
if not isinstance(c, np.ndarray):
    c = np.array(c)

c = np.atleast_2d(c)
Nscales, M = c.shape

if M < 2:
    raise TypeError("The coefficients have an invalid shape")

# Also accept 1D signals.
try:
    Nv = np.shape(signal)[1]
    r = np.zeros((G.N * Nscales, Nv))
except IndexError:
    r = np.zeros((G.N * Nscales))

a_arange = [0, G.lmax]

a1 = float(a_arange[1] - a_arange[0]) / 2.
a2 = float(a_arange[1] + a_arange[0]) / 2.

twf_old = signal
twf_cur = (G.L.dot(signal) - a2 * signal) / a1

tmpN = np.arange(G.N, dtype=int)
for i in range(Nscales):
    r[tmpN + G.N*i] = 0.5 * c[i, 0] * twf_old + c[i, 1] * twf_cur

factor = 2/a1 * (G.L - a2 * sparse.eye(G.N))
for k in range(2, M):
    twf_new = factor.dot(twf_cur) - twf_old
    for i in range(Nscales):
        r[tmpN + G.N*i] += c[i, k] * twf_new
    twf_old = twf_cur
    twf_cur = twf_new

return r
def cheby_op(G, c, signal, **kwargs)
r""" Chebyshev polynomial of graph Laplacian applied to vector. Parameters ---------- G : Graph c : ndarray or list of ndarrays Chebyshev coefficients for a Filter or a Filterbank signal : ndarray Signal to filter Returns ------- r : ndarray Result of the filtering
3.87077
3.811658
1.015508
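How the two functions above combine in practice; a sketch assuming both are importable from pygsp.filters.approximations, with an arbitrary heat kernel and order:

import numpy as np
from pygsp import graphs, filters
from pygsp.filters import approximations

G = graphs.Sensor(100, seed=42)
G.estimate_lmax()
f = filters.Heat(G)  # one low-pass kernel

c = approximations.compute_cheby_coeff(f, m=30)  # coefficients, shape (31,)
s = np.random.default_rng(0).normal(size=G.N)    # random vertex signal
r = approximations.cheby_op(G, c, s)             # approximates f(L) s
print(r.shape)  # (100,)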
if not (isinstance(bounds, (list, np.ndarray)) and len(bounds) == 2):
    raise ValueError('Bounds of wrong shape.')

bounds = np.array(bounds)

m = int(kwargs.pop('order', 30) + 1)

try:
    Nv = np.shape(signal)[1]
    r = np.zeros((G.N, Nv))
except IndexError:
    r = np.zeros((G.N))

b1, b2 = np.arccos(2. * bounds / G.lmax - 1.)
factor = 4./G.lmax * G.L - 2.*sparse.eye(G.N)

T_old = signal
T_cur = factor.dot(signal) / 2.
r = (b1 - b2)/np.pi * signal + 2./np.pi * (np.sin(b1) - np.sin(b2)) * T_cur

for k in range(2, m):
    T_new = factor.dot(T_cur) - T_old
    r += 2./(k*np.pi) * (np.sin(k*b1) - np.sin(k*b2)) * T_new
    T_old = T_cur
    T_cur = T_new

return r
def cheby_rect(G, bounds, signal, **kwargs)
r""" Fast filtering using Chebyshev polynomial for a perfect rectangle filter. Parameters ---------- G : Graph bounds : array_like The bounds of the pass-band filter signal : array_like Signal to filter order : int (optional) Order of the Chebyshev polynomial (default: 30) Returns ------- r : array_like Result of the filtering
3.526621
3.520016
1.001877
# Parameter checks.
if delta_lambda[0] > filter_bounds[0] or delta_lambda[1] < filter_bounds[1]:
    _logger.error("Bounds of the filter are out of the lambda values")
    raise ValueError("Bounds of the filter are out of the lambda values")
elif delta_lambda[0] > delta_lambda[1]:
    _logger.error("lambda_min is greater than lambda_max")
    raise ValueError("lambda_min is greater than lambda_max")

# Scale and translate to the standard Chebyshev interval.
a1 = (delta_lambda[1] - delta_lambda[0]) / 2
a2 = (delta_lambda[1] + delta_lambda[0]) / 2

# Scale the bounds of the band pass according to lrange.
filter_bounds[0] = (filter_bounds[0] - a2) / a1
filter_bounds[1] = (filter_bounds[1] - a2) / a1

# First compute the Chebyshev coefficients.
ch = np.empty(m + 1, dtype=float)
ch[0] = (2 / np.pi) * (np.arccos(filter_bounds[0]) -
                       np.arccos(filter_bounds[1]))
for i in range(1, len(ch)):
    ch[i] = (2 / (np.pi * i)) * \
        (np.sin(i * np.arccos(filter_bounds[0])) -
         np.sin(i * np.arccos(filter_bounds[1])))

# Then compute the Jackson coefficients.
jch = np.empty(m + 1, dtype=float)
alpha = np.pi / (m + 2)
for i in range(len(jch)):
    jch[i] = (1 / np.sin(alpha)) * \
        ((1 - i / (m + 2)) * np.sin(alpha) * np.cos(i * alpha) +
         (1 / (m + 2)) * np.cos(alpha) * np.sin(i * alpha))

# Combine the Jackson and Chebyshev coefficients.
jch = ch * jch

return ch, jch
def compute_jackson_cheby_coeff(filter_bounds, delta_lambda, m)
r""" To compute the m+1 coefficients of the polynomial approximation of an ideal band-pass between a and b, between a range of values defined by lambda_min and lambda_max. Parameters ---------- filter_bounds : list [a, b] delta_lambda : list [lambda_min, lambda_max] m : int Returns ------- ch : ndarray jch : ndarray References ---------- :cite:`tremblay2016compressive`
2.823387
2.735137
1.032266
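A sketch of how these coefficients might feed cheby_op (the pass-band [1, 3] is arbitrary; note the function mutates filter_bounds in place, so pass a fresh list):

import numpy as np
from pygsp import graphs
from pygsp.filters import approximations

G = graphs.Sensor(100, seed=42)
G.estimate_lmax()
ch, jch = approximations.compute_jackson_cheby_coeff([1., 3.], [0., G.lmax], m=50)
s = np.random.default_rng(0).normal(size=G.N)
s_band = approximations.cheby_op(G, jch, s)  # Jackson-damped band-pass approximation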
G = f.G
Nf = len(f.g)

# Shape the output array according to the signal's dimension.
try:
    Nv = np.shape(s)[1]
    is2d = True
    c = np.zeros((G.N*Nf, Nv))
except IndexError:
    Nv = 1
    is2d = False
    c = np.zeros((G.N*Nf))

tmpN = np.arange(G.N, dtype=int)
for j in range(Nv):
    if is2d:
        V, H, _ = lanczos(G.L.toarray(), order, s[:, j])
    else:
        V, H, _ = lanczos(G.L.toarray(), order, s)

    Eh, Uh = np.linalg.eig(H)
    Eh[Eh < 0] = 0
    fe = f.evaluate(Eh)
    V = np.dot(V, Uh)

    for i in range(Nf):
        if is2d:
            c[tmpN + i*G.N, j] = np.dot(V, fe[:][i] * np.dot(V.T, s[:, j]))
        else:
            c[tmpN + i*G.N] = np.dot(V, fe[:][i] * np.dot(V.T, s))

return c
def lanczos_op(f, s, order=30)
r""" Perform the lanczos approximation of the signal s. Parameters ---------- f: Filter s : ndarray Signal to approximate. order : int Degree of the lanczos approximation. (default = 30) Returns ------- L : ndarray lanczos approximation of s
3.469782
3.558622
0.975035
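A usage sketch mirroring cheby_op (assuming lanczos_op is importable from pygsp.filters.approximations; the filter bank and order are arbitrary):

import numpy as np
from pygsp import graphs, filters
from pygsp.filters import approximations

G = graphs.Sensor(100, seed=42)
G.estimate_lmax()
f = filters.Heat(G)
s = np.random.default_rng(0).normal(size=G.N)
c = approximations.lanczos_op(f, s, order=30)  # Lanczos approximation of f(L) s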
try:
    N, M = np.shape(x)
except ValueError:
    N = np.shape(x)[0]
    M = 1
    x = x[:, np.newaxis]

# Normalization.
q = np.divide(x, np.kron(np.ones((N, 1)), np.linalg.norm(x, axis=0)))

# Initialization.
hiv = np.arange(0, order*M, order)
V = np.zeros((N, M*order))
V[:, hiv] = q

H = np.zeros((order + 1, M*order))
r = np.dot(A, q)
H[0, hiv] = np.sum(q*r, axis=0)
r -= np.kron(np.ones((N, 1)), H[0, hiv])*q
H[1, hiv] = np.linalg.norm(r, axis=0)

orth = np.zeros((order))
orth[0] = np.linalg.norm(np.dot(V.T, V) - M)

for k in range(1, order):
    if np.sum(np.abs(H[k, hiv + k - 1])) <= np.spacing(1):
        H = H[:k - 1, _sum_ind(np.arange(k), hiv)]
        V = V[:, _sum_ind(np.arange(k), hiv)]
        orth = orth[:k]
        return V, H, orth

    H[k - 1, hiv + k] = H[k, hiv + k - 1]
    v = q
    q = r/np.tile(H[k - 1, k + hiv], (N, 1))
    V[:, k + hiv] = q

    r = np.dot(A, q)
    r -= np.tile(H[k - 1, k + hiv], (N, 1))*v
    H[k, k + hiv] = np.sum(np.multiply(q, r), axis=0)
    r -= np.tile(H[k, k + hiv], (N, 1))*q

    # The next line has to be checked.
    r -= np.dot(V, np.dot(V.T, r))  # Full reorthogonalization.
    H[k + 1, k + hiv] = np.linalg.norm(r, axis=0)
    orth[k] = np.linalg.norm(np.dot(V.T, V) - M)

H = H[np.ix_(np.arange(order), np.arange(order))]
return V, H, orth
def lanczos(A, order, x)
r""" TODO short description Parameters ---------- A: ndarray Returns -------
2.592021
2.616961
0.99047
# Test the input parameters.
if isinstance(M, graphs.Graph):
    if not M.lap_type == 'combinatorial':
        raise NotImplementedError
    L = M.L
else:
    L = M

N = np.shape(L)[0]

if not 1./np.sqrt(N) <= epsilon < 1:
    raise ValueError('GRAPH_SPARSIFY: Epsilon out of required range')

# Not sparse.
resistance_distances = utils.resistance_distance(L).toarray()
# Get the weight matrix.
if isinstance(M, graphs.Graph):
    W = M.W
else:
    W = np.diag(L.diagonal()) - L.toarray()
    W[W < 1e-10] = 0

W = sparse.coo_matrix(W)
W.data[W.data < 1e-10] = 0
W = W.tocsc()
W.eliminate_zeros()

start_nodes, end_nodes, weights = sparse.find(sparse.tril(W))

# Calculate the new weights.
weights = np.maximum(0, weights)
Re = np.maximum(0, resistance_distances[start_nodes, end_nodes])
Pe = weights * Re
Pe = Pe / np.sum(Pe)

for i in range(maxiter):
    # Rudelson, 1996 Random Vectors in the Isotropic Position
    # (too hard to figure out actual C0).
    C0 = 1 / 30.
    # Rudelson and Vershynin, 2007, Thm. 3.1.
    C = 4 * C0
    q = round(N * np.log(N) * 9 * C**2 / (epsilon**2))

    results = stats.rv_discrete(values=(np.arange(np.shape(Pe)[0]),
                                        Pe)).rvs(size=int(q))
    # scipy.stats.itemfreq was removed from SciPy; np.unique with
    # return_counts=True yields the same (value, count) pairs.
    uniq, freq = np.unique(results, return_counts=True)
    spin_counts = np.stack([uniq, freq], axis=1).astype(int)
    per_spin_weights = weights / (q * Pe)

    counts = np.zeros(np.shape(weights)[0])
    counts[spin_counts[:, 0]] = spin_counts[:, 1]
    new_weights = counts * per_spin_weights

    sparserW = sparse.csc_matrix((new_weights, (start_nodes, end_nodes)),
                                 shape=(N, N))
    sparserW = sparserW + sparserW.T
    sparserL = sparse.diags(sparserW.diagonal(), 0) - sparserW

    if graphs.Graph(sparserW).is_connected():
        break
    elif i == maxiter - 1:
        logger.warning('Despite attempts to reduce epsilon, '
                       'sparsified graph is disconnected')
    else:
        epsilon -= (epsilon - 1/np.sqrt(N)) / 2.

if isinstance(M, graphs.Graph):
    sparserW = sparse.diags(sparserL.diagonal(), 0) - sparserL
    if not M.is_directed():
        sparserW = (sparserW + sparserW.T) / 2.
    Mnew = graphs.Graph(sparserW)
    # M.copy_graph_attributes(Mnew)
else:
    Mnew = sparse.lil_matrix(sparserL)

return Mnew
def graph_sparsify(M, epsilon, maxiter=10)
r"""Sparsify a graph (with Spielman-Srivastava). Parameters ---------- M : Graph or sparse matrix Graph structure or a Laplacian matrix epsilon : int Sparsification parameter Returns ------- Mnew : Graph or sparse matrix New graph structure or sparse matrix Notes ----- Epsilon should be between 1/sqrt(N) and 1 Examples -------- >>> from pygsp import reduction >>> G = graphs.Sensor(256, k=20, distributed=True) >>> epsilon = 0.4 >>> G2 = reduction.graph_sparsify(G, epsilon) References ---------- See :cite:`spielman2011graph`, :cite:`rudelson1999random` and :cite:`rudelson2007sampling`. for more informations
4.325468
4.140566
1.044656
L_reg = G.L + reg_eps * sparse.eye(G.N)
K_reg = getattr(G.mr, 'K_reg', kron_reduction(L_reg, keep_inds))
green_kernel = getattr(G.mr, 'green_kernel',
                       filters.Filter(G, lambda x: 1. / (reg_eps + x)))

alpha = K_reg.dot(f_subsampled)

try:
    Nv = np.shape(f_subsampled)[1]
    f_interpolated = np.zeros((G.N, Nv))
except IndexError:
    f_interpolated = np.zeros((G.N))

f_interpolated[keep_inds] = alpha

return _analysis(green_kernel, f_interpolated, order=order, **kwargs)
def interpolate(G, f_subsampled, keep_inds, order=100, reg_eps=0.005, **kwargs)
r"""Interpolate a graph signal. Parameters ---------- G : Graph f_subsampled : ndarray A graph signal on the graph G. keep_inds : ndarray List of indices on which the signal is sampled. order : int Degree of the Chebyshev approximation (default = 100). reg_eps : float The regularized graph Laplacian is $\bar{L}=L+\epsilon I$. A smaller epsilon may lead to better regularization, but will also require a higher order Chebyshev approximation. Returns ------- f_interpolated : ndarray Interpolated graph signal on the full vertex set of G. References ---------- See :cite:`pesenson2009variational`
4.717455
4.907137
0.961346
if isinstance(G, graphs.Graph):
    if G.lap_type != 'combinatorial':
        msg = 'Unknown reduction for {} Laplacian.'.format(G.lap_type)
        raise NotImplementedError(msg)
    if G.is_directed():
        msg = 'This method only works for undirected graphs.'
        raise NotImplementedError(msg)
    L = G.L
else:
    L = G

N = np.shape(L)[0]
ind_comp = np.setdiff1d(np.arange(N, dtype=int), ind)

L_red = L[np.ix_(ind, ind)]
L_in_out = L[np.ix_(ind, ind_comp)]
L_out_in = L[np.ix_(ind_comp, ind)].tocsc()
L_comp = L[np.ix_(ind_comp, ind_comp)].tocsc()

Lnew = L_red - L_in_out.dot(linalg.spsolve(L_comp, L_out_in))

# Make the Laplacian symmetric if it is almost symmetric.
if np.abs(Lnew - Lnew.T).sum() < np.spacing(1) * np.abs(Lnew).sum():
    Lnew = (Lnew + Lnew.T) / 2.

if isinstance(G, graphs.Graph):
    # Suppress the diagonal? This remains an open question.
    Wnew = sparse.diags(Lnew.diagonal(), 0) - Lnew
    Snew = Lnew.diagonal() - np.ravel(Wnew.sum(0))
    if np.linalg.norm(Snew, 2) >= np.spacing(1000):
        Wnew = Wnew + sparse.diags(Snew, 0)
    # Remove the diagonal for stability.
    Wnew = Wnew - Wnew.diagonal()

    coords = G.coords[ind, :] if len(G.coords.shape) else np.ndarray(None)
    Gnew = graphs.Graph(Wnew, coords=coords, lap_type=G.lap_type,
                        plotting=G.plotting)
else:
    Gnew = Lnew

return Gnew
def kron_reduction(G, ind)
r"""Compute the Kron reduction. This function perform the Kron reduction of the weight matrix in the graph *G*, with boundary nodes labeled by *ind*. This function will create a new graph with a weight matrix Wnew that contain only boundary nodes and is computed as the Schur complement of the original matrix with respect to the selected indices. Parameters ---------- G : Graph or sparse matrix Graph structure or weight matrix ind : list indices of the nodes to keep Returns ------- Gnew : Graph or sparse matrix New graph structure or weight matrix References ---------- See :cite:`dorfler2013kron`
3.360586
3.370421
0.997082
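A minimal sketch of the reduction (a path graph reduced to its endpoints; assumes the function lives in pygsp.reduction):

from pygsp import graphs, reduction

G = graphs.Path(5)  # undirected, combinatorial Laplacian by default
Gk = reduction.kron_reduction(G, [0, 4])
print(Gk.W.toarray())  # effective weight between the two endpoints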
if np.shape(f)[0] != Gs[0].N:
    raise ValueError("PYRAMID ANALYSIS: The signal to analyze should "
                     "have the same dimension as the first graph.")

levels = len(Gs) - 1

# Check that the type of the filters is right.
h_filters = kwargs.pop('h_filters', lambda x: 1. / (2*x+1))

if not isinstance(h_filters, list):
    if hasattr(h_filters, '__call__'):
        logger.warning('Converting filters into a list.')
        h_filters = [h_filters]
    else:
        logger.error('Filters must be a list of functions.')

if len(h_filters) == 1:
    h_filters = h_filters * levels
elif len(h_filters) != levels:
    message = 'The number of filters must be one or equal to {}.'.format(levels)
    raise ValueError(message)

ca = [f]
pe = []

for i in range(levels):
    # Low-pass the signal.
    s_low = _analysis(filters.Filter(Gs[i], h_filters[i]), ca[i], **kwargs)
    # Keep only the coefficients on the selected nodes.
    ca.append(s_low[Gs[i+1].mr['idx']])
    # Compute the prediction.
    s_pred = interpolate(Gs[i], ca[i+1], Gs[i+1].mr['idx'], **kwargs)
    # Compute the prediction error.
    pe.append(ca[i] - s_pred)

return ca, pe
def pyramid_analysis(Gs, f, **kwargs)
r"""Compute the graph pyramid transform coefficients. Parameters ---------- Gs : list of graphs A multiresolution sequence of graph structures. f : ndarray Graph signal to analyze. h_filters : list A list of filter that will be used for the analysis and sythesis operator. If only one filter is given, it will be used for all levels. Default is h(x) = 1 / (2x+1) Returns ------- ca : ndarray Coarse approximation at each level pe : ndarray Prediction error at each level h_filters : list Graph spectral filters applied References ---------- See :cite:`shuman2013framework` and :cite:`pesenson2009variational`.
4.447117
3.797167
1.171167
least_squares = bool(kwargs.pop('least_squares', False))
def_ul = Gs[0].N > 3000 or Gs[0]._e is None or Gs[0]._U is None
use_landweber = bool(kwargs.pop('use_landweber', def_ul))
reg_eps = float(kwargs.get('reg_eps', 0.005))

if least_squares and 'h_filters' not in kwargs:
    raise ValueError('h-filters not provided.')

levels = len(Gs) - 1
if len(pe) != levels:
    raise ValueError('Gs and pe have different shapes.')

ca = [cap]

# Reconstruct each level.
for i in range(levels):

    if not least_squares:
        s_pred = interpolate(Gs[levels - i - 1], ca[i],
                             Gs[levels - i].mr['idx'],
                             order=order, reg_eps=reg_eps, **kwargs)
        ca.append(s_pred + pe[levels - i - 1])

    else:
        ca.append(_pyramid_single_interpolation(
            Gs[levels - i - 1], ca[i], pe[levels - i - 1],
            h_filters[levels - i - 1],
            use_landweber=use_landweber, **kwargs))

ca.reverse()
reconstruction = ca[0]

return reconstruction, ca
def pyramid_synthesis(Gs, cap, pe, order=30, **kwargs)
r"""Synthesize a signal from its pyramid coefficients. Parameters ---------- Gs : Array of Graphs A multiresolution sequence of graph structures. cap : ndarray Coarsest approximation of the original signal. pe : ndarray Prediction error at each level. use_exact : bool To use exact graph spectral filtering instead of the Chebyshev approximation. order : int Degree of the Chebyshev approximation (default=30). least_squares : bool To use the least squares synthesis (default=False). h_filters : ndarray The filters used in the analysis operator. These are required for least squares synthesis, but not for the direct synthesis method. use_landweber : bool To use the Landweber iteration approximation in the least squares synthesis. reg_eps : float Interpolation parameter. landweber_its : int Number of iterations in the Landweber approximation for least squares synthesis. landweber_tau : float Parameter for the Landweber iteration. Returns ------- reconstruction : ndarray The reconstructed signal. ca : ndarray Coarse approximations at each level
4.956628
3.930243
1.261151
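A plausible end-to-end sketch of analysis followed by synthesis, assuming graph_multiresolution from pygsp.reduction builds the pyramid (as in the PyGSP demos); sizes and seeds are arbitrary:

import numpy as np
from pygsp import graphs, reduction

G = graphs.Sensor(512, seed=42)
G.compute_fourier_basis()
Gs = reduction.graph_multiresolution(G, levels=3)
for g in Gs:
    g.compute_fourier_basis()

f = np.random.default_rng(0).normal(size=G.N)
ca, pe = reduction.pyramid_analysis(Gs, f)
f_rec, _ = reduction.pyramid_synthesis(Gs, ca[-1], pe)  # approximate reconstruction of f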
if not hasattr(self, '_coefficients'):
    # Graph Fourier transform -> modulation -> inverse GFT.
    c = self.G.igft(self._kernels.evaluate(self.G.e).squeeze())
    c = np.sqrt(self.G.n_vertices) * self.G.U * c[:, np.newaxis]
    self._coefficients = self.G.gft(c)

shape = x.shape
x = x.flatten()
y = np.full((self.n_features_out, x.size), np.nan)
for i in range(len(x)):
    query = self._coefficients[x[i] == self.G.e]
    if len(query) != 0:
        y[:, i] = query[0]
return y.reshape((self.n_features_out,) + shape)
def evaluate(self, x)
TODO: will become _evaluate once polynomial filtering is merged.
5.251229
5.026245
1.044762
if self._modulation_first:
    return super(Modulation, self).filter(s, method='exact')
else:
    # The dot product with each modulated kernel is equivalent to the
    # GFT, as for the localization and the IGFT.
    y = np.empty((self.G.n_vertices, self.G.n_vertices))
    for i in range(self.G.n_vertices):
        x = s * self._kernels.localize(i)
        y[i] = np.sqrt(self.G.n_vertices) * self.G.gft(x)
    return y
def filter(self, s, method='exact', order=None)
TODO: indirection will be removed when poly filtering is merged. TODO: with _filter and shape handled in Filter.filter, synthesis will work.
6.761902
6.428928
1.051793
# Preserve the documentation of plot.
@functools.wraps(plot)
def inner(obj, **kwargs):

    # Create a figure and an axis if none were passed.
    if kwargs['ax'] is None:
        _, plt, _ = _import_plt()
        fig = plt.figure()
        global _plt_figures
        _plt_figures.append(fig)

        if (hasattr(obj, 'coords') and obj.coords.ndim == 2 and
                obj.coords.shape[1] == 3):
            kwargs['ax'] = fig.add_subplot(111, projection='3d')
        else:
            kwargs['ax'] = fig.add_subplot(111)

    title = kwargs.pop('title')

    plot(obj, **kwargs)

    kwargs['ax'].set_title(title)

    try:
        fig.show(warn=False)
    except NameError:
        # No figure created: an axis was passed.
        pass

    return kwargs['ax'].figure, kwargs['ax']

return inner
def _plt_handle_figure(plot)
r"""Handle the common work (creating an axis if not given, setting the title) of all matplotlib plot commands.
3.904582
3.91142
0.998252
# Windows can be closed by releasing all references to them so they can be
# garbage collected. It may not be necessary to call close().
global _qtg_windows
for window in _qtg_windows:
    window.close()
_qtg_windows = []

global _qtg_widgets
for widget in _qtg_widgets:
    widget.close()
_qtg_widgets = []

global _plt_figures
for fig in _plt_figures:
    _, plt, _ = _import_plt()
    plt.close(fig)
_plt_figures = []
def close_all()
r"""Close all opened windows.
4.932296
4.481461
1.1006
_, plt, _ = _import_plt()
plt.show(*args, **kwargs)
def show(*args, **kwargs)
r"""Show created figures, alias to ``plt.show()``. By default, showing plots does not block the prompt. Calling this function will block execution.
13.584561
10.459726
1.298749
_, plt, _ = _import_plt()
plt.close(*args, **kwargs)
def close(*args, **kwargs)
r"""Close last created figure, alias to ``plt.close()``.
14.962062
10.103118
1.480935
if eigenvalues is None:
    eigenvalues = (filters.G._e is not None)
if sum is None:
    sum = filters.n_filters > 1
if title is None:
    title = repr(filters)
return _plt_plot_filter(filters, n=n, eigenvalues=eigenvalues, sum=sum,
                        title=title, ax=ax, **kwargs)
def _plot_filter(filters, n, eigenvalues, sum, title, ax, **kwargs)
r"""Plot the spectral response of a filter bank. Parameters ---------- n : int Number of points where the filters are evaluated. eigenvalues : boolean Whether to show the eigenvalues of the graph Laplacian. The eigenvalues should have been computed with :meth:`~pygsp.graphs.Graph.compute_fourier_basis`. By default, the eigenvalues are shown if they are available. sum : boolean Whether to plot the sum of the squared magnitudes of the filters. Default True if there is multiple filters. title : str Title of the figure. ax : :class:`matplotlib.axes.Axes` Axes where to draw the graph. Optional, created if not passed. Only available with the matplotlib backend. kwargs : dict Additional parameters passed to the matplotlib plot function. Useful for example to change the linewidth, linestyle, or set a label. Only available with the matplotlib backend. Returns ------- fig : :class:`matplotlib.figure.Figure` The figure the plot belongs to. Only with the matplotlib backend. ax : :class:`matplotlib.axes.Axes` The axes the plot belongs to. Only with the matplotlib backend. Notes ----- This function is only implemented for the matplotlib backend at the moment. Examples -------- >>> import matplotlib >>> G = graphs.Logo() >>> mh = filters.MexicanHat(G) >>> fig, ax = mh.plot()
3.879429
4.419518
0.877795
from pygsp import features

qtg, _, _ = _import_qtg()

if not hasattr(G, 'spectr'):
    features.compute_spectrogram(G)

M = G.spectr.shape[1]
spectr = G.spectr[node_idx, :] if node_idx is not None else G.spectr
spectr = np.ravel(spectr)
min_spec, max_spec = spectr.min(), spectr.max()

pos = np.array([0., 0.25, 0.5, 0.75, 1.])
color = [[20, 133, 212, 255], [53, 42, 135, 255], [48, 174, 170, 255],
         [210, 184, 87, 255], [249, 251, 14, 255]]
color = np.array(color, dtype=np.ubyte)
cmap = qtg.ColorMap(pos, color)

spectr = (spectr.astype(float) - min_spec) / (max_spec - min_spec)

w = qtg.GraphicsWindow()
w.setWindowTitle("Spectrogram of {}".format(G.__repr__(limit=4)))

label = 'frequencies {}:{:.2f}:{:.2f}'.format(0, G.lmax/M, G.lmax)
v = w.addPlot(labels={'bottom': 'nodes', 'left': label})
v.setAspectLocked()

spi = qtg.ScatterPlotItem(np.repeat(np.arange(G.N), M),
                          np.ravel(np.tile(np.arange(M), (1, G.N))),
                          pxMode=False,
                          symbol='s',
                          size=1,
                          brush=cmap.map(spectr, 'qcolor'))
v.addItem(spi)

global _qtg_windows
_qtg_windows.append(w)
def _plot_spectrogram(G, node_idx)
r"""Plot the graph's spectrogram. Parameters ---------- node_idx : ndarray Order to sort the nodes in the spectrogram. By default, does not reorder the nodes. Notes ----- This function is only implemented for the pyqtgraph backend at the moment. Examples -------- >>> G = graphs.Ring(15) >>> G.plot_spectrogram()
3.60162
3.487439
1.032741
y[M == False] = 0
Y = _to_logits(y.astype(int))  # np.int is deprecated in recent NumPy; plain int is equivalent
return regression_tikhonov(G, Y, M, tau)
def classification_tikhonov(G, y, M, tau=0)
r"""Solve a classification problem on graph via Tikhonov minimization. The function first transforms :math:`y` in logits :math:`Y`, then solves .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X) if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X otherwise, where :math:`X` and :math:`Y` are logits. The function returns the maximum of the logits. Parameters ---------- G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- logits : array, length G.n_vertices The logits :math:`X`. Examples -------- >>> from pygsp import graphs, learning >>> import matplotlib.pyplot as plt >>> >>> G = graphs.Logo() Create a ground truth signal: >>> signal = np.zeros(G.n_vertices) >>> signal[G.info['idx_s']] = 1 >>> signal[G.info['idx_p']] = 2 Construct a measurement signal from a binary mask: >>> rs = np.random.RandomState(42) >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5 >>> measures = signal.copy() >>> measures[~mask] = np.nan Solve the classification problem by reconstructing the signal: >>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0) Plot the results. Note that we recover the class with ``np.argmax(recovery, axis=1)``. >>> prediction = np.argmax(recovery, axis=1) >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6)) >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth') >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements') >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class') >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0') >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1') >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2') >>> _ = fig.tight_layout()
7.748217
15.387325
0.503545
if tau > 0:
    y[M == False] = 0

    if sparse.issparse(G.L):

        def Op(x):
            return (M * x.T).T + tau * (G.L.dot(x))

        LinearOp = sparse.linalg.LinearOperator([G.N, G.N], Op)

        if y.ndim > 1:
            sol = np.empty(shape=y.shape)
            res = np.empty(shape=y.shape[1])
            for i in range(y.shape[1]):
                sol[:, i], res[i] = sparse.linalg.cg(LinearOp, y[:, i])
        else:
            sol, res = sparse.linalg.cg(LinearOp, y)

        # TODO: do something with the residual...
        return sol

    else:
        # Creating this matrix may be problematic in terms of memory.
        # Consider using an operator instead...
        if type(G.L).__module__ == np.__name__:
            LinearOp = np.diag(M*1) + tau * G.L
        return np.linalg.solve(LinearOp, M * y)

else:
    if np.prod(M.shape) != G.n_vertices:
        raise ValueError("M should be of size [G.n_vertices,]")

    indl = M
    indu = (M == False)

    Luu = G.L[indu, :][:, indu]
    Wul = - G.L[indu, :][:, indl]

    if sparse.issparse(G.L):
        sol_part = sparse.linalg.spsolve(Luu, Wul.dot(y[indl]))
    else:
        sol_part = np.linalg.solve(Luu, np.matmul(Wul, y[indl]))

    sol = y.copy()
    sol[indu] = sol_part

    return sol
def regression_tikhonov(G, y, M, tau=0)
r"""Solve a regression problem on graph via Tikhonov minimization. The function solves .. math:: \operatorname*{arg min}_x \| M x - y \|_2^2 + \tau \ x^T L x if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_x x^T L x \ \text{ s. t. } \ y = M x otherwise. Parameters ---------- G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- x : array, length G.n_vertices Recovered values :math:`x`. Examples -------- >>> from pygsp import graphs, filters, learning >>> import matplotlib.pyplot as plt >>> >>> G = graphs.Sensor(N=100, seed=42) >>> G.estimate_lmax() Create a smooth ground truth signal: >>> filt = lambda x: 1 / (1 + 10*x) >>> filt = filters.Filter(G, filt) >>> rs = np.random.RandomState(42) >>> signal = filt.analyze(rs.normal(size=G.n_vertices)) Construct a measurement signal from a binary mask: >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5 >>> measures = signal.copy() >>> measures[~mask] = np.nan Solve the regression problem by reconstructing the signal: >>> recovery = learning.regression_tikhonov(G, measures, mask, tau=0) Plot the results: >>> fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(10, 3)) >>> limits = [signal.min(), signal.max()] >>> _ = G.plot_signal(signal, ax=ax1, limits=limits, title='Ground truth') >>> _ = G.plot_signal(measures, ax=ax2, limits=limits, title='Measures') >>> _ = G.plot_signal(recovery, ax=ax3, limits=limits, title='Recovery') >>> _ = fig.tight_layout()
3.567299
3.655585
0.975849
signal = self._check_signal(signal)
self.signals[name] = signal
def set_signal(self, signal, name)
r"""Attach a signal to the graph. Attached signals can be accessed (and modified or deleted) through the :attr:`signals` dictionary. Parameters ---------- signal : array_like A sequence that assigns a value to each vertex. The value of the signal at vertex `i` is ``signal[i]``. name : String Name of the signal used as a key in the :attr:`signals` dictionary. Examples -------- >>> graph = graphs.Sensor(10) >>> signal = np.arange(graph.n_vertices) >>> graph.set_signal(signal, 'mysignal') >>> graph.signals {'mysignal': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}
6.943024
15.965492
0.434877
adjacency = self.W[vertices, :][:, vertices]
try:
    coords = self.coords[vertices]
except AttributeError:
    coords = None
graph = Graph(adjacency, self.lap_type, coords, self.plotting)
for name, signal in self.signals.items():
    graph.set_signal(signal[vertices], name)
return graph
def subgraph(self, vertices)
r"""Create a subgraph from a list of vertices. Parameters ---------- vertices : list Vertices to keep. Either a list of indices or an indicator function. Returns ------- subgraph : :class:`Graph` Subgraph. Examples -------- >>> graph = graphs.Graph([ ... [0., 3., 0., 0.], ... [3., 0., 4., 0.], ... [0., 4., 0., 2.], ... [0., 0., 2., 0.], ... ]) >>> graph = graph.subgraph([0, 2, 1]) >>> graph.W.toarray() array([[0., 0., 3.], [0., 0., 4.], [3., 4., 0.]])
5.753093
6.388507
0.900538
if self._connected is not None:
    return self._connected

adjacencies = [self.W]
if self.is_directed():
    adjacencies.append(self.W.T)

for adjacency in adjacencies:
    visited = np.zeros(self.n_vertices, dtype=bool)  # np.bool is deprecated
    stack = set([0])

    while stack:
        vertex = stack.pop()
        if visited[vertex]:
            continue
        visited[vertex] = True
        neighbors = adjacency[vertex].nonzero()[1]
        stack.update(neighbors)

    if not np.all(visited):
        self._connected = False
        return self._connected

self._connected = True
return self._connected
def is_connected(self)
r"""Check if the graph is connected (cached). A graph is connected if and only if there exists a (directed) path between any two vertices. Returns ------- connected : bool True if the graph is connected, False otherwise. Notes ----- For undirected graphs, starting at a vertex and trying to visit all the others is enough. For directed graphs, one needs to check that a vertex can both be visited by all the others and visit all the others. Examples -------- Connected graph: >>> graph = graphs.Graph([ ... [0, 3, 0, 0], ... [3, 0, 4, 0], ... [0, 4, 0, 2], ... [0, 0, 2, 0], ... ]) >>> graph.is_connected() True Disconnected graph: >>> graph = graphs.Graph([ ... [0, 3, 0, 0], ... [3, 0, 4, 0], ... [0, 0, 0, 2], ... [0, 0, 2, 0], ... ]) >>> graph.is_connected() False
2.485103
2.674634
0.929138
if self._directed is None:
    self._directed = (self.W != self.W.T).nnz != 0
return self._directed
def is_directed(self)
r"""Check if the graph has directed edges (cached). In this framework, we consider that a graph is directed if and only if its weight matrix is not symmetric. Returns ------- directed : bool True if the graph is directed, False otherwise. Examples -------- Directed graph: >>> graph = graphs.Graph([ ... [0, 3, 0], ... [3, 0, 4], ... [0, 0, 0], ... ]) >>> graph.is_directed() True Undirected graph: >>> graph = graphs.Graph([ ... [0, 3, 0], ... [3, 0, 4], ... [0, 4, 0], ... ]) >>> graph.is_directed() False
5.715477
6.441412
0.887302
if self.A.shape[0] != self.A.shape[1]:
    self.logger.error('Inconsistent shape to extract components. '
                      'Square matrix required.')
    return None

if self.is_directed():
    raise NotImplementedError('Directed graphs not supported yet.')

graphs = []

visited = np.zeros(self.A.shape[0], dtype=bool)

while not visited.all():
    # Pick a node not visited yet.
    stack = set(np.nonzero(~visited)[0][[0]])
    comp = []

    while len(stack):
        v = stack.pop()
        if not visited[v]:
            comp.append(v)
            visited[v] = True

            # Add indices of nodes not visited yet and accessible from v.
            stack.update(set([idx for idx in self.A[v, :].nonzero()[1]
                              if not visited[idx]]))

    comp = sorted(comp)
    self.logger.info(('Constructing subgraph for component of '
                      'size {}.').format(len(comp)))
    G = self.subgraph(comp)
    G.info = {'orig_idx': comp}
    graphs.append(G)

return graphs
def extract_components(self)
r"""Split the graph into connected components. See :func:`is_connected` for the method used to determine connectedness. Returns ------- graphs : list A list of graph structures. Each having its own node list and weight matrix. If the graph is directed, add into the info parameter the information about the source nodes and the sink nodes. Examples -------- >>> from scipy import sparse >>> W = sparse.rand(10, 10, 0.2) >>> W = utils.symmetrize(W) >>> G = graphs.Graph(W) >>> components = G.extract_components() >>> has_sinks = 'sink' in components[0].info >>> sinks_0 = components[0].info['sink'] if has_sinks else []
4.315772
4.375573
0.986333
s = np.asanyarray(s)
if s.shape[0] != self.n_vertices:
    raise ValueError('First dimension must be the number of vertices '
                     'G.N = {}, got {}.'.format(self.N, s.shape))
return s
def _check_signal(self, s)
r"""Check if signal is valid.
6.040164
6.024555
1.002591
x = self._check_signal(x)
return x.T.dot(self.L.dot(x))
def dirichlet_energy(self, x)
r"""Compute the Dirichlet energy of a signal defined on the vertices. The Dirichlet energy of a signal :math:`x` is defined as .. math:: x^\top L x = \| \nabla_\mathcal{G} x \|_2^2 = \frac12 \sum_{i,j} W[i, j] (x[j] - x[i])^2 for the combinatorial Laplacian, and .. math:: x^\top L x = \| \nabla_\mathcal{G} x \|_2^2 = \frac12 \sum_{i,j} W[i, j] \left( \frac{x[j]}{d[j]} - \frac{x[i]}{d[i]} \right)^2 for the normalized Laplacian, where :math:`d` is the weighted degree :attr:`dw`, :math:`\nabla_\mathcal{G} x = D^\top x` and :math:`D` is the differential operator :attr:`D`. See :meth:`grad` for the definition of the gradient :math:`\nabla_\mathcal{G}`. Parameters ---------- x : array_like Signal of length :attr:`n_vertices` living on the vertices. Returns ------- energy : float The Dirichlet energy of the graph signal. See Also -------- grad : compute the gradient of a vertex signal Examples -------- Non-directed graph: >>> graph = graphs.Path(5, directed=False) >>> signal = [0, 2, 2, 4, 4] >>> graph.dirichlet_energy(signal) 8.0 >>> # The Dirichlet energy is indeed the squared norm of the gradient. >>> graph.compute_differential_operator() >>> graph.grad(signal) array([2., 0., 2., 0.]) Directed graph: >>> graph = graphs.Path(5, directed=True) >>> signal = [0, 2, 2, 4, 4] >>> graph.dirichlet_energy(signal) 4.0 >>> # The Dirichlet energy is indeed the squared norm of the gradient. >>> graph.compute_differential_operator() >>> graph.grad(signal) array([1.41421356, 0. , 1.41421356, 0. ])
10.045087
14.230047
0.705907
if self._A is None:
    self._A = self.W > 0
return self._A
def A(self)
r"""Graph adjacency matrix (the binary version of W). The adjacency matrix defines which edges exist on the graph. It is represented as an N-by-N matrix of booleans. :math:`A_{i,j}` is True if :math:`W_{i,j} > 0`.
8.976963
7.824647
1.147267
if self._d is None:
    if not self.is_directed():
        # Shortcut for undirected graphs.
        # axis=1 is faster for CSR (https://stackoverflow.com/a/16391764).
        self._d = self.W.getnnz(axis=1)
    else:
        degree_in = self.W.getnnz(axis=0)
        degree_out = self.W.getnnz(axis=1)
        self._d = (degree_in + degree_out) / 2
return self._d
def d(self)
r"""The degree (number of neighbors) of vertices. For undirected graphs, the degree of a vertex is the number of vertices it is connected to. For directed graphs, the degree is the average of the in and out degrees, where the in degree is the number of incoming edges, and the out degree the number of outgoing edges. In both cases, the degree of the vertex :math:`v_i` is the average between the number of non-zero values in the :math:`i`-th column (the in degree) and the :math:`i`-th row (the out degree) of the weighted adjacency matrix :attr:`W`. Examples -------- Undirected graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [1, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [1 2 1] >>> print(graph.dw) # Weighted degree. [1 3 2] Directed graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [0, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [0.5 1.5 1. ] >>> print(graph.dw) # Weighted degree. [0.5 2.5 2. ]
4.153697
3.877913
1.071117
if self._dw is None:
    if not self.is_directed():
        # Shortcut for undirected graphs.
        self._dw = np.ravel(self.W.sum(axis=0))
    else:
        degree_in = np.ravel(self.W.sum(axis=0))
        degree_out = np.ravel(self.W.sum(axis=1))
        self._dw = (degree_in + degree_out) / 2
return self._dw
def dw(self)
r"""The weighted degree of vertices. For undirected graphs, the weighted degree of the vertex :math:`v_i` is defined as .. math:: d[i] = \sum_j W[j, i] = \sum_j W[i, j], where :math:`W` is the weighted adjacency matrix :attr:`W`. For directed graphs, the weighted degree of the vertex :math:`v_i` is defined as .. math:: d[i] = \frac12 (d^\text{in}[i] + d^\text{out}[i]) = \frac12 (\sum_j W[j, i] + \sum_j W[i, j]), i.e., as the average of the in and out degrees. Examples -------- Undirected graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [1, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [1 2 1] >>> print(graph.dw) # Weighted degree. [1 3 2] Directed graph: >>> graph = graphs.Graph([ ... [0, 1, 0], ... [0, 0, 2], ... [0, 2, 0], ... ]) >>> print(graph.d) # Number of neighbors. [0.5 1.5 1. ] >>> print(graph.dw) # Weighted degree. [0.5 2.5 2. ]
2.810119
2.710153
1.036886
if self._lmax is None:
    self.logger.warning('The largest eigenvalue G.lmax is not '
                        'available, we need to estimate it. '
                        'Explicitly call G.estimate_lmax() or '
                        'G.compute_fourier_basis() '
                        'once beforehand to suppress the warning.')
    self.estimate_lmax()
return self._lmax
def lmax(self)
r"""Largest eigenvalue of the graph Laplacian. Can be exactly computed by :func:`compute_fourier_basis` or approximated by :func:`estimate_lmax`.
7.479468
5.717588
1.308151
if method == self._lmax_method:
    return
self._lmax_method = method

if method == 'lanczos':
    try:
        # We need to cast the matrix L to a supported type.
        # TODO: not good for memory. Cast earlier?
        lmax = sparse.linalg.eigsh(self.L.asfptype(), k=1, tol=5e-3,
                                   ncv=min(self.N, 10),
                                   return_eigenvectors=False)
        lmax = lmax[0]
        assert lmax <= self._get_upper_bound() + 1e-12
        lmax *= 1.01  # Increase by 1% to be robust to errors.
        self._lmax = lmax
    except sparse.linalg.ArpackNoConvergence:
        raise ValueError('The Lanczos method did not converge. '
                         'Try to use bounds.')
elif method == 'bounds':
    self._lmax = self._get_upper_bound()
else:
    raise ValueError('Unknown method {}'.format(method))
def estimate_lmax(self, method='lanczos')
r"""Estimate the Laplacian's largest eigenvalue (cached). The result is cached and accessible by the :attr:`lmax` property. Exact value given by the eigendecomposition of the Laplacian, see :func:`compute_fourier_basis`. That estimation is much faster than the eigendecomposition. Parameters ---------- method : {'lanczos', 'bounds'} Whether to estimate the largest eigenvalue with the implicitly restarted Lanczos method, or to return an upper bound on the spectrum of the Laplacian. Notes ----- Runs the implicitly restarted Lanczos method (as implemented in :func:`scipy.sparse.linalg.eigsh`) with a large tolerance, then increases the calculated largest eigenvalue by 1 percent. For much of the PyGSP machinery, we need to approximate filter kernels on an interval that contains the spectrum of L. The only cost of using a larger interval is that the polynomial approximation over the larger interval may be a slightly worse approximation on the actual spectrum. As this is a very mild effect, it is not necessary to obtain very tight bounds on the spectrum of L. A faster but less tight alternative is to use known algebraic bounds on the graph Laplacian. Examples -------- >>> G = graphs.Logo() >>> G.compute_fourier_basis() # True value. >>> print('{:.2f}'.format(G.lmax)) 13.78 >>> G.estimate_lmax(method='lanczos') # Estimate. >>> print('{:.2f}'.format(G.lmax)) 13.92 >>> G.estimate_lmax(method='bounds') # Upper bound. >>> print('{:.2f}'.format(G.lmax)) 18.58
4.169281
3.896341
1.07005
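A self-contained sketch of the estimation strategy described above: run Lanczos with a loose tolerance on a small Laplacian, then inflate by 1%. Only SciPy is assumed; the matrix is the combinatorial Laplacian of a 4-vertex path.

import numpy as np
from scipy import sparse
from scipy.sparse import linalg

# Combinatorial Laplacian of a path graph on 4 vertices.
L = sparse.csr_matrix([[ 1, -1,  0,  0],
                       [-1,  2, -1,  0],
                       [ 0, -1,  2, -1],
                       [ 0,  0, -1,  1]], dtype=float)

exact = np.linalg.eigvalsh(L.toarray())[-1]
estimate = linalg.eigsh(L, k=1, tol=5e-3, ncv=4,
                        return_eigenvectors=False)[0]
# The 1% margin makes the loose estimate an upper bound in practice.
print(exact, 1.01 * estimate)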
if self.lap_type == 'normalized':
    return 2  # Equal iff the graph is bipartite.
elif self.lap_type == 'combinatorial':
    bounds = []
    # Equal for full graphs.
    bounds += [self.n_vertices * np.max(self.W)]
    # Gershgorin circle theorem. Equal for regular bipartite graphs.
    # Special case of the below bound.
    bounds += [2 * np.max(self.dw)]
    # Anderson, Morley, Eigenvalues of the Laplacian of a graph.
    # Equal for regular bipartite graphs.
    if self.n_edges > 0:
        sources, targets, _ = self.get_edge_list()
        bounds += [np.max(self.dw[sources] + self.dw[targets])]
    # Merris, A note on Laplacian graph eigenvalues.
    if not self.is_directed():
        W = self.W
    else:
        W = utils.symmetrize(self.W, method='average')
    m = W.dot(self.dw) / self.dw  # Mean degree of adjacent vertices.
    bounds += [np.max(self.dw + m)]
    # Good review: On upper bounds for Laplacian graph eigenvalues.
    return min(bounds)
else:
    raise ValueError('Unknown Laplacian type '
                     '{}'.format(self.lap_type))
def _get_upper_bound(self)
r"""Return an upper bound on the eigenvalues of the Laplacian.
6.374532
5.743394
1.109889
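Two of the bounds above, computed by hand on the weighted path graph from the degree examples; a minimal sketch assuming only NumPy, where the Anderson-Morley bound happens to be the tighter one.

import numpy as np

# Weighted degrees and edges of the undirected path graph above.
dw = np.array([1., 3., 2.])
edges = [(0, 1), (1, 2)]

gershgorin = 2 * dw.max()                               # 6.0
anderson_morley = max(dw[i] + dw[j] for i, j in edges)  # 5.0, tighter here
print(min(gershgorin, anderson_morley))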
if self.is_directed():
    W = self.W.tocoo()
else:
    W = sparse.triu(self.W, format='coo')

sources = W.row
targets = W.col
weights = W.data

assert self.n_edges == sources.size == targets.size == weights.size
return sources, targets, weights
def get_edge_list(self)
r"""Return an edge list, an alternative representation of the graph. Each edge :math:`e_k = (v_i, v_j) \in \mathcal{E}` from :math:`v_i` to :math:`v_j` is associated with the weight :math:`W[i, j]`. For each edge :math:`e_k`, the method returns :math:`(i, j, W[i, j])` as `(sources[k], targets[k], weights[k])`, with :math:`i \in [0, |\mathcal{V}|-1], j \in [0, |\mathcal{V}|-1], k \in [0, |\mathcal{E}|-1]`. Returns ------- sources : vector of int Source node indices. targets : vector of int Target node indices. weights : vector of float Edge weights. Notes ----- The weighted adjacency matrix is the canonical form used in this package to represent a graph as it is the easiest to work with when considering spectral methods. Edge orientation (i.e., which node is the source or the target) is arbitrary for undirected graphs. The implementation uses the upper triangular part of the adjacency matrix, hence :math:`i \leq j \ \forall k`. Examples -------- Edge list of a directed graph. >>> graph = graphs.Graph([ ... [0, 3, 0], ... [3, 0, 4], ... [0, 0, 0], ... ]) >>> sources, targets, weights = graph.get_edge_list() >>> list(sources), list(targets), list(weights) ([0, 1, 1], [1, 0, 2], [3, 3, 4]) Edge list of an undirected graph. >>> graph = graphs.Graph([ ... [0, 3, 0], ... [3, 0, 4], ... [0, 4, 0], ... ]) >>> sources, targets, weights = graph.get_edge_list() >>> list(sources), list(targets), list(weights) ([0, 1], [1, 2], [3, 4])
3.826515
4.143363
0.923529
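The inverse operation is often useful: rebuilding the adjacency matrix from an edge list. A minimal sketch assuming only SciPy, using the upper-triangular edge list from the undirected example above.

import numpy as np
from scipy import sparse

sources = np.array([0, 1])
targets = np.array([1, 2])
weights = np.array([3., 4.])
n = 3

# Rebuild the symmetric adjacency from the upper-triangular edge list.
W = sparse.coo_matrix((weights, (sources, targets)), shape=(n, n))
W = W + W.T
print(W.toarray())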
from pygsp.plotting import _plot_graph
return _plot_graph(self, vertex_color=vertex_color,
                   vertex_size=vertex_size, highlight=highlight,
                   edges=edges, indices=indices, colorbar=colorbar,
                   edge_color=edge_color, edge_width=edge_width,
                   limits=limits, ax=ax, title=title, backend=backend)
def plot(self, vertex_color=None, vertex_size=None, highlight=[], edges=None, edge_color=None, edge_width=None, indices=False, colorbar=True, limits=None, ax=None, title=None, backend=None)
r"""Docstring overloaded at import time.
2.136193
2.111421
1.011732
from pygsp.plotting import _plot_spectrogram
_plot_spectrogram(self, node_idx=node_idx)
def plot_spectrogram(self, node_idx=None)
r"""Docstring overloaded at import time.
4.842373
5.253648
0.921716
if A is None:
    def A(x):
        return x
if At is None:
    def At(x):
        return x

tight = 0
l1_nu = 2 * G.lmax * nu

if use_matrix:
    def l1_a(x):
        return G.Diff * A(x)

    # The original referenced an undefined name ``D``; the adjoint of the
    # difference operator is presumably what was intended here.
    def l1_at(x):
        return At(G.Diff.T * x)
else:
    def l1_a(x):
        return G.grad(A(x))

    def l1_at(x):
        return G.div(x)

functions, _ = _import_pyunlocbox()
# Return the result; the original dropped it, contradicting the docstring.
return functions.norm_l1(x, gamma, A=l1_a, At=l1_at, tight=tight,
                         maxit=maxit, verbose=verbose, tol=tol)
def prox_tv(x, gamma, G, A=None, At=None, nu=1, tol=10e-4, maxit=200, use_matrix=True)
r""" Total Variation proximal operator for graphs. This function computes the TV proximal operator for graphs. The TV norm is the one norm of the gradient. The gradient is defined in the function :meth:`pygsp.graphs.Graph.grad`. This function requires the PyUNLocBoX to be executed. This function solves: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \|x\|_{TV}` Parameters ---------- x: int Input signal gamma: ndarray Regularization parameter G: graph object Graphs structure A: lambda function Forward operator, this parameter allows to solve the following problem: :math:`sol = \min_{z} \frac{1}{2} \|x - z\|_2^2 + \gamma \| A x\|_{TV}` (default = Id) At: lambda function Adjoint operator. (default = Id) nu: float Bound on the norm of the operator (default = 1) tol: float Stops criterion for the loop. The algorithm will stop if : :math:`\frac{n(t) - n(t - 1)} {n(t)} < tol` where :math:`n(t) = f(x) + 0.5 \|x-y\|_2^2` is the objective function at iteration :math:`t` (default = :math:`10e-4`) maxit: int Maximum iteration. (default = 200) use_matrix: bool If a matrix should be used. (default = True) Returns ------- sol: solution Examples --------
4.414818
4.060869
1.087161
warn = False
msg = 'The given matrix'

# Check symmetry.
if np.abs(self.A - self.A.T).sum() > 0:
    warn = True
    msg = '{} is not symmetric,'.format(msg)

# Check for parallel edges.
if self.A.max(axis=None) > 1:
    warn = True
    msg = '{} has parallel edges,'.format(msg)

# Check that the graph is d-regular.
if np.min(self.d) != np.max(self.d):
    warn = True
    msg = '{} is not d-regular,'.format(msg)

# Check that the graph has no self-loops.
if self.A.diagonal().any():
    warn = True
    msg = '{} has self loops,'.format(msg)

if warn:
    self.logger.warning('{}.'.format(msg[:-1]))
def is_regular(self)
r""" Troubleshoot a given regular graph.
4.044256
3.682601
1.098206
for name in list(self.signals.keys()):
    if self.signals[name].ndim == 2:
        for i, signal_1d in enumerate(self.signals[name].T):
            self.signals[name + '_' + str(i)] = signal_1d
        del self.signals[name]
def _break_signals(self)
r"""Break N-dimensional signals into N 1D signals.
3.581225
2.871929
1.246975
joined = dict()
for name in self.signals:
    name_base = name.rsplit('_', 1)[0]
    names = joined.get(name_base, list())
    names.append(name)
    joined[name_base] = names
for name_base, names in joined.items():
    if len(names) > 1:
        names = sorted(names)  # ensure dim ordering (_0, _1, etc.)
        signal_nd = np.stack([self.signals[n] for n in names], axis=1)
        self.signals[name_base] = signal_nd
        for name in names:
            del self.signals[name]
def _join_signals(self)
r"""Join N 1D signals into one N-dimensional signal.
3.367244
3.19688
1.053291
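A round-trip sketch of the break/join pair above on a plain dict of NumPy arrays, assuming nothing beyond NumPy: a 2D signal is split into suffixed 1D columns and then stacked back.

import numpy as np

signals = {'coords': np.arange(8).reshape(4, 2)}

# Break: one 2D signal becomes 'coords_0' and 'coords_1'.
for name in list(signals):
    if signals[name].ndim == 2:
        for i, col in enumerate(signals[name].T):
            signals['{}_{}'.format(name, i)] = col
        del signals[name]

# Join: stack the suffixed 1D signals back into one 2D signal.
joined = np.stack([signals['coords_0'], signals['coords_1']], axis=1)
print(joined.shape)  # (4, 2)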
nx = _import_networkx()

def convert(number):
    # NetworkX accepts arbitrary python objects as attributes, but:
    # * the GEXF writer does not accept any NumPy types (on signals),
    # * the GraphML writer does not accept NumPy ints.
    if issubclass(number.dtype.type, (np.integer, np.bool_)):
        return int(number)
    else:
        return float(number)

def edges():
    for source, target, weight in zip(*self.get_edge_list()):
        yield int(source), int(target), {'weight': convert(weight)}

def nodes():
    for vertex in range(self.n_vertices):
        signals = {name: convert(signal[vertex])
                   for name, signal in self.signals.items()}
        yield vertex, signals

self._break_signals()

graph = nx.DiGraph() if self.is_directed() else nx.Graph()
graph.add_nodes_from(nodes())
graph.add_edges_from(edges())
graph.name = self.__class__.__name__
return graph
def to_networkx(self)
r"""Export the graph to NetworkX. Edge weights are stored as an edge attribute, under the name "weight". Signals are stored as node attributes, under their name in the :attr:`signals` dictionary. `N`-dimensional signals are broken into `N` 1-dimensional signals. They will eventually be joined back together on import. Returns ------- graph : :class:`networkx.Graph` A NetworkX graph object. See Also -------- to_graphtool : export to graph-tool save : save to a file Examples -------- >>> import networkx as nx >>> from matplotlib import pyplot as plt >>> graph = graphs.Path(4, directed=True) >>> graph.set_signal(np.full(4, 2.3), 'signal') >>> graph = graph.to_networkx() >>> print(nx.info(graph)) Name: Path Type: DiGraph Number of nodes: 4 Number of edges: 3 Average in degree: 0.7500 Average out degree: 0.7500 >>> nx.is_directed(graph) True >>> graph.nodes() NodeView((0, 1, 2, 3)) >>> graph.edges() OutEdgeView([(0, 1), (1, 2), (2, 3)]) >>> graph.nodes()[2] {'signal': 2.3} >>> graph.edges()[(0, 1)] {'weight': 1.0} >>> # nx.draw(graph, with_labels=True) Another common goal is to use NetworkX to compute some properties to be be imported back in the PyGSP as signals. >>> import networkx as nx >>> from matplotlib import pyplot as plt >>> graph = graphs.Sensor(100, seed=42) >>> graph.set_signal(graph.coords, 'coords') >>> graph = graph.to_networkx() >>> betweenness = nx.betweenness_centrality(graph, weight='weight') >>> nx.set_node_attributes(graph, betweenness, 'betweenness') >>> graph = graphs.Graph.from_networkx(graph) >>> graph.compute_fourier_basis() >>> graph.set_coordinates(graph.signals['coords']) >>> fig, axes = plt.subplots(1, 2) >>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0]) >>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
4.432641
4.893656
0.905793
# See gt.value_types() for the list of accepted types.
# See the definition of _type_alias() for a list of aliases.
# Mapping from https://docs.scipy.org/doc/numpy/user/basics.types.html.
convert = {
    np.bool_: 'bool',
    np.int8: 'int8_t',
    np.int16: 'int16_t',
    np.int32: 'int32_t',
    np.int64: 'int64_t',
    np.short: 'short',
    np.intc: 'int',
    np.uintc: 'unsigned int',
    np.long: 'long',
    np.longlong: 'long long',
    np.uint: 'unsigned long',
    np.single: 'float',
    np.double: 'double',
    np.longdouble: 'long double',
}

gt = _import_graphtool()
graph = gt.Graph(directed=self.is_directed())

sources, targets, weights = self.get_edge_list()
graph.add_edge_list(np.asarray((sources, targets)).T)
try:
    dtype = convert[weights.dtype.type]
except KeyError:
    raise TypeError("Type {} of the edge weights is not supported."
                    .format(weights.dtype))
prop = graph.new_edge_property(dtype)
prop.get_array()[:] = weights
graph.edge_properties['weight'] = prop

self._break_signals()
for name, signal in self.signals.items():
    try:
        dtype = convert[signal.dtype.type]
    except KeyError:
        raise TypeError("Type {} of signal {} is not supported."
                        .format(signal.dtype, name))
    prop = graph.new_vertex_property(dtype)
    prop.get_array()[:] = signal
    graph.vertex_properties[name] = prop
return graph
def to_graphtool(self)
r"""Export the graph to graph-tool. Edge weights are stored as an edge property map, under the name "weight". Signals are stored as vertex property maps, under their name in the :attr:`signals` dictionary. `N`-dimensional signals are broken into `N` 1-dimensional signals. They will eventually be joined back together on import. Returns ------- graph : :class:`graph_tool.Graph` A graph-tool graph object. See Also -------- to_networkx : export to NetworkX save : save to a file Examples -------- >>> import graph_tool as gt >>> import graph_tool.draw >>> from matplotlib import pyplot as plt >>> graph = graphs.Path(4, directed=True) >>> graph.set_signal(np.full(4, 2.3), 'signal') >>> graph = graph.to_graphtool() >>> graph.is_directed() True >>> graph.vertex_properties['signal'][2] 2.3 >>> graph.edge_properties['weight'][(0, 1)] 1.0 >>> # gt.draw.graph_draw(graph, vertex_text=graph.vertex_index) Another common goal is to use graph-tool to compute some properties to be imported back in the PyGSP as signals. >>> import graph_tool as gt >>> import graph_tool.centrality >>> from matplotlib import pyplot as plt >>> graph = graphs.Sensor(100, seed=42) >>> graph.set_signal(graph.coords, 'coords') >>> graph = graph.to_graphtool() >>> vprop, eprop = gt.centrality.betweenness( ... graph, weight=graph.edge_properties['weight']) >>> graph.vertex_properties['betweenness'] = vprop >>> graph = graphs.Graph.from_graphtool(graph) >>> graph.compute_fourier_basis() >>> graph.set_coordinates(graph.signals['coords']) >>> fig, axes = plt.subplots(1, 2) >>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0]) >>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
2.695962
2.558387
1.053774
nx = _import_networkx()
from .graph import Graph

adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
graph_pg = Graph(adjacency)

for i, node in enumerate(graph.nodes()):
    for name in graph.nodes[node].keys():
        try:
            signal = graph_pg.signals[name]
        except KeyError:
            signal = np.full(graph_pg.n_vertices, np.nan)
            graph_pg.set_signal(signal, name)
        try:
            signal[i] = graph.nodes[node][name]
        except KeyError:
            pass  # attribute not set for node

graph_pg._join_signals()
return graph_pg
def from_networkx(cls, graph, weight='weight')
r"""Import a graph from NetworkX. Edge weights are retrieved as an edge attribute, under the name specified by the ``weight`` parameter. Signals are retrieved from node attributes, and stored in the :attr:`signals` dictionary under the attribute name. `N`-dimensional signals that were broken during export are joined. Parameters ---------- graph : :class:`networkx.Graph` A NetworkX graph object. weight : string or None, optional The edge attribute that holds the numerical values used as the edge weights. All edge weights are set to 1 if None, or not found. Returns ------- graph : :class:`~pygsp.graphs.Graph` A PyGSP graph object. Notes ----- The nodes are ordered according to :meth:`networkx.Graph.nodes`. In NetworkX, node attributes need not be set for every node. If a node attribute is not set for a node, a NaN is assigned to the corresponding signal for that node. If the graph is a :class:`networkx.MultiGraph`, multiedges are aggregated by summation. See Also -------- from_graphtool : import from graph-tool load : load from a file Examples -------- >>> import networkx as nx >>> graph = nx.Graph() >>> graph.add_edge(1, 2, weight=0.2) >>> graph.add_edge(2, 3, weight=0.9) >>> graph.add_node(4, sig=3.1416) >>> graph.nodes() NodeView((1, 2, 3, 4)) >>> graph = graphs.Graph.from_networkx(graph) >>> graph.W.toarray() array([[0. , 0.2, 0. , 0. ], [0.2, 0. , 0.9, 0. ], [0. , 0.9, 0. , 0. ], [0. , 0. , 0. , 0. ]]) >>> graph.signals {'sig': array([ nan, nan, nan, 3.1416])}
3.838067
4.206926
0.912321
gt = _import_graphtool()
import graph_tool.spectral
from .graph import Graph

weight = graph.edge_properties.get(weight, None)
adjacency = gt.spectral.adjacency(graph, weight=weight)
graph_pg = Graph(adjacency.T)

for name, signal in graph.vertex_properties.items():
    graph_pg.set_signal(signal.get_array(), name)

graph_pg._join_signals()
return graph_pg
def from_graphtool(cls, graph, weight='weight')
r"""Import a graph from graph-tool. Edge weights are retrieved as an edge property, under the name specified by the ``weight`` parameter. Signals are retrieved from node properties, and stored in the :attr:`signals` dictionary under the property name. `N`-dimensional signals that were broken during export are joined. Parameters ---------- graph : :class:`graph_tool.Graph` A graph-tool graph object. weight : string The edge property that holds the numerical values used as the edge weights. All edge weights are set to 1 if None, or not found. Returns ------- graph : :class:`~pygsp.graphs.Graph` A PyGSP graph object. Notes ----- If the graph has multiple edge connecting the same two nodes, a sum over the edges is taken to merge them. See Also -------- from_networkx : import from NetworkX load : load from a file Examples -------- >>> import graph_tool as gt >>> graph = gt.Graph(directed=False) >>> e1 = graph.add_edge(0, 1) >>> e2 = graph.add_edge(1, 2) >>> v = graph.add_vertex() >>> eprop = graph.new_edge_property("double") >>> eprop[e1] = 0.2 >>> eprop[(1, 2)] = 0.9 >>> graph.edge_properties["weight"] = eprop >>> vprop = graph.new_vertex_property("double", val=np.nan) >>> vprop[3] = 3.1416 >>> graph.vertex_properties["sig"] = vprop >>> graph = graphs.Graph.from_graphtool(graph) >>> graph.W.toarray() array([[0. , 0.2, 0. , 0. ], [0.2, 0. , 0.9, 0. ], [0. , 0.9, 0. , 0. ], [0. , 0. , 0. , 0. ]]) >>> graph.signals {'sig': PropertyArray([ nan, nan, nan, 3.1416])}
6.107215
6.60002
0.925333
if fmt is None:
    fmt = os.path.splitext(path)[1][1:]
if fmt not in ['graphml', 'gml', 'gexf']:
    raise ValueError('Unsupported format {}.'.format(fmt))

def load_networkx(path, fmt):
    nx = _import_networkx()
    load = getattr(nx, 'read_' + fmt)
    graph = load(path)
    return cls.from_networkx(graph)

def load_graphtool(path, fmt):
    gt = _import_graphtool()
    graph = gt.load_graph(path, fmt=fmt)
    return cls.from_graphtool(graph)

if backend == 'networkx':
    return load_networkx(path, fmt)
elif backend == 'graph-tool':
    return load_graphtool(path, fmt)
elif backend is None:
    try:
        return load_networkx(path, fmt)
    except ImportError:
        try:
            return load_graphtool(path, fmt)
        except ImportError:
            raise ImportError('Cannot import networkx nor graph-tool.')
else:
    raise ValueError('Unknown backend {}.'.format(backend))
def load(cls, path, fmt=None, backend=None)
r"""Load a graph from a file. Edge weights are retrieved as an edge attribute named "weight". Signals are retrieved from node attributes, and stored in the :attr:`signals` dictionary under the attribute name. `N`-dimensional signals that were broken during export are joined. Parameters ---------- path : string Path to the file from which to load the graph. fmt : {'graphml', 'gml', 'gexf', None}, optional Format in which the graph is saved. Guessed from the filename extension if None. backend : {'networkx', 'graph-tool', None}, optional Library used to load the graph. Automatically chosen if None. Returns ------- graph : :class:`Graph` The loaded graph. See Also -------- save : save a graph to a file from_networkx : load with NetworkX then import in the PyGSP from_graphtool : load with graph-tool then import in the PyGSP Notes ----- A lossless round-trip is only guaranteed if the graph (and its signals) is saved and loaded with the same backend. Loading from other formats is possible by loading in NetworkX or graph-tool, and importing to the PyGSP. The proposed formats are however tested for faithful round-trips. Examples -------- >>> graph = graphs.Logo() >>> graph.save('logo.graphml') >>> graph = graphs.Graph.load('logo.graphml') >>> import os >>> os.remove('logo.graphml')
1.992083
1.868206
1.066308
data = pkgutil.get_data('pygsp', 'data/' + path + '.mat')
data = io.BytesIO(data)
return scipy.io.loadmat(data)
def loadmat(path)
r""" Load a matlab data file. Parameters ---------- path : string Path to the mat file from the data folder, without the .mat extension. Returns ------- data : dict dictionary with variable names as keys, and loaded matrices as values. Examples -------- >>> from pygsp import utils >>> data = utils.loadmat('pointclouds/bunny') >>> data['bunny'].shape (2503, 3)
4.08848
6.701628
0.610073
try:
    x.shape[1]
except IndexError:
    x = x.reshape(1, x.shape[0])

if y is None:
    y = x
else:
    try:
        y.shape[1]
    except IndexError:
        y = y.reshape(1, y.shape[0])

rx, cx = x.shape
ry, cy = y.shape

# Size verification: both sets of points must live in the same dimension.
if rx != ry:
    raise ValueError('The dimensions of x and y do not match.')

xx = (x * x).sum(axis=0)
yy = (y * y).sum(axis=0)
xy = np.dot(x.T, y)

d = abs(np.kron(np.ones((cy, 1)), xx).T +
        np.kron(np.ones((cx, 1)), yy) - 2 * xy)

return np.sqrt(d)
def distanz(x, y=None)
r""" Calculate the distance between two colon vectors. Parameters ---------- x : ndarray First colon vector y : ndarray Second colon vector Returns ------- d : ndarray Distance between x and y Examples -------- >>> from pygsp import utils >>> x = np.arange(3) >>> utils.distanz(x, x) array([[0., 1., 2.], [1., 0., 1.], [2., 1., 0.]])
2.528706
2.813174
0.89888
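Since the function above computes plain Euclidean distances between points stored as columns, it can be cross-checked against SciPy's cdist, which expects points as rows; a minimal sketch assuming NumPy and SciPy only.

import numpy as np
from scipy.spatial.distance import cdist

x = np.random.default_rng(0).normal(size=(3, 5))  # 3 dimensions, 5 points

# distanz() treats points as columns; cdist treats them as rows, so the
# result of cdist(x.T, x.T) should match distanz(x) up to rounding error.
d = cdist(x.T, x.T)
print(d.shape)  # (5, 5)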
if sparse.issparse(G):
    L = G.tocsc()
else:
    if G.lap_type != 'combinatorial':
        raise ValueError('Need a combinatorial Laplacian.')
    L = G.L.tocsc()

try:
    pseudo = sparse.linalg.inv(L)
except RuntimeError:
    pseudo = sparse.lil_matrix(np.linalg.pinv(L.toarray()))

N = np.shape(L)[0]
d = sparse.csc_matrix(pseudo.diagonal())

rd = sparse.kron(d, sparse.csc_matrix(np.ones((N, 1)))).T \
    + sparse.kron(d, sparse.csc_matrix(np.ones((N, 1)))) \
    - pseudo - pseudo.T

return rd
def resistance_distance(G)
r""" Compute the resistance distances of a graph. Parameters ---------- G : Graph or sparse matrix Graph structure or Laplacian matrix (L) Returns ------- rd : sparse matrix distance matrix References ---------- :cite:`klein1993resistance`
3.856943
3.596397
1.072446
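The computation above implements the identity rd[i, j] = Q[i, i] + Q[j, j] - 2 Q[i, j], where Q is the pseudo-inverse of the Laplacian. A dense sketch of the same identity on a triangle graph, assuming only NumPy; the known answer is 2/3 between any two distinct vertices (two parallel paths of resistance 1 and 2).

import numpy as np

# Combinatorial Laplacian of a triangle graph.
L = np.array([[ 2, -1, -1],
              [-1,  2, -1],
              [-1, -1,  2]], dtype=float)

Q = np.linalg.pinv(L)
rd = np.diag(Q)[:, None] + np.diag(Q)[None, :] - Q - Q.T
print(rd)  # 2/3 off the diagonal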
if W.shape[0] != W.shape[1]:
    raise ValueError('Matrix must be square.')

if method == 'average':
    return (W + W.T) / 2
elif method == 'maximum':
    if sparse.issparse(W):
        bigger = (W.T > W)
        return W - W.multiply(bigger) + W.T.multiply(bigger)
    else:
        return np.maximum(W, W.T)
elif method == 'fill':
    A = (W > 0)  # Boolean type.
    if sparse.issparse(W):
        mask = (A + A.T) - A
        W = W + mask.multiply(W.T)
    else:
        # Numpy boolean subtract is deprecated.
        mask = np.logical_xor(np.logical_or(A, A.T), A)
        W = W + mask * W.T
    return symmetrize(W, method='average')  # Resolve ambiguous entries.
elif method in ['tril', 'triu']:
    if sparse.issparse(W):
        tri = getattr(sparse, method)
    else:
        tri = getattr(np, method)
    W = tri(W)
    return symmetrize(W, method='maximum')
else:
    raise ValueError('Unknown symmetrization method {}.'.format(method))
def symmetrize(W, method='average')
r""" Symmetrize a square matrix. Parameters ---------- W : array_like Square matrix to be symmetrized method : string * 'average' : symmetrize by averaging with the transpose. Most useful when transforming a directed graph to an undirected one. * 'maximum' : symmetrize by taking the maximum with the transpose. Similar to 'fill' except that ambiguous entries are resolved by taking the largest value. * 'fill' : symmetrize by filling in the zeros in both the upper and lower triangular parts. Ambiguous entries are resolved by averaging the values. * 'tril' : symmetrize by considering the lower triangular part only. * 'triu' : symmetrize by considering the upper triangular part only. Notes ----- You can have the sum by multiplying the average by two. It is however not a good candidate for this function as it modifies an already symmetric matrix. Examples -------- >>> from pygsp import utils >>> W = np.array([[0, 3, 0], [3, 1, 6], [4, 2, 3]], dtype=float) >>> W array([[0., 3., 0.], [3., 1., 6.], [4., 2., 3.]]) >>> utils.symmetrize(W, method='average') array([[0., 3., 2.], [3., 1., 4.], [2., 4., 3.]]) >>> 2 * utils.symmetrize(W, method='average') array([[0., 6., 4.], [6., 2., 8.], [4., 8., 6.]]) >>> utils.symmetrize(W, method='maximum') array([[0., 3., 4.], [3., 1., 6.], [4., 6., 3.]]) >>> utils.symmetrize(W, method='fill') array([[0., 3., 4.], [3., 1., 4.], [4., 4., 3.]]) >>> utils.symmetrize(W, method='tril') array([[0., 3., 4.], [3., 1., 2.], [4., 2., 3.]]) >>> utils.symmetrize(W, method='triu') array([[0., 3., 0.], [3., 1., 6.], [0., 6., 3.]])
2.873098
2.807107
1.023508
N = x.shape[1]
y = x - np.kron(np.ones((1, N)), np.mean(x, axis=1)[:, np.newaxis])
c = np.amax(y)
r = y / c
return r
def rescale_center(x)
r""" Rescale and center data, e.g. embedding coordinates. Parameters ---------- x : ndarray Data to be rescaled. Returns ------- r : ndarray Rescaled data. Examples -------- >>> from pygsp import utils >>> x = np.array([[1, 6], [2, 5], [3, 4]]) >>> utils.rescale_center(x) array([[-1. , 1. ], [-0.6, 0.6], [-0.2, 0.2]])
4.472158
5.349227
0.836038
scale_min = t1 / lmax
scale_max = t2 / lmin
return np.exp(np.linspace(np.log(scale_max), np.log(scale_min), Nscales))
def compute_log_scales(lmin, lmax, Nscales, t1=1, t2=2)
r""" Compute logarithm scales for wavelets. Parameters ---------- lmin : float Smallest non-zero eigenvalue. lmax : float Largest eigenvalue, i.e. :py:attr:`pygsp.graphs.Graph.lmax`. Nscales : int Number of scales. Returns ------- scales : ndarray List of scales of length Nscales. Examples -------- >>> from pygsp import utils >>> utils.compute_log_scales(1, 10, 3) array([2. , 0.4472136, 0.1 ])
3.157362
5.087738
0.620583
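The scales are geometrically spaced from t2/lmin down to t1/lmax; a minimal numeric check of the closed form, assuming only NumPy, reproducing the docstring example.

import numpy as np

lmin, lmax, Nscales = 1, 10, 3
t1, t2 = 1, 2
scales = np.exp(np.linspace(np.log(t2 / lmin), np.log(t1 / lmax), Nscales))
print(scales)  # [2.        0.4472136 0.1      ]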
for name in names:
    module = importlib.import_module(src + '.' + name)
    setattr(sys.modules[dst], name, module)
def import_modules(names, src, dst)
Import modules in package.
2.068207
2.286816
0.904404
for name in names:
    module = importlib.import_module('pygsp.' + src + '.' + name.lower())
    setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
def import_classes(names, src, dst)
Import classes in package from their implementation modules.
2.733793
2.890965
0.945634
for name in names:
    module = importlib.import_module('pygsp.' + src)
    setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
def import_functions(names, src, dst)
Import functions in package from their implementation modules.
3.080429
3.167238
0.972591
if isinstance(result, dict):
    if result.get('status') == 'failed':
        raise ActionFailed(retcode=result.get('retcode'))
    return result.get('data')
def _handle_api_result(result: Optional[Dict[str, Any]]) -> Any
Retrieve the 'data' field from the API result object.

:param result: API result received from the HTTP API
:return: the 'data' field of the result object
:raise ActionFailed: if the 'status' field is 'failed'
4.353683
3.159768
1.377849
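A usage sketch for the helper above, assuming the function and its ActionFailed exception from the surrounding module are in scope; the result dicts are hypothetical examples.

ok = {'status': 'ok', 'retcode': 0, 'data': {'user_id': 42}}
failed = {'status': 'failed', 'retcode': 100}

print(_handle_api_result(ok))  # {'user_id': 42}
try:
    _handle_api_result(failed)
except ActionFailed:
    print('the API reported failure')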
idx = 0
while idx < len(self):
    if idx > 0 and \
            self[idx - 1].type == 'text' and self[idx].type == 'text':
        self[idx - 1].data['text'] += self[idx].data['text']
        del self[idx]
    else:
        idx += 1
def reduce(self) -> None
Remove redundant segments. Since this class is implemented based on list, this method may require O(n) time.
2.857631
2.634682
1.084621
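The same merge-adjacent-text-segments logic on a plain list of dicts, as a self-contained sketch that mirrors reduce() above without the message-segment class.

segments = [
    {'type': 'text', 'data': {'text': 'Hello, '}},
    {'type': 'text', 'data': {'text': 'world'}},
    {'type': 'image', 'data': {'file': 'a.png'}},
]

# Merge adjacent text segments in place.
idx = 0
while idx < len(segments):
    if idx > 0 and segments[idx - 1]['type'] == 'text' \
            and segments[idx]['type'] == 'text':
        segments[idx - 1]['data']['text'] += segments[idx]['data']['text']
        del segments[idx]
    else:
        idx += 1
print(len(segments))  # 2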
if reduce:
    self.reduce()

result = ''
for seg in self:
    if seg.type == 'text':
        result += ' ' + seg.data['text']
if result:
    result = result[1:]
return result
def extract_plain_text(self, reduce: bool = False) -> str
Extract text segments from the message, joined by single space. :param reduce: reduce the message before extracting :return: the joined string
4.573221
3.97392
1.150808
from django.utils.encoding import force_text
from django.core.mail import EmailMultiAlternatives
from mailer.models import make_message

priority = get_priority(priority)

# need to do this in case subject used lazy version of ugettext
subject = force_text(subject)
message = force_text(message)

msg = make_message(subject=subject,
                   body=message,
                   from_email=from_email,
                   to=recipient_list,
                   priority=priority)
email = msg.email
email = EmailMultiAlternatives(
    email.subject,
    email.body,
    email.from_email,
    email.to,
    headers=headers
)
email.attach_alternative(message_html, "text/html")
msg.email = email
msg.save()
return 1
def send_html_mail(subject, message, message_html, from_email, recipient_list, priority=None, fail_silently=False, auth_user=None, auth_password=None, headers={})
Function to queue HTML e-mails
2.822615
2.774627
1.017295
to = filter_recipient_list(to)
bcc = filter_recipient_list(bcc)
core_msg = EmailMessage(
    subject=subject,
    body=body,
    from_email=from_email,
    to=to,
    bcc=bcc,
    attachments=attachments,
    headers=headers
)
db_msg = Message(priority=priority)
db_msg.email = core_msg
return db_msg
def make_message(subject="", body="", from_email=None, to=None, bcc=None, attachments=None, headers=None, priority=None)
Creates a simple message for the email parameters supplied. The 'to' and 'bcc' lists are filtered using DontSendEntry. If needed, the 'email' attribute can be set to any instance of EmailMessage if e-mails with attachments etc. need to be supported. Call 'save()' on the result when it is ready to be sent, and not before.
2.417308
2.58059
0.936727
queryset = self.filter(to_address__iexact=address)
return queryset.exists()
def has_address(self, address)
is the given address on the don't send list?
6.402432
5.020135
1.275351
return self.create(
    message_data=message.message_data,
    message_id=get_message_id(message.email),
    when_added=message.when_added,
    priority=message.priority,
    # @@@ other fields from Message
    result=result_code,
    log_message=log_message,
)
def log(self, message, result_code, log_message="")
create a log entry for an attempt to send the given message and record the given result and (optionally) a log message
5.847821
5.585184
1.047024
while True:
    hp_qs = Message.objects.high_priority().using('default')
    mp_qs = Message.objects.medium_priority().using('default')
    lp_qs = Message.objects.low_priority().using('default')
    while hp_qs.count() or mp_qs.count():
        while hp_qs.count():
            for message in hp_qs.order_by("when_added"):
                yield message
        while hp_qs.count() == 0 and mp_qs.count():
            yield mp_qs.order_by("when_added")[0]
    while hp_qs.count() == 0 and mp_qs.count() == 0 and lp_qs.count():
        yield lp_qs.order_by("when_added")[0]
    if Message.objects.non_deferred().using('default').count() == 0:
        break
def prioritize()
Yield the messages in the queue in the order they should be sent.
2.748701
2.494098
1.102082
# The actual backend to use for sending, defaulting to the Django default.
# To make testing easier this is not stored at module level.
EMAIL_BACKEND = getattr(
    settings,
    "MAILER_EMAIL_BACKEND",
    "django.core.mail.backends.smtp.EmailBackend"
)

acquired, lock = acquire_lock()
if not acquired:
    return

start_time = time.time()

deferred = 0
sent = 0

try:
    connection = None
    for message in prioritize():
        try:
            if connection is None:
                connection = get_connection(backend=EMAIL_BACKEND)
            logging.info("sending message '{0}' to {1}".format(
                message.subject,
                ", ".join(message.to_addresses))
            )
            email = message.email
            if email is not None:
                email.connection = connection
                if not hasattr(email, 'reply_to'):
                    # Compatibility fix for EmailMessage objects
                    # pickled when running < Django 1.8 and then
                    # unpickled under Django 1.8
                    email.reply_to = []
                ensure_message_id(email)
                email.send()

                # connection can't be stored in the MessageLog
                email.connection = None
                message.email = email  # For the sake of MessageLog
                MessageLog.objects.log(message, RESULT_SUCCESS)
                sent += 1
            else:
                logging.warning("message discarded due to failure in converting from DB. Added on '%s' with priority '%s'" % (message.when_added, message.priority))  # noqa
            message.delete()

        except (socket_error, smtplib.SMTPSenderRefused,
                smtplib.SMTPRecipientsRefused,
                smtplib.SMTPDataError,
                smtplib.SMTPAuthenticationError) as err:
            message.defer()
            logging.info("message deferred due to failure: %s" % err)
            MessageLog.objects.log(message, RESULT_FAILURE,
                                   log_message=str(err))
            deferred += 1
            # Get a new connection, in case the connection itself has an error.
            connection = None

        # Check if we reached the limits for the current run
        if _limits_reached(sent, deferred):
            break

        _throttle_emails()
finally:
    release_lock(lock)

logging.info("")
logging.info("%s sent; %s deferred;" % (sent, deferred))
logging.info("done in %.2f seconds" % (time.time() - start_time))
def send_all()
Send all eligible messages in the queue.
4.658339
4.58437
1.016135
while True:
    while not Message.objects.all():
        logging.debug("sleeping for %s seconds before checking queue again"
                      % EMPTY_QUEUE_SLEEP)
        time.sleep(EMPTY_QUEUE_SLEEP)
    send_all()
def send_loop()
Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and sending messages if any are on queue.
5.999557
3.806597
1.576094
if isinstance(name, str):
    components = name.split('.')
    mod = __import__('.'.join(components[0:-1]),
                     globals(), locals(), [components[-1]])
    return getattr(mod, components[-1])
else:
    return name
def import_name(name)
Import the object named by a dotted-path string, or return the argument unchanged if it is not a string.
2.078004
1.901169
1.093014
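A usage sketch for the helper above, assuming import_name is in scope; both calls resolve to the same object.

import json

# A dotted path resolves to the named object; a non-string passes through.
assert import_name('json.loads') is json.loads
assert import_name(json.loads) is json.loads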
if request.user.is_authenticated:
    try:
        return {
            'ACCOUNT_EXPIRED': request.user.userplan.is_expired(),
            'ACCOUNT_NOT_ACTIVE': (
                not request.user.userplan.is_active() and
                not request.user.userplan.is_expired()),
            'EXPIRE_IN_DAYS': request.user.userplan.days_left(),
            'EXTEND_URL': reverse('current_plan'),
            'ACTIVATE_URL': reverse('account_activation'),
        }
    except UserPlan.DoesNotExist:
        pass
return {}
def account_status(request)
Set following ``RequestContext`` variables: * ``ACCOUNT_EXPIRED = boolean``, account was expired state, * ``ACCOUNT_NOT_ACTIVE = boolean``, set when account is not expired, but it is over quotas so it is not active * ``EXPIRE_IN_DAYS = integer``, number of days to account expiration, * ``EXTEND_URL = string``, URL to account extend page. * ``ACTIVATE_URL = string``, URL to account activation needed if account is not active
3.430434
2.18326
1.571244
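A context processor like the one above only takes effect once registered in the Django settings; a minimal sketch of the standard configuration, where the module path 'plans.context_processors.account_status' is a hypothetical example.

# settings.py
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'OPTIONS': {
        'context_processors': [
            'django.contrib.auth.context_processors.auth',
            # Hypothetical dotted path to the processor defined above.
            'plans.context_processors.account_status',
        ],
    },
}]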
for plan in queryset:
    plan_copy = deepcopy(plan)
    plan_copy.id = None
    plan_copy.available = False
    plan_copy.default = False
    plan_copy.created = None
    plan_copy.save(force_insert=True)

    for pricing in plan.planpricing_set.all():
        pricing.id = None
        pricing.plan = plan_copy
        pricing.save(force_insert=True)

    for quota in plan.planquota_set.all():
        quota.id = None
        quota.plan = plan_copy
        quota.save(force_insert=True)
def copy_plan(modeladmin, request, queryset)
Admin command for duplicating plans preserving quotas and pricings.
1.980598
1.778664
1.113531
# Retrieve all quotas that are used by any ``Plan`` in ``plan_list``.
quota_list = Quota.objects.all().filter(
    planquota__plan__in=plan_list).distinct()

# Create a random-access dict that, for every ``Plan``,
# maps ``Quota`` -> ``PlanQuota``.
plan_quotas_dic = {}
for plan in plan_list:
    plan_quotas_dic[plan] = {}
    for plan_quota in plan.planquota_set.all():
        plan_quotas_dic[plan][plan_quota.quota] = plan_quota

# Generate the data structure described in the method docstring, propagating
# ``None`` whenever a ``PlanQuota`` is not available for a given ``Plan`` and
# ``Quota``. Lists (not lazy ``map`` objects) are built so the result can be
# iterated repeatedly, e.g. in templates.
return [
    (quota, [plan_quotas_dic[plan].get(quota, None) for plan in plan_list])
    for quota in quota_list
]
def get_plan_table(self, plan_list)
This method returns a list in the following order:

[
    ( Quota1, [ Plan1Quota1, Plan2Quota1, ... , PlanNQuota1] ),
    ( Quota2, [ Plan1Quota2, Plan2Quota2, ... , PlanNQuota2] ),
    ...
    ( QuotaM, [ Plan1QuotaM, Plan2QuotaM, ... , PlanNQuotaM] ),
]

This can be very easily printed as an HTML table element with quotas
by row. Quotas are calculated based on ``plan_list``. These are all
available quotas that are used by the given plans. If any ``Plan``
does not have a given ``PlanQuota``, the value ``None`` is propagated
into the data structure.
4.558063
3.500732
1.302031
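A self-contained rendering sketch for the structure above, using toy stand-ins: `code` mirrors the ``Quota.code`` field used elsewhere in this module, while ``value`` on the plan-quota stand-in is an assumed attribute name.

class Q:
    def __init__(self, code):
        self.code = code

class PQ:
    def __init__(self, value):
        self.value = value

# Hypothetical (quota, [plan_quota_or_None, ...]) rows.
table = [(Q('MAX_PROJECTS'), [PQ(5), None, PQ(50)]),
         (Q('STORAGE_MB'), [PQ(100), PQ(1000), None])]

for quota, plan_quotas in table:
    cells = ['-' if pq is None else str(pq.value) for pq in plan_quotas]
    print(quota.code, *cells)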
order = Order(pk=-1)
order.amount = amount
order.currency = self.get_currency()
country = getattr(billing_info, 'country', None)
if country is not None:
    country = country.code
tax_number = getattr(billing_info, 'tax_number', None)

# Calculating tax can be a complex task (e.g. a VIES webservice call).
# To ensure that the tax calculated on the order preview matches the final
# order, the tax rate is cached for the given billing data (as this value
# depends only on it).
tax_session_key = "tax_%s_%s" % (tax_number, country)
tax = self.request.session.get(tax_session_key)
if tax is None:
    taxation_policy = getattr(settings, 'PLANS_TAXATION_POLICY', None)
    if not taxation_policy:
        raise ImproperlyConfigured('PLANS_TAXATION_POLICY is not set')
    taxation_policy = import_name(taxation_policy)
    tax = str(taxation_policy.get_tax_rate(tax_number, country))
    # The taxation policy may return None, which does not cache cleanly,
    # so the str() representation of the value is stored instead.
    self.request.session[tax_session_key] = tax

order.tax = Decimal(tax) if tax != 'None' else None
return order
def recalculate(self, amount, billing_info)
Calculate and return a pre-filled ``Order``.
4.890622
4.750306
1.029538
self.plan_pricing = get_object_or_404(
    PlanPricing.objects.all().select_related('plan', 'pricing'),
    Q(pk=self.kwargs['pk']) & Q(plan__available=True) &
    (Q(plan__customized=self.request.user) |
     Q(plan__customized__isnull=True)))

# A user is not allowed to create a new order for a Plan while having a
# different Plan; the Plan Change View should be used for that action.
if not self.request.user.userplan.is_expired() and \
        self.request.user.userplan.plan != self.plan_pricing.plan:
    raise Http404

self.plan = self.plan_pricing.plan
self.pricing = self.plan_pricing.pricing
def get_all_context(self)
Retrieves Plan and Pricing for current order creation
4.748603
4.063541
1.168587
if created:
    Invoice.create(instance, Invoice.INVOICE_TYPES['PROFORMA'])
def create_proforma_invoice(sender, instance, created, **kwargs)
For every Order, if billing data is defined, create a proforma invoice, which is an order confirmation document.
5.988439
7.334734
0.816449
if plan is None:
    # if plan is not given, the default is to use current plan of the user
    plan = user.userplan.plan
quota_dict = plan.get_quota_dict()
validators = getattr(settings, 'PLANS_VALIDATORS', {})
validators = import_name(validators)
errors = {
    'required_to_activate': [],
    'other': [],
}
for quota in validators:
    validator = import_name(validators[quota])
    if on_activation:
        validator.on_activation(user, quota_dict)
    else:
        try:
            validator(user, quota_dict)
        except ValidationError as e:
            if validator.required_to_activate:
                errors['required_to_activate'].extend(e.messages)
            else:
                errors['other'].extend(e.messages)
return errors
def plan_validation(user, plan=None, on_activation=False)
Run the validators that represent quotas in a given system.

:param user: user whose account is being validated
:param plan: plan to validate against; defaults to the user's current plan
:return: dict of validation error messages, keyed by 'required_to_activate' and 'other'
3.147885
3.148485
0.99981
if quota_dict is None:
    quota_dict = get_user_quota(user)
return quota_dict.get(self.code, self.default_quota_value)
def get_quota_value(self, user, quota_dict=None)
Returns quota value for a given user
2.960237
2.796767
1.05845
send_emails = getattr(settings, 'SEND_PLANS_EMAILS', True)
if not send_emails:
    return

site_name = getattr(settings, 'SITE_NAME',
                    'Please define settings.SITE_NAME')
domain = getattr(settings, 'SITE_URL', None)

if domain is None:
    try:
        Site = apps.get_model('sites', 'Site')
        current_site = Site.objects.get_current()
        site_name = current_site.name
        domain = current_site.domain
    except LookupError:
        pass

context.update({'site_name': site_name, 'site_domain': domain})

if language is not None:
    translation.activate(language)

mail_title_template = loader.get_template(title_template)
mail_body_template = loader.get_template(body_template)
title = mail_title_template.render(context)
body = mail_body_template.render(context)

try:
    email_from = getattr(settings, 'DEFAULT_FROM_EMAIL')
except AttributeError:
    raise ImproperlyConfigured(
        'DEFAULT_FROM_EMAIL setting needed for sending e-mails')

mail.send_mail(title, body, email_from, recipients)

if language is not None:
    translation.deactivate()

email_logger.info(u"Email (%s) sent to %s\nTitle: %s\n%s\n\n"
                  % (language, recipients, title, body))
def send_template_email(recipients, title_template, body_template, context, language)
Send an e-mail rendered from the given title and body templates.
2.227509
2.187829
1.018137
return_value = {}
user_language.send(sender=user, user=user, return_value=return_value)
return return_value.get('language')
def get_user_language(user)
Simple helper that will fire django signal in order to get User language possibly given by other part of application. :param user: :return: string or None
4.204709
4.332342
0.970539
plan_pricings = plan.planpricing_set.order_by(
    '-pricing__period').select_related('pricing')
selected_pricing = None
for plan_pricing in plan_pricings:
    selected_pricing = plan_pricing
    if plan_pricing.pricing.period <= period:
        break

if selected_pricing:
    return (selected_pricing.price /
            selected_pricing.pricing.period).quantize(Decimal('1.00'))

raise ValueError('Plan %s has no pricings.' % plan)
def _calculate_day_cost(self, plan, period)
Find the best-fitting plan pricing for a given period and calculate the daily cost.
3.203084
2.986169
1.07264
if period is None or period < 1:
    return None

plan_old_day_cost = self._calculate_day_cost(plan_old, period)
plan_new_day_cost = self._calculate_day_cost(plan_new, period)

if plan_new_day_cost <= plan_old_day_cost:
    return self._calculate_final_price(period, None)
else:
    return self._calculate_final_price(
        period, plan_new_day_cost - plan_old_day_cost)
def get_change_price(self, plan_old, plan_new, period)
Calculates total price of plan change. Returns None if no payment is required.
2.195526
2.054993
1.068386
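A worked example of the proration logic above with hypothetical day costs; `_calculate_final_price` is simplified here to a plain multiplication, which is an assumption about its behavior.

from decimal import Decimal

# Hypothetical day costs: old plan 1.00/day, new plan 1.50/day, 10 days left.
period = 10
plan_old_day_cost = Decimal('1.00')
plan_new_day_cost = Decimal('1.50')

if plan_new_day_cost <= plan_old_day_cost:
    price = None  # downgrade or equal cost: no payment required
else:
    price = period * (plan_new_day_cost - plan_old_day_cost)
print(price)  # 5.00, pay only the difference for the remaining days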
@wraps(operator)
def wrapper(self, other):
    if not isinstance(other, (VersionInfo, dict)):
        return NotImplemented
    return operator(self, other)
return wrapper
def comparator(operator)
Wrap a VersionInfo binary op method in a type-check
3.751859
2.672415
1.403921
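A self-contained usage sketch of the decorator above on a toy class; the wrapper is reproduced so the example runs on its own, and the toy VersionInfo only carries a major number.

from functools import wraps

def comparator(operator):
    # Same wrapper as above: reject incomparable types with NotImplemented.
    @wraps(operator)
    def wrapper(self, other):
        if not isinstance(other, (VersionInfo, dict)):
            return NotImplemented
        return operator(self, other)
    return wrapper

class VersionInfo:
    def __init__(self, major):
        self.major = major

    @comparator
    def __eq__(self, other):
        if isinstance(other, dict):
            other = VersionInfo(**other)
        return self.major == other.major

print(VersionInfo(1) == VersionInfo(1))   # True
print(VersionInfo(1) == {'major': 1})     # True, dicts are accepted
print(VersionInfo(1) == 'not a version')  # False, via NotImplemented fallback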
assert isinstance(alpha, float) and isinstance(beta, float)
self.alpha = alpha
self.beta = beta
def set_priors(self, alpha, beta)
Override default values for Dirichlet hyperparameters. alpha: hyperparameter for distribution of topics within documents. beta: hyperparameter for distribution of tokens within topics.
3.249166
2.849347
1.14032
# np.int was removed from NumPy; check for any integer dtype instead.
assert np.issubdtype(sampled_topics.dtype, np.integer) and \
    len(sampled_topics.shape) <= 2

if len(sampled_topics.shape) == 1:
    self.sampled_topics = \
        sampled_topics.reshape(1, sampled_topics.shape[0])
else:
    self.sampled_topics = sampled_topics

self.samples = self.sampled_topics.shape[0]

self.tt = self.tt_comp(self.sampled_topics)
self.dt = self.dt_comp(self.sampled_topics)
def set_sampled_topics(self, sampled_topics)
Allocate sampled topics to the documents rather than estimate them. Automatically generate term-topic and document-topic matrices.
2.641459
2.432705
1.085811
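The tt_comp and dt_comp helpers are not shown in this dump; as a hedged illustration, one plausible building block of the document-topic computation is a per-document count of topic assignments, sketched below with NumPy only and hypothetical token-to-document indices.

import numpy as np

# One sample of topic assignments for 6 tokens spread over 2 documents.
sampled_topics = np.array([0, 2, 1, 1, 0, 2])
docid = np.array([0, 0, 0, 1, 1, 1])  # hypothetical token-to-document map
K = 3

# Document-topic count matrix: dt[d, k] = number of tokens in document d
# assigned to topic k.
dt = np.zeros((2, K), dtype=int)
for d, k in zip(docid, sampled_topics):
    dt[d, k] += 1
print(dt)
# [[1 1 1]
#  [1 1 1]]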