code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
from eqcorrscan.utils import clustering tribes = [] func = getattr(clustering, method) if method in ['space_cluster', 'space_time_cluster']: cat = Catalog([t.event for t in self.templates]) groups = func(cat, **kwargs) for group in groups: new_tribe = Tribe() for event in group: new_tribe.templates.extend([t for t in self.templates if t.event == event]) tribes.append(new_tribe) return tribes
def cluster(self, method, **kwargs)
Cluster the tribe. Cluster templates within a tribe: returns multiple tribes, each of which could be stacked. :type method: str :param method: Method of clustering, see :mod:`eqcorrscan.utils.clustering` :return: List of tribes. .. rubric:: Example
5.727488
3.887985
1.473125
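The Example rubric in the docstring above is empty. Below is a minimal usage sketch, assuming `tribe` is an existing Tribe whose templates carry events; the `d_thresh` keyword (distance threshold in km) is an assumption about the space_cluster signature, so check eqcorrscan.utils.clustering for the exact arguments.

# Hypothetical usage: split a tribe into spatial groups
grouped = tribe.cluster(method='space_cluster', d_thresh=10.0, show=False)
for i, sub_tribe in enumerate(grouped):
    print('Group %i holds %i templates' % (i, len(sub_tribe.templates)))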
templates, catalog, process_lengths = template_gen.template_gen( method=method, lowcut=lowcut, highcut=highcut, filt_order=filt_order, samp_rate=samp_rate, prepick=prepick, return_event=True, save_progress=save_progress, **kwargs) for template, event, process_len in zip(templates, catalog, process_lengths): t = Template() for tr in list(template): if not np.any(tr.data.astype(np.float16)): warnings.warn('Data are zero in float16, missing data,' ' will not use: %s' % tr.id) template.remove(tr) if len(template) == 0: print('Empty Template') continue t.st = template t.name = template.sort(['starttime'])[0]. \ stats.starttime.strftime('%Y_%m_%dt%H_%M_%S') t.lowcut = lowcut t.highcut = highcut t.filt_order = filt_order t.samp_rate = samp_rate t.process_length = process_len t.prepick = prepick event.comments.append(Comment( text="eqcorrscan_template_" + t.name, creation_info=CreationInfo(agency='eqcorrscan', author=getpass.getuser()))) t.event = event self.templates.append(t) return self
def construct(self, method, lowcut, highcut, samp_rate, filt_order, prepick, save_progress=False, **kwargs)
Generate a Tribe of Templates. See :mod:`eqcorrscan.core.template_gen` for available methods. :param method: Method of Tribe generation. :param kwargs: Arguments for the given method. :type lowcut: float :param lowcut: Low cut (Hz), if set to None will not apply a lowcut. :type highcut: float :param highcut: High cut (Hz), if set to None will not apply a highcut. :type samp_rate: float :param samp_rate: New sampling rate in Hz. :type filt_order: int :param filt_order: Filter level (number of corners). :type prepick: float :param prepick: Pre-pick time in seconds. :type save_progress: bool :param save_progress: Whether to save the resulting tribe at every data step or not. Useful for long-running processes. .. Note:: Methods: `from_contbase`, `from_sfile` and `from_sac` are not supported by Tribe.construct and must use Template.construct. .. Note:: The method `multi_template_gen` is not supported because the processing parameters for the stream are not known. Use `from_meta_file` instead. .. Note:: Templates will be named according to their start-time.
4.354722
4.032381
1.079938
mode = 'w' if append and os.path.isfile(fname): mode = 'a' header = '; '.join(['Template name', 'Detection time (UTC)', 'Number of channels', 'Channel list', 'Detection value', 'Threshold', 'Threshold type', 'Input threshold', 'Detection type']) print_str = "{0}; {1}; {2}; {3}; {4}; {5}; {6}; {7}; {8}\n".format( self.template_name, self.detect_time, self.no_chans, self.chans, self.detect_val, self.threshold, self.threshold_type, self.threshold_input, self.typeofdet) with open(fname, mode) as _f: _f.write(header + '\n') # Write a header for the file _f.write(print_str)
def write(self, fname, append=True)
Write detection to a csv formatted file. Will append if append==True and the file exists. :type fname: str :param fname: Full path to file to open and write to. :type append: bool :param append: Set to True to append to an existing file; if True and the file doesn't exist, will create a new file and warn. If False, old files will be overwritten.
3.338042
3.491107
0.956156
if template is not None and template.name != self.template_name: print("Template names do not match: {0}: {1}".format( template.name, self.template_name)) return # Detect time must be valid QuakeML uri within resource_id. # This will write a formatted string which is still # readable by UTCDateTime det_time = str(self.detect_time.strftime('%Y%m%dT%H%M%S.%f')) ev = Event(resource_id=ResourceIdentifier( id=self.template_name + '_' + det_time, prefix='smi:local')) ev.creation_info = CreationInfo( author='EQcorrscan', creation_time=UTCDateTime()) ev.comments.append( Comment(text='threshold={0}'.format(self.threshold))) ev.comments.append( Comment(text='detect_val={0}'.format(self.detect_val))) if self.chans is not None: ev.comments.append( Comment(text='channels used: {0}'.format( ' '.join([str(pair) for pair in self.chans])))) if template is not None: template_st = template.st min_template_tm = min( [tr.stats.starttime for tr in template_st]) for tr in template_st: if (tr.stats.station, tr.stats.channel) \ not in self.chans: continue elif tr.stats.__contains__("not_in_original"): continue else: pick_time = self.detect_time + ( tr.stats.starttime - min_template_tm) ev.picks.append(Pick( time=pick_time, waveform_id=WaveformStreamID( network_code=tr.stats.network, station_code=tr.stats.station, channel_code=tr.stats.channel, location_code=tr.stats.location))) self.event = ev return
def _calculate_event(self, template=None, template_st=None)
Calculate an event for this detection using a given template. :type template: Template :param template: The template that made this detection :type template_st: `obspy.core.stream.Stream` :param template_st: Template stream, used to calculate pick times, not needed if template is given. .. rubric:: Note Works in place on Detection - over-writes previous events. Does not correct for pre-pick.
4.091574
3.811225
1.073559
# We want to download some QuakeML files from the New Zealand GeoNet # network, GeoNet currently doesn't support FDSN event queries, so we # have to work around to download quakeml from their quakeml.geonet site. client = Client(network_code) # We want to download a few events from an earthquake sequence, these are # identified by publicID numbers, given as arguments catalog = Catalog() for publicID in publicIDs: if network_code == 'GEONET': data_stream = client._download( 'http://quakeml.geonet.org.nz/quakeml/1.2/' + publicID) data_stream.seek(0, 0) catalog += read_events(data_stream, format="quakeml") data_stream.close() else: catalog += client.get_events( eventid=publicID, includearrivals=True) # Let's plot the catalog to see what we have if plot: catalog.plot(projection='local', resolution='h') # We don't need all the picks, let's take the information from the # five most used stations - note that this is done to reduce computational # costs. catalog = filter_picks(catalog, top_n_picks=5) # We only want the P picks in this example, but you can use others or all # picks if you want. for event in catalog: event.picks = [pick for pick in event.picks if pick.phase_hint != 'S'] # Now we can generate the templates templates = template_gen.template_gen( method='from_client', catalog=catalog, client_id=network_code, lowcut=2.0, highcut=9.0, samp_rate=20.0, filt_order=4, length=3.0, prepick=0.15, swin='all', process_len=3600, debug=0, plot=plot) # We now have a series of templates! Using ObsPy's Stream.write() method we # can save these to disk for later use. We will do that now for use in the # following tutorials. for i, template in enumerate(templates): template.write('tutorial_template_' + str(i) + '.ms', format='MSEED') # Note that this will warn you about data types. As we don't care # at the moment, whatever obspy chooses is fine. return
def mktemplates(network_code='GEONET', publicIDs=['2016p008122', '2016p008353', '2016p008155', '2016p008194'], plot=True)
Functional wrapper to make templates
6.997546
7.036327
0.994488
# Locate the slowness file information gridfiles = [] stations_out = [] for station in stations: gridfiles += (glob.glob(path + '*.' + phase + '.' + station + '.time.csv')) if glob.glob(path + '*.' + phase + '.' + station + '*.csv'): stations_out += [station] # Read the files allnodes = [] for gridfile in gridfiles: print(' Reading slowness from: ' + gridfile) f = open(gridfile, 'r') grid = csv.reader(f, delimiter=str(' ')) traveltime = [] nodes = [] for row in grid: nodes.append((float(row[0]), float(row[1]), float(row[2]))) traveltime.append(float(row[3])) traveltime = np.array(traveltime) if not phase == phaseout: if phase == 'S': traveltime = traveltime / ps_ratio else: traveltime = traveltime * ps_ratio if lags_switch: lags = traveltime - min(traveltime) else: lags = traveltime if 'alllags' not in locals(): alllags = [lags] else: alllags = np.concatenate((alllags, [lags]), axis=0) allnodes = nodes # each element of allnodes should be the same as the # other one, e.g. for each station the grid must be the # same, hence allnodes=nodes f.close() alllags = np.array(alllags) return stations_out, allnodes, alllags
def _read_tt(path, stations, phase, phaseout='S', ps_ratio=1.68, lags_switch=True)
Read in .csv files of slowness generated from Grid2Time. Converts these data to a useful format here. It should be noted that this can read either P or S travel-time grids, not both at the moment. :type path: str :param path: The path to the .csv Grid2Time outputs :type stations: list :param stations: List of station names to read slowness files for. :type phase: str :param phase: Input phase type. :type phaseout: str :param phaseout: What phase to return the lagtimes in. :type ps_ratio: float :param ps_ratio: p to s ratio for conversion :type lags_switch: bool :param lags_switch: Return lags or raw travel-times, if set to true will return lags. :returns: Stations :rtype: list :returns: List of lists of tuples of node locations :rtype: list :returns: Array of lags. :rtype: :class:`numpy.ndarray` .. note:: **Output:** station[1] refers to nodes[1] and lags[1] nodes[1][1] refers to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude, longitude and depth. .. note:: This function currently needs comma separated grid files in NonLinLoc format. Only certain versions of NonLinLoc write these csv files, however it should be possible to read the binary files directly. If you find you need this capability let us know and we can try and implement it.
3.281991
3.008185
1.09102
resamp_nodes = [] resamp_lags = [] # Cut the volume for i, node in enumerate(nodes): # If the node is within the volume range, keep it if mindepth < float(node[2]) < maxdepth and\ corners.contains_point(node[0:2]): resamp_nodes.append(node) resamp_lags.append([lags[:, i]]) # Reshape the lags print(np.shape(resamp_lags)) resamp_lags = np.reshape(resamp_lags, (len(resamp_lags), len(stations))).T # Resample the nodes - they are sorted in order of size with largest long # then largest lat, then depth. print(' '.join(['Grid now has ', str(len(resamp_nodes)), 'nodes'])) return stations, resamp_nodes, resamp_lags
def _resample_grid(stations, nodes, lags, mindepth, maxdepth, corners)
Resample the lagtime grid to a given volume. For use if the grid from Grid2Time is too large or you want to run a faster, downsampled scan. :type stations: list :param stations: List of station names, in the form where stations[i] refers to nodes[i][:] and lags[i][:] :type nodes: list :param nodes: List of node points where nodes[i] refers to stations[i] and nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in degrees, nodes[:][:][2] is depth in km. :type lags: numpy.ndarray :param lags: Array of arrays where lags[i][:] refers to stations[i]. lags[i][j] should be the delay to the nodes[i][j] for stations[i] in seconds. :type mindepth: float :param mindepth: Upper limit of volume :type maxdepth: float :param maxdepth: Lower limit of volume :type corners: matplotlib.path.Path :param corners: matplotlib Path of the corners for the 2D polygon to cut to in lat and lon. :returns: Stations :rtype: list :returns: List of lists of tuples of node locations :rtype: list :returns: Array of lags. :rtype: :class:`numpy.ndarray` .. note:: **Output:** station[1] refers to nodes[1] and lags[1] nodes[1][1] refers to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude, longitude and depth.
4.566091
4.157295
1.098332
netdif = abs((lags.T - lags.T[0]).sum(axis=1).reshape(1, len(nodes))) \ > threshold for i in range(len(nodes)): _netdif = abs((lags.T - lags.T[i]).sum(axis=1).reshape(1, len(nodes)))\ > threshold netdif = np.concatenate((netdif, _netdif), axis=0) sys.stdout.write("\r" + str(float(i) / len(nodes) * 100) + "% \r") sys.stdout.flush() nodes_out = [nodes[0]] node_indices = [0] print("\n") print(len(nodes)) for i in range(1, len(nodes)): if np.all(netdif[i][node_indices]): node_indices.append(i) nodes_out.append(nodes[i]) lags_out = lags.T[node_indices].T print("Removed " + str(len(nodes) - len(nodes_out)) + " duplicate nodes") return stations, nodes_out, lags_out
def _rm_similarlags(stations, nodes, lags, threshold)
Remove nodes that have a very similar network moveout to another node. This function will, for each node, calculate the difference in lagtime at each station at every node, then sum these for each node to get a cumulative difference in network moveout. This will result in an array of arrays with zeros on the diagonal. :type stations: list :param stations: List of station names, in the form where stations[i] refers to nodes[i][:] and lags[i][:] :type nodes: list :param nodes: List of node points where nodes[i] refers to stations[i] and nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in degrees, nodes[:][:][2] is depth in km. :type lags: numpy.ndarray :param lags: Array of arrays where lags[i][:] refers to stations[i]. lags[i][j] should be the delay to the nodes[i][j] for stations[i] in seconds. :type threshold: float :param threshold: Threshold for removal in seconds :returns: Stations :rtype: list :returns: List of lists of tuples of node locations :rtype: list :returns: Array of lags. :rtype: :class:`numpy.ndarray` .. note:: **Output:** station[1] refers to nodes[1] and lags[1] nodes[1][1] refers to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude, longitude and depth.
2.709805
2.725359
0.994293
cum_net_resp = np.load('tmp' + str(instance) + '/node_' + str(node_lis[0]) + '.npy')[0] os.remove('tmp' + str(instance) + '/node_' + str(node_lis[0]) + '.npy') indices = np.ones(len(cum_net_resp)) * node_lis[0] for i in node_lis[1:]: node_energy = np.load('tmp' + str(instance) + '/node_' + str(i) + '.npy')[0] updated_indices = np.argmax([cum_net_resp, node_energy], axis=0) temp = np.array([cum_net_resp, node_energy]) cum_net_resp = np.array([temp[updated_indices[j]][j] for j in range(len(updated_indices))]) del temp, node_energy updated_indices[updated_indices == 1] = i indices = updated_indices os.remove('tmp' + str(instance) + '/node_' + str(i) + '.npy') return cum_net_resp, indices
def _cum_net_resp(node_lis, instance=0)
Compute the cumulative network response by reading saved energy .npy files. :type node_lis: numpy.ndarray :param node_lis: List of nodes (ints) to read from :type instance: int :param instance: Instance flag for parallel workflows, defaults to 0. :returns: cumulative network response :rtype: numpy.ndarray :returns: node indices for each sample of the cumulative network response. :rtype: list
2.331495
2.385843
0.977221
cum_net_resp = np.nan_to_num(cum_net_resp) # Force no NaNs if np.isnan(cum_net_resp).any(): raise ValueError("Nans present") print('Median of data is: ' + str(np.median(cum_net_resp))) print('RMS of data is: ' + str(np.sqrt(np.mean(np.square(cum_net_resp))))) print('MAD of data is: ' + str(np.median(np.abs(cum_net_resp)))) if thresh_type == 'MAD': thresh = (np.median(np.abs(cum_net_resp)) * threshold) elif thresh_type == 'abs': thresh = threshold elif thresh_type == 'RMS': thresh = _rms(cum_net_resp) * threshold print('Threshold is set to: ' + str(thresh)) print('Max of data is: ' + str(max(cum_net_resp))) peaks = findpeaks.find_peaks2_short(cum_net_resp, thresh, length * samp_rate, debug=0) detections = [] if peaks: for peak in peaks: node = nodes[peak[1]] detections.append( Detection(template_name=str(node[0]) + '_' + str(node[1]) + '_' + str(node[2]), detect_time=peak[1] / samp_rate, no_chans=len(realstations), detect_val=peak[0], threshold=thresh, typeofdet='brightness', chans=realstations, id=str(node[0]) + '_' + str(node[1]) + '_' + str(node[2]) + str(peak[1] / samp_rate), threshold_type=thresh_type, threshold_input=threshold)) else: detections = [] print('I have found ' + str(len(peaks)) + ' possible detections') return detections
def _find_detections(cum_net_resp, nodes, threshold, thresh_type, samp_rate, realstations, length)
Find detections within the cumulative network response. :type cum_net_resp: numpy.ndarray :param cum_net_resp: Array of cumulative network response for nodes :type nodes: list :param nodes: Nodes associated with the source of energy in the \ cum_net_resp :type threshold: float :param threshold: Threshold value :type thresh_type: str :param thresh_type: Either MAD (Median Absolute Deviation) or abs \ (absolute) or RMS (Root Mean Squared) :type samp_rate: float :param samp_rate: Sampling rate in Hz :type realstations: list :param realstations: List of stations used to make the cumulative network response, will be reported in the :class:`eqcorrscan.core.match_filter.Detection` :type length: float :param length: Maximum length of peak to look for in seconds :returns: Detections as :class:`eqcorrscan.core.match_filter.Detection` objects. :rtype: list
2.939744
2.804686
1.048154
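The three threshold types above reduce to simple statistics of the cumulative network response. A stand-alone numpy sketch of how each threshold value is derived (here `_rms` is assumed to be the usual root-mean-square, matching what the function prints):

import numpy as np

cum_net_resp = np.random.randn(10000)  # stand-in for a real network response
threshold = 8.0

mad_thresh = np.median(np.abs(cum_net_resp)) * threshold             # thresh_type='MAD'
abs_thresh = threshold                                               # thresh_type='abs'
rms_thresh = np.sqrt(np.mean(np.square(cum_net_resp))) * threshold   # thresh_type='RMS'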
stream = stream_in.copy() # Copy the data before we remove stations # First check that all channels in stream have data of the same length maxlen = np.max([len(tr.data) for tr in stream]) if maxlen == 0: warnings.warn('template without data') return 0.0, len(stream) if not stations[0] == 'all': for tr in stream: if tr.stats.station not in stations: stream.remove(tr) # Remove stations we don't want to use for tr in stream: if not len(tr.data) == maxlen and not len(tr.data) == 0: warnings.warn(tr.stats.station + '.' + tr.stats.channel + ' is not the same length, padding \n' + 'Length is ' + str(len(tr.data)) + ' samples') pad = np.zeros(maxlen - len(tr.data)) if tr.stats.starttime.hour == 0: tr.data = np.concatenate((pad, tr.data), axis=0) else: tr.data = np.concatenate((tr.data, pad), axis=0) elif len(tr.data) == 0: tr.data = np.zeros(maxlen) # Clip the data to the set length if clip: for tr in stream: tr.trim(tr.stats.starttime + clip[0], tr.stats.starttime + clip[1]) _coherence = 0.0 # Loop through channels and generate a correlation value for each # unique cross-channel pairing for i in range(len(stream)): for j in range(i + 1, len(stream)): _coherence += np.abs(normxcorr2(stream[i].data, stream[j].data))[0][0] _coherence = 2 * _coherence / (len(stream) * (len(stream) - 1)) return _coherence, len(stream)
def coherence(stream_in, stations=['all'], clip=False)
Determine the average network coherence of a given template or detection. You will want your stream to contain only signal as noise will reduce the coherence (assuming it is incoherent random noise). :type stream_in: obspy.core.stream.Stream :param stream_in: The stream of seismic data you want to calculate the \ coherence for. :type stations: list :param stations: List of stations to use for coherence, default is all :type clip: tuple :param clip: Default is to use all the data given (`False`) - \ tuple of start and end in seconds from start of trace :return: tuple of coherence and number of channels used. :rtype: tuple
2.741144
2.703576
1.013896
min_fftlen = int(stream[0][0].data.shape[0] + detector.data[0].shape[0] - Nc) fftlen = scipy.fftpack.next_fast_len(min_fftlen) mplen = stream[0][0].data.shape[0] ulen = detector.data[0].shape[0] num_st_fd = [np.fft.rfft(tr.data, n=fftlen) for tr in stream[0]] denom_st_fd = [np.fft.rfft(np.square(tr.data), n=fftlen) for tr in stream[0]] # Frequency domain of boxcar w = np.fft.rfft(np.ones(detector.data[0].shape[0]), n=fftlen) # This should go into the detector object as in Detex detector_fd = [] for dat_mat in detector.data: detector_fd.append(np.array([np.fft.rfft(col[::-1], n=fftlen) for col in dat_mat.T])) return detector_fd, denom_st_fd, num_st_fd, w, ulen, mplen
def _do_ffts(detector, stream, Nc)
Perform ffts on data, detector and denominator boxcar :type detector: eqcorrscan.core.subspace.Detector :param detector: Detector object for doing detecting :type stream: list of obspy.core.stream.Stream :param stream: List of streams processed according to detector :type Nc: int :param Nc: Number of channels in data. 1 for non-multiplexed :return: list of time-reversed detector(s) in freq domain :rtype: list :return: list of squared data stream(s) in freq domain :rtype: list :return: list of data stream(s) in freq domain :return: detector-length boxcar in freq domain :rtype: numpy.ndarray :return: length of detector :rtype: int :return: length of data :rtype: int
4.001939
3.603515
1.110565
num_cor = np.multiply(det_freq, data_freq) # Numerator convolution den_cor = np.multiply(w, data_freq_sq) # Denominator convolution # Do inverse fft # First and last Nt - 1 samples are invalid; clip them off num_ifft = np.real(np.fft.irfft(num_cor))[:, ulen-1:mplen:Nc] denominator = np.real(np.fft.irfft(den_cor))[ulen-1:mplen:Nc] # Ratio of projected to envelope energy = det_stat across all channels result = np.sum(np.square(num_ifft), axis=0) / denominator return result
def _det_stat_freq(det_freq, data_freq_sq, data_freq, w, Nc, ulen, mplen)
Compute detection statistic in the frequency domain :type det_freq: numpy.ndarray :param det_freq: detector in freq domain :type data_freq_sq: numpy.ndarray :param data_freq_sq: squared data in freq domain :type data_freq: numpy.ndarray :param data_freq: data in freq domain :type w: numpy.ndarray :param w: boxcar in freq domain :type Nc: int :param Nc: number of channels in data stream :type ulen: int :param ulen: length of detector :type mplen: int :param mplen: length of data :return: Array of detection statistics :rtype: numpy.ndarray
5.750855
6.114059
0.940595
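The denominator in the statistic above is a moving sum of squared data obtained by frequency-domain convolution with a boxcar. A self-contained numpy check of that identity (illustration only, not EQcorrscan code):

import numpy as np

x = np.random.randn(256)      # stand-in single-channel data
ulen = 16                     # detector (boxcar) length
fftlen = len(x) + ulen - 1    # long enough to avoid circular wrap-around

den_fd = np.fft.rfft(np.square(x), n=fftlen) * np.fft.rfft(np.ones(ulen), n=fftlen)
moving_energy = np.real(np.fft.irfft(den_fd, n=fftlen))[ulen - 1:len(x)]

direct = np.convolve(np.square(x), np.ones(ulen), mode='valid')  # time-domain equivalent
assert np.allclose(moving_energy, direct)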
stack = stream[0].data for tr in stream[1:]: stack = np.dstack(np.array([stack, tr.data])) multiplex = stack.reshape(stack.size, ) return multiplex
def multi(stream)
Internal multiplexer for multiplex_detect. :type stream: obspy.core.stream.Stream :param stream: Stream to multiplex :return: multiplexed data :rtype: numpy.ndarray .. Note: Requires all channels to be the same length. Maps a standard stream of seismic data to a single trace of multiplexed data as follows: Input: x = [x1, x2, x3, ...] y = [y1, y2, y3, ...] z = [z1, z2, z3, ...] Output: xyz = [x1, y1, z1, x2, y2, z2, x3, y3, z3, ...]
6.276159
5.423364
1.157245
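The channel interleaving described in the note can be reproduced with plain numpy; this is an equivalent illustration of the mapping, not the function itself (which works on obspy traces):

import numpy as np

x = np.array([1, 2, 3])
y = np.array([10, 20, 30])
z = np.array([100, 200, 300])

# Stack channels as columns, then flatten row-by-row to interleave samples
multiplexed = np.vstack([x, y, z]).T.reshape(-1)
print(multiplexed)  # [  1  10 100   2  20 200   3  30 300]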
from multiprocessing import Pool, cpu_count # First check that detector parameters are the same parameters = [] detections = [] for detector in detectors: parameter = (detector.lowcut, detector.highcut, detector.filt_order, detector.sampling_rate, detector.multiplex, detector.stachans) if parameter not in parameters: parameters.append(parameter) for parameter_set in parameters: parameter_detectors = [] for detector in detectors: det_par = (detector.lowcut, detector.highcut, detector.filt_order, detector.sampling_rate, detector.multiplex, detector.stachans) if det_par == parameter_set: parameter_detectors.append(detector) stream, stachans = \ _subspace_process( streams=[stream.copy()], lowcut=parameter_set[0], highcut=parameter_set[1], filt_order=parameter_set[2], sampling_rate=parameter_set[3], multiplex=parameter_set[4], stachans=parameter_set[5], parallel=True, align=False, shift_len=None, reject=False) if not parallel: for detector in parameter_detectors: detections += _detect( detector=detector, st=stream[0], threshold=threshold, trig_int=trig_int, moveout=moveout, min_trig=min_trig, process=False, extract_detections=False, debug=0) else: if num_cores: ncores = num_cores else: ncores = cpu_count() pool = Pool(processes=ncores) results = [pool.apply_async( _detect, args=(detector, stream[0], threshold, trig_int, moveout, min_trig, False, False, 0)) for detector in parameter_detectors] pool.close() try: _detections = [p.get() for p in results] except KeyboardInterrupt as e: # pragma: no cover pool.terminate() raise e pool.join() for d in _detections: if isinstance(d, list): detections += d else: detections.append(d) return detections
def subspace_detect(detectors, stream, threshold, trig_int, moveout=0, min_trig=1, parallel=True, num_cores=None)
Conduct subspace detection with chosen detectors. :type detectors: list :param detectors: list of :class:`eqcorrscan.core.subspace.Detector` to be used for detection. :type stream: obspy.core.stream.Stream :param stream: Stream to detect within. :type threshold: float :param threshold: Threshold between 0 and 1 for detection, see :func:`Detector.detect` :type trig_int: float :param trig_int: Minimum trigger interval in seconds. :type moveout: float :param moveout: Maximum allowable moveout window for non-multiplexed, network detection. See note. :type min_trig: int :param min_trig: Minimum number of stations exceeding threshold for non-multiplexed, network detection. See note in :func:`Detector.detect`. :type parallel: bool :param parallel: Whether to run detectors in parallel in groups. :type num_cores: int :param num_cores: How many cpu cores to use if parallel==True. If set to None (default), will use all available cores. :rtype: list :return: List of :class:`eqcorrscan.core.match_filter.Detection` detections. .. Note:: This will loop through your detectors using their detect method. If the detectors are multiplexed it will run groups of detectors with the same channels at the same time.
2.410128
2.42211
0.995053
self.lowcut = lowcut self.highcut = highcut self.filt_order = filt_order self.sampling_rate = sampling_rate self.name = name self.multiplex = multiplex # Pre-process data p_streams, stachans = _subspace_process( streams=copy.deepcopy(streams), lowcut=lowcut, highcut=highcut, filt_order=filt_order, sampling_rate=sampling_rate, multiplex=multiplex, align=align, shift_len=shift_len, reject=reject, plot=plot, no_missed=no_missed) # Compute the SVD, use the cluster.SVD function u, sigma, v, svd_stachans = svd(stream_list=p_streams, full=True) self.stachans = stachans # self.delays = delays self.u = u self.v = v self.sigma = sigma self.data = copy.deepcopy(u) # Set the data matrix to be full rank U. self.dimension = np.inf return self
def construct(self, streams, lowcut, highcut, filt_order, sampling_rate, multiplex, name, align, shift_len=0, reject=0.3, no_missed=True, plot=False)
Construct a subspace detector from a list of streams, full rank. Subspace detector will be full-rank, further functions can be used \ to select the desired dimensions. :type streams: list :param streams: List of :class:`obspy.core.stream.Stream` to be used to generate the subspace detector. These should be pre-clustered and aligned. :type lowcut: float :param lowcut: Lowcut in Hz, can be None to not apply filter :type highcut: float :param highcut: Highcut in Hz, can be None to not apply filter :type filt_order: int :param filt_order: Number of corners for filter. :type sampling_rate: float :param sampling_rate: Desired sampling rate in Hz :type multiplex: bool :param multiplex: Whether to multiplex the data or not. Data are multiplexed according to the method of Harris, see the multi function for details. :type name: str :param name: Name of the detector, used for book-keeping. :type align: bool :param align: Whether to align the data or not - needs to be done at some point :type shift_len: float :param shift_len: Maximum shift allowed for alignment in seconds. :type reject: float :param reject: Minimum correlation to include traces - only used if align=True. :type no_missed: bool :param no_missed: Reject streams with missed traces, defaults to True. A missing trace from lots of events will reduce the quality of the subspace detector if multiplexed. Only used when multi is set to True. :type plot: bool :param plot: Whether to plot the alignment stage or not. .. note:: The detector will be normalized such that the data, before computing the singular-value decomposition, will have unit energy. e.g. We divide the amplitudes of the data by the L1 norm of the data. .. warning:: EQcorrscan's alignment will attempt to align over the whole data window given. For long (more than 2s) chunks of data this can give poor results and you might be better off using the :func:`eqcorrscan.utils.stacking.align_traces` function externally, focusing on a smaller window of data. To do this you would align the data prior to running construct.
3.489916
3.444882
1.013073
# Take leftmost 'dimension' input basis vectors for i, channel in enumerate(self.u): if self.v[i].shape[1] < dimension: raise IndexError('Channel is max dimension %s' % self.v[i].shape[1]) self.data[i] = channel[:, 0:dimension] self.dimension = dimension return self
def partition(self, dimension)
Partition subspace into desired dimension. :type dimension: int :param dimension: Maximum dimension to use.
7.245292
7.296092
0.993037
if show: return subspace_fc_plot(detector=self, stachans=stachans, size=size, show=show) percent_capture = 0 if np.isinf(self.dimension): return 100 for channel in self.sigma: fc = np.sum(channel[0:self.dimension]) / np.sum(channel) percent_capture += fc else: return 100 * (percent_capture / len(self.sigma))
def energy_capture(self, stachans='all', size=(10, 7), show=False)
Calculate the average percentage energy capture for this subspace. :return: Percentage energy capture :rtype: float
5.242532
4.823985
1.086764
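The per-channel fractional energy capture is just the ratio of retained to total singular values. A stand-alone numpy illustration with made-up singular values:

import numpy as np

sigma = np.array([5.0, 2.0, 1.0, 0.5])  # singular values for one channel
dimension = 2
fc = np.sum(sigma[:dimension]) / np.sum(sigma)
print(round(100 * fc, 1))  # 82.4 percent captured by the first two basis vectors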
return _detect(detector=self, st=st, threshold=threshold, trig_int=trig_int, moveout=moveout, min_trig=min_trig, process=process, extract_detections=extract_detections, debug=debug, cores=cores)
def detect(self, st, threshold, trig_int, moveout=0, min_trig=0, process=True, extract_detections=False, cores=1, debug=0)
Detect within continuous data using the subspace method. :type st: obspy.core.stream.Stream :param st: Un-processed stream to detect within using the subspace detector. :type threshold: float :param threshold: Threshold value for detections between 0-1 :type trig_int: float :param trig_int: Minimum trigger interval in seconds. :type moveout: float :param moveout: Maximum allowable moveout window for non-multiplexed, network detection. See note. :type min_trig: int :param min_trig: Minimum number of stations exceeding threshold for non-multiplexed, network detection. See note. :type process: bool :param process: Whether or not to process the stream according to the parameters defined by the detector. Default is True, which will process the data. :type extract_detections: bool :param extract_detections: Whether to extract waveforms for each detection or not, if True will return detections and streams. :type cores: int :param cores: Number of threads to process data with. :type debug: int :param debug: Debug output level from 0-5. :return: list of :class:`eqcorrscan.core.match_filter.Detection` :rtype: list .. warning:: Subspace is currently in beta, see note in the subspace tutorial for information. .. note:: If running in bulk with detectors that all have the same parameters then you can pre-process the data and set process to False. This will speed up this detect function dramatically. .. warning:: If the detector and stream are multiplexed then they must contain the same channels and multiplexed in the same order. This is handled internally when process=True, but if running in bulk you must take care. .. note:: Non-multiplexed, network detection. When the detector is not multiplexed, but there are multiple channels within the detector, we do not stack the single-channel detection statistics because we do not have a one-size-fits-all solution for computing delays for a subspace detector (if you want to implement one, then please contribute it!). Therefore, these parameters provide a means for declaring a network coincidence trigger using single-channel detection statistics, in a similar fashion to the commonly used network-coincidence trigger with energy detection statistics.
1.907498
2.437174
0.782668
f = h5py.File(filename, "w") # Must store eqcorrscan version number, username would be useful too. data_group = f.create_group(name="data") for i, data in enumerate(self.data): dset = data_group.create_dataset(name="data_" + str(i), shape=data.shape, dtype=data.dtype) dset[...] = data data_group.attrs['length'] = len(self.data) data_group.attrs['name'] = self.name.encode("ascii", "ignore") data_group.attrs['sampling_rate'] = self.sampling_rate data_group.attrs['multiplex'] = self.multiplex data_group.attrs['lowcut'] = self.lowcut data_group.attrs['highcut'] = self.highcut data_group.attrs['filt_order'] = self.filt_order data_group.attrs['dimension'] = self.dimension data_group.attrs['user'] = getpass.getuser() data_group.attrs['eqcorrscan_version'] = str(eqcorrscan.__version__) # Convert station-channel list to something writable ascii_stachans = ['.'.join(stachan).encode("ascii", "ignore") for stachan in self.stachans] stachans = f.create_dataset(name="stachans", shape=(len(ascii_stachans),), dtype='S10') stachans[...] = ascii_stachans u_group = f.create_group("u") for i, u in enumerate(self.u): uset = u_group.create_dataset(name="u_" + str(i), shape=u.shape, dtype=u.dtype) uset[...] = u u_group.attrs['length'] = len(self.u) sigma_group = f.create_group("sigma") for i, sigma in enumerate(self.sigma): sigmaset = sigma_group.create_dataset(name="sigma_" + str(i), shape=sigma.shape, dtype=sigma.dtype) sigmaset[...] = sigma sigma_group.attrs['length'] = len(self.sigma) v_group = f.create_group("v") for i, v in enumerate(self.v): vset = v_group.create_dataset(name="v_" + str(i), shape=v.shape, dtype=v.dtype) vset[...] = v v_group.attrs['length'] = len(self.v) f.flush() f.close() return self
def write(self, filename)
Write detector to a file - uses HDF5 file format. Meta-data are stored alongside numpy data arrays. See h5py.org for \ details of the methods. :type filename: str :param filename: Filename to save the detector to.
2.10069
2.099559
1.000539
f = h5py.File(filename, "r") self.data = [] for i in range(f['data'].attrs['length']): self.data.append(f['data']['data_' + str(i)].value) self.u = [] for i in range(f['u'].attrs['length']): self.u.append(f['u']['u_' + str(i)].value) self.sigma = [] for i in range(f['sigma'].attrs['length']): self.sigma.append(f['sigma']['sigma_' + str(i)].value) self.v = [] for i in range(f['v'].attrs['length']): self.v.append(f['v']['v_' + str(i)].value) self.stachans = [tuple(stachan.decode('ascii').split('.')) for stachan in f['stachans'].value] self.dimension = f['data'].attrs['dimension'] self.filt_order = f['data'].attrs['filt_order'] self.highcut = f['data'].attrs['highcut'] self.lowcut = f['data'].attrs['lowcut'] self.multiplex = bool(f['data'].attrs['multiplex']) self.sampling_rate = f['data'].attrs['sampling_rate'] if isinstance(f['data'].attrs['name'], str): self.name = f['data'].attrs['name'] else: self.name = f['data'].attrs['name'].decode('ascii') return self
def read(self, filename)
Read detector from a file, must be HDF5 format. Reads a Detector object from an HDF5 file, usually created by eqcorrscan. :type filename: str :param filename: Filename to read the detector from.
1.966662
2.056052
0.956523
return subspace_detector_plot(detector=self, stachans=stachans, size=size, show=show)
def plot(self, stachans='all', size=(10, 7), show=True)
Plot the output basis vectors for the detector at the given dimension. Corresponds to the first n horizontal vectors of the V matrix. :type stachans: list :param stachans: List of tuples of (station, channel) to use. Can set to 'all' to use all the station-channel pairs available. If detector is multiplexed, will just plot that. :type size: tuple :param size: Figure size. :type show: bool :param show: Whether or not to show the figure. :returns: Figure :rtype: matplotlib.pyplot.Figure .. Note:: See :func:`eqcorrscan.utils.plotting.subspace_detector_plot` for example.
6.802861
3.848461
1.767683
lines = open(os.path.join(*path), 'r').readlines()[2:] return [s.strip() for s in lines if s.strip() != '']
def export_symbols(*path)
Required for windows systems - functions defined in libutils.def.
3.707255
3.67806
1.007937
R = 6371.009 # Radius of the Earth in km dlat = np.radians(abs(loc1[0] - loc2[0])) dlong = np.radians(abs(loc1[1] - loc2[1])) ddepth = abs(loc1[2] - loc2[2]) mean_lat = np.radians((loc1[0] + loc2[0]) / 2) dist = R * np.sqrt(dlat ** 2 + (np.cos(mean_lat) * dlong) ** 2) dist = np.sqrt(dist ** 2 + ddepth ** 2) return dist
def dist_calc(loc1, loc2)
Function to calculate the distance in km between two points. Uses the flat Earth approximation. Better things are available for this, like `gdal <http://www.gdal.org/>`_. :type loc1: tuple :param loc1: Tuple of lat, lon, depth (in decimal degrees and km) :type loc2: tuple :param loc2: Tuple of lat, lon, depth (in decimal degrees and km) :returns: Distance between points in km. :rtype: float
2.016729
2.142003
0.941516
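A quick usage sketch for the flat-Earth distance above (illustrative coordinates; the printed distance should be roughly 14 km for this separation):

loc1 = (-43.5, 170.3, 4.0)   # lat, lon, depth (km)
loc2 = (-43.6, 170.4, 6.0)
print(dist_calc(loc1, loc2))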
counts = Counter(magnitudes) df = np.zeros(len(counts)) mag_steps = np.zeros(len(counts)) grad = np.zeros(len(counts) - 1) grad_points = grad.copy() for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)): mag_steps[i] = magnitude if i > 0: df[i] = counts[magnitude] + df[i - 1] else: df[i] = counts[magnitude] for i, val in enumerate(df): if i > 0: grad[i - 1] = (val - df[i - 1]) / (mag_steps[i] - mag_steps[i - 1]) grad_points[i - 1] = mag_steps[i] - ((mag_steps[i] - mag_steps[i - 1]) / 2.0) # Need to find the second order derivative curvature = np.zeros(len(grad) - 1) curvature_points = curvature.copy() for i, _grad in enumerate(grad): if i > 0: curvature[i - 1] = (_grad - grad[i - 1]) / (grad_points[i] - grad_points[i - 1]) curvature_points[i - 1] = grad_points[i] - ((grad_points[i] - grad_points[i - 1]) / 2.0) if plotvar: plt.scatter(mag_steps, df, c='k', label='Magnitude function') plt.plot(mag_steps, df, c='k') plt.scatter(grad_points, grad, c='r', label='Gradient') plt.plot(grad_points, grad, c='r') plt.scatter(curvature_points, curvature, c='g', label='Curvature') plt.plot(curvature_points, curvature, c='g') plt.legend() plt.show() return curvature_points[np.argmax(abs(curvature))]
def calc_max_curv(magnitudes, plotvar=False)
Calculate the magnitude of completeness using the maximum curvature method. :type magnitudes: list :param magnitudes: List of magnitudes from which to compute the maximum curvature which will give an estimate of the magnitude of completeness given the assumption of a power-law scaling. :type plotvar: bool :param plotvar: Turn plotting on and off :rtype: float :return: Magnitude at maximum curvature .. Note:: Should be used as a guide, often under-estimates Mc. .. rubric:: Example >>> import numpy as np >>> mags = [] >>> for mag in np.arange(2.5,3, 0.1): ... mags.extend([mag] * int(20000 - 10 * mag)) >>> for mag in np.arange(3,7, 0.1): ... mags.extend([mag] * int(10 ** (7 - 1 * mag))) >>> calc_max_curv(mags, plotvar=False) 3.0
1.83405
1.88606
0.972424
# Note Wood anderson sensitivity is 2080 as per Uhrhammer & Collins 1990 PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j], 'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080} if velocity: PAZ_WA['zeros'] = [0 + 0j, 0 + 0j] # De-trend data trace.detrend('simple') # Simulate Wood Anderson if PAZ: trace.data = seis_sim(trace.data, trace.stats.sampling_rate, paz_remove=PAZ, paz_simulate=PAZ_WA, water_level=water_level, remove_sensitivity=True) elif seedresp: trace.data = seis_sim(trace.data, trace.stats.sampling_rate, paz_remove=None, paz_simulate=PAZ_WA, water_level=water_level, seedresp=seedresp) else: warnings.warn('No response given to remove, will just simulate WA') trace.data = seis_sim(trace.data, trace.stats.sampling_rate, paz_remove=None, paz_simulate=PAZ_WA, water_level=water_level) return trace
def _sim_WA(trace, PAZ, seedresp, water_level, velocity=False)
Remove the instrument response from a trace and simulate a Wood-Anderson. Returns a de-meaned, de-trended, Wood Anderson simulated trace in its place. Works in-place on data and will destroy your original data, copy the trace before giving it to this function! :type trace: obspy.core.trace.Trace :param trace: A standard obspy trace, generally should be given without pre-filtering, if given with pre-filtering for use with amplitude determination for magnitudes you will need to worry about how you cope with the response of this filter yourself. :type PAZ: dict :param PAZ: Dictionary containing lists of poles and zeros, the gain and the sensitivity. If unset will expect seedresp. :type seedresp: dict :param seedresp: Seed response information - if unset will expect PAZ. :type water_level: int :param water_level: Water level for the simulation. :type velocity: bool :param velocity: Whether to return a velocity trace or not - velocity is non-standard for Wood-Anderson instruments, but institutes that use seiscomp3 or Antelope require picks in velocity. :returns: Trace of Wood-Anderson simulated data :rtype: :class:`obspy.core.trace.Trace`
3.462025
3.127048
1.107123
turning_points = [] # A list of tuples of (amplitude, sample) for i in range(1, len(data) - 1): if (data[i] < data[i - 1] and data[i] < data[i + 1]) or\ (data[i] > data[i - 1] and data[i] > data[i + 1]): turning_points.append((data[i], i)) if len(turning_points) >= 2: amplitudes = np.empty([len(turning_points) - 1],) half_periods = np.empty([len(turning_points) - 1],) else: print('Turning points has length: ' + str(len(turning_points)) + ' data have length: ' + str(len(data))) return 0.0, 0.0, 0.0 for i in range(1, len(turning_points)): half_periods[i - 1] = (delta * (turning_points[i][1] - turning_points[i - 1][1])) amplitudes[i - 1] = np.abs(turning_points[i][0] - turning_points[i - 1][0]) amplitude = np.max(amplitudes) period = 2 * half_periods[np.argmax(amplitudes)] return amplitude, period, delta * turning_points[np.argmax(amplitudes)][1]
def _max_p2t(data, delta)
Finds the maximum peak-to-trough amplitude and period. Originally designed to be used to calculate magnitudes (by \ taking half of the peak-to-trough amplitude as the peak amplitude). :type data: numpy.ndarray :param data: waveform trace to find the peak-to-trough in. :type delta: float :param delta: Sampling interval in seconds :returns: tuple of (amplitude, period, time) with amplitude in the same \ scale as given in the input data, and period in seconds, and time in \ seconds from the start of the data window. :rtype: tuple
2.101549
2.031645
1.034407
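A short sanity check of the peak-to-trough measurement on a synthetic sine wave (1 Hz, sampled at 100 Hz), which should give an amplitude of about 2 and a period of about 1 s:

import numpy as np

data = np.sin(2 * np.pi * 1.0 * np.arange(0, 2, 0.01))
amplitude, period, time_of_max = _max_p2t(data, delta=0.01)
print(amplitude, period, time_of_max)  # approximately 2.0, 1.0, 0.25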
with open(gsefile, 'r') as f: # First line should start with CAL2 header = f.readline() if not header[0:4] == 'CAL2': raise IOError('Unknown format for GSE file, only coded for CAL2') station = header.split()[1] channel = header.split()[2] sensor = header.split()[3] date = dt.datetime.strptime(header.split()[7], '%Y/%m/%d') header = f.readline() if not header[0:4] == 'PAZ2': raise IOError('Unknown format for GSE file, only coded for PAZ2') gain = float(header.split()[3]) # Measured in nm/counts kpoles = int(header.split()[4]) kzeros = int(header.split()[5]) poles = [] for i in range(kpoles): pole = f.readline() poles.append(complex(float(pole.split()[0]), float(pole.split()[1]))) zeros = [] for i in range(kzeros): zero = f.readline() zeros.append(complex(float(zero.split()[0]), float(zero.split()[1]))) # Have Poles and Zeros, but need Gain and Sensitivity # Gain should be in the DIG2 line: for line in f: if line[0:4] == 'DIG2': sensitivity = float(line.split()[2]) # measured in counts/muVolt PAZ = {'poles': poles, 'zeros': zeros, 'gain': gain, 'sensitivity': sensitivity} return PAZ, date, station, channel, sensor
def _GSE2_PAZ_read(gsefile)
Read the instrument response information from a GSE Poles and Zeros file. Formatted for files generated by the SEISAN program RESP. Format must be CAL2, not coded for any other format at the moment, contact the authors to add others in. :type gsefile: string :param gsefile: Path to GSE file :returns: Dictionary of poles, zeros, gain and sensitivity :rtype: dict
3.002113
2.791595
1.075411
possible_respfiles = glob.glob(directory + os.path.sep + 'RESP.' + network + '.' + station + '.*.' + channel) # GeoNet RESP naming possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' + network + '.' + channel + '.' + station) # RDseed RESP naming possible_respfiles += glob.glob(directory + os.path.sep + 'RESP.' + station + '.' + network) # WIZARD resp naming # GSE format, station needs to be 5 characters padded with _, channel is 4 # characters padded with _ station = str(station) channel = str(channel) possible_respfiles += glob.glob(directory + os.path.sep + station.ljust(5, str('_')) + channel[0:len(channel) - 1]. ljust(3, str('_')) + channel[-1] + '.*_GSE') PAZ = [] seedresp = [] for respfile in possible_respfiles: print('Reading response from: ' + respfile) if respfile.split(os.path.sep)[-1][0:4] == 'RESP': # Read from a resp file seedresp = {'filename': respfile, 'date': UTCDateTime(time), 'units': 'DIS', 'network': network, 'station': station, 'channel': channel, 'location': '*'} try: # Attempt to evaluate the response for this information, if not # then this is not the correct response info! freq_resp, freqs = evalresp( delta, 100, seedresp['filename'], seedresp['date'], units=seedresp['units'], freq=True, network=seedresp['network'], station=seedresp['station'], channel=seedresp['channel']) except: print('Issues with RESP file') seedresp = [] continue elif respfile[-3:] == 'GSE': PAZ, pazdate, pazstation, pazchannel, pazsensor =\ _GSE2_PAZ_read(respfile) # check that the date is good! if pazdate >= time and pazchannel != channel and\ pazstation != station: print('Issue with GSE file') print('date: ' + str(pazdate) + ' channel: ' + pazchannel + ' station: ' + pazstation) PAZ = [] else: continue # Check that PAZ are for the correct station, channel and date if PAZ or seedresp: break if PAZ: return PAZ elif seedresp: return seedresp
def _find_resp(station, channel, network, time, delta, directory)
Helper function to find the response information. Works for a given station and channel at a given time and returns a dictionary of poles and zeros, gain and sensitivity. :type station: str :param station: Station name (as in the response files) :type channel: str :param channel: Channel name (as in the response files) :type network: str :param network: Network to scan for, can be a wildcard :type time: datetime.datetime :param time: Date-time to look for response information :type delta: float :param delta: Sample interval in seconds :type directory: str :param directory: Directory to scan for response information :returns: dictionary of response information :rtype: dict
4.232015
4.322282
0.979116
a, b = itertools.tee(iterable) next(b, None) if sys.version_info.major == 2: return itertools.izip(a, b) else: return zip(a, b)
def _pairwise(iterable)
Wrapper on itertools for SVD_magnitude.
2.112878
2.212131
0.955132
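A one-line check of the pairwise helper (the standard itertools recipe):

print(list(_pairwise([1, 2, 3, 4])))  # [(1, 2), (2, 3), (3, 4)]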
print('Deprecated, use svd_moments instead') return svd_moments(u=U, s=s, v=V, stachans=stachans, event_list=event_list, n_svs=n_SVs)
def SVD_moments(U, s, V, stachans, event_list, n_SVs=4)
Deprecated.
2.616929
2.244124
1.166125
cat_out = catalog.copy() if mindepth is not None: for event in list(cat_out): try: origin = _get_origin(event) except IndexError: continue if origin.depth < mindepth * 1000: cat_out.events.remove(event) if maxdepth is not None: for event in list(cat_out): try: origin = _get_origin(event) except IndexError: continue if origin.depth > maxdepth * 1000: cat_out.events.remove(event) for event in list(cat_out): try: origin = _get_origin(event) except IndexError: continue if not corners.contains_point((origin.latitude, origin.longitude)): cat_out.events.remove(event) return cat_out
def spatial_clip(catalog, corners, mindepth=None, maxdepth=None)
Clip the catalog to a spatial box, can be irregular. Can only be irregular in 2D, depth must be between bounds. :type catalog: :class:`obspy.core.catalog.Catalog` :param catalog: Catalog to clip. :type corners: :class:`matplotlib.path.Path` :param corners: Corners to clip the catalog to :type mindepth: float :param mindepth: Minimum depth for earthquakes in km. :type maxdepth: float :param maxdepth: Maximum depth for earthquakes in km. .. Note:: Corners is expected to be a :class:`matplotlib.path.Path` in the form of tuples of (lat, lon) in decimal degrees.
1.893922
1.923494
0.984626
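A usage sketch, assuming `catalog` is an existing obspy Catalog; the corners polygon is a matplotlib Path of (lat, lon) vertices as described in the note:

from matplotlib.path import Path

corners = Path([(-43.0, 170.0), (-43.0, 171.0),
                (-44.0, 171.5), (-44.5, 170.5)])  # decimal degrees
clipped = spatial_clip(catalog, corners=corners, mindepth=0.0, maxdepth=30.0)
print(len(clipped), 'events retained of', len(catalog))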
if event.preferred_origin() is not None: origin = event.preferred_origin() elif len(event.origins) > 0: origin = event.origins[0] else: raise IndexError('No origin set, cannot constrain') return origin
def _get_origin(event)
Get the origin of an event. :type event: :class:`obspy.core.event.Event` :param event: Event to get the origin of. :return: :class:`obspy.core.event.Origin`
3.614987
3.8118
0.948367
try: import ConfigParser except ImportError: import configparser as ConfigParser import ast f = open(infile, 'r') print('Reading parameters with the following header:') for line in f: if line[0] == '#': print(line.rstrip('\n').lstrip('\n')) f.close() config = ConfigParser.ConfigParser() config.read(infile) # Slightly tricky list reading template_names = list(ast.literal_eval(config.get("eqcorrscan_pars", "template_names"))) parameters = \ EQcorrscanParameters(template_names=template_names, lowcut=config.get("eqcorrscan_pars", "lowcut"), highcut=config.get("eqcorrscan_pars", "highcut"), filt_order=config.get("eqcorrscan_pars", "filt_order"), samp_rate=config.get("eqcorrscan_pars", "samp_rate"), debug=config.get("eqcorrscan_pars", "debug"), startdate=config.get("eqcorrscan_pars", "startdate"), enddate=config.get("eqcorrscan_pars", "enddate"), archive=config.get("eqcorrscan_pars", "archive"), arc_type=config.get("eqcorrscan_pars", "arc_type"), cores=config.get("eqcorrscan_pars", "cores"), plotvar=config.getboolean("eqcorrscan_pars", "plotvar"), plotdir=config.get("eqcorrscan_pars", "plotdir"), plot_format=config.get("eqcorrscan_pars", "plot_format"), tempdir=ast.literal_eval(config. get("eqcorrscan_pars", "tempdir")), threshold=config.get("eqcorrscan_pars", "threshold"), threshold_type=config.get("eqcorrscan_pars", "threshold_type"), trigger_interval=config.get("eqcorrscan_pars", "trigger_interval") ) return parameters
def read_parameters(infile='../parameters/EQcorrscan_parameters.txt')
Read the default parameters from file. :type infile: str :param infile: Full path to parameter file. :returns: parameters read from file. :rtype: :class:`eqcorrscan.utils.parameters.EQcorrscanParameters`
2.303109
2.300641
1.001073
outpath = os.sep.join(outfile.split(os.sep)[0:-1]) if len(outpath) > 0 and not os.path.isdir(outpath): msg = ' '.join([os.path.join(outfile.split(os.sep)[0:-1]), 'does not exist, check path.']) raise IOError(msg) # Make sure that the user wants to overwrite the old parameters if os.path.isfile(outfile) and not overwrite: responding = True while responding: print(' '.join([outfile, 'exists. Overwrite? [y/N]'])) option = raw_input() if option.upper() == 'N': raise IOError('File exists, will not overwrite') elif option.upper() == 'Y': responding = False else: print('Must respond with y or n') f = open(outfile, 'w') # Write creation info. header = ' '.join(['# User:', getpass.getuser(), '\n# Creation date:', str(UTCDateTime()), '\n# EQcorrscan version:', str(eqcorrscan.__version__), '\n\n\n']) f.write(header) # Write parameter info in a user readable, and parsable format. parameters = self.__str__().split('\n')[1:] f.write('[eqcorrscan_pars]\n') for parameter in parameters: f.write(parameter.lstrip() + '\n') f.close() print('Written parameter file: ' + outfile)
def write(self, outfile='../parameters/EQcorrscan_parameters.txt', overwrite=False)
Function to write the parameters to a file - user readable. :type outfile: str :param outfile: Full path to filename to store parameters in. :type overwrite: bool :param overwrite: Whether to overwrite the old file or not.
3.190906
3.238972
0.98516
if len(np.nonzero(tr.data)[0]) < 0.5 * len(tr.data): qual = False else: qual = True return qual
def _check_daylong(tr)
Check the data quality of the daylong file. Check to see that the day isn't just zeros, with large steps, if it is then the resampling will hate it. :type tr: obspy.core.trace.Trace :param tr: Trace to check if the data are daylong. :return quality (simply good or bad) :rtype: bool .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.pre_processing import _check_daylong >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> st = read(TEST_PATH + '/WAV/TEST_/' + ... '2013-09-01-0410-35.DFDPC_024_00') >>> _check_daylong(st[0]) True
5.027489
5.027179
1.000062
start_in, end_in = (tr.stats.starttime, tr.stats.endtime) for gap in gaps: stream = Stream() if gap['starttime'] > tr.stats.starttime: stream += tr.slice(tr.stats.starttime, gap['starttime']).copy() if gap['endtime'] < tr.stats.endtime: # Note this can happen when gaps are calculated for a trace that # is longer than `length`, e.g. gaps are calculated pre-trim. stream += tr.slice(gap['endtime'], tr.stats.endtime).copy() tr = stream.merge()[0] if fill_gaps: tr = tr.split() tr = tr.detrend() tr = tr.merge(fill_value=0)[0] # Need to check length - if a gap happened overlapping the end or start # of the trace this will be lost. if tr.stats.starttime != start_in: # pad with zeros tr.data = np.concatenate( [np.zeros(int(tr.stats.starttime - start_in)), tr.data]) tr.stats.starttime = start_in if tr.stats.endtime != end_in: tr.data = np.concatenate( [tr.data, np.zeros(int(end_in - tr.stats.endtime))]) return tr
def _zero_pad_gaps(tr, gaps, fill_gaps=True)
Replace padded parts of trace with zeros. Will cut around gaps, detrend, then pad the gaps with zeros. :type tr: :class:`obspy.core.stream.Trace` :param tr: A trace that has had the gaps padded :param gaps: List of dict of start-time and end-time as UTCDateTime objects :type gaps: list :return: :class:`obspy.core.stream.Trace`
3.107205
3.11458
0.997632
tr = tr.split() gaps = tr.get_gaps() tr = tr.detrend().merge(fill_value=0)[0] gaps = [{'starttime': gap[4], 'endtime': gap[5]} for gap in gaps] return gaps, tr
def _fill_gaps(tr)
Interpolate through gaps and work-out where gaps are. :param tr: Gappy trace (e.g. tr.data is np.ma.MaskedArray) :type tr: `obspy.core.stream.Trace` :return: gaps, trace, where gaps is a list of dict
6.360559
4.801918
1.324587
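A round-trip sketch tying the two gap helpers together, assuming obspy is installed and the two functions above are importable from the same module; the trace construction is only an illustration:

import numpy as np
from obspy import Trace, Stream

tr1 = Trace(np.ones(100))
tr2 = Trace(np.ones(100))
tr2.stats.starttime += 150             # leave a 50 s hole (1 Hz default sampling)
gappy = Stream([tr1, tr2]).merge()[0]  # masked array with one gap

gaps, filled = _fill_gaps(gappy)       # gaps as a list of dicts, gap filled with zeros
padded = _zero_pad_gaps(filled, gaps)  # cut around the gaps, detrend, zero-pad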
if number <= 1: return False if number == 2: return True # Fermat primality test: draw a few random bases a and test a^(n-1) == 1 mod n for _ in range(3): random_number = random.randint(2, number - 1) if pow(random_number, number - 1, number) != 1: return False return True
def is_prime(number)
Function to test primality of a number. Function lifted from online resource: http://www.codeproject.com/Articles/691200/Primality-test-algorithms-Prime-test-The-fastest-w This function is distributed under a separate licence: This article, along with any associated source code and files, is \ licensed under The Code Project Open License (CPOL) :type number: int :param number: Integer to test for primality :returns: bool >>> is_prime(4) False >>> is_prime(3) True
5.655096
6.203468
0.911602
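A short, deterministic check of the documented behaviour (the small-number guard above makes the first call valid; for larger composites the Fermat test is probabilistic, so rare false positives are possible):

print(is_prime(2), is_prime(3), is_prime(4))  # True True False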
peaks = [] if not parallel: for sub_arr, arr_thresh in zip(arr, thresh): peaks.append(find_peaks2_short( arr=sub_arr, thresh=arr_thresh, trig_int=trig_int, debug=debug, starttime=starttime, samp_rate=samp_rate, full_peaks=full_peaks)) else: if cores is None: cores = arr.shape[0] with pool_boy(Pool=Pool, traces=cores) as pool: params = ((sub_arr, arr_thresh, trig_int, debug, starttime, samp_rate, full_peaks) for sub_arr, arr_thresh in zip(arr, thresh)) results = [pool.apply_async(find_peaks2_short, param) for param in params] peaks = [res.get() for res in results] return peaks
def multi_find_peaks(arr, thresh, trig_int, debug=0, starttime=False, samp_rate=1.0, parallel=True, full_peaks=False, cores=None)
Wrapper for find-peaks for multiple arrays. :type arr: numpy.ndarray :param arr: 2-D numpy array is required :type thresh: list :param thresh: The threshold below which will be considered noise and peaks will not be found in. One threshold per array. :type trig_int: int :param trig_int: The minimum difference in samples between triggers, if multiple peaks within this window this code will find the highest. :type debug: int :param debug: Optional, debug level 0-5 :type starttime: obspy.core.utcdatetime.UTCDateTime :param starttime: Starttime for plotting, only used if debug > 2. :type samp_rate: float :param samp_rate: Sampling rate in Hz, only used for plotting if debug > 2. :type parallel: bool :param parallel: Whether to compute in parallel or not - will use multiprocessing :type full_peaks: bool :param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short` :type cores: int :param cores: Maximum number of processes to spin up for parallel peak-finding :returns: List of list of tuples of (peak, index) in same order as input arrays
2.708576
2.781443
0.973802
utilslib = _load_cdll('libutils') length = np.int32(len(peaks)) utilslib.find_peaks.argtypes = [ np.ctypeslib.ndpointer(dtype=np.float32, shape=(length,), flags=native_str('C_CONTIGUOUS')), np.ctypeslib.ndpointer(dtype=np.float32, shape=(length,), flags=native_str('C_CONTIGUOUS')), ctypes.c_int, ctypes.c_float, ctypes.c_float, np.ctypeslib.ndpointer(dtype=np.uint32, shape=(length,), flags=native_str('C_CONTIGUOUS'))] utilslib.find_peaks.restype = ctypes.c_int peaks_sort = sorted(zip(peaks, index), key=lambda amplitude: abs(amplitude[0]), reverse=True) arr, inds = zip(*peaks_sort) arr = np.ascontiguousarray(arr, dtype=np.float32) inds = np.array(inds, dtype=np.float32) / trig_int inds = np.ascontiguousarray(inds, dtype=np.float32) out = np.zeros(len(arr), dtype=np.uint32) ret = utilslib.find_peaks( arr, inds, length, 0, np.float32(1), out) if ret != 0: raise MemoryError("Issue with c-routine, returned %i" % ret) peaks_out = list(compress(peaks_sort, out)) return peaks_out
def decluster(peaks, index, trig_int)
Decluster peaks based on an enforced minimum separation. :type peaks: np.array :param peaks: array of peak values :type index: np.ndarray :param index: locations of peaks :type trig_int: int :param trig_int: Minimum trigger interval in samples :return: list of tuples of (value, sample)
2.632705
2.699325
0.97532
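A hedged usage sketch for decluster, assuming EQcorrscan's compiled C library (libutils) is available; the peak values and indices below are made up.

import numpy as np

peaks = np.array([5.0, -7.2, 4.9, 6.1])
index = np.array([10, 50, 400, 430])
kept = decluster(peaks=peaks, index=index, trig_int=100)
# Peaks within trig_int samples of a larger (absolute) peak are declustered,
# so (5.0, 10) and (4.9, 400) would be expected to drop out here.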
triggers = [] for stachan, _peaks in zip(stachans, peaks): for peak in _peaks: trigger = (peak[1], peak[0], '.'.join(stachan)) triggers.append(trigger) coincidence_triggers = [] for i, master in enumerate(triggers): slaves = triggers[i + 1:] coincidence = 1 trig_time = master[0] trig_val = master[1] for slave in slaves: if abs(slave[0] - master[0]) <= (moveout * samp_rate) and \ slave[2] != master[2]: coincidence += 1 if slave[0] < master[0]: trig_time = slave[0] trig_val += slave[1] if coincidence >= min_trig: coincidence_triggers.append((trig_val / coincidence, trig_time)) # Sort by trigger-value, largest to smallest - remove duplicate detections if coincidence_triggers: coincidence_triggers.sort(key=lambda tup: tup[0], reverse=True) output = [coincidence_triggers[0]] for coincidence_trigger in coincidence_triggers[1:]: add = True for peak in output: # If the event occurs within the trig_int time then do not add # it, and break out of the inner loop. if abs(coincidence_trigger[1] - peak[1]) < (trig_int * samp_rate): add = False break if add: output.append((coincidence_trigger[0], coincidence_trigger[1])) output.sort(key=lambda tup: tup[1]) return output else: return []
def coin_trig(peaks, stachans, samp_rate, moveout, min_trig, trig_int)
Find network coincidence triggers within peaks of detection statistics. Useful for finding network detections from sets of detections on individual stations. :type peaks: list :param peaks: List of lists of tuples of (peak, index) for each \ station-channel. Index should be in samples. :type stachans: list :param stachans: List of tuples of (station, channel) in the order of \ peaks. :type samp_rate: float :param samp_rate: Sampling rate in Hz :type moveout: float :param moveout: Allowable network moveout in seconds. :type min_trig: int :param min_trig: Minimum station-channels required to declare a trigger. :type trig_int: float :param trig_int: Minimum allowable time between network triggers in seconds. :return: List of tuples of (peak, index), for the earliest detected station. :rtype: list >>> peaks = [[(0.5, 100), (0.3, 800)], [(0.4, 120), (0.7, 850)]] >>> triggers = coin_trig(peaks, [('a', 'Z'), ('b', 'Z')], 10, 3, 2, 1) >>> print(triggers) [(0.45, 100)]
2.688596
2.732068
0.984088
title = kwargs.get("title") or None
show = kwargs.get("show") or False
save = kwargs.get("save") or False
savefile = kwargs.get("savefile") or "EQcorrscan_figure.png"
return_fig = kwargs.get("return_figure") or False
if title:
    fig.suptitle(title)
if show:
    fig.show()
if save:
    fig.savefig(savefile)
    print("Saved figure to {0}".format(savefile))
if return_fig:
    return fig
return None
def _finalise_figure(fig, **kwargs)
Internal function to wrap up a figure. Possible arguments: :type title: str :type show: bool :type save: bool :type savefile: str :type return_figure: bool
3.02811
3.310484
0.914703
trout = tr.copy() # Don't do it inplace on data x = np.arange(len(tr.data)) y = tr.data chunksize = int(round(tr.stats.sampling_rate / samp_rate)) # Wrap the array into a 2D array of chunks, truncating the last chunk if # chunksize isn't an even divisor of the total size. # (This part won't use _any_ additional memory) numchunks = int(y.size // chunksize) ychunks = y[:chunksize * numchunks].reshape((-1, chunksize)) xchunks = x[:chunksize * numchunks].reshape((-1, chunksize)) # Calculate the max, min, and means of chunksize-element chunks... if state == 'Max': trout.data = ychunks.max(axis=1) elif state == 'Min': trout.data = ychunks.min(axis=1) elif state == 'Mean': trout.data = ychunks.mean(axis=1) elif state == 'Maxabs': max_env = ychunks.max(axis=1) min_env = ychunks.min(axis=1) indeces = np.argmax(np.vstack([np.abs(max_env), np.abs(min_env)]), axis=0) stack = np.vstack([max_env, min_env]).T trout.data = np.array([stack[i][indeces[i]] for i in range(len(stack))]) xcenters = xchunks.mean(axis=1) trout.stats.starttime = tr.stats.starttime + xcenters[0] /\ tr.stats.sampling_rate trout.stats.sampling_rate = samp_rate return trout
def chunk_data(tr, samp_rate, state='mean')
Downsample data for plotting. Computes a single value (max, min, mean or largest-absolute) for each chunk of data; useful for plotting waveforms or cccsums from large datasets that would otherwise be too complex to render. :type tr: obspy.core.trace.Trace :param tr: Trace to be chunked :type samp_rate: float :param samp_rate: Desired sampling rate in Hz :type state: str :param state: Either 'Min', 'Max', 'Mean' or 'Maxabs' (note the capitalisation) to return one of these for each chunk. Maxabs will return the largest (positive or negative) value in that chunk. :returns: :class:`obspy.core.trace.Trace`
3.324647
3.022612
1.099925
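A quick usage sketch for chunk_data using obspy's bundled example data; note that the state strings in the body are capitalised ('Max', 'Min', 'Mean', 'Maxabs'), so the lower-case default in the signature will leave the data unchanged.

from obspy import read

tr = read()[0]                              # 100 Hz example trace
small = chunk_data(tr, samp_rate=1, state='Maxabs')
print(small.stats.sampling_rate)            # 1.0 - one extreme value per second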
import matplotlib.pyplot as plt if cc is None or shift is None: if not isinstance(cc_vec, np.ndarray): print('Given cc: %s and shift: %s' % (cc, shift)) raise IOError('Must provide either cc_vec, or cc and shift') shift = np.abs(cc_vec).argmax() cc = cc_vec[shift] x = np.arange(len(image)) plt.plot(x, image / abs(image).max(), 'k', lw=1.3, label='Image') x = np.arange(len(template)) + shift plt.plot(x, template / abs(template).max(), 'r', lw=1.1, label='Template') plt.title('Shift=%s, Correlation=%s' % (shift, cc)) fig = plt.gcf() fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def xcorr_plot(template, image, shift=None, cc=None, cc_vec=None, **kwargs)
Plot a template overlying an image aligned by correlation. :type template: numpy.ndarray :param template: Short template image :type image: numpy.ndarray :param image: Long master image :type shift: int :param shift: Shift to apply to template relative to image, in samples :type cc: float :param cc: Cross-correlation at shift :type cc_vec: numpy.ndarray :param cc_vec: Cross-correlation vector. :type save: bool :param save: Whether to save the plot or not. :type savefile: str :param savefile: File name to save to :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.plotting import xcorr_plot >>> from eqcorrscan.utils.stacking import align_traces >>> st = read().detrend('simple').filter('bandpass', freqmin=2, freqmax=15) >>> shifts, ccs = align_traces([st[0], st[1]], 40) >>> shift = shifts[1] * st[1].stats.sampling_rate >>> cc = ccs[1] >>> xcorr_plot(template=st[1].data, image=st[0].data, shift=shift, ... cc=cc) # doctest: +SKIP .. image:: ../../plots/xcorr_plot.png
2.851632
3.109034
0.917208
import matplotlib.pyplot as plt if len(cccsum) != len(trace.data): print('cccsum is: ' + str(len(cccsum)) + ' trace is: ' + str(len(trace.data))) msg = ' '.join(['cccsum and trace must have the', 'same number of data points']) raise ValueError(msg) df = trace.stats.sampling_rate npts = trace.stats.npts t = np.arange(npts, dtype=np.float32) / (df * 3600) # Generate the subplot for the seismic data ax1 = plt.subplot2grid((2, 5), (0, 0), colspan=4) ax1.plot(t, trace.data, 'k') ax1.axis('tight') ax1.set_ylim([-15 * np.mean(np.abs(trace.data)), 15 * np.mean(np.abs(trace.data))]) # Generate the subplot for the correlation sum data ax2 = plt.subplot2grid((2, 5), (1, 0), colspan=4, sharex=ax1) # Plot the threshold values ax2.plot([min(t), max(t)], [threshold, threshold], color='r', lw=1, label="Threshold") ax2.plot([min(t), max(t)], [-threshold, -threshold], color='r', lw=1) ax2.plot(t, cccsum, 'k') ax2.axis('tight') ax2.set_ylim([-1.7 * threshold, 1.7 * threshold]) ax2.set_xlabel("Time after %s [hr]" % trace.stats.starttime.isoformat()) # ax2.legend() # Generate a small subplot for the histogram of the cccsum data ax3 = plt.subplot2grid((2, 5), (1, 4), sharey=ax2) ax3.hist(cccsum_hist, 200, normed=1, histtype='stepfilled', orientation='horizontal', color='black') ax3.set_ylim([-5, 5]) fig = plt.gcf() fig.suptitle(trace.id) fig.canvas.draw() fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def triple_plot(cccsum, cccsum_hist, trace, threshold, **kwargs)
Plot a seismogram, correlogram and histogram. :type cccsum: numpy.ndarray :param cccsum: Array of the cross-channel cross-correlation sum :type cccsum_hist: numpy.ndarray :param cccsum_hist: cccsum for histogram plotting, can be the same as \ cccsum but included if cccsum is just an envelope. :type trace: obspy.core.trace.Trace :param trace: A sample trace from the same time as cccsum :type threshold: float :param threshold: Detection threshold within cccsum :type save: bool :param save: If True will save and not plot to screen, vice-versa if False :type savefile: str :param savefile: Path to save figure to, only required if save=True :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.core.match_filter import normxcorr2 >>> from eqcorrscan.utils.plotting import triple_plot >>> st = read() >>> template = st[0].copy().trim(st[0].stats.starttime + 8, ... st[0].stats.starttime + 12) >>> tr = st[0] >>> ccc = normxcorr2(template=template.data, image=tr.data) >>> tr.data = tr.data[0:len(ccc[0])] >>> triple_plot(cccsum=ccc[0], cccsum_hist=ccc[0], trace=tr, ... threshold=0.8) # doctest: +SKIP .. image:: ../../plots/triple_plot.png
2.509887
2.53681
0.989387
import matplotlib.pyplot as plt npts = len(data) t = np.arange(npts, dtype=np.float32) / (samp_rate * 3600) fig = plt.figure() ax1 = fig.add_subplot(111) ax1.plot(t, data, 'k') ax1.scatter(peaks[0][1] / (samp_rate * 3600), abs(peaks[0][0]), color='r', label='Peaks') for peak in peaks: ax1.scatter(peak[1] / (samp_rate * 3600), abs(peak[0]), color='r') ax1.legend() ax1.set_xlabel("Time after %s [hr]" % starttime.isoformat()) ax1.axis('tight') fig.suptitle('Peaks') fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def peaks_plot(data, starttime, samp_rate, peaks=[(0, 0)], **kwargs)
Plot peaks to check that the peak finding routine is running correctly. Used in debugging for the EQcorrscan module. :type data: numpy.array :param data: Numpy array of the data within which peaks have been found :type starttime: obspy.core.utcdatetime.UTCDateTime :param starttime: Start time for the data :type samp_rate: float :param samp_rate: Sampling rate of data in Hz :type peaks: list :param peaks: List of tuples of peak locations and amplitudes (loc, amp) :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> import numpy as np >>> from eqcorrscan.utils import findpeaks >>> from eqcorrscan.utils.plotting import peaks_plot >>> from obspy import UTCDateTime >>> data = np.random.randn(200) >>> data[30] = 100 >>> data[60] = 40 >>> threshold = 10 >>> peaks = findpeaks.find_peaks2_short(data, threshold, 3) >>> peaks_plot(data=data, starttime=UTCDateTime("2008001"), ... samp_rate=10, peaks=peaks) # doctest: +SKIP .. plot:: import matplotlib.pyplot as plt import numpy as np from eqcorrscan.utils import findpeaks from eqcorrscan.utils.plotting import peaks_plot from obspy import UTCDateTime data = np.random.randn(200) data[30]=100 data[60]=40 threshold = 10 peaks = findpeaks.find_peaks2_short(data, threshold, 3) peaks_plot(data=data, starttime=UTCDateTime("2008001"), samp_rate=10, peaks=peaks)
2.434054
2.475772
0.983149
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 import matplotlib.pyplot as plt lats = [] longs = [] depths = [] for node in nodes: lats.append(float(node[0])) longs.append(float(node[1])) depths.append(float(node[2])) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(lats, longs, depths) ax.set_ylabel("Latitude (deg)") ax.set_xlabel("Longitude (deg)") ax.set_zlabel("Depth(km)") ax.get_xaxis().get_major_formatter().set_scientific(False) ax.get_yaxis().get_major_formatter().set_scientific(False) fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def threeD_gridplot(nodes, **kwargs)
Plot a series of grid points in 3D. :type nodes: list :param nodes: List of tuples of the form (lat, long, depth) :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from eqcorrscan.utils.plotting import threeD_gridplot >>> nodes = [(-43.5, 170.4, 4), (-43.3, 170.8, 12), (-43.4, 170.3, 8)] >>> threeD_gridplot(nodes=nodes) # doctest: +SKIP .. plot:: from eqcorrscan.utils.plotting import threeD_gridplot nodes = [(-43.5, 170.4, 4), (-43.3, 170.8, 12), (-43.4, 170.3, 8)] threeD_gridplot(nodes=nodes)
1.828829
1.809131
1.010888
import matplotlib.pyplot as plt from eqcorrscan.core.match_filter import normxcorr2 n_axes = len(traces) if stack in ['linstack', 'PWS']: n_axes += 1 fig, axes = plt.subplots(n_axes, 1, sharex=True, figsize=size) if len(traces) > 1: axes = axes.ravel() traces = [(trace, trace.stats.starttime.datetime) for trace in traces] traces.sort(key=lambda tup: tup[1]) traces = [trace[0] for trace in traces] # Plot the traces for i, tr in enumerate(traces): y = tr.data x = np.arange(len(y)) x = x / tr.stats.sampling_rate # convert to seconds if not stack: ind = i else: ind = i + 1 axes[ind].plot(x, y, 'k', linewidth=1.1) axes[ind].yaxis.set_ticks([]) traces = [Stream(trace) for trace in traces] if stack == 'PWS': stacked = PWS_stack(traces) elif stack == 'linstack': stacked = linstack(traces) if stack in ['linstack', 'PWS']: tr = stacked[0] y = tr.data x = np.arange(len(y)) x = x / tr.stats.sampling_rate axes[0].plot(x, y, 'r', linewidth=2.0) axes[0].set_ylabel('Stack', rotation=0) axes[0].yaxis.set_ticks([]) for i, slave in enumerate(traces): if corr: cc = normxcorr2(tr.data, slave[0].data) if not stack: ind = i else: ind = i + 1 if corr: axes[ind].set_ylabel('cc=' + str(round(np.max(cc), 2)), rotation=0) axes[ind].text(0.9, 0.15, str(round(np.max(slave[0].data))), bbox=dict(facecolor='white', alpha=0.95), transform=axes[ind].transAxes) axes[ind].text(0.7, 0.85, slave[0].stats.starttime.datetime. strftime('%Y/%m/%d %H:%M:%S'), bbox=dict(facecolor='white', alpha=0.95), transform=axes[ind].transAxes) axes[-1].set_xlabel('Time (s)') fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def multi_trace_plot(traces, corr=True, stack='linstack', size=(7, 12), **kwargs)
Plot multiple traces (usually from the same station) on the same plot. Differs somewhat from obspy's stream.plot in that only relative time within each trace matters, and traces will not be merged together. :type traces: list :param traces: List of obspy.core.trace.Trace :type corr: bool :param corr: Whether to calculate the correlation or not; if True, this will be added to the axes :type stack: str :param stack: Whether to plot the stack as the first trace or not; select the type of stack: 'linstack' or 'PWS', or None. :type size: tuple :param size: Size of figure.
2.695117
2.589256
1.040885
import matplotlib.pyplot as plt
info = [(times[i], mags[i]) for i in range(len(times))]
info.sort(key=lambda tup: tup[0])
times = [x[0] for x in info]
mags = [x[1] for x in info]
# Make two subplots next to each other of time before and time after
fig, axes = plt.subplots(1, 2, sharey=True, figsize=size)
axes = axes.ravel()
pre_times = []
post_times = []
for i in range(len(times)):
    if i > 0:
        pre_times.append((times[i] - times[i - 1]) / 60)
    if i < len(times) - 1:
        post_times.append((times[i + 1] - times[i]) / 60)
axes[0].scatter(pre_times, mags[1:])
axes[0].set_title('Pre-event times')
axes[0].set_ylabel('Magnitude')
axes[0].set_xlabel('Time (Minutes)')
plt.setp(axes[0].xaxis.get_majorticklabels(), rotation=30)
# Plot the time to the following event against each event's magnitude
axes[1].scatter(post_times, mags[:-1])
axes[1].set_title('Post-event times')
axes[1].set_xlabel('Time (Minutes)')
axes[0].autoscale(enable=True, tight=True)
axes[1].autoscale(enable=True, tight=True)
plt.setp(axes[1].xaxis.get_majorticklabels(), rotation=30)
fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
return fig
def interev_mag(times, mags, size=(10.5, 7.5), **kwargs)
Plot inter-event times against magnitude. :type times: list :param times: list of the detection times, must be sorted the same as mags :type mags: list :param mags: list of magnitudes :type size: tuple :param size: Size of figure in inches. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> from eqcorrscan.utils.plotting import interev_mag >>> client = Client('IRIS') >>> t1 = UTCDateTime('2012-03-26T00:00:00') >>> t2 = t1 + (3 * 86400) >>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3) >>> magnitudes = [event.preferred_magnitude().mag for event in catalog] >>> times = [event.preferred_origin().time for event in catalog] >>> interev_mag(times, magnitudes) # doctest: +SKIP .. plot:: from obspy.clients.fdsn import Client from obspy import UTCDateTime from eqcorrscan.utils.plotting import interev_mag client = Client('IRIS') t1 = UTCDateTime('2012-03-26T00:00:00') t2 = t1 + (3 * 86400) catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3) magnitudes = [event.preferred_magnitude().mag for event in catalog] times = [event.preferred_origin().time for event in catalog] interev_mag(times, magnitudes)
2.032732
2.040387
0.996248
nodes = [] for ev in catalog: nodes.append((ev.preferred_origin().latitude, ev.preferred_origin().longitude, ev.preferred_origin().depth / 1000)) # Will plot borehole instruments at elevation - depth if provided all_stas = [] for net in inventory: for sta in net: if len(sta.channels) > 0: all_stas.append((sta.latitude, sta.longitude, sta.elevation / 1000 - sta.channels[0].depth / 1000)) else: warnings.warn('No channel information attached, ' 'setting elevation without depth') all_stas.append((sta.latitude, sta.longitude, sta.elevation / 1000)) fig = threeD_seismplot( stations=all_stas, nodes=nodes, size=size, **kwargs) return fig
def obspy_3d_plot(inventory, catalog, size=(10.5, 7.5), **kwargs)
Plot obspy Inventory and obspy Catalog classes in three dimensions. :type inventory: obspy.core.inventory.inventory.Inventory :param inventory: Obspy inventory class containing station metadata :type catalog: obspy.core.event.catalog.Catalog :param catalog: Obspy catalog class containing event metadata :type save: bool :param save: False will plot to screen, true will save plot and not show \ to screen. :type savefile: str :param savefile: Filename to save to, required for save=True :type size: tuple :param size: Size of figure in inches. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example: >>> from obspy.clients.fdsn import Client >>> from obspy import UTCDateTime >>> from eqcorrscan.utils.plotting import obspy_3d_plot >>> client = Client('IRIS') >>> t1 = UTCDateTime(2012, 3, 26) >>> t2 = t1 + 86400 >>> catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43, ... longitude=170, maxradius=5) >>> inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43, ... longitude=170, maxradius=10) >>> obspy_3d_plot(inventory=inventory, catalog=catalog) # doctest: +SKIP .. plot:: from obspy.clients.fdsn import Client from obspy import UTCDateTime from eqcorrscan.utils.plotting import obspy_3d_plot client = Client('IRIS') t1 = UTCDateTime(2012, 3, 26) t2 = t1 + 86400 catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43, longitude=170, maxradius=5) inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43, longitude=170, maxradius=10) obspy_3d_plot(inventory=inventory, catalog=catalog)
3.672297
3.918009
0.937287
import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D stalats, stalongs, staelevs = zip(*stations) evlats, evlongs, evdepths = zip(*nodes) # Cope with +/-180 latitudes... _evlongs = [] for evlong in evlongs: if evlong < 0: evlong = float(evlong) evlong += 360 _evlongs.append(evlong) evlongs = _evlongs _stalongs = [] for stalong in stalongs: if stalong < 0: stalong = float(stalong) stalong += 360 _stalongs.append(stalong) stalongs = _stalongs evdepths = [-1 * depth for depth in evdepths] fig = plt.figure(figsize=size) ax = Axes3D(fig) ax.scatter(evlats, evlongs, evdepths, marker="x", c="k", label='Hypocenters') ax.scatter(stalats, stalongs, staelevs, marker="v", c="r", label='Stations') ax.set_ylabel("Longitude (deg)") ax.set_xlabel("Latitude (deg)") ax.set_zlabel("Elevation (km)") ax.get_xaxis().get_major_formatter().set_scientific(False) ax.get_yaxis().get_major_formatter().set_scientific(False) plt.legend() fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def threeD_seismplot(stations, nodes, size=(10.5, 7.5), **kwargs)
Plot seismicity and stations in a 3D, movable, zoomable space. Uses matplotlib's Axes3D toolkit. :type stations: list :param stations: list of one tuple per station of (lat, long, elevation), \ with up positive. :type nodes: list :param nodes: list of one tuple per event of (lat, long, depth) with down \ positive. :type size: tuple :param size: Size of figure in inches. :returns: :class:`matplotlib.figure.Figure` .. Note:: See :func:`eqcorrscan.utils.plotting.obspy_3d_plot` for example output.
2.212297
2.147864
1.029999
import matplotlib.pyplot as plt # Work out how many traces we can plot n_traces = 0 for tr in signal: try: noise.select(id=tr.id)[0] except IndexError: # pragma: no cover continue n_traces += 1 fig, axes = plt.subplots(n_traces, 2, sharex=True) if len(signal) > 1: axes = axes.ravel() i = 0 lines = [] labels = [] for tr in signal: try: noise_tr = noise.select(id=tr.id)[0] except IndexError: # pragma: no cover continue ax1 = axes[i] ax2 = axes[i + 1] fft_len = fftpack.next_fast_len( max(noise_tr.stats.npts, tr.stats.npts)) if not normalise: signal_fft = fftpack.rfft(tr.data, fft_len) noise_fft = fftpack.rfft(noise_tr.data, fft_len) else: signal_fft = fftpack.rfft(tr.data / max(tr.data), fft_len) noise_fft = fftpack.rfft( noise_tr.data / max(noise_tr.data), fft_len) frequencies = np.linspace(0, 1 / (2 * tr.stats.delta), fft_len // 2) noise_line, = ax1.semilogy( frequencies, 2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2]), 'k', label="noise") signal_line, = ax1.semilogy( frequencies, 2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2]), 'r', label="signal") if "signal" not in labels: labels.append("signal") lines.append(signal_line) if "noise" not in labels: labels.append("noise") lines.append(noise_line) ax1.set_ylabel(tr.id, rotation=0, horizontalalignment='right') ax2.plot( frequencies, (2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2])) - (2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2])), 'k') ax2.yaxis.tick_right() ax2.set_ylim(bottom=0) i += 2 axes[-1].set_xlabel("Frequency (Hz)") axes[-2].set_xlabel("Frequency (Hz)") axes[0].set_title("Spectra") axes[1].set_title("Signal - noise") plt.figlegend(lines, labels, 'upper left') plt.tight_layout() plt.subplots_adjust(hspace=0) fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def noise_plot(signal, noise, normalise=False, **kwargs)
Plot signal and noise Fourier transforms and their difference. :type signal: `obspy.core.stream.Stream` :param signal: Stream of the "signal" window :type noise: `obspy.core.stream.Stream` :param noise: Stream of the "noise" window. :type normalise: bool :param normalise: Whether to normalise the data before plotting or not. :return: :class:`matplotlib.figure.Figure`
1.967307
1.927018
1.020907
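A hedged usage sketch for noise_plot; the window times below are arbitrary choices on obspy's example stream, not values from this project.

from obspy import read

st = read()
start = st[0].stats.starttime
signal = st.copy().trim(start + 10, start + 20)   # window around the arrival
noise = st.copy().trim(start, start + 10)         # preceding noise window
fig = noise_plot(signal=signal, noise=noise, normalise=True, show=True)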
import matplotlib.pyplot as plt figures = [] for sval, stachan in zip(svalues, stachans): print(stachan) plot_traces = [SVStream.select(station=stachan[0], channel=stachan[1])[0] for SVStream in svstreams] fig, axes = plt.subplots(len(plot_traces), 1, sharex=True) if len(plot_traces) > 1: axes = axes.ravel() for i, tr in enumerate(plot_traces): y = tr.data x = np.linspace(0, len(y) * tr.stats.delta, len(y)) axes[i].plot(x, y, 'k', linewidth=1.1) ylab = 'SV %s = %s' % (i + 1, round(sval[i] / len(sval), 2)) axes[i].set_ylabel(ylab, rotation=0) axes[i].yaxis.set_ticks([]) print(i) axes[-1].set_xlabel('Time (s)') plt.subplots_adjust(hspace=0) fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover figures.append(fig) return figures
def svd_plot(svstreams, svalues, stachans, **kwargs)
Plot singular vectors from the :mod:`eqcorrscan.utils.clustering` routines. One plot for each channel. :type svstreams: list :param svstreams: See :func:`eqcorrscan.utils.clustering.svd_to_stream` - these should be ordered by power, e.g. first singular vector in the first stream. :type svalues: list :param svalues: List of floats of the singular values corresponding to the SVStreams :type stachans: list :param stachans: List of station.channel :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> import glob >>> from eqcorrscan.utils.plotting import svd_plot >>> from eqcorrscan.utils.clustering import svd, svd_to_stream >>> wavefiles = glob.glob('eqcorrscan/tests/test_data/WAV/TEST_/*') >>> streams = [read(w) for w in wavefiles[1:10]] >>> stream_list = [] >>> for st in streams: ... tr = st.select(station='GCSZ', channel='EHZ') ... tr = tr.detrend('simple').resample(100).filter( ... 'bandpass', freqmin=2, freqmax=8) ... stream_list.append(tr) >>> uvec, sval, svec, stachans = svd(stream_list=stream_list) >>> svstreams = svd_to_stream(uvectors=uvec, stachans=stachans, k=3, ... sampling_rate=100) >>> svd_plot(svstreams=svstreams, svalues=sval, ... stachans=stachans) # doctest: +SKIP .. plot:: from obspy import read import glob, os from eqcorrscan.utils.plotting import svd_plot from eqcorrscan.utils.clustering import svd, svd_to_stream wavefiles = glob.glob(os.path.realpath('../../..') + '/tests/test_data/WAV/TEST_/*') streams = [read(w) for w in wavefiles[1:10]] stream_list = [] for st in streams: tr = st.select(station='GCSZ', channel='EHZ') st.detrend('simple').resample(100).filter('bandpass', freqmin=5, freqmax=40) stream_list.append(tr) svec, sval, uvec, stachans = svd(stream_list=stream_list) svstreams = svd_to_stream(uvectors=uvec, stachans=stachans, k=3, sampling_rate=100) svd_plot(svstreams=svstreams, svalues=sval, stachans=stachans)
2.764703
2.660056
1.03934
import matplotlib.pyplot as plt if isinstance(traces, Stream): traces.sort(['station', 'channel']) if not fig: fig = plt.figure() for i, tr in enumerate(traces): if i == 0: ax = fig.add_subplot(len(traces), 1, i + 1) else: ax = fig.add_subplot(len(traces), 1, i + 1, sharex=ax) ax1, ax2 = _spec_trace(tr, cmap=cmap, wlen=wlen, log=log, trc=trc, tralpha=tralpha, axes=ax) ax.set_yticks([]) if i < len(traces) - 1: plt.setp(ax1.get_xticklabels(), visible=False) if isinstance(traces, list): ax.text(0.005, 0.85, "{0}::{1}".format(tr.id, tr.stats.starttime), bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) elif isinstance(traces, Stream): ax.text(0.005, 0.85, tr.id, bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) ax.text(0.005, 0.02, str(np.max(tr.data).round(1)), bbox=dict(facecolor='white', alpha=0.95), transform=ax2.transAxes) ax.set_xlabel('Time (s)') fig.subplots_adjust(hspace=0) fig.set_size_inches(w=size[0], h=size[1], forward=True) fig.text(0.04, 0.5, 'Frequency (Hz)', va='center', rotation='vertical') fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def spec_trace(traces, cmap=None, wlen=0.4, log=False, trc='k', tralpha=0.9, size=(10, 13), fig=None, **kwargs)
Plots seismic data with spectrogram behind. Takes a stream or list of traces and plots the trace with the spectra beneath it. :type traces: list :param traces: Traces to be plotted, can be a single :class:`obspy.core.stream.Stream`, or a list of :class:`obspy.core.trace.Trace`. :type cmap: str :param cmap: `Matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_. :type wlen: float :param wlen: Window length for fft in seconds :type log: bool :param log: Use a log frequency scale :type trc: str :param trc: Color for the trace. :type tralpha: float :param tralpha: Opacity level for the seismogram, from transparent (0.0) \ to opaque (1.0). :type size: tuple :param size: Plot size, tuple of floats, inches :type fig: matplotlib.figure.Figure :param fig: Figure to plot onto, defaults to self generating. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.plotting import spec_trace >>> st = read() >>> spec_trace(st, trc='white') # doctest: +SKIP .. plot:: from obspy import read from eqcorrscan.utils.plotting import spec_trace st = read() spec_trace(st, trc='white')
2.019948
2.079573
0.971328
import matplotlib.pyplot as plt if not axes: fig = plt.figure(figsize=size) ax1 = fig.add_subplot(111) else: ax1 = axes trace.spectrogram(wlen=wlen, log=log, show=False, cmap=cmap, axes=ax1) fig = plt.gcf() ax2 = ax1.twinx() y = trace.data x = np.linspace(0, len(y) / trace.stats.sampling_rate, len(y)) ax2.plot(x, y, color=trc, linewidth=2.0, alpha=tralpha) ax2.set_xlim(min(x), max(x)) ax2.set_ylim(min(y) * 2, max(y) * 2) if title: ax1.set_title(' '.join([trace.stats.station, trace.stats.channel, trace.stats.starttime.datetime. strftime('%Y/%m/%d %H:%M:%S')])) if not axes: fig.set_size_inches(size) fig.show() else: return ax1, ax2
def _spec_trace(trace, cmap=None, wlen=0.4, log=False, trc='k', tralpha=0.9, size=(10, 2.5), axes=None, title=None)
Function to plot a trace over that trace's spectrogram. Uses obspy's spectrogram routine. :type trace: obspy.core.trace.Trace :param trace: trace to plot :type cmap: str :param cmap: [Matplotlib colormap](http://matplotlib.org/examples/color/ colormaps_reference.html) :type wlen: float :param wlen: Window length for fft in seconds :type log: bool :param log: Use a log frequency scale :type trc: str :param trc: Color for the trace. :type tralpha: float :param tralpha: Opacity level for the seismogram, from transparent (0.0) \ to opaque (1.0). :type size: tuple :param size: Plot size, tuple of floats, inches :type axes: matplotlib axes :param axes: Axes to plot onto, defaults to self generating. :type title: str :param title: Title for the plot.
2.114182
2.053187
1.029708
import matplotlib.pyplot as plt if stachans == 'all' and not detector.multiplex: stachans = detector.stachans elif detector.multiplex: stachans = [('multi', ' ')] if np.isinf(detector.dimension): msg = ' '.join(['Infinite subspace dimension. Only plotting as many', 'dimensions as events in design set']) warnings.warn(msg) nrows = detector.v[0].shape[1] else: nrows = detector.dimension fig, axes = plt.subplots(nrows=nrows, ncols=len(stachans), sharex=True, sharey=True, figsize=size) x = np.arange(len(detector.u[0]), dtype=np.float32) if detector.multiplex: x /= len(detector.stachans) * detector.sampling_rate else: x /= detector.sampling_rate for column, stachan in enumerate(stachans): channel = detector.u[column] for row, vector in enumerate(channel.T[0:nrows]): if len(stachans) == 1: if nrows == 1: axis = axes else: axis = axes[row] else: axis = axes[row, column] if row == 0: axis.set_title('.'.join(stachan)) axis.plot(x, vector, 'k', linewidth=1.1) if column == 0: axis.set_ylabel('Basis %s' % (row + 1), rotation=0) if row == nrows - 1: axis.set_xlabel('Time (s)') axis.set_yticks([]) plt.subplots_adjust(hspace=0.05) plt.subplots_adjust(wspace=0.05) fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def subspace_detector_plot(detector, stachans, size, **kwargs)
Plotting for the subspace detector class. Plot the output basis vectors for the detector at the given dimension. Corresponds to the first n horizontal vectors of the V matrix. :type detector: :class:`eqcorrscan.core.subspace.Detector` :type stachans: list :param stachans: List of tuples of (station, channel) to plot. Can be set \ to 'all' to use all the station-channel pairs available. If \ detector is multiplexed, will just plot that. :type size: tuple :param size: Figure size. :returns: Figure :rtype: matplotlib.pyplot.Figure .. rubric:: Example >>> from eqcorrscan.core import subspace >>> import os >>> detector = subspace.Detector() >>> detector.read(os.path.join( ... os.path.abspath(os.path.dirname(__file__)), ... '..', 'tests', 'test_data', 'subspace', ... 'stat_test_detector.h5')) Detector: Tester >>> subspace_detector_plot(detector=detector, stachans='all', size=(10, 7), ... show=True) # doctest: +SKIP .. plot:: from eqcorrscan.core import subspace from eqcorrscan.utils.plotting import subspace_detector_plot import os print('running subspace plot') detector = subspace.Detector() detector.read(os.path.join('..', '..', '..', 'tests', 'test_data', 'subspace', 'stat_test_detector.h5')) subspace_detector_plot(detector=detector, stachans='all', size=(10, 7), show=True)
3.180212
3.070689
1.035667
def _match_filter_plot(stream, cccsum, template_names, rawthresh, plotdir, plot_format, i): # pragma: no cover import matplotlib.pyplot as plt plt.ioff() stream_plot = copy.deepcopy(stream[0]) # Downsample for plotting stream_plot = _plotting_decimation(stream_plot, 10e5, 4) cccsum_plot = Trace(cccsum) cccsum_plot.stats.sampling_rate = stream[0].stats.sampling_rate # Resample here to maintain shape better cccsum_hist = cccsum_plot.copy() cccsum_hist = cccsum_hist.decimate(int(stream[0].stats. sampling_rate / 10)).data cccsum_plot = chunk_data(cccsum_plot, 10, 'Maxabs').data # Enforce same length stream_plot.data = stream_plot.data[0:len(cccsum_plot)] cccsum_plot = cccsum_plot[0:len(stream_plot.data)] cccsum_hist = cccsum_hist[0:len(stream_plot.data)] plot_name = (plotdir + os.sep + 'cccsum_plot_' + template_names[i] + '_' + stream[0].stats.starttime.datetime.strftime('%Y-%m-%d') + '.' + plot_format) triple_plot(cccsum=cccsum_plot, cccsum_hist=cccsum_hist, trace=stream_plot, threshold=rawthresh, save=True, savefile=plot_name)
Plotting function for match_filter. :param stream: Stream to plot :param cccsum: Cross-correlation sum to plot :param template_names: Template names used :param rawthresh: Threshold level :param plotdir: Location to save plots :param plot_format: Output plot type (e.g. png, svg, eps, pdf...) :param i: Template index name to plot.
null
null
null
trace_len = trace.stats.npts while trace_len > max_len: trace.decimate(decimation_step) trace_len = trace.stats.npts return trace
def _plotting_decimation(trace, max_len=10e5, decimation_step=4)
Decimate data until required length reached. :type trace: obspy.core.stream.Trace :param trace: Trace to decimate :type max_len: int :param max_len: Maximum length in samples :type decimation_step: int :param decimation_step: Decimation factor to use for each step. :return: obspy.core.stream.Trace .. rubric:: Example >>> from obspy import Trace >>> import numpy as np >>> trace = Trace(np.random.randn(1000)) >>> trace = _plotting_decimation(trace, max_len=100, decimation_step=2) >>> print(trace.stats.npts) 63
2.58325
3.415365
0.756361
for fig in doctree.traverse(condition=nodes.figure): if 'thumbnail' in fig['classes']: continue for img in fig.traverse(condition=nodes.image): img['classes'].append('img-responsive')
def make_images_responsive(app, doctree)
Add Bootstrap img-responsive class to images.
4.553406
4.132237
1.101923
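A minimal sketch of how such a helper is typically wired into a Sphinx extension; the setup() function below is assumed boilerplate rather than code from this project, and it uses Sphinx's 'doctree-read' event, which passes (app, doctree).

def setup(app):
    # Run make_images_responsive on every parsed document
    app.connect('doctree-read', make_images_responsive)
    return {'parallel_read_safe': True}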
log = logging.getLogger('ciu') request = requests.get("https://raw.githubusercontent.com/brettcannon/" "caniusepython3/master/caniusepython3/overrides.json") if request.status_code == 200: log.info("Overrides loaded from GitHub and cached") overrides = request.json() else: log.info("Overrides loaded from included package data and cached") raw_bytes = pkgutil.get_data(__name__, 'overrides.json') overrides = json.loads(raw_bytes.decode('utf-8')) return frozenset(map(packaging.utils.canonicalize_name, overrides.keys()))
def _manual_overrides(_cache_date=None)
Read the overrides file. An attempt is made to read the file as it currently stands on GitHub, and then only if that fails is the included file used.
4.202333
3.661725
1.147638
log = logging.getLogger("ciu") log.info("Checking {} ...".format(project_name)) request = requests.get("https://pypi.org/pypi/{}/json".format(project_name)) if request.status_code >= 400: log = logging.getLogger("ciu") log.warning("problem fetching {}, assuming ported ({})".format( project_name, request.status_code)) return True response = request.json() return any(c.startswith("Programming Language :: Python :: 3") for c in response["info"]["classifiers"])
def supports_py3(project_name)
Check with PyPI if a project supports Python 3.
3.55022
3.325585
1.067547
dependencies = [] dependencies.extend(projects_.projects_from_requirements(requirements_paths)) dependencies.extend(projects_.projects_from_metadata(metadata)) dependencies.extend(projects) manual_overrides = pypi.manual_overrides() for dependency in dependencies: if dependency in manual_overrides: continue elif not pypi.supports_py3(dependency): return False return True
def check(requirements_paths=[], metadata=[], projects=[])
Return True if all of the specified dependencies have been ported to Python 3. The requirements_paths argument takes a sequence of file paths to requirements files. The 'metadata' argument takes a sequence of strings representing metadata. The 'projects' argument takes a sequence of project names. Any project that is not listed on PyPI will be considered ported.
5.608242
5.325543
1.053084
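A usage sketch for the check() helper above; the project names are placeholders.

# Returns True only when every (non-overridden) dependency declares
# Python 3 support on PyPI.
if check(projects=['requests', 'some-legacy-package']):
    print('All dependencies appear to support Python 3')
else:
    print('At least one dependency has not been ported')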
description = ('Determine if a set of project dependencies will work with ' 'Python 3') parser = argparse.ArgumentParser(description=description) req_help = 'path(s) to a pip requirements file (e.g. requirements.txt)' parser.add_argument('--requirements', '-r', nargs='+', default=(), help=req_help) meta_help = 'path(s) to a PEP 426 metadata file (e.g. PKG-INFO, pydist.json)' parser.add_argument('--metadata', '-m', nargs='+', default=(), help=meta_help) parser.add_argument('--projects', '-p', nargs='+', default=(), help='name(s) of projects to test for Python 3 support') parser.add_argument('--verbose', '-v', action='store_true', help='verbose output (e.g. list compatibility overrides)') parsed = parser.parse_args(args) if not (parsed.requirements or parsed.metadata or parsed.projects): parser.error("Missing 'requirements', 'metadata', or 'projects'") projects = [] if parsed.verbose: logging.getLogger('ciu').setLevel(logging.INFO) projects.extend(projects_.projects_from_requirements(parsed.requirements)) metadata = [] for metadata_path in parsed.metadata: with io.open(metadata_path) as file: metadata.append(file.read()) projects.extend(projects_.projects_from_metadata(metadata)) projects.extend(map(packaging.utils.canonicalize_name, parsed.projects)) return projects
def projects_from_cli(args)
Take arguments through the CLI and create a list of specified projects.
2.829976
2.857818
0.990258
if not blockers: encoding = getattr(sys.stdout, 'encoding', '') if encoding: encoding = encoding.lower() if encoding == 'utf-8': # party hat flair = "\U0001F389 " else: flair = '' return [flair + 'You have 0 projects blocking you from using Python 3!'] flattened_blockers = set() for blocker_reasons in blockers: for blocker in blocker_reasons: flattened_blockers.add(blocker) need = 'You need {0} project{1} to transition to Python 3.' formatted_need = need.format(len(flattened_blockers), 's' if len(flattened_blockers) != 1 else '') can_port = ('Of {0} {1} project{2}, {3} {4} no direct dependencies ' 'blocking {5} transition:') formatted_can_port = can_port.format( 'those' if len(flattened_blockers) != 1 else 'that', len(flattened_blockers), 's' if len(flattened_blockers) != 1 else '', len(blockers), 'have' if len(blockers) != 1 else 'has', 'their' if len(blockers) != 1 else 'its') return formatted_need, formatted_can_port
def message(blockers)
Create a sequence of key messages based on what is blocking.
3.856477
3.864621
0.997893
pprinted = [] for blocker in sorted(blockers, key=lambda x: tuple(reversed(x))): buf = [blocker[0]] if len(blocker) > 1: buf.append(' (which is blocking ') buf.append(', which is blocking '.join(blocker[1:])) buf.append(')') pprinted.append(''.join(buf)) return pprinted
def pprint_blockers(blockers)
Pretty print blockers into a sequence of strings. Results will be sorted by top-level project name. This means that if a project is blocking another project then the dependent project will be what is used in the sorting, not the project at the bottom of the dependency graph.
3.102303
2.826969
1.097395
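A small worked example with made-up project names; each tuple runs from the blocking project up to the top-level project, so sorting keys off the last element.

blockers = [('legacydep', 'myproject'), ('otherdep',)]
print(pprint_blockers(blockers))
# ['legacydep (which is blocking myproject)', 'otherdep']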
log = logging.getLogger('ciu') log.info('{0} top-level projects to check'.format(len(projects))) print('Finding and checking dependencies ...') blockers = dependencies.blockers(projects) print('') for line in message(blockers): print(line) print('') for line in pprint_blockers(blockers): print(' ', line) return len(blockers) == 0
def check(projects)
Check the specified projects for Python 3 compatibility.
6.599981
6.382237
1.034117
blockers = set(reasons.keys()) - set(reasons.values()) paths = set() for blocker in blockers: path = [blocker] parent = reasons[blocker] while parent: if parent in path: raise CircularDependencyError(dict(parent=parent, blocker=blocker, path=path)) path.append(parent) parent = reasons.get(parent) paths.add(tuple(path)) return paths
def reasons_to_paths(reasons)
Calculate the dependency paths to the reasons of the blockers. Paths will be in reverse-dependency order (i.e. parent projects are in ascending order).
2.606675
2.372238
1.098825
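A worked example with made-up names: 'reasons' maps each blocking project to the project that pulled it in, and only leaf blockers (projects that are not themselves a reason) start a path.

reasons = {'subdep': 'dep', 'dep': 'myproject'}
print(reasons_to_paths(reasons))
# {('subdep', 'dep', 'myproject')}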
log = logging.getLogger('ciu') log.info('Locating dependencies for {}'.format(project_name)) located = distlib.locators.locate(project_name, prereleases=True) if not located: log.warning('{0} not found'.format(project_name)) return None return {packaging.utils.canonicalize_name(pypi.just_name(dep)) for dep in located.run_requires}
def dependencies(project_name)
Get the dependencies for a project.
6.500982
6.563947
0.990407
log = logging.getLogger('ciu') valid_reqs = [] for requirements_path in requirements: with io.open(requirements_path) as file: requirements_text = file.read() # Drop line continuations. requirements_text = re.sub(r"\\s*", "", requirements_text) # Drop comments. requirements_text = re.sub(r"#.*", "", requirements_text) reqs = [] for line in requirements_text.splitlines(): if not line: continue try: reqs.append(packaging.requirements.Requirement(line)) except packaging.requirements.InvalidRequirement: log.warning('Skipping {0!r}: could not parse requirement'.format(line)) for req in reqs: if not req.name: log.warning('A requirement lacks a name ' '(e.g. no `#egg` on a `file:` path)') elif req.url: log.warning( 'Skipping {0}: URL-specified projects unsupported'.format(req.name)) else: valid_reqs.append(req.name) return frozenset(map(packaging.utils.canonicalize_name, valid_reqs))
def projects_from_requirements(requirements)
Extract the project dependencies from a Requirements specification.
3.357564
3.290751
1.020303
projects = [] for data in metadata: meta = distlib.metadata.Metadata(fileobj=io.StringIO(data)) projects.extend(pypi.just_name(project) for project in meta.run_requires) return frozenset(map(packaging.utils.canonicalize_name, projects))
def projects_from_metadata(metadata)
Extract the project dependencies from a metadata spec.
6.194633
5.62488
1.101292
if grayscale is None: grayscale = GRAYSCALE_DEFAULT confidence = float(confidence) needleImage = _load_cv2(needleImage, grayscale) needleHeight, needleWidth = needleImage.shape[:2] haystackImage = _load_cv2(haystackImage, grayscale) if region: haystackImage = haystackImage[region[1]:region[1]+region[3], region[0]:region[0]+region[2]] else: region = (0, 0) # full image; these values used in the yield statement if (haystackImage.shape[0] < needleImage.shape[0] or haystackImage.shape[1] < needleImage.shape[1]): # avoid semi-cryptic OpenCV error below if bad size raise ValueError('needle dimension(s) exceed the haystack image or region dimensions') if step == 2: confidence *= 0.95 needleImage = needleImage[::step, ::step] haystackImage = haystackImage[::step, ::step] else: step = 1 # get all matches at once, credit: https://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805 result = cv2.matchTemplate(haystackImage, needleImage, cv2.TM_CCOEFF_NORMED) match_indices = numpy.arange(result.size)[(result > confidence).flatten()] matches = numpy.unravel_index(match_indices[:limit], result.shape) if len(matches[0]) == 0: if USE_IMAGE_NOT_FOUND_EXCEPTION: raise ImageNotFoundException('Could not locate the image (highest confidence = %.3f)' % result.max()) else: return None # use a generator for API consistency: matchx = matches[1] * step + region[0] # vectorized matchy = matches[0] * step + region[1] for x, y in zip(matchx, matchy): yield Box(x, y, needleWidth, needleHeight)
def _locateAll_opencv(needleImage, haystackImage, grayscale=None, limit=10000, region=None, step=1, confidence=0.999)
Faster but more memory-intensive than the pure Python implementation. step=2 skips every other row and column, which is roughly 3x faster but prone to misses; to compensate, the algorithm automatically reduces the confidence threshold by 5% (which helps but will not avoid all misses). Limitations: - OpenCV 3.x & Python 3.x not tested - RGBA images are treated as RGB (the alpha channel is ignored)
3.474484
3.429243
1.013193
start = time.time() while True: try: screenshotIm = screenshot(region=None) # the locateAll() function must handle cropping to return accurate coordinates, so don't pass a region here. retVal = locate(image, screenshotIm, **kwargs) try: screenshotIm.fp.close() except AttributeError: # Screenshots on Windows won't have an fp since they came from # ImageGrab, not a file. Screenshots on Linux will have fp set # to None since the file has been unlinked pass if retVal or time.time() - start > minSearchTime: return retVal except ImageNotFoundException: if time.time() - start > minSearchTime: if USE_IMAGE_NOT_FOUND_EXCEPTION: raise else: return None
def locateOnScreen(image, minSearchTime=0, **kwargs)
minSearchTime - amount of time in seconds to repeat taking screenshots and trying to locate a match. The default of 0 performs a single search.
6.606282
6.625762
0.99706
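A hedged usage sketch; the image filename is a placeholder, the confidence keyword assumes the OpenCV-backed locator shown earlier is in use, and the result is assumed to be the usual (left, top, width, height) Box tuple.

box = locateOnScreen('submit_button.png', minSearchTime=5, confidence=0.9)
if box is not None:
    print('Found at', box.left, box.top, box.width, box.height)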
raw_details = self._requestDetails(ip_address) raw_details['country_name'] = self.countries.get(raw_details.get('country')) raw_details['ip_address'] = ipaddress.ip_address(raw_details.get('ip')) raw_details['latitude'], raw_details['longitude'] = self._read_coords(raw_details.get('loc')) return Details(raw_details)
def getDetails(self, ip_address=None)
Get details for specified IP address as a Details object.
3.211161
2.976893
1.078695
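A hedged usage sketch assuming the ipinfo package's getHandler() factory; the access token is a placeholder.

import ipinfo

handler = ipinfo.getHandler('YOUR_TOKEN')
details = handler.getDetails('8.8.8.8')
print(details.country_name, details.latitude, details.longitude)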
if ip_address not in self.cache: url = self.API_URL if ip_address: url += '/' + ip_address response = requests.get(url, headers=self._get_headers(), **self.request_options) if response.status_code == 429: raise RequestQuotaExceededError() response.raise_for_status() self.cache[ip_address] = response.json() return self.cache[ip_address]
def _requestDetails(self, ip_address=None)
Get IP address data by sending request to IPinfo API.
2.551976
2.448221
1.04238
headers = { 'user-agent': 'IPinfoClient/Python{version}/1.0'.format(version=sys.version_info[0]), 'accept': 'application/json' } if self.access_token: headers['authorization'] = 'Bearer {}'.format(self.access_token) return headers
def _get_headers(self)
Build headers for a request to the IPinfo API.
3.677362
2.741659
1.341291
if not countries_file: countries_file = os.path.join(os.path.dirname(__file__), self.COUNTRY_FILE_DEFAULT) with open(countries_file) as f: countries_json = f.read() return json.loads(countries_json)
def _read_country_names(self, countries_file=None)
Read list of countries from specified country file or default file.
2.526591
2.205381
1.145648
if not self.has_section(section): return False if not self.has_option(section, option): return False if ConfigParser.get(self, section, option) == self._secure_placeholder: return True return False
def is_secure_option(self, section, option)
Test an option to see if it is secured or not. :param section: section id :type section: string :param option: option name :type option: string :returns: True if the option is secure, False otherwise :rtype: boolean
2.738115
2.929443
0.934688
items = [] for k, v in ConfigParser.items(self, section): if self.is_secure_option(section, k): v = self.get(section, k) if v == '!!False!!': v = False items.append((k, v)) return items
def items(self, section)
Get all items for a section. Subclassed to ensure secure items are returned with their unencrypted data. :param section: section id :type section: string
3.608242
3.964122
0.910225
return [x for x in self.items(section) if self.is_secure_option(section, x[0])]
def secure_items(self, section)
Like items() but only return secure items. :param section: section id :type section: string
4.536427
6.592966
0.688071
if not value: value = '!!False!!' if self.is_secure_option(section, option): self.set_secure(section, option, value) else: ConfigParser.set(self, section, option, value)
def set(self, section, option, value)
Set an option value. Knows how to set options properly marked as secure.
3.986539
3.343507
1.192322
if self.keyring_available: s_option = "%s%s" % (section, option) self._unsaved[s_option] = ('set', value) value = self._secure_placeholder ConfigParser.set(self, section, option, value)
def set_secure(self, section, option, value)
Set an option and mark it as secure. Any subsequent uses of 'set' or 'get' will also now know that this option is secure as well.
5.566339
5.599273
0.994118
if self.is_secure_option(section, option) and self.keyring_available: s_option = "%s%s" % (section, option) if self._unsaved.get(s_option, [''])[0] == 'set': res = self._unsaved[s_option][1] else: res = keyring.get_password(self.keyring_name, s_option) else: res = ConfigParser.get(self, section, option, *args) if res == '!!False!!': return False return res
def get(self, section, option, *args)
Get an option value from a section. If the option is secure, the plain-text value is returned from the secure store.
3.283937
3.155586
1.040674
if self.is_secure_option(section, option) and self.keyring_available: s_option = "%s%s" % (section, option) self._unsaved[s_option] = ('delete', None) ConfigParser.remove_option(self, section, option)
def remove_option(self, section, option)
Removes the option from ConfigParser as well as the secure storage backend
5.061491
4.33823
1.166718
ConfigParser.write(self, *args) if self.keyring_available: for key, thing in self._unsaved.items(): action = thing[0] value = thing[1] if action == 'set': keyring.set_password(self.keyring_name, key, value) elif action == 'delete': try: keyring.delete_password(self.keyring_name, key) except: pass self._unsaved = {}
def write(self, *args)
See ConfigParser.write(). Also writes secure items to keystore.
2.930012
2.770543
1.057559
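A hedged round-trip sketch for the secure parser methods above, assuming the class is ofxclient's SecurableConfigParser and that a keyring backend is available; file and section names are placeholders.

from ofxclient.config import SecurableConfigParser

c = SecurableConfigParser()
c.add_section('institution.1')
c.set('institution.1', 'username', 'alice')
c.set_secure('institution.1', 'password', 'hunter2')
print(c.is_secure_option('institution.1', 'password'))  # True when a keyring is available
print(c.get('institution.1', 'password'))               # 'hunter2'
with open('ofx.conf', 'w') as fp:
    c.write(fp)  # plain options go to the file, secure values to the keyring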
if self.parser.has_section(id): return self._section_to_account(id) return None
def account(self, id)
Get :py:class:`ofxclient.Account` by section id
5.548723
4.782842
1.160131
serialized = account.serialize() section_items = flatten_dict(serialized) section_id = section_items['local_id'] if not self.parser.has_section(section_id): self.parser.add_section(section_id) for key in sorted(section_items): self.parser.set(section_id, key, section_items[key]) self.encrypt_account(id=section_id) return self
def add_account(self, account)
Add Account to config (does not save)
3.684846
3.600353
1.023468
for key in self.secured_field_names: value = self.parser.get(id, key) self.parser.set_secure(id, key, value) return self
def encrypt_account(self, id)
Make sure that certain fields are encrypted.
5.672447
4.88822
1.160432
for key in self.secured_field_names: if not self.parser.is_secure_option(id, key): return False return True
def is_encrypted_account(self, id)
Are all fields for the account id encrypted?
7.868612
5.937199
1.325307
if self.parser.has_section(id): self.parser.remove_section(id) return True return False
def remove_account(self, id)
Remove Account from config (does not save)
3.758677
3.08306
1.219138
with open(self.file_name, 'w') as fp: self.parser.write(fp) return self
def save(self)
Save changes to config file
4.709911
4.22405
1.115023
client_args = {'ofx_version': str(ofx_version)} if 'ofx.discovercard.com' in bank_info['url']: # Discover needs no User-Agent and no Accept headers client_args['user_agent'] = False client_args['accept'] = False if 'www.accountonline.com' in bank_info['url']: # Citi needs no User-Agent header client_args['user_agent'] = False return client_args
def client_args_for_bank(bank_info, ofx_version)
Return the client arguments to use for a particular Institution, as found from ofxhome. This provides us with an extension point to override or augment ofxhome data for specific institutions, such as those that require specific User-Agent headers (or no User-Agent header). :param bank_info: OFXHome bank information for the institution, as returned by ``OFXHome.lookup()`` :type bank_info: dict :param ofx_version: OFX Version argument specified on command line :type ofx_version: str :return: Client arguments for a specific institution :rtype: dict
4.188964
4.343592
0.964401
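A worked example; the bank_info dict here is a trimmed-down stand-in for a full OFXHome lookup result.

bank_info = {'url': 'https://ofx.discovercard.com/'}
print(client_args_for_bank(bank_info, ofx_version=103))
# {'ofx_version': '103', 'user_agent': False, 'accept': False}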
return hashlib.sha256(("%s%s" % ( self.id, self.username)).encode()).hexdigest()
def local_id(self)
Locally generated unique account identifier. :rtype: string
5.483054
6.427457
0.853067
u = self.username p = self.password if username and password: u = username p = password client = self.client() query = client.authenticated_query(username=u, password=p) res = client.post(query) ofx = BeautifulSoup(res, 'lxml') sonrs = ofx.find('sonrs') code = int(sonrs.find('code').contents[0].strip()) try: status = sonrs.find('message').contents[0].strip() except Exception: status = '' if code == 0: return 1 raise ValueError(status)
def authenticate(self, username=None, password=None)
Test the authentication credentials Raises a ``ValueError`` if there is a problem authenticating with the human readable reason given by the institution. :param username: optional username (use self.username by default) :type username: string or None :param password: optional password (use self.password by default) :type password: string or None
4.117438
4.060611
1.013995
from ofxclient.account import Account client = self.client() query = client.account_list_query() resp = client.post(query) resp_handle = StringIO(resp) if IS_PYTHON_2: parsed = OfxParser.parse(resp_handle) else: parsed = OfxParser.parse(BytesIO(resp_handle.read().encode())) return [Account.from_ofxparse(a, institution=self) for a in parsed.accounts]
def accounts(self)
Ask the bank for the known :py:class:`ofxclient.Account` list. :rtype: list of :py:class:`ofxclient.Account` objects
5.133127
5.062311
1.013989
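A hedged usage sketch tying authenticate() and accounts() together; the FI details and credentials are placeholders, and the constructor fields mirror serialize() below.

from ofxclient import Institution

bank = Institution(id='1001', org='EXAMPLEBANK', url='https://ofx.example.com',
                   username='alice', password='hunter2')
bank.authenticate()            # raises ValueError with the FI's message on failure
for account in bank.accounts():
    print(account.number, account.description)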
return { 'id': self.id, 'org': self.org, 'url': self.url, 'broker_id': self.broker_id, 'username': self.username, 'password': self.password, 'description': self.description, 'client_args': self.client().init_args, 'local_id': self.local_id() }
def serialize(self)
Serialize predictably for use in configuration storage. Output looks like this:: { 'local_id': 'unique local identifier', 'id': 'FI Id', 'org': 'FI Org', 'url': 'FI OFX Endpoint Url', 'broker_id': 'FI Broker Id', 'username': 'Customer username', 'password': 'Customer password', 'description': 'descr', 'client_args': { 'id': 'random client id - see Client() for default', 'app_id': 'app name - see Client() for default', '...': 'see Client() for other options' } } :rtype: nested dictionary
3.664181
1.89707
1.931495
return Institution( id=raw['id'], org=raw['org'], url=raw['url'], broker_id=raw.get('broker_id', ''), username=raw['username'], password=raw['password'], description=raw.get('description', None), client_args=raw.get('client_args', {}) )
def deserialize(raw)
Instantiate :py:class:`ofxclient.Institution` from a dictionary :param raw: serialized ``Institution`` :type raw: dict per :py:meth:`~Institution.serialize` :rtype: subclass of :py:class:`ofxclient.Institution`
3.535197
3.31451
1.066582
return hashlib.sha256(("%s%s" % ( self.institution.local_id(), self.number)).encode()).hexdigest()
def local_id(self)
Locally generated unique account identifier. :rtype: string
6.14431
7.368673
0.833842
days_ago = datetime.datetime.now() - datetime.timedelta(days=days) as_of = time.strftime("%Y%m%d", days_ago.timetuple()) query = self._download_query(as_of=as_of) response = self.institution.client().post(query) return StringIO(response)
def download(self, days=60)
Downloaded OFX response for the given time range :param days: Number of days to look back at :type days: integer :rtype: :py:class:`StringIO`
4.17875
3.699454
1.129559
if IS_PYTHON_2: return OfxParser.parse( self.download(days=days) ) else: return OfxParser.parse( BytesIO(self.download(days=days).read().encode()) )
def download_parsed(self, days=60)
Downloaded OFX response parsed by :py:meth:`OfxParser.parse` :param days: Number of days to look back at :type days: integer :rtype: :py:class:`ofxparser.Ofx`
5.280279
3.831855
1.377995
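A hedged sketch of consuming the parsed response; 'account' is assumed to be an ofxclient Account as above, and the attribute names follow the ofxparse package.

ofx = account.download_parsed(days=30)
for txn in ofx.account.statement.transactions:
    print(txn.date, txn.amount, txn.payee)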