file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
zplsc_c_echogram.py | 35
TVR = []
VTX = []
BP = []
EL = []
DS = []
# Freq 38kHz
TVR.append(1.691999969482e2)
VTX.append(1.533999938965e2)
BP.append(8.609999902546e-3)
EL.append(1.623000030518e2)
DS.append(2.280000038445e-2)
# Freq 125kHz
TVR.append(1.668999938965e2)
VTX.append(5.8e+01)
BP.append(1.530999969691e-2)
EL.append(1.376999969482e2)
DS.append(2.280000038445e-2)
# Freq 200kHz
TVR.append(1.688999938965e2)
VTX.append(9.619999694824e1)
BP.append(1.530999969691e-2)
EL.append(1.456000061035e2)
DS.append(2.250000089407e-2)
# Freq 455kHz
TVR.append(1.696000061035e2)
VTX.append(1.301000061035e2)
BP.append(8.609999902546e-3)
EL.append(1.491999969482e2)
DS.append(2.300000004470e-2)
class ZPLSCCPlot(object):
font_size_small = 14
font_size_large = 18
num_xticks = 25
num_yticks = 7
interplot_spacing = 0.1
lower_percentile = 5
upper_percentile = 95
def __init__(self, data_times, channel_data_dict, frequency_dict, depth_range, _min_db=None, _max_db=None):
self.fig = None
self.power_data_dict = self._transpose_and_flip(channel_data_dict)
if (_min_db is None) or (_max_db is None):
self.min_db, self.max_db = self._get_power_range(channel_data_dict)
else:
self.min_db = _min_db
self.max_db = _max_db
self.frequency_dict = frequency_dict
# convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
max_depth, _ = self.power_data_dict[1].shape
self._setup_plot(depth_range, max_depth)
def generate_plots(self):
"""
Generate plots for all channels in data set
"""
freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
data_axes = None
for index, frequency in enumerate(sorted(freq_to_channel)):
channel = freq_to_channel[frequency]
td_f = self.frequency_dict[channel]
title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
self.min_db, self.max_db)
if data_axes:
self._display_x_labels(self.ax[2], self.data_times)
self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def | (power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom))
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
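# xticks[1] is the spacing between tick positions, so xstep is the sampling interval used below to pull label times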
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length)
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb | _transpose_and_flip | identifier_name |
zplsc_c_echogram.py | (1.696000061035e2)
VTX.append(1.301000061035e2)
BP.append(8.609999902546e-3)
EL.append(1.491999969482e2)
DS.append(2.300000004470e-2)
class ZPLSCCPlot(object):
font_size_small = 14
font_size_large = 18
num_xticks = 25
num_yticks = 7
interplot_spacing = 0.1
lower_percentile = 5
upper_percentile = 95
def __init__(self, data_times, channel_data_dict, frequency_dict, depth_range, _min_db=None, _max_db=None):
self.fig = None
self.power_data_dict = self._transpose_and_flip(channel_data_dict)
if (_min_db is None) or (_max_db is None):
self.min_db, self.max_db = self._get_power_range(channel_data_dict)
else:
self.min_db = _min_db
self.max_db = _max_db
self.frequency_dict = frequency_dict
# convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
max_depth, _ = self.power_data_dict[1].shape
self._setup_plot(depth_range, max_depth)
def generate_plots(self):
"""
Generate plots for all channels in data set
"""
freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
data_axes = None
for index, frequency in enumerate(sorted(freq_to_channel)):
channel = freq_to_channel[frequency]
td_f = self.frequency_dict[channel]
title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
self.min_db, self.max_db)
if data_axes:
self._display_x_labels(self.ax[2], self.data_times)
self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def _transpose_and_flip(power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom))
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length)
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb):
"""
Compute the volume backscatter (Sv) values for one zplsc_c profile data record.
This code was borrowed from ASL MatLab code that reads in zplsc-c raw data
and performs calculations in order to compute the backscatter volume in db.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:param chan_data: Raw frequency data from the zplsc-c instrument.
:param sound_speed: Speed of sound based on temperature, pressure and salinity.
:param depth_range: Range of the depth of the measurements
:param sea_absorb: Seawater absorption coefficient for each frequency
:return: sv: Volume backscatter in db
"""
_N = []
if self.params.Bins2Avg > 1:
for chan in range(profile_hdr.num_channels):
el = self.cc.EL[chan] - 2.5/self.cc.DS[chan] + np.array(chan_data[chan])/(26214*self.cc.DS[chan])
power = 10**(el/10)
# Perform bin averaging
num_bins = len(chan_data[chan])/self.params.Bins2Avg
pwr_avg = []
for _bin in range(num_bins):
pwr_avg.append(np.mean(power[_bin*self.params.Bins2Avg:(_bin+1)*self.params.Bins2Avg]))
el_avg = 10*np.log10(pwr_avg)
_N.append(np.round(26214*self.cc.DS[chan]*(el_avg - self.cc.EL[chan] + 2.5/self.cc.DS[chan])))
else:
| for chan in range(profile_hdr.num_channels):
_N.append(np.array(chan_data[chan])) | conditional_block |
|
zplsc_c_echogram.py | 35
TVR = []
VTX = []
BP = []
EL = []
DS = []
# Freq 38kHz
TVR.append(1.691999969482e2)
VTX.append(1.533999938965e2)
BP.append(8.609999902546e-3)
EL.append(1.623000030518e2)
DS.append(2.280000038445e-2)
# Freq 125kHz
TVR.append(1.668999938965e2)
VTX.append(5.8e+01)
BP.append(1.530999969691e-2)
EL.append(1.376999969482e2)
DS.append(2.280000038445e-2)
# Freq 200kHz
TVR.append(1.688999938965e2)
VTX.append(9.619999694824e1)
BP.append(1.530999969691e-2)
EL.append(1.456000061035e2)
DS.append(2.250000089407e-2)
# Freq 455kHz
TVR.append(1.696000061035e2)
VTX.append(1.301000061035e2)
BP.append(8.609999902546e-3)
EL.append(1.491999969482e2)
DS.append(2.300000004470e-2)
class ZPLSCCPlot(object):
font_size_small = 14
font_size_large = 18
num_xticks = 25
num_yticks = 7
interplot_spacing = 0.1
lower_percentile = 5
upper_percentile = 95
def __init__(self, data_times, channel_data_dict, frequency_dict, depth_range, _min_db=None, _max_db=None):
self.fig = None
self.power_data_dict = self._transpose_and_flip(channel_data_dict)
if (_min_db is None) or (_max_db is None):
self.min_db, self.max_db = self._get_power_range(channel_data_dict)
else:
self.min_db = _min_db
self.max_db = _max_db
self.frequency_dict = frequency_dict
# convert ntp time, i.e. seconds since 1900-01-01 00:00:00 to matplotlib time
self.data_times = (data_times / (60 * 60 * 24)) + REF_TIME
max_depth, _ = self.power_data_dict[1].shape
self._setup_plot(depth_range, max_depth)
def generate_plots(self):
"""
Generate plots for all channels in data set
"""
freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}
data_axes = None
for index, frequency in enumerate(sorted(freq_to_channel)):
channel = freq_to_channel[frequency]
td_f = self.frequency_dict[channel]
title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)
data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,
self.min_db, self.max_db)
if data_axes:
self._display_x_labels(self.ax[2], self.data_times)
self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def _transpose_and_flip(power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom))
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
|
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb | time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length) | identifier_body |
zplsc_c_echogram.py | self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])
self._display_colorbar(self.fig, data_axes)
def write_image(self, filename):
self.fig.savefig(filename)
plt.close(self.fig)
self.fig = None
def _setup_plot(self, depth_range, max_depth):
# subset the yticks so that we don't plot every one
yticks = np.linspace(0, max_depth, self.num_yticks)
# create range vector (depth in meters)
yticklabels = np.round(np.linspace(depth_range[0][-1], depth_range[0][0], self.num_yticks)).astype(int)
self.fig, self.ax = plt.subplots(len(self.frequency_dict), sharex=True, sharey=True)
self.fig.subplots_adjust(hspace=self.interplot_spacing)
self.fig.set_size_inches(40, 19)
for axes in self.ax:
axes.grid(False)
axes.set_ylabel('depth (m)', fontsize=self.font_size_small)
axes.set_yticks(yticks)
axes.set_yticklabels(yticklabels, fontsize=self.font_size_small)
axes.tick_params(axis="both", labelcolor="k", pad=4, direction='out', length=5, width=2)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.spines['left'].set_visible(False)
@staticmethod
def _get_power_range(power_dict):
# Calculate the power data range across all channels
all_power_data = np.concatenate(power_dict.values())
max_db = np.nanpercentile(all_power_data, ZPLSCCPlot.upper_percentile)
min_db = np.nanpercentile(all_power_data, ZPLSCCPlot.lower_percentile)
return min_db, max_db
@staticmethod
def _transpose_and_flip(power_dict):
for channel in power_dict:
# Transpose array data so we have time on the x-axis and depth on the y-axis
power_dict[channel] = power_dict[channel].transpose()
# reverse the Y axis (so depth is measured from the surface (at the top) to the ZPLS (at the bottom))
power_dict[channel] = power_dict[channel][::-1]
return power_dict
@staticmethod
def _generate_plot(ax, power_data, title, min_db, max_db):
"""
Generate a ZPLS plot for an individual channel
:param ax: matplotlib axis to receive the plot image
:param power_data: Transducer data array
:param title: plot title
:param min_db: minimum power level
:param max_db: maximum power level
"""
# only generate plots for the transducers that have data
if power_data.size <= 0:
return
ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)
return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)
@staticmethod
def _display_x_labels(ax, data_times):
time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = data_times.size
# X axis label
# subset the xticks so that we don't plot every one
if time_length < ZPLSCCPlot.num_xticks:
ZPLSCCPlot.num_xticks = time_length
xticks = np.linspace(0, time_length, ZPLSCCPlot.num_xticks)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(data_times[::xstep])] + [num2date(data_times[-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
ax.set_xlabel('time (UTC)', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, rotation=45, horizontalalignment='center', fontsize=ZPLSCCPlot.font_size_small)
ax.set_xlim(0, time_length)
@staticmethod
def _display_colorbar(fig, data_axes):
# Add a colorbar to the specified figure using the data from the given axes
ax = fig.add_axes([0.965, 0.12, 0.01, 0.775])
cb = fig.colorbar(data_axes, cax=ax, use_gridspec=True)
cb.set_label('dB', fontsize=ZPLSCCPlot.font_size_large)
cb.ax.tick_params(labelsize=ZPLSCCPlot.font_size_small)
class ZPLSCCEchogram(object):
def __init__(self):
self.cc = ZplscCCalibrationCoefficients()
self.params = ZplscCParameters()
def compute_backscatter(self, profile_hdr, chan_data, sound_speed, depth_range, sea_absorb):
"""
Compute the volume backscatter (Sv) values for one zplsc_c profile data record.
This code was borrowed from ASL MatLab code that reads in zplsc-c raw data
and performs calculations in order to compute the backscatter volume in db.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:param chan_data: Raw frequency data from the zplsc-c instrument.
:param sound_speed: Speed of sound based on temperature, pressure and salinity.
:param depth_range: Range of the depth of the measurements
:param sea_absorb: Seawater absorption coefficient for each frequency
:return: sv: Volume backscatter in db
"""
_N = []
if self.params.Bins2Avg > 1:
for chan in range(profile_hdr.num_channels):
el = self.cc.EL[chan] - 2.5/self.cc.DS[chan] + np.array(chan_data[chan])/(26214*self.cc.DS[chan])
power = 10**(el/10)
# Perform bin averaging
num_bins = len(chan_data[chan])/self.params.Bins2Avg
pwr_avg = []
for _bin in range(num_bins):
pwr_avg.append(np.mean(power[_bin*self.params.Bins2Avg:(_bin+1)*self.params.Bins2Avg]))
el_avg = 10*np.log10(pwr_avg)
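# convert the bin-averaged echo level back to raw counts (N) using the detector sensitivity (DS) and echo level (EL) coefficients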
_N.append(np.round(26214*self.cc.DS[chan]*(el_avg - self.cc.EL[chan] + 2.5/self.cc.DS[chan])))
else:
for chan in range(profile_hdr.num_channels):
_N.append(np.array(chan_data[chan]))
sv = []
for chan in range(profile_hdr.num_channels):
# Calculate correction to Sv due to non square transmit pulse
sv_offset = zf.compute_sv_offset(profile_hdr.frequency[chan], profile_hdr.pulse_length[chan])
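# Sv = echo level (EL/DS terms from counts) - transmit terms (TVR, 20*log10(VTX)) + 20*log10(R) spreading + 2*alpha*R absorption - 10*log10(c*tau/2*BP) pulse-volume term + sv_offset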
sv.append(self.cc.EL[chan]-2.5/self.cc.DS[chan] + _N[chan]/(26214*self.cc.DS[chan]) - self.cc.TVR[chan] -
20*np.log10(self.cc.VTX[chan]) + 20*np.log10(depth_range[chan]) +
2*sea_absorb[chan]*depth_range[chan] -
10*np.log10(0.5*sound_speed*profile_hdr.pulse_length[chan]/1e6*self.cc.BP[chan]) +
sv_offset)
return sv
def compute_echogram_metadata(self, profile_hdr):
"""
Compute the metadata parameters needed to compute the zplsc-c volume backscatter values.
:param profile_hdr: Raw profile header with metadata from the zplsc-c instrument.
:return: sound_speed : Speed of sound based on temperature, pressure and salinity.
depth_range : Range of depth values of the zplsc-c data.
sea_absorb : Sea absorption based on temperature, pressure, salinity and frequency.
"""
# If the temperature sensor is available, compute the temperature from the counts.
temperature = 0
if profile_hdr.is_sensor_available:
temperature = zf.zplsc_c_temperature(profile_hdr.temperature, self.cc.ka, self.cc.kb, self.cc.kc,
self.cc.A, self.cc.B, self.cc.C)
sound_speed = zf.zplsc_c_ss(temperature, self.params.Pressure, self.params.Salinity)
_m = []
depth_range = []
for chan in range(profile_hdr.num_channels): | _m.append(np.array([x for x in range(1, (profile_hdr.num_bins[chan]/self.params.Bins2Avg)+1)]))
depth_range.append(sound_speed*profile_hdr.lockout_index[0]/(2*profile_hdr.digitization_rate[0]) +
(sound_speed/4)*(((2*_m[chan]-1)*profile_hdr.range_samples[0]*self.params.Bins2Avg-1) /
float(profile_hdr.digitization_rate[0]) +
profile_hdr.pulse_length[0]/1e6)) | random_line_split |
|
index.js | extends Component {
_isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[
{ text: 'ok' }
]);
}
}
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const upperBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < upperBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
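// keep only future events whose title contains 'conpass'; these are the events that get reminders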
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
}
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
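// fire the local notification timeToNotify minutes before the event's start time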
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialog box of the 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets how many minutes before an event the user wants to be notified
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {object} r1 - previous row item
* @param {object} r2 - next row item
* Determines whether two agenda rows differ (compared by name)
*/
rowHasChanged(r1, r2) {
return r1.name !== r2.name;
}
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: | DashboardScreen | identifier_name |
|
index.js | _isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[
{ text: 'ok' }
]);
}
}
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const upperBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < upperBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) |
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialog box of the 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets how many minutes before an event the user wants to be notified
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {object} r1 - previous row item
* @param {object} r2 - next row item
* Determines whether two agenda rows differ (compared by name)
*/
rowHasChanged(r1, r2) {
return r1.name !== r2.name;
}
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: '10 | {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
} | conditional_block |
index.js | _isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[
{ text: 'ok' }
]);
}
}
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const upperBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < upperBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
}
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialog box of the 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets how many minutes before an event the user wants to be notified
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {object} r1 - previous row item
* @param {object} r2 - next row item
* Determines whether two agenda rows differ (compared by name)
*/
rowHasChanged(r1, r2) |
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: '10 | {
return r1.name !== r2.name;
} | identifier_body |
index.js | {
_isMounted = false
constructor(props) {
super(props);
this.state = {
items: {},
isDialogVisible: false,
notifyEvents: this.notify(props.navigation.state.params.events),
pushNotficationToken: '',
timeToNotify: 1,
synchronizedEvents:
this.structureSynchronizedEvents(props.navigation.state.params.events.items)
};
}
async componentDidMount() {
this.registerForPushNotificationsAsync();
this._isMounted = true;
if (Platform.OS === 'android') {
Notifications.createChannelAndroidAsync('reminders', {
name: 'Reminders',
priority: 'max',
vibrate: [0, 250, 250, 250],
});
}
}
componentWillUnmount() {
this._isMounted = false;
}
/**
* Registers device to receive push notifications
*/
registerForPushNotificationsAsync = async () => {
const { status } = await Permissions.askAsync(Permissions.NOTIFICATIONS);
if (status !== 'granted') {
Alert.alert(i18n.t('permissionNotGranted'),
i18n.t('allowNotifications'),
[ |
/**
*
* @param {object} day - information of the current day
* formats items with information of the day
*/
loadItems = (day) => {
setTimeout(() => {
const upperBoundForSync = 85;
const lowerBoundForSync = -15;
for (let i = lowerBoundForSync; i < upperBoundForSync; i++) {
const time = day.timestamp + i * 24 * 60 * 60 * 1000;
const strTime = this.timeToString(time);
if (!this.state.items[strTime]) {
this.state.items[strTime] = [];
const todayEvents = this.state.synchronizedEvents
.filter((event) => { return strTime === event.date; });
const numItems = todayEvents.length;
for (let j = 0; j < numItems; j++) {
this.state.items[strTime].push({
name: todayEvents[j].title,
startTime: todayEvents[j].startTime,
endTime: todayEvents[j].endTime,
description: todayEvents[j].description,
address: todayEvents[j].address,
height: 80
});
}
}
}
const newItems = {};
Object.keys(this.state.items).forEach((key) => { newItems[key] = this.state.items[key]; });
if (this._isMounted) {
this.setState({
items: newItems
});
}
}, 1000);
}
/**
*
* @param {object} events - All the user events
* Formats the events to only return required events
*/
notify = (events) => {
const notifyArray = [];
events.items.forEach((element) => {
const date = new Date(element.start.dateTime);
if (element.summary) {
if (element.summary.includes('conpass') && date.getTime() > (new Date()).getTime()) {
notifyArray.push({
startDate: element.start.dateTime,
summary: element.summary,
});
}
}
});
return notifyArray;
};
/**
* Schedules push notifications to user upon adjusting the timer
*/
sendPushNotification = () => {
Notifications.cancelAllScheduledNotificationsAsync();
this.state.notifyEvents.forEach((element) => {
const localNotification = {
to: this.state.pushNotficationToken,
sound: 'default',
priority: 'high',
title: 'Conpass Notification',
body: element.summary,
channelId: 'reminders',
ios: { _displayInForeground: true }
};
const date = new Date(element.startDate);
const t = date.getTime() - this.state.timeToNotify * 60 * 1000;
const schedulingOptions = {
time: t
};
Notifications.scheduleLocalNotificationAsync(localNotification, schedulingOptions);
});
return 'Notifications sent';
}
/**
* @param {boolean} boolean - True or false
* Shows or hides the dialog box of the 'Adjust time' button
*/
showDialog=(boolean) => {
if (this._isMounted) {
this.setState({ isDialogVisible: boolean });
}
}
/**
* @param {integer} number - Time in minutes
* Sets how many minutes before an event the user wants to be notified
*/
sendInput = (number) => {
if (/^\d+$/.test(number.toString())) {
if (this._isMounted) {
this.setState({ timeToNotify: number });
this.setState({ isDialogVisible: false });
}
} else {
// your call back function
Alert.alert(i18n.t('numbersOnly'),
'',
[
{ text: 'ok' }
]);
return;
}
setTimeout(() => {
this.sendPushNotification();
}, 100);
}
/**
* Fetches new events on google calendar
*/
refreshCalendar =async () => {
const accessToken = await AsyncStorage.getItem('accessToken');
const userInfoResponse = await fetch('https://www.googleapis.com/calendar/v3/calendars/primary/events?key=AIzaSyBAHObp5Ic3CbJpkX2500tNhf53e_3wBMA&timeMin=2020-01-01T01:00:00.000Z', {
headers: { Authorization: `Bearer ${accessToken}` },
});
const jsonFile = await userInfoResponse.json();
const { error } = jsonFile;
if (error) {
firebase.auth().signOut();
this.props.navigation.navigate('LoginScreen');
Alert.alert(i18n.t('logInAgain'),
'',
[
{ text: 'ok' }
]);
return;
}
const stringFile = JSON.stringify(jsonFile);
AsyncStorage.setItem('events', stringFile);
this.props.navigation.navigate('FetchScreen');
}
/**
* @param {String} description - A location to go to
* Navigates to the home component
*/
sendDirections = (description) => {
if (!description) {
Alert.alert(i18n.t('noAddress'),
'',
[
{ text: 'ok' }
]);
return '';
}
this.props.navigation.navigate('HomeScreen', { description });
return 'address sent';
}
/**
* @param {object} r1 - previous row item
* @param {object} r2 - next row item
* Determines whether two agenda rows differ (compared by name)
*/
rowHasChanged(r1, r2) {
return r1.name !== r2.name;
}
/**
* @param {integer} time - time of the event
* restructure time in a certain format
*/
timeToString(time) {
const date = new Date(time);
return date.toISOString().split('T')[0];
}
/**
* @param {object} events - All the events the user has
* Structures all the events the user has
*/
structureSynchronizedEvents(events) {
const tempArray = [];
events.forEach((event) => {
tempArray.push(
{
date: event.start.dateTime != null ? event.start.dateTime.substring(0, event.start.dateTime.indexOf('T')) : event.start.date,
title: event.summary != null ? event.summary : 'No Title For this Event',
startTime: event.start.dateTime != null ? event.start.dateTime : event.start.date,
endTime: event.end.dateTime != null ? event.end.dateTime : event.end.date,
description: event.description != null ? event.description : '',
address: event.location != null ? event.location : ''
}
);
});
if (this._isMounted) {
this.setState({
synchronizedEvents: tempArray
});
}
return tempArray;
}
/**
* @param {object} item - information of item
* present event in the agenda
*/
renderItem(item) {
const { address } = item;
const { description } = item;
return (
<TouchableOpacity
style={[styles.item, { height: item.height }]}
onPress={() => {
return Alert.alert(item.name,
`${item.startTime} - ${item.endTime}\n${item.description}\n${item.address}`,
[
{ text: i18n.t('cancel') },
{
text: i18n.t('getDirections'),
onPress: () => {
if (address) { this.sendDirections(address.split(',')[0]); } else { this.sendDirections(description.split('\n')[0]); }
}
},
],
{ cancelable: false });
}}
>
<Text style={{ color: 'white' }}>{item.name}</Text>
</TouchableOpacity>
);
}
/**
* add line to empty day
*/
renderEmptyDate = () => {
return (
<View
style={{
borderBottomColor: 'rgba(105,105,105,0.1)',
borderBottomWidth: 1,
}}
/>
);
}
render() {
return (
<View
style={{ height: '100%', width: '10 | { text: 'ok' }
]);
}
} | random_line_split |
browse.ts | /api';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'',
group:'',
type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. Might need to wait.
// prior to doing any api requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
}
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async | () {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time',
handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
}
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
| presentLoading | identifier_name |
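The loadData() handler above derives the last page from the API response fields total and rows_per_page. Pulling that check into a helper keeps the infinite-scroll code readable; the helper name and loose typing below are illustrative only, not part of the page.

// Sketch only: name and response typing are assumptions based on the fields
// loadData() already reads.
function isLastPage(response: any, currentPage: number): boolean {
  const totalPages = Math.ceil(response['total'] / response['rows_per_page']);
  return currentPage >= totalPages;
}
// Possible use inside loadData(), mirroring the commented-out branch:
//   if (isLastPage(response, this.currentPage)) { event.enable(false); }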
browse.ts | /api';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'',
group:'',
type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. We might need to wait
// before making any API requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
}
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async presentLoading() {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() | handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
}
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
| {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time', | identifier_body |
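networkCheck() above subscribes to onDisconnect and then unsubscribes on the very next line, so only the onConnect watcher actually stays alive. If the intent is to track both transitions for the lifetime of the page, one option is to hold the subscriptions in fields and tear them down when the page is destroyed. This is a hypothetical variant; the field names and the ngOnDestroy hook are assumptions, not part of the original class.

// Hypothetical variant of networkCheck() with explicit teardown.
private disconnectSub;
private connectSub;
watchNetwork() {
  this.disconnectSub = this.network.onDisconnect().subscribe(() => {
    this.networkPresent = false;
  });
  this.connectSub = this.network.onConnect().subscribe(() => {
    // wait briefly before trusting network.type, as in the original code
    setTimeout(() => { this.networkPresent = true; }, 3000);
  });
}
ngOnDestroy() {
  if (this.disconnectSub) { this.disconnectSub.unsubscribe(); }
  if (this.connectSub) { this.connectSub.unsubscribe(); }
}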
browse.ts | ';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'',
group:'',
type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. We might need to wait
// before making any API requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') |
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async presentLoading() {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time',
handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
}
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
| {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
} | conditional_block |
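The commented-out block in ngOnInit() resolves the student's currency by scanning val.currencies with an index loop. If that path is ever revived, Array.prototype.find expresses the same lookup more directly; student_currency and currency_id come from the commented code, the rest is a sketch.

// Equivalent of the commented-out loop above, as a sketch.
const currency = val.student_currency;
this.currency = (val.currencies || []).find(
  (c: any) => c.currency_id === currency
);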
browse.ts | /api';
import { SettingsProvider } from '../../../providers/settings/settings';
import { Network } from '@ionic-native/network';
import { CoursePage } from '../course/course';
/**
* Generated class for the BrowsePage page.
*
* See https://ionicframework.com/docs/components/#navigation for more info on
* Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-browse',
templateUrl: 'browse.html',
})
export class BrowsePage {
@ViewChild(InfiniteScroll) infiniteScroll:InfiniteScroll;
@ViewChild(Content) content: Content;
public isOn: boolean = false;
public isCategory: boolean = false;
public courses;
public error:boolean = false;
private loading;
public settings;
public currency;
public exchangeRate;
public params= {
filter:'',
sort:'',
group:'',
type:''
};
public currentPage=1;
public showLoading:boolean = false;
public sortLib ={
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
public catLib = {};
public networkPresent= true;
public showRetry=false;
constructor(public popoverCtrl: PopoverController,public app:App,public navCtrl: NavController, public navParams: NavParams, private apiService:ApiProvider,public loadingController: LoadingController,public toastController: ToastController,public settingService:SettingsProvider,public actionSheetController: ActionSheetController,public network:Network) {
// this.params = {
// filter:'',
// sort:'',
// group:'',
// type:''
// };
}
private clearParams(){
this.params = {
filter:'',
sort:'',
group:'',
type:'',
};
}
async ionViewWillEnter(){
this.currency = await this.settingService.getCurrency();
console.log('Entered browse view');
}
async ngOnInit() {
this.params = {
filter:'',
sort:'', | type:''
};
this.sortLib = {
asc : "Alphabetical (asc)",
desc: "Alphabetical (desc)",
recent : "Most Recent",
priceAsc : "Price (Lowest)",
priceDesc : "Price (Highest)",
c : "Online Courses",
"s-b" : "Training Sessions"
};
console.log(this.params);
console.log(this.sortLib);
// let data = await this.settingService.getSetting('widgets');
this.networkCheck();
//get courses
this.showLoading= true;
this.loadCourses(1).subscribe(response=>{
this.courses= response['records'];
console.log(response);
this.currentPage = 1;
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
this.currency = await this.settingService.getCurrency();
this.settingService.getSettings().then(val => {
this.settings = val;
//load category library
for(let i=0; i < this.settings.categories.length; i++){
this.catLib[this.settings.categories[i].session_category_id] = this.settings.categories[i].category_name;
}
console.log('showing cat lib');
console.log(this.catLib);
// let currency = val.student_currency;
// //get exchange rate for currency
// let currencyList = val.currencies;
// let currencyObj:any;
// for(let i=0; i<currencyList.length;i++){
// let obj = currencyList[i];
// if(obj.currency_id==currency){
// currencyObj = obj;
// }
// }
// this.currency = currencyObj;
});
}
networkCheck(){
// watch network for a disconnection
let disconnectSubscription = this.network.onDisconnect().subscribe(() => {
console.log('network was disconnected :-(');
this.networkPresent= false;
});
// stop disconnect watch
disconnectSubscription.unsubscribe();
// watch network for a connection
let connectSubscription = this.network.onConnect().subscribe(() => {
console.log('network connected!');
// We just got a connection but we need to wait briefly
// before we determine the connection type. We might need to wait
// before making any API requests as well.
setTimeout(() => {
if (this.network.type === 'wifi') {
console.log('we got a wifi connection, woohoo!');
this.networkPresent= true;
}
}, 3000);
});
}
search(){
this.showLoading = true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading = false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
private loadCourses(page){
this.showRetry= false;
// this.currentPage = page;
//this.infiniteScroll.enable(true);
return this.apiService.getSessions(page,30,this.params.group,this.params.filter,this.params.sort,this.params.type);
}
async presentLoading() {
this.loading = await this.loadingController.create({
content: 'Loading. Please wait...',
duration: 2000000000
});
return await this.loading.present();
}
getButtonText(): string {
return `Switch ${ this.isOn ? 'Off' : 'On' }`;
}
setState(): void {
this.isOn = !this.isOn;
}
toggleCategory(){
this.content.scrollToTop();
this.isCategory = !this.isCategory;
}
toggleDetails() {
this.isOn = !this.isOn;
if(!this.isOn && this.params.filter != ""){
this.params.filter = "";
this.showLoading= true;
this.loadCourses(1).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading= false;
});
}
}
clearSearch(){
console.log('clear clicked');
}
loadData(event){
this.currentPage++;
console.log('scroll starting: '+this.currentPage);
this.loadCourses(this.currentPage).subscribe(response=>{
//console.log(response['records']);
this.courses= this.courses.concat(response['records']);
console.log(this.courses);
event.complete();
//determine if this was the last page
let totalPages = Math.ceil((response['total']/response['rows_per_page']));
console.log(totalPages);
if(this.currentPage >= totalPages){
// this.currentPage--;
// event.enable(false);
}
}, err => {
event.complete();
this.presentToast('Network error! Please check your Internet Connection and try again');
console.log(err.message);
})
}
reloadCourses(page){
this.content.scrollToTop();
this.showLoading=true;
this.currentPage=page;
this.loadCourses(page).subscribe(resp=>{
this.courses = resp['records'];
this.showLoading=false;
}, err => {
this.showRetry = true;
this.showLoading= false;
this.presentToast('Network error! Please check your Internet connection');
console.log(err.message);
});
}
async presentActionSheet() {
const actionSheet = await this.actionSheetController.create({
title: 'Sort By',
buttons: [{
text: 'Alphabetical (asc)',
icon: 'arrow-round-up',
handler: () => {
this.params.sort = "asc";
this.reloadCourses(1);
}
}, {
text: 'Alphabetical (desc)',
icon: 'arrow-round-down',
handler: () => {
this.params.sort = "desc";
this.reloadCourses(1);
}
}, {
text: 'Most Recent',
icon: 'time',
handler: () => {
this.params.sort = "recent";
this.reloadCourses(1);
}
}, {
text: 'Price (Lowest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceAsc";
this.reloadCourses(1);
}
},{
text: 'Price (Highest)',
icon: 'cash',
handler: () => {
this.params.sort = "priceDesc";
this.reloadCourses(1);
}
},
{
text: 'Online Courses',
icon: 'globe',
handler: () => {
this.params.type = "c";
this.reloadCourses(1);
}
},{
text: 'Training Sessions',
icon: 'people',
handler: () => {
this.params.type = "s-b";
this.reloadCourses(1);
}
}, {
text: 'Reset',
icon: 'refresh',
handler: () => {
this.params.type = "";
this.params.sort = "";
this.reloadCourses(1);
}
}]
});
await actionSheet.present();
}
loadCategory(category){
this.params.group= category;
this.isCategory = false;
this.reloadCourses(1);
}
async presentToast(message:string) {
const toast = await this.toastController.create({
message: message,
duration: 3000
});
toast.present();
}
| group:'', | random_line_split |
lib.rs | coords.iter().copied().collect() }
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ConwayCube {
Void,
Inactive,
Active,
}
impl ConwayCube {
/// Construct cube from input character.
#[must_use]
pub fn from_char(cube: char) -> ConwayCube {
match cube {
'.' => ConwayCube::Inactive,
'#' => ConwayCube::Active,
_ => {
let e = format!("invalid cube character {}", cube);
panic!("{}", e);
}
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InfiniteGrid {
cubes: Vec<ConwayCube>,
dim: usize,
edge: i32,
}
impl fmt::Display for InfiniteGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let half_edge = self.edge / 2;
let mut s = String::new();
for coords in (0..self.dim).map(|_i| 0..self.edge).multi_cartesian_product() {
if coords[self.dim - 2] == 0 && coords[self.dim - 1] == 0 {
let axes = vec!['z', 'w', 'v', 'u'];
for d in 2..self.dim {
if d > 2 {
s += ", ";
}
let label = format!("{}={}", axes[d - 2], coords[self.dim - d - 1] - half_edge);
s += &label;
}
s += "\n";
}
let pos = Position::from(&coords);
s += match self.cube_at(&pos) {
ConwayCube::Void => "_",
ConwayCube::Inactive => ".",
ConwayCube::Active => "#",
};
if coords[self.dim - 1] >= self.edge - 1 {
s += "\n";
}
}
write!(f, "{}", s)
}
}
pub struct InfiniteGridIter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input
.lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect();
// create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() |
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y | {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
} | identifier_body |
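active_neighbors_at() and cube_round() both lean on itertools' multi_cartesian_product to enumerate coordinate combinations in dim dimensions. In isolation, the neighbour-offset enumeration they perform looks like this (3^dim combinations minus the all-zero centre); the function name is illustrative.

use itertools::Itertools;

// Sketch: the offsets active_neighbors_at() walks for a `dim`-dimensional cube.
fn neighbour_offsets(dim: usize) -> Vec<Vec<i32>> {
    (0..dim)
        .map(|_| -1..=1)
        .multi_cartesian_product()
        .filter(|deltas| deltas.iter().any(|d| *d != 0)) // skip the centre cell
        .collect()
}
// neighbour_offsets(3).len() == 26; neighbour_offsets(4).len() == 80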
lib.rs | : coords.iter().copied().collect() }
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ConwayCube {
Void,
Inactive,
Active,
}
impl ConwayCube {
/// Construct cube from input character.
#[must_use]
pub fn from_char(cube: char) -> ConwayCube {
match cube {
'.' => ConwayCube::Inactive,
'#' => ConwayCube::Active,
_ => {
let e = format!("invalid cube character {}", cube);
panic!("{}", e);
}
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InfiniteGrid {
cubes: Vec<ConwayCube>,
dim: usize,
edge: i32,
}
impl fmt::Display for InfiniteGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let half_edge = self.edge / 2;
let mut s = String::new();
for coords in (0..self.dim).map(|_i| 0..self.edge).multi_cartesian_product() {
if coords[self.dim - 2] == 0 && coords[self.dim - 1] == 0 {
let axes = vec!['z', 'w', 'v', 'u'];
for d in 2..self.dim {
if d > 2 {
s += ", ";
}
let label = format!("{}={}", axes[d - 2], coords[self.dim - d - 1] - half_edge);
s += &label;
}
s += "\n";
}
let pos = Position::from(&coords);
s += match self.cube_at(&pos) {
ConwayCube::Void => "_",
ConwayCube::Inactive => ".",
ConwayCube::Active => "#",
};
if coords[self.dim - 1] >= self.edge - 1 {
s += "\n";
}
}
write!(f, "{}", s)
}
}
pub struct InfiniteGridIter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input | // create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
}
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y() {
| .lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect(); | random_line_split |
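Position::index_for(edge) maps an n-dimensional coordinate onto the flat cubes vector, but its body sits outside this excerpt. Assuming the conventional row-major layout (last coordinate varies fastest, which matches how from_input() places x in dims[dim-1] and y in dims[dim-2]), it reduces to a fold; this is an assumed sketch, not the crate's actual code.

use std::convert::TryFrom;

// Assumed behaviour of Position::index_for (row-major flattening).
fn index_for(coords: &[i32], edge: i32) -> usize {
    let flat = coords
        .iter()
        .fold(0_i64, |acc, &c| acc * i64::from(edge) + i64::from(c));
    usize::try_from(flat).unwrap()
}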
lib.rs | ={}", axes[d - 2], coords[self.dim - d - 1] - half_edge);
s += &label;
}
s += "\n";
}
let pos = Position::from(&coords);
s += match self.cube_at(&pos) {
ConwayCube::Void => "_",
ConwayCube::Inactive => ".",
ConwayCube::Active => "#",
};
if coords[self.dim - 1] >= self.edge - 1 {
s += "\n";
}
}
write!(f, "{}", s)
}
}
pub struct InfiniteGridIter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input
.lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect();
// create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
}
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, -1, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 3, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_x() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, -1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 3]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_6_rounds() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= N_ROUNDS - 1 {
n_active = g.active_cubes();
break;
}
}
assert_eq!(112, n_active);
}
#[test]
fn | test_6_rounds_4d | identifier_name |
|
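The tests drive the simulation through the iterator returned by iter(); each next() call yields the grid after one more round. Outside the tests the same pattern condenses to nth(). A sketch, assuming rounds >= 1:

// Sketch: count active cubes after `rounds` rounds (rounds must be >= 1).
fn active_after(grid: &InfiniteGrid, rounds: usize) -> usize {
    grid.iter()
        .nth(rounds - 1)            // grid state after `rounds` rounds
        .map(|g| g.active_cubes())
        .unwrap_or_else(|| grid.active_cubes())
}
// active_after(&grid, 6) == 112 for the 3-D TINY_LAYOUT used in test_6_rounds.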
lib.rs | Iter {
layout: InfiniteGrid,
}
impl Iterator for InfiniteGridIter {
type Item = InfiniteGrid;
fn next(&mut self) -> Option<Self::Item> {
let prev_layout = self.layout.clone();
self.layout = prev_layout.cube_round();
Some(self.layout.clone())
}
}
impl InfiniteGrid {
/// Return edge length.
#[must_use]
pub fn edge(&self) -> i32 {
self.edge
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from file at `path`.
///
/// # Errors
///
/// Returns `Err` if the input file cannot be opened, or if line is
/// found with invalid format.
pub fn read_from_file(path: &str, dim: usize) -> Result<InfiniteGrid> {
let s: String = fs::read_to_string(path)?;
InfiniteGrid::from_input(&s, dim)
}
/// Construct `InfiniteGrid` of dimension `dim` by reading initial grid
/// plane from `input` string.
///
/// # Errors
///
/// Returns `Err` if a line is found with invalid format.
pub fn from_input(input: &str, dim: usize) -> Result<InfiniteGrid> {
let edge = i32::try_from(input.lines().next().unwrap().len()).unwrap();
let half_edge = edge / 2;
// parse and store initial 2D grid plane:
let cubes_2d: Vec<ConwayCube> = input
.lines()
.flat_map(|line| line.trim().chars().map(ConwayCube::from_char))
.collect();
// create empty nD grid, and copy initial 2D grid plane to it
// (in the middle of each dimension beyond the first two):
let max_i = InfiniteGrid::index_size(edge, dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for y in 0..edge {
for x in 0..edge {
let from_i = usize::try_from(y * edge + x).unwrap();
let mut dims = vec![half_edge; dim];
dims[dim-1] = x;
dims[dim-2] = y;
let to_i = Position::from(&dims).index_for(edge);
cubes[to_i] = cubes_2d[from_i];
}
}
Ok(Self { cubes, dim, edge })
}
#[must_use]
fn index_size(edge: i32, dim: usize) -> usize {
let mut s = 1_i32;
for _i in 0..dim {
s *= edge;
}
usize::try_from(s).unwrap()
}
#[must_use]
pub fn iter(&self) -> InfiniteGridIter {
InfiniteGridIter {
layout: self.clone(),
}
}
/// Return `ConwayCube` at the given position.
#[must_use]
pub fn cube_at(&self, pos: &Position) -> ConwayCube {
for c in &pos.coords {
if *c < 0 || *c >= self.edge {
return ConwayCube::Void;
}
}
self.cubes[pos.index_for(self.edge)]
}
/// Return count of active cubes.
#[must_use]
pub fn active_cubes(&self) -> usize {
self.cubes.iter().filter(|&c| *c == ConwayCube::Active).count()
}
/// Return count of active neighbor cubes of the cube at the given
/// position.
#[must_use]
pub fn active_neighbors_at(&self, pos: &Position) -> usize {
let mut n_active: usize = 0;
for deltas in (0..self.dim).map(|_i| -1..=1).multi_cartesian_product() {
if deltas.iter().all(|dc| *dc == 0) {
continue;
}
let d_coords: Vec<i32> = pos.coords
.iter()
.enumerate()
.map(|(i, dc)| *dc + deltas[i])
.collect();
let d_pos = Position::from(&d_coords);
if self.cube_at(&d_pos) == ConwayCube::Active {
n_active += 1;
}
}
n_active
}
/// Do one round of cube life. Returns a new grid which is one unit
/// bigger in all directions (its edge increases by 2 in all
/// dimensions).
#[must_use]
pub fn cube_round(&self) -> InfiniteGrid {
let edge = self.edge + 2;
let max_i = InfiniteGrid::index_size(edge, self.dim);
let mut cubes: Vec<ConwayCube> = vec![ConwayCube::Void; max_i];
for coords in (0..self.dim).map(|_i| 0..edge).multi_cartesian_product() {
let from_coords: Vec<i32> = coords.iter().map(|c| c - 1).collect();
let from_pos = Position::from(&from_coords);
let pos = Position::from(&coords);
cubes[pos.index_for(edge)] = self.new_cube_at(&from_pos);
}
InfiniteGrid { cubes, dim: self.dim, edge }
}
fn new_cube_at(&self, pos: &Position) -> ConwayCube {
match self.cube_at(&pos) {
ConwayCube::Active => {
let n = self.active_neighbors_at(&pos);
if n == 2 || n == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
_ => {
if self.active_neighbors_at(&pos) == 3 {
ConwayCube::Active
} else {
ConwayCube::Inactive
}
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const TINY_LAYOUT: &'static str = ".#.\n..#\n###\n";
#[test]
fn test_from_input() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
assert_eq!(3, grid.edge);
assert_eq!(5, grid.active_cubes());
}
#[test]
fn test_grid_indexing() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, 1]);
assert_eq!(ConwayCube::Active, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 0]);
assert_eq!(ConwayCube::Inactive, grid.cube_at(&pos));
let pos = Position::from(&[0, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_z() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[-1, 0, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[3, 1, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_y() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, -1, 1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 3, 0]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_grid_indexing_void_x() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let pos = Position::from(&[1, 0, -1]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
let pos = Position::from(&[1, 1, 3]);
assert_eq!(ConwayCube::Void, grid.cube_at(&pos));
}
#[test]
fn test_6_rounds() {
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 3).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= N_ROUNDS - 1 {
n_active = g.active_cubes();
break;
}
}
assert_eq!(112, n_active);
}
#[test]
fn test_6_rounds_4d() {
// the full N_ROUNDS takes a while so:
let n_rounds = 1; // = N_ROUNDS;
let grid = InfiniteGrid::from_input(TINY_LAYOUT, 4).unwrap();
let mut n_active: usize = 0;
for (i, g) in grid.iter().enumerate() {
if i >= n_rounds - 1 {
n_active = g.active_cubes();
break;
}
}
let expect = if n_rounds == 1 | { 3*4 + 5 + 3*4 } | conditional_block |
|
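For the 4-D test just above, the conditional_block middle supplies the body of `let expect = if n_rounds == 1`: `{ 3*4 + 5 + 3*4 }`. After a single 4-D round the centre (z, w) plane keeps its 5 active cells and each of the eight neighbouring (z, w) planes holds 3, so the expected count is 3*4 + 5 + 3*4 = 29. The full N_ROUNDS run on this layout is what the "takes a while" comment refers to; its expected total (848 for this starting pattern) lies outside this excerpt.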
encoder.rs | om_codec_cx_pkt_kind::AOM_CODEC_CUSTOM_PKT => {
let b = to_buffer(unsafe { pkt.data.raw });
AOMPacket::Custom(b)
}
_ => panic!("No packet defined"),
}
}
}
#[cfg(target_os = "windows")]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as i32;
img.tc = fmt.get_xfer() as i32;
img.mc = fmt.get_matrix() as i32;
}
#[cfg(not(target_os = "windows"))]
fn map_fmt_to_img(img: &mut aom_image, fmt: &Formaton) {
img.cp = fmt.get_primaries() as u32;
img.tc = fmt.get_xfer() as u32;
img.mc = fmt.get_matrix() as u32;
}
// TODO: Extend
fn map_formaton(img: &mut aom_image, fmt: &Formaton) {
if fmt == YUV420 {
img.fmt = aom_img_fmt::AOM_IMG_FMT_I420;
} else {
unimplemented!();
}
img.bit_depth = 8;
img.bps = 12;
img.x_chroma_shift = 1;
img.y_chroma_shift = 1;
map_fmt_to_img(img, fmt);
}
fn img_from_frame(frame: &Frame) -> aom_image {
let mut img: aom_image = unsafe { mem::zeroed() };
if let MediaKind::Video(ref v) = frame.kind {
map_formaton(&mut img, &v.format);
img.w = v.width as u32;
img.h = v.height as u32;
img.d_w = v.width as u32;
img.d_h = v.height as u32;
}
// populate the buffers
for i in 0..frame.buf.count() {
let s: &[u8] = frame.buf.as_slice(i).unwrap();
img.planes[i] = s.as_ptr() as *mut u8;
img.stride[i] = frame.buf.linesize(i).unwrap() as i32;
}
img
}
/// AV1 Encoder
pub struct AV1Encoder {
pub(crate) ctx: aom_codec_ctx_t,
pub(crate) iter: aom_codec_iter_t,
}
unsafe impl Send for AV1Encoder {} // TODO: Make sure it cannot be abused
impl AV1Encoder {
/// Create a new encoder using the provided configuration
///
/// You may use `get_encoder` instead.
pub fn new(cfg: &mut AV1EncoderConfig) -> Result<AV1Encoder, aom_codec_err_t::Type> {
let mut ctx = MaybeUninit::uninit();
let ret = unsafe {
aom_codec_enc_init_ver(
ctx.as_mut_ptr(),
aom_codec_av1_cx(),
cfg.cfg(),
0,
AOM_ENCODER_ABI_VERSION as i32,
)
};
match ret {
aom_codec_err_t::AOM_CODEC_OK => {
let ctx = unsafe { ctx.assume_init() };
let mut enc = AV1Encoder {
ctx,
iter: ptr::null(),
};
// Apparently aom 2.0 would crash if a CPUUSED is not set explicitly.
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.expect("Cannot set CPUUSED");
Ok(enc)
}
_ => Err(ret),
}
}
/// Update the encoder parameters after-creation
///
/// It calls `aom_codec_control_`
pub fn control(
&mut self,
id: aome_enc_control_id::Type,
val: i32,
) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_control(&mut self.ctx, id as i32, val) };
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
// TODO: Cache the image information
//
/// Send an uncompressed frame to the encoder
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode`.
///
/// [`get_packet`]: #method.get_packet
pub fn encode(&mut self, frame: &Frame) -> Result<(), aom_codec_err_t::Type> {
let img = img_from_frame(frame);
let ret = unsafe { aom_codec_encode(&mut self.ctx, &img, frame.t.pts.unwrap(), 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Notify the encoder that no more data will be sent
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode` with NULL arguments.
///
/// [`get_packet`]: #method.get_packet
pub fn flush(&mut self) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_encode(&mut self.ctx, ptr::null_mut(), 0, 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Retrieve the compressed data
///
/// To be called until it returns `None`.
///
/// It calls `aom_codec_get_cx_data`.
pub fn get_packet(&mut self) -> Option<AOMPacket> {
let pkt = unsafe { aom_codec_get_cx_data(&mut self.ctx, &mut self.iter) };
if pkt.is_null() {
None
} else {
Some(AOMPacket::new(unsafe { *pkt }))
}
}
}
impl Drop for AV1Encoder {
fn drop(&mut self) {
unsafe { aom_codec_destroy(&mut self.ctx) };
}
}
impl AOMCodec for AV1Encoder {
fn get_context(&mut self) -> &mut aom_codec_ctx {
&mut self.ctx
}
}
#[cfg(feature = "codec-trait")]
mod encoder_trait {
use super::*;
use av_codec::encoder::*;
use av_codec::error::*;
use av_data::frame::ArcFrame;
use av_data::params::{CodecParams, MediaKind, VideoInfo};
use av_data::value::Value;
pub struct Des {
descr: Descr,
}
pub struct Enc {
cfg: AV1EncoderConfig,
enc: Option<AV1Encoder>,
}
impl Descriptor for Des {
type OutputEncoder = Enc;
fn create(&self) -> Self::OutputEncoder {
Enc {
cfg: AV1EncoderConfig::new().unwrap(),
enc: None,
}
}
fn describe(&self) -> &Descr {
&self.descr
}
}
impl Encoder for Enc {
fn | (&mut self) -> Result<()> {
if self.enc.is_none() {
self.cfg
.get_encoder()
.map(|enc| {
self.enc = Some(enc);
})
.map_err(|_err| Error::ConfigurationIncomplete)
} else {
unimplemented!()
}
}
// TODO: have it as default impl?
fn get_extradata(&self) -> Option<Vec<u8>> {
None
}
fn send_frame(&mut self, frame: &ArcFrame) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.encode(frame).map_err(|_| unimplemented!())
}
fn receive_packet(&mut self) -> Result<Packet> {
let enc = self.enc.as_mut().unwrap();
if let Some(p) = enc.get_packet() {
match p {
AOMPacket::Packet(pkt) => Ok(pkt),
_ => unimplemented!(),
}
} else {
Err(Error::MoreDataNeeded)
}
}
fn flush(&mut self) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.flush().map_err(|_| unimplemented!())
}
fn set_option<'a>(&mut self, key: &str, val: Value<'a>) -> Result<()> {
match (key, val) {
("w", Value::U64(v)) => self.cfg.g_w = v as u32,
("h", Value::U64(v)) => self.cfg.g_h = v as u32,
("qmin", Value::U64(v)) => self.cfg.rc_min_quantizer = v as u32,
("qmax", Value::U64(v)) => self.cfg.rc_max_quantizer = v as u32,
("timebase", Value::Pair(num, den)) => {
self.cfg.g_timebase.num = num as i32;
self.cfg.g_timebase.den = den as i32;
}
_ => unimplemented!(),
}
Ok(())
}
fn get_params(&self) -> Result<CodecParams> {
use std::sync::Arc;
Ok(CodecParams {
kind: Some(MediaKind::Video(VideoInfo {
height: self.cfg | configure | identifier_name |
encoder.rs | method.get_packet
pub fn flush(&mut self) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_encode(&mut self.ctx, ptr::null_mut(), 0, 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Retrieve the compressed data
///
/// To be called until it returns `None`.
///
/// It calls `aom_codec_get_cx_data`.
pub fn get_packet(&mut self) -> Option<AOMPacket> {
let pkt = unsafe { aom_codec_get_cx_data(&mut self.ctx, &mut self.iter) };
if pkt.is_null() {
None
} else {
Some(AOMPacket::new(unsafe { *pkt }))
}
}
}
impl Drop for AV1Encoder {
fn drop(&mut self) {
unsafe { aom_codec_destroy(&mut self.ctx) };
}
}
impl AOMCodec for AV1Encoder {
fn get_context(&mut self) -> &mut aom_codec_ctx {
&mut self.ctx
}
}
#[cfg(feature = "codec-trait")]
mod encoder_trait {
use super::*;
use av_codec::encoder::*;
use av_codec::error::*;
use av_data::frame::ArcFrame;
use av_data::params::{CodecParams, MediaKind, VideoInfo};
use av_data::value::Value;
pub struct Des {
descr: Descr,
}
pub struct Enc {
cfg: AV1EncoderConfig,
enc: Option<AV1Encoder>,
}
impl Descriptor for Des {
type OutputEncoder = Enc;
fn create(&self) -> Self::OutputEncoder {
Enc {
cfg: AV1EncoderConfig::new().unwrap(),
enc: None,
}
}
fn describe(&self) -> &Descr {
&self.descr
}
}
impl Encoder for Enc {
fn configure(&mut self) -> Result<()> {
if self.enc.is_none() {
self.cfg
.get_encoder()
.map(|enc| {
self.enc = Some(enc);
})
.map_err(|_err| Error::ConfigurationIncomplete)
} else {
unimplemented!()
}
}
// TODO: have it as default impl?
fn get_extradata(&self) -> Option<Vec<u8>> {
None
}
fn send_frame(&mut self, frame: &ArcFrame) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.encode(frame).map_err(|_| unimplemented!())
}
fn receive_packet(&mut self) -> Result<Packet> {
let enc = self.enc.as_mut().unwrap();
if let Some(p) = enc.get_packet() {
match p {
AOMPacket::Packet(pkt) => Ok(pkt),
_ => unimplemented!(),
}
} else {
Err(Error::MoreDataNeeded)
}
}
fn flush(&mut self) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.flush().map_err(|_| unimplemented!())
}
fn set_option<'a>(&mut self, key: &str, val: Value<'a>) -> Result<()> {
match (key, val) {
("w", Value::U64(v)) => self.cfg.g_w = v as u32,
("h", Value::U64(v)) => self.cfg.g_h = v as u32,
("qmin", Value::U64(v)) => self.cfg.rc_min_quantizer = v as u32,
("qmax", Value::U64(v)) => self.cfg.rc_max_quantizer = v as u32,
("timebase", Value::Pair(num, den)) => {
self.cfg.g_timebase.num = num as i32;
self.cfg.g_timebase.den = den as i32;
}
_ => unimplemented!(),
}
Ok(())
}
fn get_params(&self) -> Result<CodecParams> {
use std::sync::Arc;
Ok(CodecParams {
kind: Some(MediaKind::Video(VideoInfo {
height: self.cfg.g_h as usize,
width: self.cfg.g_w as usize,
format: Some(Arc::new(*YUV420)), // TODO: support more formats
})),
codec_id: Some("av1".to_owned()),
extradata: None,
bit_rate: 0, // TODO: expose the information
convergence_window: 0,
delay: 0,
})
}
fn set_params(&mut self, params: &CodecParams) -> Result<()> {
if let Some(MediaKind::Video(ref info)) = params.kind {
self.cfg.g_w = info.width as u32;
self.cfg.g_h = info.height as u32;
}
Ok(())
}
}
/// AV1 Encoder
///
/// To be used with [av-codec](https://docs.rs/av-codec) `Encoder Context`.
pub const AV1_DESCR: &Des = &Des {
descr: Descr {
codec: "av1",
name: "aom",
desc: "libaom AV1 encoder",
mime: "video/AV1",
},
};
}
#[cfg(feature = "codec-trait")]
pub use self::encoder_trait::AV1_DESCR;
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn init() {
let mut c = AV1EncoderConfig::new().unwrap();
let mut e = c.get_encoder().unwrap();
println!("{}", e.error_to_str());
}
use av_data::rational::*;
use av_data::timeinfo::TimeInfo;
pub fn setup(w: u32, h: u32, t: &TimeInfo) -> AV1Encoder {
if (w % 2) != 0 || (h % 2) != 0 {
panic!("Invalid frame size: w: {} h: {}", w, h);
}
let mut cfg = AV1EncoderConfig::new()
.unwrap()
.width(w)
.height(h)
.timebase(t.timebase.unwrap())
.rc_min_quantizer(0)
.rc_max_quantizer(0)
.threads(4)
.pass(aom_enc_pass::AOM_RC_ONE_PASS)
.rc_end_usage(aom_rc_mode::AOM_CQ);
let mut enc = cfg.get_encoder().unwrap();
enc.control(aome_enc_control_id::AOME_SET_CQ_LEVEL, 4)
.unwrap();
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.unwrap();
enc
}
pub fn setup_frame(w: u32, h: u32, t: &TimeInfo) -> Frame {
use av_data::frame::*;
use av_data::pixel::formats;
use std::sync::Arc;
let v = VideoInfo::new(
w as usize,
h as usize,
false,
FrameType::OTHER,
Arc::new(*formats::YUV420),
);
Frame::new_default_frame(v, Some(t.clone()))
}
#[test]
fn encode() {
let w = 200;
let h = 200;
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
};
let mut e = setup(w, h, &t);
let mut f = setup_frame(w, h, &t);
let mut out = 0;
// TODO write some pattern
for i in 0..100 {
e.encode(&f).unwrap();
f.t.pts = Some(i);
// println!("{:#?}", f);
loop {
let p = e.get_packet();
if p.is_none() {
break;
} else {
out = 1;
// println!("{:#?}", p.unwrap());
}
}
}
if out != 1 {
panic!("No packet produced");
}
}
#[cfg(all(test, feature = "codec-trait"))]
#[test]
fn encode_codec_trait() | {
use super::AV1_DESCR;
use av_codec::common::CodecList;
use av_codec::encoder::*;
use av_codec::error::*;
use std::sync::Arc;
let encoders = Codecs::from_list(&[AV1_DESCR]);
let mut ctx = Context::by_name(&encoders, "av1").unwrap();
let w = 200;
let h = 200;
ctx.set_option("w", u64::from(w)).unwrap();
ctx.set_option("h", u64::from(h)).unwrap();
ctx.set_option("timebase", (1, 1000)).unwrap();
ctx.set_option("qmin", 0u64).unwrap();
ctx.set_option("qmax", 0u64).unwrap();
let t = TimeInfo {
pts: Some(0), | identifier_body |
|
encoder.rs | .expect("Cannot set CPUUSED");
Ok(enc)
}
_ => Err(ret),
}
}
/// Update the encoder parameters after creation
///
/// It calls `aom_codec_control_`
pub fn control(
&mut self,
id: aome_enc_control_id::Type,
val: i32,
) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_control(&mut self.ctx, id as i32, val) };
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
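// Example (editorial sketch): `control` is how per-encoder knobs are tweaked
// after creation; both control IDs below are the ones exercised in the tests
// further down in this file.
//
//     enc.control(aome_enc_control_id::AOME_SET_CQ_LEVEL, 20)?;
//     enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 4)?;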
// TODO: Cache the image information
//
/// Send an uncompressed frame to the encoder
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode`.
///
/// [`get_packet`]: #method.get_packet
pub fn encode(&mut self, frame: &Frame) -> Result<(), aom_codec_err_t::Type> {
let img = img_from_frame(frame);
let ret = unsafe { aom_codec_encode(&mut self.ctx, &img, frame.t.pts.unwrap(), 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Notify the encoder that no more data will be sent
///
/// Call [`get_packet`] to receive the compressed data.
///
/// It calls `aom_codec_encode` with NULL arguments.
///
/// [`get_packet`]: #method.get_packet
pub fn flush(&mut self) -> Result<(), aom_codec_err_t::Type> {
let ret = unsafe { aom_codec_encode(&mut self.ctx, ptr::null_mut(), 0, 1, 0) };
self.iter = ptr::null();
match ret {
aom_codec_err_t::AOM_CODEC_OK => Ok(()),
_ => Err(ret),
}
}
/// Retrieve the compressed data
///
/// To be called until it returns `None`.
///
/// It calls `aom_codec_get_cx_data`.
pub fn get_packet(&mut self) -> Option<AOMPacket> {
let pkt = unsafe { aom_codec_get_cx_data(&mut self.ctx, &mut self.iter) };
if pkt.is_null() {
None
} else {
Some(AOMPacket::new(unsafe { *pkt }))
}
}
}
impl Drop for AV1Encoder {
fn drop(&mut self) {
unsafe { aom_codec_destroy(&mut self.ctx) };
}
}
impl AOMCodec for AV1Encoder {
fn get_context(&mut self) -> &mut aom_codec_ctx {
&mut self.ctx
}
}
#[cfg(feature = "codec-trait")]
mod encoder_trait {
use super::*;
use av_codec::encoder::*;
use av_codec::error::*;
use av_data::frame::ArcFrame;
use av_data::params::{CodecParams, MediaKind, VideoInfo};
use av_data::value::Value;
pub struct Des {
descr: Descr,
}
pub struct Enc {
cfg: AV1EncoderConfig,
enc: Option<AV1Encoder>,
}
impl Descriptor for Des {
type OutputEncoder = Enc;
fn create(&self) -> Self::OutputEncoder {
Enc {
cfg: AV1EncoderConfig::new().unwrap(),
enc: None,
}
}
fn describe(&self) -> &Descr {
&self.descr
}
}
impl Encoder for Enc {
fn configure(&mut self) -> Result<()> {
if self.enc.is_none() {
self.cfg
.get_encoder()
.map(|enc| {
self.enc = Some(enc);
})
.map_err(|_err| Error::ConfigurationIncomplete)
} else {
unimplemented!()
}
}
// TODO: have it as default impl?
fn get_extradata(&self) -> Option<Vec<u8>> {
None
}
fn send_frame(&mut self, frame: &ArcFrame) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.encode(frame).map_err(|_| unimplemented!())
}
fn receive_packet(&mut self) -> Result<Packet> {
let enc = self.enc.as_mut().unwrap();
if let Some(p) = enc.get_packet() {
match p {
AOMPacket::Packet(pkt) => Ok(pkt),
_ => unimplemented!(),
}
} else {
Err(Error::MoreDataNeeded)
}
}
fn flush(&mut self) -> Result<()> {
let enc = self.enc.as_mut().unwrap();
enc.flush().map_err(|_| unimplemented!())
}
fn set_option<'a>(&mut self, key: &str, val: Value<'a>) -> Result<()> {
match (key, val) {
("w", Value::U64(v)) => self.cfg.g_w = v as u32,
("h", Value::U64(v)) => self.cfg.g_h = v as u32,
("qmin", Value::U64(v)) => self.cfg.rc_min_quantizer = v as u32,
("qmax", Value::U64(v)) => self.cfg.rc_max_quantizer = v as u32,
("timebase", Value::Pair(num, den)) => {
self.cfg.g_timebase.num = num as i32;
self.cfg.g_timebase.den = den as i32;
}
_ => unimplemented!(),
}
Ok(())
}
fn get_params(&self) -> Result<CodecParams> {
use std::sync::Arc;
Ok(CodecParams {
kind: Some(MediaKind::Video(VideoInfo {
height: self.cfg.g_h as usize,
width: self.cfg.g_w as usize,
format: Some(Arc::new(*YUV420)), // TODO: support more formats
})),
codec_id: Some("av1".to_owned()),
extradata: None,
bit_rate: 0, // TODO: expose the information
convergence_window: 0,
delay: 0,
})
}
fn set_params(&mut self, params: &CodecParams) -> Result<()> {
if let Some(MediaKind::Video(ref info)) = params.kind {
self.cfg.g_w = info.width as u32;
self.cfg.g_h = info.height as u32;
}
Ok(())
}
}
/// AV1 Encoder
///
/// To be used with [av-codec](https://docs.rs/av-codec) `Encoder Context`.
pub const AV1_DESCR: &Des = &Des {
descr: Descr {
codec: "av1",
name: "aom",
desc: "libaom AV1 encoder",
mime: "video/AV1",
},
};
}
#[cfg(feature = "codec-trait")]
pub use self::encoder_trait::AV1_DESCR;
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn init() {
let mut c = AV1EncoderConfig::new().unwrap();
let mut e = c.get_encoder().unwrap();
println!("{}", e.error_to_str());
}
use av_data::rational::*;
use av_data::timeinfo::TimeInfo;
pub fn setup(w: u32, h: u32, t: &TimeInfo) -> AV1Encoder {
if (w % 2) != 0 || (h % 2) != 0 {
panic!("Invalid frame size: w: {} h: {}", w, h);
}
let mut cfg = AV1EncoderConfig::new()
.unwrap()
.width(w)
.height(h)
.timebase(t.timebase.unwrap())
.rc_min_quantizer(0)
.rc_max_quantizer(0)
.threads(4)
.pass(aom_enc_pass::AOM_RC_ONE_PASS)
.rc_end_usage(aom_rc_mode::AOM_CQ);
let mut enc = cfg.get_encoder().unwrap();
enc.control(aome_enc_control_id::AOME_SET_CQ_LEVEL, 4)
.unwrap();
enc.control(aome_enc_control_id::AOME_SET_CPUUSED, 2)
.unwrap();
enc
}
pub fn setup_frame(w: u32, h: u32, t: &TimeInfo) -> Frame {
use av_data::frame::*;
use av_data::pixel::formats;
use std::sync::Arc;
let v = VideoInfo::new(
w as usize,
h as usize,
false,
FrameType::OTHER,
Arc::new(*formats::YUV420),
);
Frame::new_default_frame(v, Some(t.clone()))
}
#[test]
fn encode() {
let w = 200;
let h = 200;
let t = TimeInfo {
pts: Some(0),
dts: Some(0),
duration: Some(1),
timebase: Some(Rational64::new(1, 1000)),
user_private: None,
}; | random_line_split |
||
example_scenes.py | _graph_label(sin_graph, "\\sin(x)")
relu_label = axes.get_graph_label(relu_graph, Text("ReLU"))
step_label = axes.get_graph_label(step_graph, Text("Step"), x=4)
self.play(
ShowCreation(sin_graph),
FadeIn(sin_label, RIGHT),
)
self.wait(2)
self.play(
ReplacementTransform(sin_graph, relu_graph),
FadeTransform(sin_label, relu_label),
)
self.wait()
self.play(
ReplacementTransform(relu_graph, step_graph),
FadeTransform(relu_label, step_label),
)
self.wait()
parabola = axes.get_graph(lambda x: 0.25 * x**2)
parabola.set_stroke(BLUE)
self.play(
FadeOut(step_graph),
FadeOut(step_label),
ShowCreation(parabola)
)
self.wait()
# You can use axes.input_to_graph_point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class TexAndNumbersExample(Scene):
def construct(self):
axes = Axes((-3, 3), (-3, 3), unit_size=1)
axes.to_edge(DOWN)
axes.add_coordinate_labels(font_size=16)
circle = Circle(radius=2)
circle.set_stroke(YELLOW, 3)
circle.move_to(axes.get_origin())
self.add(axes, circle)
# When numbers show up in tex, they can be readily
# replaced with DecimalMobjects so that methods like
# get_value and set_value can be called on them, and
# animations like ChangeDecimalToValue can be called
# on them.
tex = Tex("x^2 + y^2 = 4.00")
tex.next_to(axes, UP, buff=0.5)
value = tex.make_number_changable("4.00")
# This will tie the right hand side of our equation to
# the square of the radius of the circle
value.add_updater(lambda v: v.set_value(circle.get_radius()**2))
self.add(tex)
text = Text("""
You can manipulate numbers
in Tex mobjects
""", font_size=30)
text.next_to(tex, RIGHT, buff=1.5)
arrow = Arrow(text, tex)
self.add(text, arrow)
self.play(
circle.animate.set_height(2.0),
run_time=4,
rate_func=there_and_back,
)
# By default, tex.make_number_changable replaces the first occurrence
# of the number, but by passing replace_all=True it replaces all and
# returns a group of the results
exponents = tex.make_number_changable("2", replace_all=True)
self.play(
LaggedStartMap(
FlashAround, exponents,
lag_ratio=0.2, buff=0.1, color=RED
),
exponents.animate.set_color(RED)
)
def func(x, y):
# Switch from manim coords to axes coords
xa, ya = axes.point_to_coords(np.array([x, y, 0]))
return xa**4 + ya**4 - 4
new_curve = ImplicitFunction(func)
new_curve.match_style(circle)
circle.rotate(angle_of_vector(new_curve.get_start())) # Align
value.clear_updaters()
self.play(
*(ChangeDecimalToValue(exp, 4) for exp in exponents),
ReplacementTransform(circle.copy(), new_curve),
circle.animate.set_stroke(width=1, opacity=0.5),
)
class SurfaceExample(ThreeDScene):
def construct(self):
surface_text = Text("For 3d scenes, try using surfaces")
surface_text.fix_in_frame()
surface_text.to_edge(UP)
self.add(surface_text)
self.wait(0.1)
torus1 = Torus(r1=1, r2=1)
torus2 = Torus(r1=3, r2=1)
sphere = Sphere(radius=3, resolution=torus1.resolution)
# You can texture a surface with up to two images, which will
# be interpreted as the side towards the light, and away from
# the light. These can be either urls, or paths to a local file
# in whatever you've set as the image directory in
# the custom_config.yml file
# day_texture = "EarthTextureMap"
# night_texture = "NightEarthTextureMap"
day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
surfaces = [
TexturedSurface(surface, day_texture, night_texture)
for surface in [sphere, torus1, torus2]
]
for mob in surfaces:
mob.shift(IN)
mob.mesh = SurfaceMesh(mob)
mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
surface = surfaces[0]
self.play(
FadeIn(surface),
ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
)
for mob in surfaces:
mob.add(mob.mesh)
surface.save_state()
self.play(Rotate(surface, PI / 2), run_time=2)
for mob in surfaces[1:]:
mob.rotate(PI / 2)
self.play(
Transform(surface, surfaces[1]),
run_time=3
)
self.play(
Transform(surface, surfaces[2]),
# Move camera frame during the transition
self.frame.animate.increment_phi(-10 * DEGREES),
self.frame.animate.increment_theta(-20 * DEGREES),
run_time=3
)
# Add ambient rotation
self.frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
# Play around with where the light is
light_text = Text("You can move around the light source")
light_text.move_to(surface_text)
light_text.fix_in_frame()
self.play(FadeTransform(surface_text, light_text))
light = self.camera.light_source
self.add(light)
light.save_state()
self.play(light.animate.move_to(3 * IN), run_time=5)
self.play(light.animate.shift(10 * OUT), run_time=5)
drag_text = Text("Try moving the mouse while pressing d or f")
drag_text.move_to(light_text)
drag_text.fix_in_frame()
self.play(FadeTransform(light_text, drag_text))
self.wait()
class InteractiveDevelopment(Scene):
def construct(self):
circle = Circle()
circle.set_fill(BLUE, opacity=0.5)
circle.set_stroke(BLUE_E, width=4)
square = Square()
self.play(ShowCreation(square))
self.wait()
# This opens an iPython terminal where you can keep writing
# lines as if they were part of this construct method.
# In particular, 'square', 'circle' and 'self' will all be
# part of the local namespace in that terminal.
self.embed()
# Try copying and pasting some of the lines below into
# the interactive shell
self.play(ReplacementTransform(square, circle))
self.wait()
self.play(circle.animate.stretch(4, 0))
self.play(Rotate(circle, 90 * DEGREES))
self.play(circle.animate.shift(2 * RIGHT).scale(0.25))
text = Text("""
In general, using the interactive shell
is very helpful when developing new scenes
""")
self.play(Write(text))
# In the interactive shell, you can just type
# play, add, remove, clear, wait, save_state and restore,
# instead of self.play, self.add, self.remove, etc.
# To interact with the window, type touch(). You can then
# scroll in the window, or zoom by holding down 'z' while scrolling,
# and change camera perspective by holding down 'd' while moving
# the mouse. Press 'r' to reset to the standard camera position.
# Press 'q' to stop interacting with the window and go back to
# typing new commands into the shell.
# In principle you can customize a scene to be responsive to
# mouse and keyboard interactions
always(circle.move_to, self.mouse_point)
class ControlsExample(Scene):
drag_to_pan = False
| def setup(self): | random_line_split |
|
example_scenes.py | )
fonts.set_width(FRAME_WIDTH - 1)
slant = Text(
"And the same as slant and weight",
font="Consolas",
t2s={"slant": ITALIC},
t2w={"weight": BOLD},
t2c={"slant": ORANGE, "weight": RED}
)
VGroup(fonts, slant).arrange(DOWN, buff=0.8)
self.play(FadeOut(text), FadeOut(difference, shift=DOWN))
self.play(Write(fonts))
self.wait()
self.play(Write(slant))
self.wait()
class TexTransformExample(Scene):
def construct(self):
# Tex to color map
t2c = {
"A": BLUE,
"B": TEAL,
"C": GREEN,
}
# Configuration to pass along to each Tex mobject
kw = dict(font_size=72, t2c=t2c)
lines = VGroup(
Tex("A^2 + B^2 = C^2", **kw),
Tex("A^2 = C^2 - B^2", **kw),
Tex("A^2 = (C + B)(C - B)", **kw),
Tex(R"A = \sqrt{(C + B)(C - B)}", **kw),
)
lines.arrange(DOWN, buff=LARGE_BUFF)
self.add(lines[0])
# The animation TransformMatchingStrings will line up parts
# of the source and target which have matching substrings.
# Here, giving it a little path_arc makes each part rotate into
# their final positions, which feels appropriate for the idea of
# rearranging an equation
self.play(
TransformMatchingStrings(
lines[0].copy(), lines[1],
# matched_keys specifies which substring should
# line up. If it's not specified, the animation
# will align the longest matching substrings.
# In this case, the substring "^2 = C^2" would
# trip it up
matched_keys=["A^2", "B^2", "C^2"],
# When you want a substring from the source
# to go to a non-equal substring from the target,
# use the key map.
key_map={"+": "-"},
path_arc=90 * DEGREES,
),
)
self.wait()
self.play(TransformMatchingStrings(
lines[1].copy(), lines[2],
matched_keys=["A^2"]
))
self.wait()
self.play(
TransformMatchingStrings(
lines[2].copy(), lines[3],
key_map={"2": R"\sqrt"},
path_arc=-30 * DEGREES,
),
)
self.wait(2)
self.play(LaggedStartMap(FadeOut, lines, shift=2 * RIGHT))
# TransformMatchingShapes will try to line up all pieces of a
# source mobject with those of a target, regardless of
# what Mobject type they are.
source = Text("the morse code", height=1)
target = Text("here come dots", height=1)
saved_source = source.copy()
self.play(Write(source))
self.wait()
kw = dict(run_time=3, path_arc=PI / 2)
self.play(TransformMatchingShapes(source, target, **kw))
self.wait()
self.play(TransformMatchingShapes(target, saved_source, **kw))
self.wait()
class TexIndexing(Scene):
def construct(self):
# You can index into Tex mobject (or other StringMobjects) by substrings
equation = Tex(R"e^{\pi i} = -1", font_size=144)
self.add(equation)
self.play(FlashAround(equation["e"]))
self.wait()
self.play(Indicate(equation[R"\pi"]))
self.wait()
self.play(TransformFromCopy(
equation[R"e^{\pi i}"].copy().set_opacity(0.5),
equation["-1"],
path_arc=-PI / 2,
run_time=3
))
self.play(FadeOut(equation))
# Or regular expressions
equation = Tex("A^2 + B^2 = C^2", font_size=144)
self.play(Write(equation))
for part in equation[re.compile(r"\w\^2")]:
self.play(FlashAround(part))
self.wait()
self.play(FadeOut(equation))
# Indexing by substrings like this may not work when
# the order in which Latex draws symbols does not match
# the order in which they show up in the string.
# For example, here the infinity is drawn before the sigma
# so we don't get the desired behavior.
equation = Tex(R"\sum_{n = 1}^\infty \frac{1}{n^2} = \frac{\pi^2}{6}", font_size=72)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Doesn't hit the infinity
self.wait()
self.play(FadeOut(equation))
# However you can always fix this by explicitly passing in
# a string you might want to isolate later. Also, using
# \over instead of \frac helps to avoid the issue for fractions
equation = Tex(
R"\sum_{n = 1}^\infty {1 \over n^2} = {\pi^2 \over 6}",
# Explicitly mark "\infty" as a substring you might want to access
isolate=[R"\infty"],
font_size=72
)
self.play(FadeIn(equation))
self.play(equation[R"\infty"].animate.set_color(RED)) # Got it!
self.wait()
self.play(FadeOut(equation))
class UpdatersExample(Scene):
def construct(self):
square = Square()
square.set_fill(BLUE_E, 1)
# On all frames, the constructor Brace(square, UP) will
# be called, and the mobject brace will set its data to match
# that of the newly constructed object
brace = always_redraw(Brace, square, UP)
label = TexText("Width = 0.00")
number = label.make_number_changable("0.00")
# This ensures that the method label.next_to(brace, UP)
# is called on every frame
always(label.next_to, brace, UP)
# You could also write the following equivalent line
# label.add_updater(lambda m: m.next_to(brace, UP))
# If the argument itself might change, you can use f_always,
# for which the arguments following the initial Mobject method
# should be functions returning arguments to that method.
# The following line ensures that number.set_value(square.get_width())
# is called every frame
f_always(number.set_value, square.get_width)
# You could also write the following equivalent line
# number.add_updater(lambda m: m.set_value(square.get_width()))
self.add(square, brace, label)
# Notice that the brace and label track with the square
self.play(
square.animate.scale(2),
rate_func=there_and_back,
run_time=2,
)
self.wait()
self.play(
square.animate.set_width(5, stretch=True),
run_time=3,
)
self.wait()
self.play(
square.animate.set_width(2),
run_time=3
)
self.wait()
# In general, you can always call Mobject.add_updater, and pass in
# a function that you want to be called on every frame. The function
# should take in either one argument, the mobject, or two arguments,
# the mobject and the amount of time since the last frame.
now = self.time
w0 = square.get_width()
square.add_updater(
lambda m: m.set_width(w0 * math.sin(self.time - now) + w0)
)
self.wait(4 * PI)
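# Editorial aside: the two-argument (mobject, dt) form mentioned above would
# look like this (sketch, not part of the original scene):
#
#     square.add_updater(lambda m, dt: m.rotate(0.5 * dt))  # ~0.5 rad per second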
class CoordinateSystemExample(Scene):
def construct(self):
axes = Axes(
# x-axis ranges from -1 to | text = Text("Here is a text", font="Consolas", font_size=90)
difference = Text(
"""
The most important difference between Text and TexText is that\n
you can change the font more easily, but can't use the LaTeX grammar
""",
font="Arial", font_size=24,
# t2c is a dict that you can choose color for different text
t2c={"Text": BLUE, "TexText": BLUE, "LaTeX": ORANGE}
)
VGroup(text, difference).arrange(DOWN, buff=1)
self.play(Write(text))
self.play(FadeIn(difference, UP))
self.wait(3)
fonts = Text(
"And you can also set the font according to different words",
font="Arial",
t2f={"font": "Consolas", "words": "Consolas"},
t2c={"font": BLUE, "words": GREEN} | identifier_body |
|
example_scenes.py | _point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class TexAndNumbersExample(Scene):
def construct(self):
axes = Axes((-3, 3), (-3, 3), unit_size=1)
axes.to_edge(DOWN)
axes.add_coordinate_labels(font_size=16)
circle = Circle(radius=2)
circle.set_stroke(YELLOW, 3)
circle.move_to(axes.get_origin())
self.add(axes, circle)
# When numbers show up in tex, they can be readily
# replaced with DecimalMobjects so that methods like
# get_value and set_value can be called on them, and
# animations like ChangeDecimalToValue can be called
# on them.
tex = Tex("x^2 + y^2 = 4.00")
tex.next_to(axes, UP, buff=0.5)
value = tex.make_number_changable("4.00")
# This will tie the right hand side of our equation to
# the square of the radius of the circle
value.add_updater(lambda v: v.set_value(circle.get_radius()**2))
self.add(tex)
text = Text("""
You can manipulate numbers
in Tex mobjects
""", font_size=30)
text.next_to(tex, RIGHT, buff=1.5)
arrow = Arrow(text, tex)
self.add(text, arrow)
self.play(
circle.animate.set_height(2.0),
run_time=4,
rate_func=there_and_back,
)
# By default, tex.make_number_changable replaces the first occurrence
# of the number, but by passing replace_all=True it replaces all and
# returns a group of the results
exponents = tex.make_number_changable("2", replace_all=True)
self.play(
LaggedStartMap(
FlashAround, exponents,
lag_ratio=0.2, buff=0.1, color=RED
),
exponents.animate.set_color(RED)
)
def func(x, y):
# Switch from manim coords to axes coords
xa, ya = axes.point_to_coords(np.array([x, y, 0]))
return xa**4 + ya**4 - 4
new_curve = ImplicitFunction(func)
new_curve.match_style(circle)
circle.rotate(angle_of_vector(new_curve.get_start())) # Align
value.clear_updaters()
self.play(
*(ChangeDecimalToValue(exp, 4) for exp in exponents),
ReplacementTransform(circle.copy(), new_curve),
circle.animate.set_stroke(width=1, opacity=0.5),
)
class SurfaceExample(ThreeDScene):
def construct(self):
surface_text = Text("For 3d scenes, try using surfaces")
surface_text.fix_in_frame()
surface_text.to_edge(UP)
self.add(surface_text)
self.wait(0.1)
torus1 = Torus(r1=1, r2=1)
torus2 = Torus(r1=3, r2=1)
sphere = Sphere(radius=3, resolution=torus1.resolution)
# You can texture a surface with up to two images, which will
# be interpreted as the side towards the light, and away from
# the light. These can be either urls, or paths to a local file
# in whatever you've set as the image directory in
# the custom_config.yml file
# day_texture = "EarthTextureMap"
# night_texture = "NightEarthTextureMap"
day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
surfaces = [
TexturedSurface(surface, day_texture, night_texture)
for surface in [sphere, torus1, torus2]
]
for mob in surfaces:
mob.shift(IN)
mob.mesh = SurfaceMesh(mob)
mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
surface = surfaces[0]
self.play(
FadeIn(surface),
ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
)
for mob in surfaces:
mob.add(mob.mesh)
surface.save_state()
self.play(Rotate(surface, PI / 2), run_time=2)
for mob in surfaces[1:]:
mob.rotate(PI / 2)
self.play(
Transform(surface, surfaces[1]),
run_time=3
)
self.play(
Transform(surface, surfaces[2]),
# Move camera frame during the transition
self.frame.animate.increment_phi(-10 * DEGREES),
self.frame.animate.increment_theta(-20 * DEGREES),
run_time=3
)
# Add ambient rotation
self.frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
# Play around with where the light is
light_text = Text("You can move around the light source")
light_text.move_to(surface_text)
light_text.fix_in_frame()
self.play(FadeTransform(surface_text, light_text))
light = self.camera.light_source
self.add(light)
light.save_state()
self.play(light.animate.move_to(3 * IN), run_time=5)
self.play(light.animate.shift(10 * OUT), run_time=5)
drag_text = Text("Try moving the mouse while pressing d or f")
drag_text.move_to(light_text)
drag_text.fix_in_frame()
self.play(FadeTransform(light_text, drag_text))
self.wait()
class InteractiveDevelopment(Scene):
def construct(self):
circle = Circle()
circle.set_fill(BLUE, opacity=0.5)
circle.set_stroke(BLUE_E, width=4)
square = Square()
self.play(ShowCreation(square))
self.wait()
# This opens an iPython terminal where you can keep writing
# lines as if they were part of this construct method.
# In particular, 'square', 'circle' and 'self' will all be
# part of the local namespace in that terminal.
self.embed()
# Try copying and pasting some of the lines below into
# the interactive shell
self.play(ReplacementTransform(square, circle))
self.wait()
self.play(circle.animate.stretch(4, 0))
self.play(Rotate(circle, 90 * DEGREES))
self.play(circle.animate.shift(2 * RIGHT).scale(0.25))
text = Text("""
In general, using the interactive shell
is very helpful when developing new scenes
""")
self.play(Write(text))
# In the interactive shell, you can just type
# play, add, remove, clear, wait, save_state and restore,
# instead of self.play, self.add, self.remove, etc.
# To interact with the window, type touch(). You can then
# scroll in the window, or zoom by holding down 'z' while scrolling,
# and change camera perspective by holding down 'd' while moving
# the mouse. Press 'r' to reset to the standard camera position.
# Press 'q' to stop interacting with the window and go back to
# typing new commands into the shell.
# In principle you can customize a scene to be responsive to
# mouse and keyboard interactions
always(circle.move_to, self.mouse_point)
class ControlsExample(Scene):
drag_to_pan = False
def setup(self):
self.textbox = Textbox()
self.checkbox = Checkbox()
self.color_picker = ColorSliders()
self.panel = ControlPanel(
Text("Text", font_size=24), self.textbox, Line(),
Text("Show/Hide Text", font_size=24), self.checkbox, Line(),
Text("Color of Text", font_size=24), self.color_picker
)
self.add(self.panel)
def construct(self):
text = Text("text", font_size=96)
def text_updater(old_text):
assert(isinstance(old_text, Text))
new_text = Text(self.textbox.get_value(), font_size=old_text.font_size)
# new_text.align_data_and_family(old_text)
new_text.move_to(old_text)
if self.checkbox.get_value():
| new_text.set_fill(
color=self.color_picker.get_picked_color(),
opacity=self.color_picker.get_picked_opacity()
) | conditional_block |
|
example_scenes.py | , label)
# Notice that the brace and label track with the square
self.play(
square.animate.scale(2),
rate_func=there_and_back,
run_time=2,
)
self.wait()
self.play(
square.animate.set_width(5, stretch=True),
run_time=3,
)
self.wait()
self.play(
square.animate.set_width(2),
run_time=3
)
self.wait()
# In general, you can always call Mobject.add_updater, and pass in
# a function that you want to be called on every frame. The function
# should take in either one argument, the mobject, or two arguments,
# the mobject and the amount of time since the last frame.
now = self.time
w0 = square.get_width()
square.add_updater(
lambda m: m.set_width(w0 * math.sin(self.time - now) + w0)
)
self.wait(4 * PI)
class CoordinateSystemExample(Scene):
def construct(self):
axes = Axes(
# x-axis ranges from -1 to 10, with a default step size of 1
x_range=(-1, 10),
# y-axis ranges from -2 to 2 with a step size of 0.5
y_range=(-2, 2, 0.5),
# The axes will be stretched so as to match the specified
# height and width
height=6,
width=10,
# Axes is made of two NumberLine mobjects. You can specify
# their configuration with axis_config
axis_config=dict(
stroke_color=GREY_A,
stroke_width=2,
numbers_to_exclude=[0],
),
# Alternatively, you can specify configuration for just one
# of them, like this.
y_axis_config=dict(
numbers_with_elongated_ticks=[-2, 2],
)
)
# Keyword arguments of add_coordinate_labels can be used to
# configure the DecimalNumber mobjects which it creates and
# adds to the axes
axes.add_coordinate_labels(
font_size=20,
num_decimal_places=1,
)
self.add(axes)
# Axes descends from the CoordinateSystem class, meaning
# you can call call axes.coords_to_point, abbreviated to
# axes.c2p, to associate a set of coordinates with a point,
# like so:
dot = Dot(color=RED)
dot.move_to(axes.c2p(0, 0))
self.play(FadeIn(dot, scale=0.5))
self.play(dot.animate.move_to(axes.c2p(3, 2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(5, 0.5)))
self.wait()
# Similarly, you can call axes.point_to_coords, or axes.p2c
# print(axes.p2c(dot.get_center()))
# We can draw lines from the axes to better mark the coordinates
# of a given point.
# Here, the always_redraw command means that on each new frame
# the lines will be redrawn
h_line = always_redraw(lambda: axes.get_h_line(dot.get_left()))
v_line = always_redraw(lambda: axes.get_v_line(dot.get_bottom()))
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
)
self.play(dot.animate.move_to(axes.c2p(3, -2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(1, 1)))
self.wait()
# If we tie the dot to a particular set of coordinates, notice
# that as we move the axes around it respects the coordinate
# system defined by them.
f_always(dot.move_to, lambda: axes.c2p(1, 1))
self.play(
axes.animate.scale(0.75).to_corner(UL),
run_time=2,
)
self.wait()
self.play(FadeOut(VGroup(axes, dot, h_line, v_line)))
# Other coordinate systems you can play around with include
# ThreeDAxes, NumberPlane, and ComplexPlane.
class GraphExample(Scene):
def construct(self):
axes = Axes((-3, 10), (-1, 8), height=6)
axes.add_coordinate_labels()
self.play(Write(axes, lag_ratio=0.01, run_time=1))
# Axes.get_graph will return the graph of a function
sin_graph = axes.get_graph(
lambda x: 2 * math.sin(x),
color=BLUE,
)
# By default, it draws it so as to somewhat smoothly interpolate
# between sampled points (x, f(x)). If the graph is meant to have
# a corner, though, you can set use_smoothing to False
relu_graph = axes.get_graph(
lambda x: max(x, 0),
use_smoothing=False,
color=YELLOW,
)
# For discontinuous functions, you can specify the point of
# discontinuity so that it does not try to draw over the gap.
step_graph = axes.get_graph(
lambda x: 2.0 if x > 3 else 1.0,
discontinuities=[3],
color=GREEN,
)
# Axes.get_graph_label takes in either a string or a mobject.
# If it's a string, it treats it as a LaTeX expression. By default
# it places the label next to the graph near the right side, and
# has it match the color of the graph
sin_label = axes.get_graph_label(sin_graph, "\\sin(x)")
relu_label = axes.get_graph_label(relu_graph, Text("ReLU"))
step_label = axes.get_graph_label(step_graph, Text("Step"), x=4)
self.play(
ShowCreation(sin_graph),
FadeIn(sin_label, RIGHT),
)
self.wait(2)
self.play(
ReplacementTransform(sin_graph, relu_graph),
FadeTransform(sin_label, relu_label),
)
self.wait()
self.play(
ReplacementTransform(relu_graph, step_graph),
FadeTransform(relu_label, step_label),
)
self.wait()
parabola = axes.get_graph(lambda x: 0.25 * x**2)
parabola.set_stroke(BLUE)
self.play(
FadeOut(step_graph),
FadeOut(step_label),
ShowCreation(parabola)
)
self.wait()
# You can use axes.input_to_graph_point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class TexAndNumbersExample(Scene):
def construct(self):
axes = Axes((-3, 3), (-3, 3), unit_size=1)
axes.to_edge(DOWN)
axes.add_coordinate_labels(font_size=16)
circle = Circle(radius=2)
circle.set_stroke(YELLOW, 3)
circle.move_to(axes.get_origin())
self.add(axes, circle)
# When numbers show up in tex, they can be readily
# replaced with DecimalMobjects so that methods like
# get_value and set_value can be called on them, and
# animations like ChangeDecimalToValue can be called
# on them.
tex = Tex("x^2 + y^2 = 4.00")
tex.next_to(axes, UP, buff=0.5)
value = tex.make_number_changable("4.00")
# This will tie the right hand side of our equation to
# the square of the radius of the circle
value.add_updater(lambda v: v.set_value(circle.get_radius()**2))
self.add(tex)
text = Text("""
You can manipulate numbers
in Tex mobjects
""", font_size=30)
text.next_to(tex, RIGHT, buff=1.5)
arrow = Arrow(text, tex)
self.add(text, arrow)
self.play(
circle.animate.set_height(2.0),
run_time=4,
rate_func=there_and_back,
)
# By default, tex.make_number_changable replaces the first occurrence
# of the number, but by passing replace_all=True it replaces all and
# returns a group of the results
exponents = tex.make_number_changable("2", replace_all=True)
self.play(
LaggedStartMap(
FlashAround, exponents,
lag_ratio=0.2, buff=0.1, color=RED
),
exponents.animate.set_color(RED)
)
def | func | identifier_name |
|
Skycam.py | (tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
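# Worked example (editorial, not from the original source): with anchor
# spacings a = b = c = 10 and zB = zC = 0 (all anchors level), the law of
# cosines gives Arad = 60 degrees, so theta = 30 degrees and the return is
# node0 = (0, 0, 0), node1 = (0, 10, 0), node2 = (8.66, 5, 0),
# i.e. an equilateral layout whose pairwise distances are all 10, as expected.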
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to
steps: (int) number of steps in which to complete the path
Returns:
Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
# create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2)
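# Illustrative usage (editorial sketch; assumes `cam` is an already-constructed
# Skycam and that waypoint coordinates use the same units as the anchor geometry):
#
#     waypoints = [(2.0, 2.0, 1.0), (5.0, 3.0, 2.0), (8.0, 2.0, 1.0)]
#     cam.create_path(waypoints, steps=200)  # spline through the waypoints,
#                                            # split into ~equal-length steps
#     cam.connect()                          # open the three rfcomm ports
#     cam.tighten()                          # calibrate cable lengths by hand
#     cam.go_path(start=0)                   # stream the length differences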
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
#TODO: Always mess around with this value
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def | (self, ustart, s, dl, tol=.01):
''' Perform a binary search to find parametrized location of point.
Inputs:
ustart: (float) parameter value to start searching from
s: (spline object) spline along which to search
dl: (float) target distance from the point at ustart to the returned point
tol: (float) relative tolerance on dl
Returns:
um: (float) parameter value whose point lies ~dl from the point at ustart
'''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
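# Note (editorial): distance() compares straight-line (chord) distance, not arc
# length, so consecutive points are ~dl of curve apart only when dl is small
# relative to the spline's curvature. If less than dl*(1-tol) of the curve
# remains past ustart, the else branch is never reached and the loop will not
# terminate, so steps should be chosen so that dl stays reachable.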
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
totl += distance(point, ipoint)
ipoint = point
return totl
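# Note (editorial): this is a piecewise-linear estimate of the arc length from
# 1000 samples; the first iteration adds distance(spline[0], spline[0]) = 0, so
# the sum runs over the 999 segments between consecutive samples. Increasing
# the sample count tightens the estimate at the cost of extra splev calls.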
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
to aid in path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, | binary_search | identifier_name |
Skycam.py | (tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
| # create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2)
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
#TODO: Always mess around with this value
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def binary_search(self, ustart, s, dl, tol=.01):
''' Perform a binary search to find parametrized location of point.
Inputs:
ustart: (float) parameter value to start searching from
s: (spline object) spline along which to search
dl: (float) target distance from the point at ustart to the returned point
tol: (float) relative tolerance on dl
Returns:
um: (float) parameter value whose point lies ~dl from the point at ustart
'''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
totl += distance(point, ipoint)
ipoint = point
return totl
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
to aid in path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, node | ''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to
steps: (int) number of steps in which to complete the path
Returns:
Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
| identifier_body |
Skycam.py | (tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to
steps: (int) number of steps in which to complete the path
Returns:
Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
# create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2)
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
#TODO: Always mess around with this value
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def binary_search(self, ustart, s, dl, tol=.01):
''' Perform a binary search to find parametrized location of point.
Inputs:
ustart: (float) parameter value to start searching from
s: (spline object) spline along which to search
dl: (float) target distance from the point at ustart to the returned point
tol: (float) relative tolerance on dl
Returns:
um: (float) parameter value whose point lies ~dl from the point at ustart
'''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
|
return totl
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
to aid in path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, node | totl += distance(point, ipoint)
ipoint = point | conditional_block |
Skycam.py | (tuple of floats) coordinates of node0, node1, node2
'''
# Project lengths into xy plane
a_eff = ((zC-zB)**2 + c**2)**.5
b_eff = (zC**2 + b**2)**.5
c_eff = (zB**2 + c**2)**.5
# Law of cosines
numer = b_eff**2 + c_eff**2 - a_eff**2
denom = 2*b_eff*c_eff
Arad = acos(numer/denom)
# Law of sines
Brad = asin(b*sin(Arad)/a)
Crad = asin(c*sin(Arad)/a)
theta = .5*pi-Arad
return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
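    # The construction above is plain triangle geometry: with the pairwise node
    # distances corrected for the height offsets zB and zC,
    #   law of cosines:  cos(A) = (b_eff**2 + c_eff**2 - a_eff**2) / (2 * b_eff * c_eff)
    #   law of sines:    sin(B) = b * sin(A) / a,  sin(C) = c * sin(A) / a
    # node 0 is placed at the origin, node 1 on the y-axis at (0, c, zB), and node 2
    # at angle theta = pi/2 - A from the x-axis at height zC.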
def load_path(self, points):
''' Create a new path based on predetermined points.
Inputs:
points: (list of tuples of floats) specific camera positions for
any given time.
Returns:
Initializes new Path in Skycam's path attribute
'''
self.path = Path.new_path(points, self.node0, self.node1, self.node2)
def create_path(self, waypoints, steps):
''' Generate a new list of points based on waypoints.
Inputs:
waypoints: (list of tuples of floats): points the path should
bring the camera to | Calls load_path method on list of generated spline points
'''
xpoints = [point[0] for point in waypoints]
ypoints = [point[1] for point in waypoints]
zpoints = [point[2] for point in waypoints]
# spline parameters
s = 2.0 # smoothness
k = 1 # spline order
nest = -1 # estimate of knots needed
# create spline and calculate length
s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
totl = self.splineLen(s)
dl = totl/steps
if dl > 1:
print "dl greater than 1!"
i = 0
u = 0
upath = [u]
# Divide path into equidistant lengths
while i < steps-1:
u = self.binary_search(u, s, dl) # optionally pass tolerance
upath.append(u)
print i
i += 1
path = [splev(u, s) for u in upath]
path_lens = []
for i in xrange(len(path) - 1):
path_lens.append(distance(path[i], path[i+1]))
error = [ele - dl for ele in path_lens]
print 'Error is: ', sum(error)/len(error)
self.load_path(path)
# self.path = Path.new_path(path, self.node0, self.node1, self.node2)
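    # Example session (sketch; the waypoint coordinates and the `skycam` instance
    # name are placeholders for illustration):
    #
    #   skycam.connect()
    #   skycam.tighten()
    #   skycam.create_path([(0, 0, 100), (500, 300, 150), (900, 100, 120)], steps=200)
    #   skycam.go_path(start=0)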
def go_path(self, start):
'''Send appropriate movement commands for loaded path.
Input:
start: (int) index of path at which to begin sending commands
'''
#TODO: Implement save point
while (not self.direct and not self.pause):
for i in xrange(len(self.path.diffs0) - start):
self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
# raw_input('')
self.save_point = i
break
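    # Resuming after a pause is expected to use the saved index (sketch; pause
    # handling above is only partially implemented):
    #
    #   skycam.go_path(start=0)                    # traverse from the beginning
    #   # ... self.pause set elsewhere ...
    #   skycam.go_path(start=skycam.save_point)    # pick up where it stopped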
# def pause_path(self):
# ''' Pause path traversal.'''
# self.pause = True
# def switch_mode(self):
# ''' Switch from path control to joystick control '''
# self.direct = not self.direct
# def go_input(self):
# ''' Translate a direct-control input into a directional vector and send appropriate commands '''
# pass
def connect(self, baud=57600):
''' Connect to proper serial ports for Bluetooth communication.
Inputs:
baud: (int) baud rate at which to connect
Returns:
Print confirmation of connection
'''
# Connect to proper serial ports
self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
print 'Hacking the mainframe...'
sleep(8)
print 'Mainframe hacked'
def send_command(self, diff0, diff1, diff2):
'''Send proper commands to all three serial ports.
Inputs:
diff0: (float) node length difference for node 0
diff1: (float) node length difference for node 1
diff2: (float) node length difference for node 2
'''
print diff0, diff1, diff2
self.serA.write(str(diff0) + 'g')
self.serB.write(str(diff1) + 'g')
self.serC.write(str(diff2) + 'g')
        #TODO: this delay usually needs retuning; it sets how long to wait after each movement command
sleep(.35)
pass
# def dldp(self, nodePos, theta, phi):
# ''' use a directional vector and current position to calculate change in node length '''
# cam = self.cam
# deltaX = cam[0] - nodePos[0]
# deltaY = cam[1] - nodePos[1]
# deltaZ = cam[2] - nodePos[2]
# numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
# denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
# return numer/denom
def binary_search(self, ustart, s, dl, tol=.01):
        ''' Perform a binary search for the spline parameter whose point lies a chord
        distance dl from the point at ustart.
        Inputs:
            ustart: (float) parameter value in [0, 1] to start from
            s: (spline object) tck representation returned by splprep
            dl: (float) target straight-line distance from the point at ustart
            tol: (float) relative tolerance on dl
        Returns:
            um: (float) parameter value whose point is within tol of dl away from the point at ustart
        '''
point = splev(ustart, s)
ui = ustart
uf = 1
um = (ui + uf)/2
xm, ym, zm = splev(um, s)
xf, yf, zf = splev(uf, s)
while True:
tpoint = splev(um, s)
if distance(point, tpoint)>(dl*(1+tol)):
uf, um = um, (um+ui)/2
elif distance(point, tpoint)<(dl*(1-tol)):
ui, um = um, (um+uf)/2
else:
return um
def splineLen(self, s):
''' Calculate length of a spline.
Inputs:
s: (spline object) represents path that joins waypoints
Returns:
(float) length of spline
'''
ts = np.linspace(0, 1, 1000)
xs, ys, zs = splev(ts, s)
spline = zip(xs, ys, zs)
ipoint = spline[0]
totl = 0
for point in spline:
totl += distance(point, ipoint)
ipoint = point
return totl
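    # Note: splineLen approximates arc length by summing the straight-line distances
    # between 1000 consecutive samples of the spline, so it slightly underestimates
    # the true length for strongly curved paths; increasing the number of samples in
    # np.linspace tightens the estimate.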
def tighten(self):
''' Calibrate node lengths to current position of camera.
Enter ' ' to tighten
Enter 's' to accept node length
'''
while True:
input = raw_input('Tightening Node A')
if input == ' ':
self.serA.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node B')
if input == ' ':
self.serB.write('-100g')
elif input == 's':
break
while True:
input = raw_input('Tightening Node C')
if input == ' ':
self.serC.write('-100g')
elif input == 's':
return
class Path:
''' Path object stores a path's points, node lengths, and length differences
    to enable path traversal.
'''
def __init__(self, points, node0, node1, node2):
''' Init method for Path class.
Input:
points: (list of tuples of floats)
node0, 1, 2: (tuple of floats)
Returns:
Initializes Path attributes
'''
self.points = points
self.lens0 = [distance(node0, point) for point in points]
self.lens1 = [distance(node1, point) for point in points]
self.lens2 = [distance(node2, point) for point in points]
self.diffs0 = self.diff_calc(self.lens0)
self.diffs1 = self.diff_calc(self.lens1)
self.diffs2 = self.diff_calc(self.lens2)
@staticmethod
def new_path(points, node0, node1, node2 | steps: (int) number of steps in which to complete the path
Returns: | random_line_split |
kafka.go | if publishBuilderEntry.encoded == nil && publishBuilderEntry.err == nil {
if publishBuilderEntry.traceId == "" {
publishBuilderEntry.traceId = generateID()
}
if publishBuilderEntry.messageId == "" {
publishBuilderEntry.messageId = generateID()
}
publishBuilderEntry.encoded, publishBuilderEntry.err = json.Marshal(Message{
Message: publishBuilderEntry.message,
MessageType: publishBuilderEntry.messageType,
Service: publishBuilderEntry.service,
TraceId: publishBuilderEntry.traceId,
MessageId: publishBuilderEntry.messageId,
})
}
}
// Encode PublishBuilder into array of bytes
func (publishBuilderEntry *PublishBuilderEntry) Encode() ([]byte, error) {
publishBuilderEntry.ensureEncoded()
return publishBuilderEntry.encoded, publishBuilderEntry.err
}
// Length returns size of encoded value
func (publishBuilderEntry *PublishBuilderEntry) Length() int {
publishBuilderEntry.ensureEncoded()
return len(publishBuilderEntry.encoded)
}
// NewKafkaClient create a new instance of KafkaClient
func NewKafkaClient(brokerList []string, realm string) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, nil)
}
// NewKafkaClientWithAuthentication create a new instance of KafkaClient with Authentication
func NewKafkaClientWithAuthentication(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, securityConfig)
}
func initKafkaClient(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
config := sarama.NewConfig()
config.Version = sarama.V2_1_0_0
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(config, securityConfig)
}
// currently only support 1 message broker
broker := sarama.NewBroker(brokerList[0])
err := broker.Open(config)
if err != nil {
logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
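// Note: this helper only fills the SASL/SCRAM-SHA-512 and TLS fields of an existing
// sarama.Config; callers are expected to supply a SecurityConfig whose
// AuthenticationType is saslScramAuth and whose SASLUsername/SASLPassword fields are
// set (those are the only fields read above) when asking for an authenticated client.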
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
}
}
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) | else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
			timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond)
		responseLoop:
			for {
				select {
				case consumerMessage := <-consumer.Messages():
					if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
						continue
					}
					break responseLoop // a bare break would only leave the select, not the loop
				case err = <-consumer.Errors():
					callback := publishBuilder.callback
					callback(nil, err)
					break responseLoop
				case <-timeout:
					callback := publishBuilder.callback
					callback(nil, rtoError)
					break responseLoop
				}
			}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
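// Note: the response listener above matches replies by PublishBuilder.messageId, so a
// caller of PublishWithResponses should set (or accept the generated) message id and
// supply a callback; that callback receives the matched response message, a consumer
// error, or rtoError once the window configured via SetTimeout elapses.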
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break | {
logrus.Warn("topic and message type already registered")
} | conditional_block |
kafka.go | if publishBuilderEntry.encoded == nil && publishBuilderEntry.err == nil {
if publishBuilderEntry.traceId == "" {
publishBuilderEntry.traceId = generateID()
}
if publishBuilderEntry.messageId == "" {
publishBuilderEntry.messageId = generateID()
}
publishBuilderEntry.encoded, publishBuilderEntry.err = json.Marshal(Message{
Message: publishBuilderEntry.message,
MessageType: publishBuilderEntry.messageType,
Service: publishBuilderEntry.service,
TraceId: publishBuilderEntry.traceId,
MessageId: publishBuilderEntry.messageId,
})
}
}
// Encode PublishBuilder into array of bytes
func (publishBuilderEntry *PublishBuilderEntry) Encode() ([]byte, error) {
publishBuilderEntry.ensureEncoded()
return publishBuilderEntry.encoded, publishBuilderEntry.err
}
// Length returns size of encoded value
func (publishBuilderEntry *PublishBuilderEntry) Length() int {
publishBuilderEntry.ensureEncoded()
return len(publishBuilderEntry.encoded)
}
// NewKafkaClient create a new instance of KafkaClient
func NewKafkaClient(brokerList []string, realm string) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, nil)
}
// NewKafkaClientWithAuthentication create a new instance of KafkaClient with Authentication
func NewKafkaClientWithAuthentication(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, securityConfig)
}
func initKafkaClient(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
config := sarama.NewConfig()
config.Version = sarama.V2_1_0_0
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(config, securityConfig)
}
// currently only support 1 message broker
broker := sarama.NewBroker(brokerList[0])
err := broker.Open(config)
if err != nil {
logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
}, |
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) {
logrus.Warn("topic and message type already registered")
} else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
			timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond)
		responseLoop:
			for {
				select {
				case consumerMessage := <-consumer.Messages():
					if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
						continue
					}
					break responseLoop // a bare break would only leave the select, not the loop
				case err = <-consumer.Errors():
					callback := publishBuilder.callback
					callback(nil, err)
					break responseLoop
				case <-timeout:
					callback := publishBuilder.callback
					callback(nil, rtoError)
					break responseLoop
				}
			}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break | Topic: constructTopic(client.realm, publishBuilder.topic),
}
} | random_line_split |
kafka.go | publishBuilderEntry.ensureEncoded()
return publishBuilderEntry.encoded, publishBuilderEntry.err
}
// Length returns size of encoded value
func (publishBuilderEntry *PublishBuilderEntry) Length() int {
publishBuilderEntry.ensureEncoded()
return len(publishBuilderEntry.encoded)
}
// NewKafkaClient create a new instance of KafkaClient
func NewKafkaClient(brokerList []string, realm string) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, nil)
}
// NewKafkaClientWithAuthentication create a new instance of KafkaClient with Authentication
func NewKafkaClientWithAuthentication(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
return initKafkaClient(brokerList, realm, securityConfig)
}
func initKafkaClient(brokerList []string, realm string, securityConfig *SecurityConfig) (*KafkaClient, error) {
config := sarama.NewConfig()
config.Version = sarama.V2_1_0_0
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(config, securityConfig)
}
// currently only support 1 message broker
broker := sarama.NewBroker(brokerList[0])
err := broker.Open(config)
if err != nil {
logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
}
}
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) {
logrus.Warn("topic and message type already registered")
} else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
			timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond)
		responseLoop:
			for {
				select {
				case consumerMessage := <-consumer.Messages():
					if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
						continue
					}
					break responseLoop // a bare break would only leave the select, not the loop
				case err = <-consumer.Errors():
					callback := publishBuilder.callback
					callback(nil, err)
					break responseLoop
				case <-timeout:
					callback := publishBuilder.callback
					callback(nil, rtoError)
					break responseLoop
				}
			}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break
}
}
}()
}
// createTopic create kafka topic
func createTopic(broker *sarama.Broker, topicName string) error {
topicDetail := &sarama.TopicDetail{}
topicDetail.NumPartitions = int32(1)
topicDetail.ReplicationFactor = int16(1)
topicDetail.ConfigEntries = make(map[string]*string)
topicDetails := make(map[string]*sarama.TopicDetail)
topicDetails[topicName] = topicDetail
request := sarama.CreateTopicsRequest{
Timeout: time.Second * 15,
TopicDetails: topicDetails,
}
_, err := broker.CreateTopics(&request)
return err
}
// deleteTopic delete kafka topic
func | deleteTopic | identifier_name |
|
kafka.go | logrus.Error("unable to open kafka")
return nil, err
}
if connected, err := broker.Connected(); !connected {
logrus.Error("unable connect to kafka")
return nil, err
}
configAsync := sarama.NewConfig()
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configAsync, securityConfig)
}
asyncProducer, err := sarama.NewAsyncProducer(brokerList, configAsync)
if err != nil {
logrus.Error("unable to create async producer in kafka : ", err)
return nil, err
}
configSync := sarama.NewConfig()
configSync.Producer.Return.Successes = true
if securityConfig != nil && securityConfig.AuthenticationType == saslScramAuth {
configureSASLScramAuthentication(configSync, securityConfig)
}
syncProducer, err := sarama.NewSyncProducer(brokerList, configSync)
if err != nil {
logrus.Error("unable to create sync producer in kafka : ", err)
return nil, err
}
consumer, err := sarama.NewConsumer(brokerList, config)
if err != nil {
logrus.Error("Unable to create consumer in kafka : ", err)
return nil, err
}
client := &KafkaClient{
asyncProducer,
syncProducer,
consumer,
broker,
realm,
}
subscribeMap = make(map[string]map[string]func(message *Message, err error))
go listenAsyncError(asyncProducer)
go cleanup(client)
return client, nil
}
func configureSASLScramAuthentication(config *sarama.Config, securityConfig *SecurityConfig) {
config.Net.SASL.Enable = true
config.Net.SASL.User = securityConfig.SASLUsername
config.Net.SASL.Password = securityConfig.SASLPassword
config.Net.SASL.Handshake = true
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &SCRAMClient{HashGeneratorFcn: sha512.New} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
config.Net.TLS.Enable = true
config.Net.TLS.Config = &tls.Config{}
}
func cleanup(client *KafkaClient) {
// define signal notify
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
func() {
for {
select {
case <-sig:
_ = client.asyncProducer.Close()
_ = client.syncProducer.Close()
_ = client.consumer.Close()
_ = client.broker.Close()
}
}
}()
}
func listenAsyncError(producer sarama.AsyncProducer) {
for err := range producer.Errors() {
logrus.Error("unable to publish message using async producer to kafka : ", err)
}
}
func constructTopic(realm, topic string) string {
return realm + separator + topic
}
// SetTimeout set listening timeout for publish with response
func (client *KafkaClient) SetTimeout(timeout int) {
publishResponseTimeout = time.Duration(timeout)
}
// PublishAsync push a message to message broker topic asynchronously
func (client *KafkaClient) PublishAsync(publishBuilder *PublishBuilder) {
// send message
client.asyncProducer.Input() <- &sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
}
}
// PublishSync push a message to message broker topic synchronously
func (client *KafkaClient) PublishSync(publishBuilder *PublishBuilder) error {
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: constructTopic(client.realm, publishBuilder.topic),
})
if err != nil {
logrus.Error("unable to publish message using sync producer to kafka : ", err)
}
return err
}
// PublishWithResponses push a message to message broker topic synchronously and waiting response consumer until timeout
// Intended for Point to Point Communication
func (client *KafkaClient) PublishWithResponses(publishBuilder *PublishBuilder) {
topic := constructTopic(client.realm, publishBuilder.topic)
// register callback into map with topic and message Type as a key
// need to create topic, if the keys doesn't exists
if registerCallback(topic, publishBuilder.messageType, publishBuilder.callback) {
logrus.Warn("topic and message type already registered")
} else {
defer func() {
err := deleteTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to delete topic in kafka : ", err)
return
}
}()
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
			timeout := time.After(time.Duration(publishResponseTimeout) * time.Millisecond)
		responseLoop:
			for {
				select {
				case consumerMessage := <-consumer.Messages():
					if !processResponseMessage(consumerMessage, publishBuilder.messageId) {
						continue
					}
					break responseLoop // a bare break would only leave the select, not the loop
				case err = <-consumer.Errors():
					callback := publishBuilder.callback
					callback(nil, err)
					break responseLoop
				case <-timeout:
					callback := publishBuilder.callback
					callback(nil, rtoError)
					break responseLoop
				}
			}
}()
}
// send message
_, _, err := client.syncProducer.SendMessage(&sarama.ProducerMessage{
Timestamp: time.Now().UTC(),
Value: &PublishBuilderEntry{
PublishBuilder: *publishBuilder,
},
Topic: topic,
})
if err != nil {
logrus.Error("unable to publish message in publish with response to kafka : ", err)
return
}
}
// Register add subscriber for a topic and register callback function
func (client *KafkaClient) Register(subscribeBuilder *SubscribeBuilder) {
topic := constructTopic(client.realm, subscribeBuilder.topic)
if registerCallback(topic, subscribeBuilder.messageType, subscribeBuilder.callback) {
logrus.Error("topic and message type already registered")
return
}
err := createTopic(client.broker, topic)
if err != nil {
logrus.Error("unable to create topic in kafka : ", err)
return
}
// listening a topic
// each topic would have his own goroutine
go func() {
consumer, err := client.consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
if err != nil {
logrus.Error("unable to consume topic from kafka : ", err)
return
}
for {
select {
case consumerMessage := <-consumer.Messages():
processMessage(consumerMessage)
break
case err = <-consumer.Errors():
callback := subscribeBuilder.callback
callback(nil, err)
break
}
}
}()
}
// createTopic create kafka topic
func createTopic(broker *sarama.Broker, topicName string) error {
topicDetail := &sarama.TopicDetail{}
topicDetail.NumPartitions = int32(1)
topicDetail.ReplicationFactor = int16(1)
topicDetail.ConfigEntries = make(map[string]*string)
topicDetails := make(map[string]*sarama.TopicDetail)
topicDetails[topicName] = topicDetail
request := sarama.CreateTopicsRequest{
Timeout: time.Second * 15,
TopicDetails: topicDetails,
}
_, err := broker.CreateTopics(&request)
return err
}
// deleteTopic delete kafka topic
func deleteTopic(broker *sarama.Broker, topicName string) error {
request := sarama.DeleteTopicsRequest{
Timeout: time.Second * topicTimeout,
Topics: []string{topicName},
}
_, err := broker.DeleteTopics(&request)
return err
}
// unmarshal unmarshal received message into message struct
func unmarshal(consumerMessage *sarama.ConsumerMessage) *Message {
var receivedMessage Message
err := json.Unmarshal(consumerMessage.Value, &receivedMessage)
if err != nil {
logrus.Error("unable to unmarshal message from consumer in kafka : ", err)
return &Message{}
}
return &receivedMessage
}
// registerCallback add callback to map with topic and message Type as a key
func registerCallback(topic, messageType string, callback func(message *Message, err error)) (isRegistered bool) | {
if subscribeMap == nil {
subscribeMap = make(map[string]map[string]func(message *Message, err error))
}
if callbackMap, isTopic := subscribeMap[topic]; isTopic {
if _, isMsgType := callbackMap[messageType]; isMsgType {
return true
}
}
newCallbackMap := make(map[string]func(message *Message, err error))
newCallbackMap[messageType] = callback
subscribeMap[topic] = newCallbackMap
return false
} | identifier_body |
|
pathfinding.py | range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
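# Sketch of how the extraction above is typically driven (the enclosing function's
# name is truncated here, so `extract_road_lines` is a stand-in, as is the ogr usage):
#
#   from osgeo import ogr
#   ds = ogr.Open(args.road)
#   layer = ds.GetLayer(0)
#   layer_shape = layer.GetExtent()            # (xmin, xmax, ymin, ymax)
#   roads = extract_road_lines(layer, layer_shape)
#   # `roads` (lists of (x, y) vertices) can then be handed to run(..., roads=roads)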
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
        p_x = random.randint(0, X - 1)  # randint is inclusive at both ends; keep indices in bounds
        p_y = random.randint(0, Y - 1)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def se_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, start, end, neigh_range, gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list:
cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
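# The _ver/_return_dict pair suggests run() is launched in worker processes; a minimal
# sketch of such a driver (assumption, not shown in this file):
#
#   import multiprocessing
#   manager = multiprocessing.Manager()
#   return_dict = manager.dict()
#   p = multiprocessing.Process(target=run, args=(0, return_dict, start, end,
#                               neigh_range, gridMap, background, openset_size,
#                               length_part, degree_delta, roads))
#   p.start(); p.join()
#   # return_dict[ver] is 1 on success and 0 on failure, mirroring the code above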
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
    parser = argparse.ArgumentParser(description='Power line routing tool')
    parser.add_argument("--gridMap", help="path to the map file", type=str)
    parser.add_argument("--start", nargs="+", help="start point", type=int)
    parser.add_argument("--end", nargs="+", help="end point", type=int)
    parser.add_argument("-v", "--voltage", help="voltage level", type=int)
    parser.add_argument("-b", "--buffer", help="open set size", type=int)
    parser.add_argument("-p", "--precision", help="search precision level", type=int)
    parser.add_argument("-r", "--road", help="path to the road SHP file", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7 | = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
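    # The trailing comments above are the number of candidate moves per expansion,
    # i.e. length_part * (360 / degree_delta): 5 * 4 = 20, ..., 20 * 12 = 240.
    # The same mapping as a lookup table (equivalent sketch):
    #
    #   PRECISION_TABLE = {1: (5, 90), 2: (10, 90), 3: (10, 90), 4: (5, 45),
    #                      5: (10, 45), 6: (20, 45), 7: (20, 30)}
    #   length_part, degree_delta = PRECISION_TABLE.get(precision, (5, 90))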
print("读取TIFF文件中...")
try:
tif = TIFF.open(args.gridMap, mode='r') # 打開tiff文件進行讀取
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class1 | :
length_part = 20
degree_delta | conditional_block |
pathfinding.py | range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
        p_x = random.randint(0, X - 1)  # randint is inclusive at both ends; keep indices in bounds
        p_y = random.randint(0, Y - 1)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def se_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, start, end, | , gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list:
cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
    parser = argparse.ArgumentParser(description='Power line routing tool')
    parser.add_argument("--gridMap", help="path to the map file", type=str)
    parser.add_argument("--start", nargs="+", help="start point", type=int)
    parser.add_argument("--end", nargs="+", help="end point", type=int)
    parser.add_argument("-v", "--voltage", help="voltage level", type=int)
    parser.add_argument("-b", "--buffer", help="open set size", type=int)
    parser.add_argument("-p", "--precision", help="search precision level", type=int)
    parser.add_argument("-r", "--road", help="path to the road SHP file", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7:
length_part = 20
degree_delta = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
print("读取TIFF文件中...")
try:
tif = TIFF.open(args.gridMap, mode='r') # 打開tiff文件進行讀取
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class1 = | neigh_range | identifier_name |
pathfinding.py | range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
        p_x = random.randint(0, X - 1)  # randint is inclusive at both ends; keep indices in bounds
        p_y = random.randint(0, Y - 1)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def s | t, end, neigh_range, gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list:
cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
    parser = argparse.ArgumentParser(description='Power line routing tool')
    parser.add_argument("--gridMap", help="path to the map file", type=str)
    parser.add_argument("--start", nargs="+", help="start point", type=int)
    parser.add_argument("--end", nargs="+", help="end point", type=int)
    parser.add_argument("-v", "--voltage", help="voltage level", type=int)
    parser.add_argument("-b", "--buffer", help="open set size", type=int)
    parser.add_argument("-p", "--precision", help="search precision level", type=int)
    parser.add_argument("-r", "--road", help="path to the road SHP file", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7:
length_part = 20
degree_delta = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
print("读取TIFF文件中...")
try:
tif = TIFF.open(args.gridMap, mode='r') # 打開tiff文件進行讀取
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class | e_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, star | identifier_body |
pathfinding.py | range(0, layer.GetFeatureCount()):
feat = layer.GetFeature(i)
geom = feat.geometry()
if geom is None:
continue
line = []
if geom.GetGeometryCount() > 0:
for j in range(0, geom.GetGeometryCount()):
g = feat.geometry().GetGeometryRef(j)
for p in range(0, g.GetPointCount()):
pt = g.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
else:
for p in range(0, geom.GetPointCount()):
pt = geom.GetPoint(p)
# new_x,new_y=coordinate_transfer(pt[0],pt[1],layer_shape, sketch_shape)
# line.append((new_x,new_y))
line.append((int(pt[0]), int(layer_shape[3] - int(pt[1]))))
line_list.append(line)
return line_list
def point_generator(gridMap, type):
X, Y = gridMap.shape
while True:
        p_x = random.randint(0, X - 1)  # randint is inclusive at both ends; keep indices in bounds
        p_y = random.randint(0, Y - 1)
if gridMap[p_x][p_y] == type:
return p_x, p_y
def se_generator(gridMap):
s_x, s_y = point_generator(gridMap, 1)
e_x, e_y = point_generator(gridMap, 1)
return s_x, s_y, e_x, e_y
def run(_ver, _return_dict, start, end, neigh_range, gridMap, background, openset_size, length_part, degree_delta,
roads=None):
# time1 = time.time()
# gridMap = np.load('../res/sampled_sketch.npy')
# gridMap = np.load('map.npy')
# time2 = time.time()
# print("图片加载完毕,耗时{}".format(time2 - time1))
# maze = cv2.inRange(gridMap, 2.9, 3.1)
# start = (0, 0)
# end = (2500, 1000)
# neigh_range = (200, 250)
# sample_n = 20
# road1 = [(776, 523), (1425, 393), (2930, 122)]
# road2 = [(1285, 166), (1425, 393), (1880, 1075), (2020, 1973), (2086, 3737)]
# com_line = [(3125, 718), (900, 1700), (1000, 2265), (1166, 3337), (3060, 3142)]
# forbidden =
# finder = pathfinder(maze, neigh_range, sample_n, [road1, road2], [com_line], gridMap)
# print("maze shape:{},{}".format(gridMap.shape[0], gridMap.shape[1]))
# print("类型:起点:{},终点:{}".format(gridMap[start[1]][start[0]], gridMap[end[1]][end[0]]))
time3 = time.time()
plt.figure()
finder = pathfinder(_ver, gridMap, neigh_range, openset_size=openset_size, length_part=length_part,
degree_delta=degree_delta, roads=roads)
path, close_list = finder.astar(start, end)
if path is None:
# print("查找失败,无解")
for p in close_list:
cv2.circle(background, p, 5, (255, 0, 0), 2)
plt.imshow(background)
plt.savefig("output/{}/fail_fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:], _ver))
_return_dict[_ver] = 0
return False
time4 = time.time()
print("寻路完毕,耗时{}".format(time4 - time3))
p1 = path[0]
for p in close_list: | cv2.circle(background, p, 10, (0, 0, 255), 5)
for index, p in enumerate(path):
if index == 0:
continue
p2 = p
cv2.line(background, p1, p2, (255, 0, 0), 40)
p1 = p
for p in path:
cv2.circle(background, p, 10, (0, 0, 0), 40)
# for p in finder.close_set:
# cv2.circle(background, p, 3, (0, 255, 0))
plt.imshow(background)
plt.savefig(
"output/{}/fig_{}_{}_{}_ver{}.png".format(unique_tag, neigh_range[0], neigh_range[1],
str(round(time.time()))[-5:],
_ver))
np.save("output/{}/path__{}_ver{}.npy".format(unique_tag, str(round(time.time()))[-5:], _ver), np.array(path))
_return_dict[_ver] = 1
return 1
if __name__ == "__main__":
if not os.path.exists("output/"):
os.makedirs("output/")
if not os.path.exists("output/{}/".format(unique_tag)):
os.makedirs("output/{}/".format(unique_tag))
    parser = argparse.ArgumentParser(description='Power line routing tool')
    parser.add_argument("--gridMap", help="path to the map file", type=str)
    parser.add_argument("--start", nargs="+", help="start point", type=int)
    parser.add_argument("--end", nargs="+", help="end point", type=int)
    parser.add_argument("-v", "--voltage", help="voltage level", type=int)
    parser.add_argument("-b", "--buffer", help="open set size", type=int)
    parser.add_argument("-p", "--precision", help="search precision level", type=int)
    parser.add_argument("-r", "--road", help="path to the road SHP file", type=str)
args = parser.parse_args()
try:
start = (args.start[0], args.start[1])
except:
print("没有输入起点!")
try:
end = (args.end[0], args.end[1])
except:
print("没有输入终点!")
voltage_level = args.voltage
neigh_range = (500, 600)
if voltage_level == 35:
neigh_range = (100, 150)
elif voltage_level == 110:
neigh_range = (150, 250)
elif voltage_level == 220:
neigh_range = (250, 450)
elif voltage_level == 330:
neigh_range = (300, 400)
elif voltage_level == 500:
neigh_range = (350, 450)
elif voltage_level == 750:
neigh_range = (450, 500)
elif voltage_level == 1000:
neigh_range = (500, 600)
else:
raise Exception("电压等级输入错误!")
try:
openset_size = args.buffer
except:
print("请输入合适的搜索集大小!")
try:
precision = args.precision
except:
print("请输入搜索精确度!")
if precision == 1:
length_part = 5
degree_delta = 90 # 20
elif precision == 2:
length_part = 10
degree_delta = 90 # 40
elif precision == 3:
length_part = 10
degree_delta = 90 # 40
elif precision == 4:
length_part = 5
degree_delta = 45 # 40
elif precision == 5:
length_part = 10
degree_delta = 45 # 80
elif precision == 6:
length_part = 20
degree_delta = 45 # 160
elif precision == 7:
length_part = 20
degree_delta = 30 # 240
else:
length_part = 5
degree_delta = 90 # 20
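    # Illustrative invocation (hedged): the script name, file paths and coordinates
    # below are assumptions, not values taken from this project.
    #   python pathfinder_run.py --gridMap data/landuse.tif --start 100 200 \
    #          --end 2500 1000 -v 220 -b 50 -p 4 -r data/roads.shp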
print("读取TIFF文件中...")
try:
        tif = TIFF.open(args.gridMap, mode='r')  # open the TIFF file for reading
except:
print("输入的路径有误!")
im = tif.read_image()
print("正在分析各类地块...")
class4 = cv2.inRange(im, 3.9, 4.1)
class1 = | random_line_split |
|
get_tips_nonlocal.py | or y for the padding:
[0... pad | pad ... width + pad | width + pad ... width + 2 * pad]
return -9999 if X is within rejection_distance of the edge,
return X if X is in [pad ... width + pad], which is if X is in the unpadded frame, which has width = width
else return X reflected onto the unpadded frame'''
P = rejection_distance
X -= pad
if X < -pad+P:
X = -9999 # throw out X later
elif X < 0:
X += width
if X > width+pad-P:
X = -9999 # throw out X later
elif X >= width:
X -= width
return X
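# --- Illustrative sketch (added; hedged) ---
# A minimal, self-contained check of how unpad() maps padded-frame coordinates
# back onto the unpadded frame. The numbers (pad=256, width=512,
# rejection_distance=128) are assumptions chosen only for illustration; call
# _example_unpad() to run the checks.
def _example_unpad():
    _pad, _width, _tol = 256, 512, 128
    # a point well inside the unpadded frame maps to itself
    assert unpad(X=300 + _pad, pad=_pad, width=_width, rejection_distance=_tol) == 300
    # a point that fell into the left padding is wrapped back to the right side
    assert unpad(X=_pad - 100, pad=_pad, width=_width, rejection_distance=_tol) == _width - 100
    # a point within rejection_distance of the padded edge is flagged with -9999
    assert unpad(X=10, pad=_pad, width=_width, rejection_distance=_tol) == -9999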
# @njit
def textures_to_padded_textures(txt,dtexture_dt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
def matrices_to_padded_matrices(txt,dtexture_dt, pad):
'''txt and dtexture_dt are rank two tensors. i.e. the channel_no is 1.
large pad allows knots to be recorded right.
'''
# width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt, pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
# #informal test for ^that
# padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
# print(txt[0,0])
# print(padded_txt[...,2:5][pad,pad])
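# Hedged, runnable version of the informal test above; the array size and pad
# value are assumptions used only for illustration.
def _example_matrices_to_padded_matrices():
    txt = np.arange(16.0).reshape(4, 4)
    dtexture_dt = np.zeros_like(txt)
    pad = 2
    padded_txt, dpadded_txt_dt = matrices_to_padded_matrices(txt, dtexture_dt, pad)
    # wrap-padding grows each axis by 2*pad ...
    assert padded_txt.shape == (8, 8)
    # ... and the original [0, 0] value reappears at [pad, pad]
    assert padded_txt[pad, pad] == txt[0, 0]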
# @njit
def | (mat, pad, channel_no=3):
''''''
return np.pad(array = mat, pad_width = pad, mode = 'wrap')[...,pad:pad+channel_no]
# width, height = mat.shape[:2]
# padded_width = 512 + pad #pixels
# padded_mat = np.pad(array = mat, pad_width = pad, mode = 'wrap')
# return padded_mat[...,2:5]
# @njit
def pad_texture(txt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txta = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
padded_txtb = np.pad(array = txt[...,1], pad_width = pad, mode = 'wrap')
padded_txtc = np.pad(array = txt[...,2], pad_width = pad, mode = 'wrap')
# dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return np.array([padded_txta,padded_txtb,padded_txtc]).T
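# Hedged shape check for pad_texture(); the texture size and pad value are
# assumptions. Note the trailing .T above, so the two spatial axes come back
# swapped while the channel axis stays last.
def _example_pad_texture():
    txt = np.zeros((512, 512, 3))
    padded = pad_texture(txt, pad=256)
    assert padded.shape == (1024, 1024, 3)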
def map_pbc_tips_back(tips, pad, width, height, edge_tolerance, atol = 1e-11):
'''width and height are from the shape of the unpadded buffer.
TODO: get intersection to be njit compiled, then njit map_pbc_tips_back,
for which I'll need to return to using numba.typed.List() instead of [].'''
atol_squared = atol**2
min_dist_squared_init = width**2
s_tips, x_tips, y_tips = tips
s1_mapped_lst = []; s2_mapped_lst = [];
x_mapped_lst = []; y_mapped_lst = [];
# s1_mapped_lst = List(); s2_mapped_lst = List();
# x_mapped_lst = List(); y_mapped_lst = List();
for n, x in enumerate(x_tips):
y = y_tips[n]; s = s_tips[n]
S1, S2 = s_tips[n]
y = y_tips[n]
for X, Y in zip(x, y):
X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y)
lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2)
s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to only get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np | pad_matrix | identifier_name |
get_tips_nonlocal.py | _tips[n]
y = y_tips[n]
for X, Y in zip(x, y):
X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y)
lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2)
s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to only get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np.nan,np.nan,np.nan]
# txt = np.stack([texture,texture,texture]).T
# print(
# get_state_nearest(x,y,txt)
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 3.5, kind='cubic')
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 0.5, kind='linear')
# )
##############################################
## Get Electrophysiological (EP) State Data #
##############################################
def get_states(tips_mapped, txt, pad,
               nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
'''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = txt
padded_txt = pad_matrix(txt, pad)
n_lst, x_lst, y_lst = tips_mapped
y_locations = np.array(flatten(x_lst))+pad#np.array(tips_mapped[2])
x_locations = np.array(flatten(y_lst))+pad#np.array(tips_mapped[3])
states_nearest = []; states_interpolated_linear = []; states_interpolated_cubic = [];
for x,y in zip(x_locations,y_locations):
state_nearest = get_state_nearest(x,y,txt=padded_txt)
state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 0.5, kind='linear')
state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 3.5, kind='cubic')
states_nearest.append(state_nearest)
states_interpolated_linear.append(state_interpolated_linear)
states_interpolated_cubic.append(state_interpolated_cubic)
return states_nearest, states_interpolated_linear, states_interpolated_cubic
def add_states(tips_mapped, states_EP):
tips_mapped = list(tips_mapped)
tips_mapped.extend(states_EP)
return tuple(tips_mapped)
def unwrap_EP(df,
EP_col_name = 'states_interpolated_linear',
drop_original_column=False):
'''If this function is slow, it may be a result of df[EP_col_name] containing strings.'''
EP_col_exists = EP_col_name in df.columns.values
if not EP_col_exists:
print(f"Caution! EP_col_name '{EP_col_exists}' does not exist. Returning input df.")
return df
else:
V_lst = []
f_lst = []
s_lst = []
for index, row in df.iterrows():
try:
V,f,s = row[EP_col_name]
except Exception as e:
V,f,s = eval(row[EP_col_name])
V_lst.append(V)
f_lst.append(f)
s_lst.append(s)
df['V'] = V_lst
df['f'] = f_lst
df['s'] = s_lst
df.drop(columns=[EP_col_name], inplace=True)
return df
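# --- Illustrative sketch (hedged) ---
# unwrap_EP() expects a DataFrame whose EP column holds (V, f, s) triples, either
# as sequences or as their string representation. The values below are assumptions;
# pandas is imported locally in case the module does not import it at top level.
def _example_unwrap_EP():
    import pandas as pd
    df = pd.DataFrame({'states_interpolated_linear': [[0.8, 0.1, 0.2],
                                                      '[0.5, 0.3, 0.4]']})
    out = unwrap_EP(df)
    # the packed column is replaced by scalar V, f, s columns
    assert list(out.columns) == ['V', 'f', 's']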
@njit
def get_grad_direction(texture):
| '''get the gradient direction field, N
out_Nx, out_Ny = get_grad_direction(texture)
'''
height, width = texture.shape
out_Nx = np.zeros_like(texture, dtype=np.float64)
out_Ny = np.zeros_like(texture, dtype=np.float64)
DX = 1/0.025; DY = 1/0.025;
for y in range(height):
for x in range(width):
up = _pbc(texture,y+1,x,height,width)
down = _pbc(texture,y-1,x,height,width)
left = _pbc(texture,y,x-1,height,width)
right = _pbc(texture,y,x+1,height,width)
Nx = (right-left)/DX
Ny = (up-down)/DY
norm = np.sqrt( Nx**2 + Ny**2 )
if norm == 0:
out_Nx[y,x] = -10.
out_Ny[y,x] = -10.
else: | identifier_body |
|
get_tips_nonlocal.py | x or y for the padding:
[0... pad | pad ... width + pad | width + pad ... width + 2 * pad]
return -9999 if X is within rejection_distance of the edge,
return X if X is in [pad ... width + pad], which is if X is in the unpadded frame, which has width = width
else return X reflected onto the unpadded frame'''
P = rejection_distance
X -= pad
if X < -pad+P:
X = -9999 # throw out X later
elif X < 0:
X += width
if X > width+pad-P:
X = -9999 # throw out X later
elif X >= width:
X -= width
return X
# @njit
def textures_to_padded_textures(txt,dtexture_dt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
def matrices_to_padded_matrices(txt,dtexture_dt, pad):
'''txt and dtexture_dt are rank two tensors. i.e. the channel_no is 1.
large pad allows knots to be recorded right.
'''
# width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
dpadded_txt_dt = np.pad(array = dtexture_dt, pad_width = pad, mode = 'wrap')
return padded_txt, dpadded_txt_dt
# #informal test for ^that
# padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
# print(txt[0,0])
# print(padded_txt[...,2:5][pad,pad])
# @njit
def pad_matrix(mat, pad, channel_no=3):
''''''
return np.pad(array = mat, pad_width = pad, mode = 'wrap')[...,pad:pad+channel_no]
# width, height = mat.shape[:2]
# padded_width = 512 + pad #pixels
# padded_mat = np.pad(array = mat, pad_width = pad, mode = 'wrap')
# return padded_mat[...,2:5]
# @njit
def pad_texture(txt, pad):
'''large pad allows knots to be recorded right.
consider pad = int(512/2), edge_tolerance = int(512/4)'''
width, height = txt.shape[:2]
# padded_width = 512 + pad #pixels
padded_txta = np.pad(array = txt[...,0], pad_width = pad, mode = 'wrap')
padded_txtb = np.pad(array = txt[...,1], pad_width = pad, mode = 'wrap')
padded_txtc = np.pad(array = txt[...,2], pad_width = pad, mode = 'wrap')
# dpadded_txt_dt = np.pad(array = dtexture_dt[...,0], pad_width = pad, mode = 'wrap')
return np.array([padded_txta,padded_txtb,padded_txtc]).T
def map_pbc_tips_back(tips, pad, width, height, edge_tolerance, atol = 1e-11):
'''width and height are from the shape of the unpadded buffer.
TODO: get intersection to be njit compiled, then njit map_pbc_tips_back,
for which I'll need to return to using numba.typed.List() instead of [].'''
atol_squared = atol**2
min_dist_squared_init = width**2
s_tips, x_tips, y_tips = tips
s1_mapped_lst = []; s2_mapped_lst = [];
x_mapped_lst = []; y_mapped_lst = [];
# s1_mapped_lst = List(); s2_mapped_lst = List();
# x_mapped_lst = List(); y_mapped_lst = List();
for n, x in enumerate(x_tips):
y = y_tips[n]; s = s_tips[n]
S1, S2 = s_tips[n]
y = y_tips[n]
for X, Y in zip(x, y):
X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y) | s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to only get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np | lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2) | random_line_split |
get_tips_nonlocal.py | X = unpad(X=X, pad=pad, width=width , rejection_distance=edge_tolerance)
if not (X == -9999):
Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
if not (Y == -9999):
# find the index and distance to the nearest tip already on the mapped_lsts
min_dist_squared = min_dist_squared_init; min_index = -1
for j0, (x0,y0) in enumerate(zip(x_mapped_lst,y_mapped_lst)):
# compute the distance between x0,y0 and X,Y
dist_squared = (X-x0)**2+(Y-y0)**2
# if ^that distance is the smallest, update min_dist with it
if dist_squared < min_dist_squared:
min_dist_squared = dist_squared
min_index = j0
#if this new tip is sufficiently far from all other recorded tips,
if min_dist_squared >= atol:
# then append the entry to all four lists
x_mapped_lst.append(X)
y_mapped_lst.append(Y)
lst_S1 = []#List()
lst_S1.append(S1)
lst_S2 = []#List()
lst_S2.append(S2)
s1_mapped_lst.append(lst_S1)
s2_mapped_lst.append(lst_S2)
else:
#just append to the previous entry in the s1 and s2 lists if the contour isn't already there
s1_mapped_lst[min_index].append(S1)
s2_mapped_lst[min_index].append(S2)
return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt):
'''nearest local texture values, ignore any index errors and/or periodic boundary conditions'''
xint = np.round(x).astype(dtype=int)
yint = np.round(y).astype(dtype=int)
try:
state_nearest = list(txt[xint,yint])
except IndexError:
state_nearest = nanstate
return state_nearest
#for get_state_interpolated
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to only get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = 3, rad = 0.5, kind='linear'):
'''linear interpolation of local texture values to subpixel precision
using 2D linear interpolation with scipy.interpolate.interp2d.
channel_no must be len(nanstate).
for channel_no = 3, use nanstate = [np.nan,np.nan,np.nan].
rad = the pixel radius considered in interpolation.
kind can be "linear" or "cubic".
if kind="cubic", then set rad = 3.5.'''
state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
try:
xlo = np.round(x-rad).astype(dtype=int)
ylo = np.round(y-rad).astype(dtype=int)
xhi = np.round(x+rad).astype(dtype=int)
yhi = np.round(y+rad).astype(dtype=int)
yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
local_values = txt[ylo:yhi+1,xlo:xhi+1]
interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
for c in range(channel_no):
zloc = local_values[...,c].flatten().copy()
state_interpolated[c] = float(interp_foo(x,y,zloc))
except IndexError:
pass
except RuntimeWarning:
pass
return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np.nan,np.nan,np.nan]
# txt = np.stack([texture,texture,texture]).T
# print(
# get_state_nearest(x,y,txt)
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 3.5, kind='cubic')
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 0.5, kind='linear')
# )
##############################################
## Get Electrophysiological (EP) State Data #
##############################################
def get_states(tips_mapped, txt, pad,
               nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
'''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = txt
padded_txt = pad_matrix(txt, pad)
n_lst, x_lst, y_lst = tips_mapped
y_locations = np.array(flatten(x_lst))+pad#np.array(tips_mapped[2])
x_locations = np.array(flatten(y_lst))+pad#np.array(tips_mapped[3])
states_nearest = []; states_interpolated_linear = []; states_interpolated_cubic = [];
for x,y in zip(x_locations,y_locations):
state_nearest = get_state_nearest(x,y,txt=padded_txt)
state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 0.5, kind='linear')
state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
channel_no = channel_no, rad = 3.5, kind='cubic')
states_nearest.append(state_nearest)
states_interpolated_linear.append(state_interpolated_linear)
states_interpolated_cubic.append(state_interpolated_cubic)
return states_nearest, states_interpolated_linear, states_interpolated_cubic
def add_states(tips_mapped, states_EP):
tips_mapped = list(tips_mapped)
tips_mapped.extend(states_EP)
return tuple(tips_mapped)
def unwrap_EP(df,
EP_col_name = 'states_interpolated_linear',
drop_original_column=False):
'''If this function is slow, it may be a result of df[EP_col_name] containing strings.'''
EP_col_exists = EP_col_name in df.columns.values
if not EP_col_exists:
print(f"Caution! EP_col_name '{EP_col_exists}' does not exist. Returning input df.")
return df
else:
V_lst = []
f_lst = []
s_lst = []
for index, row in df.iterrows():
try:
V,f,s = row[EP_col_name]
except Exception as e:
V,f,s = eval(row[EP_col_name])
V_lst.append(V)
f_lst.append(f)
s_lst.append(s)
df['V'] = V_lst
df['f'] = f_lst
df['s'] = s_lst
df.drop(columns=[EP_col_name], inplace=True)
return df
@njit
def get_grad_direction(texture):
'''get the gradient direction field, N
out_Nx, out_Ny = get_grad_direction(texture)
'''
height, width = texture.shape
out_Nx = np.zeros_like(texture, dtype=np.float64)
out_Ny = np.zeros_like(texture, dtype=np.float64)
DX = 1/0.025; DY = 1/0.025;
for y in range(height):
for x in range(width):
up = _pbc(texture,y+1,x,height,width)
down = _pbc(texture,y-1,x,height,width)
left = _pbc(texture,y,x-1,height,width)
right = _pbc(texture,y,x+1,height,width)
Nx = (right-left)/DX
Ny = (up-down)/DY
norm = np.sqrt( Nx**2 + Ny**2 )
if norm == 0:
out_Nx[y,x] = -10.
out_Ny[y,x] = -10.
else:
| out_Nx[y,x] = Nx/norm
out_Ny[y,x] = Ny/norm | conditional_block |
|
details.js | if(iLeft<0) {
// iLeft = 0;
// } else if(iLeft>iWidht) {
// iLeft = iWidht;
// }
//
// if(iTop<0) {
// iTop = 0;
// } else if(iTop>iHeight) {
// iTop = iHeight;
// }
// $(".zoomShow-pud").css({"left":iLeft,"top":iTop})
// $(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
// // hard-coded values used here rather than computing them properly
// console.log(iTop)
// })
})
$(function(){
//load images and page content from JSON
var goodid
$.get("../data/details/imgs.json",function(data){
var con=data
for(var i in con){
$(".bx-bo-left-con").append('<ul><li><a href="details.html"><img src='+con[i].src+'/></a></li><li class="left-conli-txt"><a href="##" >'+con[i].userName+'</a></li><li class="left-conli-val">'+con[i].price+'</li></ul>')
}
})
// popular products group
// for(var i=1;i<=4;i++){
// $(".right-bom-dtu p").append("<img src='../img/details/right-bom"+i+".jpg'/>")
// }
//product detail images below
$.get("../data/details/comlist.json",function(data){
if(parseInt($.cookie("comid"))%2==0){
var cl=data.comList2
}else{
var cl=data.comList1
}
$("#listName").html(cl.comname)
$("#bx-tit>P").html(cl.comname)
$(".pval-ms").find("em").html(cl.price)
$(".pval-ms").find("del").html(cl.yPrice)
$(".data-attr2").eq(0).find("span").html(cl.color)
$(".data-attr2").eq(1).find("span").html(cl.size)
for(var i=1;i<=5;i++){
$(".jqzoomDiv>ul").find("li").eq(i-1).find("img").attr("src",cl.imgSrc+"detalis-zoomdiv"+i+".JPG")
}
for(var i=1;i<=4;i++){
$(".right-bom-dtu p").append("<img src='"+cl.imgSrc+"right-bom"+i+".jpg'/>")
}
//magnifier (zoom) effect
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong1.JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big1.JPG")
$(".jqzoomDiv ul li").each(function(){
$(this).mouseover(function(){
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
})
})
$(".bx-data-lt-2").mouseover(function(){
$(".zoomShow-pud").css({"display":"block"})
$(".bigShow").css({"display":"block"})
})
$(".bx-data-lt-2").mouseout(function(){
$(".zoomShow-pud").css({"display":"none"})
$(".bigShow").css({"display":"none"})
})
$(".zoomShow").mousemove(function(e){
// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
if(iLeft<0) {
iLeft = 0;
} else if(iLeft>iWidht) {
iLeft = iWidht;
}
if(iTop<0) {
iTop = 0;
} else if(iTop>iHeight) {
iTop = iHeight;
}
$(".zoomShow-pud").css({"left":iLeft,"top":iTop})
$(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
//hard-coded values used here for simplicity
//console.log(iTop)
})
//button click events
downBtn(cl)
})
})
function downBtn(cl){
//the correct way to read data back out of the cookie
//console.log(JSON.parse($.cookie("carList")).ID1.iNum)
goodid=cl.id
| imgSrc:cl.imgSrc+"detalis-zoomdiv-zhong1.JPG",
id:cl.id,
color:cl.color,
size:cl.size,
price:cl.price,
//iNum:num
}
// var value=0
// if($.cookie("carList")){
// for(var i in JSON.parse($.cookie("carList"))){
// value +=parseInt(JSON.parse($.cookie("carList"))[i].iNum)
// }
// //value=parseInt(JSON.parse($.cookie("carList")).ID1.iNum)+parseInt(JSON.parse($.cookie("carList")).ID2.iNum)
// }
$(".addCart").mousedown(function(){
//console.log($("#buyNum").val())
if(parseInt($("#buyNum").val())){
//check whether the quantity to add is 0
if(cl.id=="1"){
num1+=parseInt($("#buyNum").val())
com1.iNum=num1
}else{
num2+=parseInt($("#buyNum").val())
com1.iNum=num2
}
//compute the cart count
$.cookie("carList"+cl.id,JSON.stringify(com1),{expires:7,path:"/"})
//console.log($.cookie("carList1"))
//console.log($.cookie("carList2"))
var value=0
value=num1+num2
// if($.cookie("carList")){
// value+=parseInt($("#buyNum").val())
// }else{
// value=0
// }
$("#carNum").html("("+value+")")
}
})
}
//the count the cart in the top nav bar should display
//promotion countdown
$(function(){
var timer=$(".time-end span")
var time=24*3600*1000 //promotion countdown (24 h)
setInterval(function(){
time-=1000
var hour=parseInt(time/3600000)%24
var
min=parseInt(time/3600000*60)%60
var sec=parseInt(time/3600000*24*60)%60
timer.find("em").eq(0).html(hour)
timer.find("em").eq(1).html(min)
timer.find("em").eq(2).html(sec)
// console.log(time)
},500)
})
//add-item quantity buttons
$(function(){
var oAdd=$(".buyAdd")
var oRed=$(".buyReduce")
var oValue=$("#buyNum")
oAdd.css("cursor","pointer")
oRed.css("cursor","pointer")
var val=oValue.val()
oAdd.mousedown(function | $(".addCart").css("cursor","pointer")
var num1=0;num2=0;
if($.cookie("carList1")){
num1=parseInt(JSON.parse($.cookie("carList1")).iNum)
}
if($.cookie("carList2")){
num2=parseInt(JSON.parse($.cookie("carList2")).iNum)
}
//check whether the cookie already exists
// if($.cookie("carList")){
// for(var m in JSON.parse($.cookie("carList"))){
// num+=parseInt(JSON.parse($.cookie("carList"))[m].iNum)
// //there is a bug here: each accumulation adds the sum of both items
// }
// }else{
// num=0
// }
var com1= $.cookie("carList")?JSON.parse($.cookie("carList")):{}
com1={
userName:cl.comname, | identifier_body |
details.js | if(iLeft<0) {
// iLeft = 0;
// } else if(iLeft>iWidht) {
// iLeft = iWidht;
// }
//
// if(iTop<0) {
// iTop = 0;
// } else if(iTop>iHeight) {
// iTop = iHeight;
// }
// $(".zoomShow-pud").css({"left":iLeft,"top":iTop})
// $(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
// // hard-coded values used here rather than computing them properly
// console.log(iTop)
// })
})
$(function(){
//load images and page content from JSON
var goodid
$.get("../data/details/imgs.json",function(data){
var con=data
for(var i in con){
$(".bx-bo-left-con").append('<ul><li><a href="details.html"><img src='+con[i].src+'/></a></li><li class="left-conli-txt"><a href="##" >'+con[i].userName+'</a></li><li class="left-conli-val">'+con[i].price+'</li></ul>')
}
})
// popular products group
// for(var i=1;i<=4;i++){
// $(".right-bom-dtu p").append("<img src='../img/details/right-bom"+i+".jpg'/>")
// }
//product detail images below
$.get("../data/details/comlist.json",function(data){
if(parseInt($.cookie("comid"))%2==0){
var cl=data.comList2
}else{
var cl=data.comList1
}
$("#listName").html(cl.comname)
$("#bx-tit>P").html(cl.comname)
$(".pval-ms").find("em").html(cl.price)
$(".pval-ms").find("del").html(cl.yPrice)
$(".data-attr2").eq(0).find("span").html(cl.color)
$(".data-attr2").eq(1).find("span").html(cl.size)
for(var i=1;i<=5;i++){
$(".jqzoomDiv>ul").find("li").eq(i-1).find("img").attr("src",cl.imgSrc+"detalis-zoomdiv"+i+".JPG")
}
for(var i=1;i<=4;i++){
$(".right-bom-dtu p").append("<img src='"+cl.imgSrc+"right-bom"+i+".jpg'/>")
}
//magnifier (zoom) effect
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong1.JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big1.JPG")
$(".jqzoomDiv ul li").each(function(){
$(this).mouseover(function(){
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
})
})
$(".bx-data-lt-2").mouseover(function(){
$(".zoomShow-pud").css({"display":"block"})
$(".bigShow").css({"display":"block"})
})
$(".bx-data-lt-2").mouseout(function(){
$(".zoomShow-pud").css({"display":"none"})
$(".bigShow").css({"display":"none"})
})
$(".zoomShow").mousemove(function(e){
// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
if(iLeft<0) {
iLeft = 0;
} else if(iLeft>iWidht) {
iLeft = iWidht;
}
if(iTop<0) {
iTop = 0;
} else if(iTop>iHeight) {
iTop = iHeight;
}
$(".zoomShow-pud").css({"left":iLeft,"top":iTop})
$(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
//hard-coded values used here for simplicity
//console.log(iTop)
})
//button click events
downBtn(cl)
})
})
function downBtn(cl){
//the correct way to read data back out of the cookie
//console.log(JSON.parse($.cookie("carList")).ID1.iNum)
go | .id
$(".addCart").css("cursor","pointer")
var num1=0;num2=0;
if($.cookie("carList1")){
num1=parseInt(JSON.parse($.cookie("carList1")).iNum)
}
if($.cookie("carList2")){
num2=parseInt(JSON.parse($.cookie("carList2")).iNum)
}
//check whether the cookie already exists
// if($.cookie("carList")){
// for(var m in JSON.parse($.cookie("carList"))){
// num+=parseInt(JSON.parse($.cookie("carList"))[m].iNum)
// //there is a bug here: each accumulation adds the sum of both items
// }
// }else{
// num=0
// }
var com1= $.cookie("carList")?JSON.parse($.cookie("carList")):{}
com1={
userName:cl.comname,
imgSrc:cl.imgSrc+"detalis-zoomdiv-zhong1.JPG",
id:cl.id,
color:cl.color,
size:cl.size,
price:cl.price,
//iNum:num
}
// var value=0
// if($.cookie("carList")){
// for(var i in JSON.parse($.cookie("carList"))){
// value +=parseInt(JSON.parse($.cookie("carList"))[i].iNum)
// }
// //value=parseInt(JSON.parse($.cookie("carList")).ID1.iNum)+parseInt(JSON.parse($.cookie("carList")).ID2.iNum)
// }
$(".addCart").mousedown(function(){
//console.log($("#buyNum").val())
if(parseInt($("#buyNum").val())){
//check whether the quantity to add is 0
if(cl.id=="1"){
num1+=parseInt($("#buyNum").val())
com1.iNum=num1
}else{
num2+=parseInt($("#buyNum").val())
com1.iNum=num2
}
//compute the cart count
$.cookie("carList"+cl.id,JSON.stringify(com1),{expires:7,path:"/"})
//console.log($.cookie("carList1"))
//console.log($.cookie("carList2"))
var value=0
value=num1+num2
// if($.cookie("carList")){
// value+=parseInt($("#buyNum").val())
// }else{
// value=0
// }
$("#carNum").html("("+value+")")
}
})
}
//the count the cart in the top nav bar should display
//promotion countdown
$(function(){
var timer=$(".time-end span")
var time=24*3600*1000 //promotion countdown (24 h)
setInterval(function(){
time-=1000
var hour=parseInt(time/3600000)%24
var min=parseInt(time/3600000*60)%60
var sec=parseInt(time/3600000*24*60)%60
timer.find("em").eq(0).html(hour)
timer.find("em").eq(1).html(min)
timer.find("em").eq(2).html(sec)
// console.log(time)
},500)
})
//添加商品按钮
$(function(){
var oAdd=$(".buyAdd")
var oRed=$(".buyReduce")
var oValue=$("#buyNum")
oAdd.css("cursor","pointer")
oRed.css("cursor","pointer")
var val=oValue.val()
oAdd.mousedown(function(){
| odid=cl | identifier_name |
details.js | if(iLeft<0) {
// iLeft = 0;
// } else if(iLeft>iWidht) {
// iLeft = iWidht;
// }
//
// if(iTop<0) {
// iTop = 0;
// } else if(iTop>iHeight) {
// iTop = iHeight;
// }
// $(".zoomShow-pud").css({"left":iLeft,"top":iTop})
// $(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
// // hard-coded values used here rather than computing them properly
// console.log(iTop)
// })
})
$(function(){
//load images and page content from JSON
var goodid
$.get("../data/details/imgs.json",function(data){
var con=data
for(var i in con){
$(".bx-bo-left-con").append('<ul><li><a href="details.html"><img src='+con[i].src+'/></a></li><li class="left-conli-txt"><a href="##" >'+con[i].userName+'</a></li><li class="left-conli-val">'+con[i].price+'</li></ul>')
}
})
// popular products group
// for(var i=1;i<=4;i++){
// $(".right-bom-dtu p").append("<img src='../img/details/right-bom"+i+".jpg'/>")
// }
//product detail images below
$.get("../data/details/comlist.json",function(data){
if(parseInt($.cookie("comid"))%2==0){
var cl=data.comList2
}else{
var cl=data.comList1
}
$("#listName").html(cl.comname)
$("#bx-tit>P").html(cl.comname)
$(".pval-ms").find("em").html(cl.price)
$(".pval-ms").find("del").html(cl.yPrice)
$(".data-attr2").eq(0).find("span").html(cl.color)
$(".data-attr2").eq(1).find("span").html(cl.size)
for(var i=1;i<=5;i++){
$(".jqzoomDiv>ul").find("li").eq(i-1).find("img").attr("src",cl.imgSrc+"detalis-zoomdiv"+i+".JPG")
}
for(var i=1;i<=4;i++){
$(".right-bom-dtu p").append("<img src='"+cl.imgSrc+"right-bom"+i+".jpg'/>")
}
//magnifier (zoom) effect
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong1.JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big1.JPG")
$(".jqzoomDiv ul li").each(function(){
$(this).mouseover(function(){
$(".zoomShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-zhong"+($(this).index()+1)+".JPG")
$(".bigShow").find("img").removeAttr("src").attr("src",cl.imgSrc+"detalis-zoomdiv-big"+($(this).index()+1)+".JPG")
})
})
$(".bx-data-lt-2").mouseover(function(){
$(".zoomShow-pud").css({"display":"block"})
$(".bigShow").css({"display":"block"})
})
$(".bx-data-lt-2").mouseout(function(){
$(".zoomShow-pud").css({"display":"none"})
$(".bigShow").css({"display":"none"})
})
$(".zoomShow").mousemove(function(e){
// var iLeft=e.pageX-$(window).scrollLeft()-30-$(".bx-data-lt-1").offset().left-($(".zoomShow-pud").width()/2)
// var iTop=e.clientY-$(window).scrollTop()+$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iLeft=e.clientX-$(window).scrollLeft()-$(".bx-data-lt-2").offset().left-($(".zoomShow-pud").width()/2)
var iTop=e.clientY+$(window).scrollTop()-$(".bx-data-lt-2").offset().top-($(".zoomShow-pud").height()/2)
var iWidht=$(".bx-data-lt-2").width()-$(".zoomShow-pud").width()
var iHeight=$(".bx-data-lt-2").height()-$(".zoomShow-pud").height()
if(iLeft<0) {
iLeft = 0;
} else if(iLeft>iWidht) {
iLeft = iWidht;
}
if(iTop<0) {
iTop = 0;
} else if(iTop>iHeight) {
iTop = iHeight;
}
$(".zoomShow-pud").css({"left":iLeft,"top":iTop})
$(".bigShow").find("img").css({"left":-iLeft/4*15,"top":-iTop/4*15})
//hard-coded values used here for simplicity
//console.log(iTop)
})
//button click events
downBtn(cl)
})
})
function downBtn(cl){
//the correct way to read data back out of the cookie
//console.log(JSON.parse($.cookie("carList")).ID1.iNum)
goodid=cl.id | num1=parseInt(JSON.parse($.cookie("carList1")).iNum)
}
if($.cookie("carList2")){
num2=parseInt(JSON.parse($.cookie("carList2")).iNum)
}
//判断是否存在cookie
// if($.cookie("carList")){
// for(var m in JSON.parse($.cookie("carList"))){
// num+=parseInt(JSON.parse($.cookie("carList"))[m].iNum)
// //这里有bug每次累加是2个加起来的和
// }
// }else{
// num=0
// }
var com1= $.cookie("carList")?JSON.parse($.cookie("carList")):{}
com1={
userName:cl.comname,
imgSrc:cl.imgSrc+"detalis-zoomdiv-zhong1.JPG",
id:cl.id,
color:cl.color,
size:cl.size,
price:cl.price,
//iNum:num
}
// var value=0
// if($.cookie("carList")){
// for(var i in JSON.parse($.cookie("carList"))){
// value +=parseInt(JSON.parse($.cookie("carList"))[i].iNum)
// }
// //value=parseInt(JSON.parse($.cookie("carList")).ID1.iNum)+parseInt(JSON.parse($.cookie("carList")).ID2.iNum)
// }
$(".addCart").mousedown(function(){
//console.log($("#buyNum").val())
if(parseInt($("#buyNum").val())){
//判断添加数量的值是否为0
if(cl.id=="1"){
num1+=parseInt($("#buyNum").val())
com1.iNum=num1
}else{
num2+=parseInt($("#buyNum").val())
com1.iNum=num2
}
//计算购物车处的值
$.cookie("carList"+cl.id,JSON.stringify(com1),{expires:7,path:"/"})
//console.log($.cookie("carList1"))
//console.log($.cookie("carList2"))
var value=0
value=num1+num2
// if($.cookie("carList")){
// value+=parseInt($("#buyNum").val())
// }else{
// value=0
// }
$("#carNum").html("("+value+")")
}
})
}
//上方导航栏nav中购物车应显示的值
//活动倒计时
$(function(){
var timer=$(".time-end span")
var time=24*3600*1000 //活动倒计时
setInterval(function(){
time-=1000
var hour=parseInt(time/3600000)%24
var min=parseInt(time/3600000*60)%60
var sec=parseInt(time/3600000*24*60)%60
timer.find("em").eq(0).html(hour)
timer.find("em").eq(1).html(min)
timer.find("em").eq(2).html(sec)
// console.log(time)
},500)
})
//添加商品按钮
$(function(){
var oAdd=$(".buyAdd")
var oRed=$(".buyReduce")
var oValue=$("#buyNum")
oAdd.css("cursor","pointer")
oRed.css("cursor","pointer")
var val=oValue.val()
oAdd.mousedown(function(){
| $(".addCart").css("cursor","pointer")
var num1=0;num2=0;
if($.cookie("carList1")){ | random_line_split |
reduc_spec.py | )
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if d.has_key('LO_freq'):
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
"""Zenith angle in degrees to airmass"""
return 1/cosd(x)
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
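    # Illustrative usage (hedged): after the object is constructed from a time
    # range or a list of .h5 files, a typical pass is simply
    #     self.reduc(zarange=[20, 50])
    # i.e. fit P(airmass) channel-by-channel over 20-50 deg zenith angle; the
    # limits are assumptions and should match the scan strategy.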
def | (self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError, 'Can\'t deal with non-uniform cal data yet.'
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
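    # Hedged example of combining the index helpers (the block number and zenith
    # range are assumptions, not values from the data):
    #     sc = self.getscanind(blk=0, zarange=[20, 50])  # stepping samples of scan block 0
    #     ca = self.getcalind(blk=0)                     # its leading/trailing cal stares
    #     spec_scan, spec_cal = self.spec[sc], self.spec[ca]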
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk!=None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare
ind=np.where(ce<=ss)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
        # Right now just mask out DC and clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x)
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in | P2T | identifier_name |
reduc_spec.py | )
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if d.has_key('LO_freq'):
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
"""Zenith angle in degrees to airmass"""
return 1/cosd(x)
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
def P2T(self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError, 'Can\'t deal with non-uniform cal data yet.'
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk!=None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare | if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
        # Right now just mask out DC and clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x)
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in enumerate | ind=np.where(ce<=ss)[0] | random_line_split |
reduc_spec.py | )
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if d.has_key('LO_freq'):
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
"""Zenith angle in degrees to airmass"""
return 1/cosd(x)
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
def P2T(self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError('Can\'t deal with non-uniform cal data yet.')
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
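# Worked example (illustrative numbers): suppose 30 samples form 3 equal scan
# blocks with no negative zenith angles (so no cal stares), and the largest
# za jump occurs at each block boundary (samples 9->10 and 19->20). Then
# turnover == 10 and the bookkeeping above yields
# ind = {'cs': [], 'ce': [], 'ss': [0, 10, 20], 'se': [9, 19, 29]},
# self.scan = [0]*10 + [1]*10 + [2]*10, and self.nscan == 3.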
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk is not None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare
ind=np.where(ce<=ss)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
# Right now just mask out DC and the clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
|
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in | p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x) | conditional_block |
reduc_spec.py | )
'''
if all([((type(arg) is tuple) or (arg is None)) for arg in args]):
ts, tf = args[0], args[1]
d=in_out.read_time_range(dt_0=ts,dt_f=tf, ext=kwargs.get('ext'))
elif any(('.h5' in arg for arg in args)):
d=in_out.read_to_arrays([arg for arg in args if '.h5' in arg])
else:
raise ValueError
# Modified Julian date
self.mjd=d['mjd'][:,0]
# Hours since beginning of read
self.t=(self.mjd-self.mjd[0])*24;
# Sample rate in MHz
if np.unique(d['samp_rate_mhz']).size > 1:
raise NameError('Sample rate changed during accumulation')
else:
self.samp_rate = d['samp_rate_mhz'][0]
# Accumulation length in s
if np.unique(d['acc_len_secs']).size > 1:
raise NameError('Accumulation length changed during accumulation')
else:
self.acc_len = d['acc_len_secs'][0]
# spectrum data
self.spec=d['spec'].astype(float)
# mask data
m=self.getmask()
self.applymask(m)
# Frequency axis
self.f = np.linspace(0,self.samp_rate/2,self.spec.shape[1])
# Add LO frequency
if 'LO_freq' in d:
self.lo=d['LO_freq']
else:
self.lo=9500
# Zenith angle in degrees.
self.za=d['angle_degs']-d['zenith_degs']
self.za=self.za[:,0]
# Airmass
self.am=self.za2am(self.za)
# az/el -> RA/Dec
az=120.0 # Hard coded to SE for now
self.ra,self.dec=azel2radec(az,90-self.za,self.mjd)
# Get stepping/cal indices
self._getscanind()
# Useful information
self.nf = self.f.size
##################
# Do the reduction
##################
#zarange=[20,50]
#self.reduc(zarange)
def splitbylo(self,lo):
"""Split out structure into a single LO, lo in GHz"""
ind=np.where(self.lo==lo)[0]
self.splitbyind(ind)
def splitbyscans(self,scans):
"""Split out structure, keeping scans in scans"""
ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]
self.splitbyind(ind)
def splitbyind(self,ind):
"""Split out by time index"""
fields=['mjd','dec','ra','lo','scan','t','spec','za','am']
for k,val in enumerate(fields):
x=getattr(self,val)
setattr(self,val,x[ind])
self._getscanind()
def za2am(self,x):
|
def reduc(self,zarange=[20,50]):
"""Main reduction script. Elrange is two element list or tuple over
which to perform airmass regression (inclusive)"""
# First, take out a secular gain drift for each constant elevation
# stare. Fit P(t) to each channel in a contiguous elevation stare,
# normalize fit to mean=1, and normalize each chan to this.
#deg=10
#self.removedrift(deg)
# Convert P-> T RJ
#self.P2T()
# Now fit a line to P(am) in each scan and store the results.
self.fitam(zarange)
def P2T(self):
"""Scale by P->TRJ factor"""
# Convert to RJ temperature
#fac=planck.I2Ta(self.f*1e6,1).value
fac = planck(self.f*1e6, 1)
fac=fac/fac[0]
self.spec=self.spec*np.tile(fac,(self.spec.shape[0],1))
def _getscanind(self):
"""Identify start/stop indices of cal and scanning"""
zamin = self.za.min()
first = np.where(self.za==zamin)[0]
self.scan = np.zeros(self.spec.shape[0])
if zamin < 0:
cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]
ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1
ce = ss
se = np.roll((cs - 1) % self.za.size, -1) + 1
for k, val in enumerate(cs):
self.scan[val:se[k] + 1] = k
else:
moves = np.diff(self.za)
max_ind = np.where(moves==moves.max())[0]
turnover = self.za.size
diffs = np.diff(max_ind)
if np.unique(diffs).size > 1:
raise ValueError('Can\'t deal with non-uniform cal data yet.')
if max_ind.size > 1:
turnover = diffs[0]
cs = ce = np.array([])
ss = np.arange(self.za.size)[::turnover]
se = np.roll((ss - 1) % self.za.size, -1)
for k, val in enumerate(ss):
self.scan[val:se[k] + 1] = k
self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}
self.nscan = np.unique(self.scan).size
def getind(self,start,end,blk):
"""Return indices corresponding to start and end indices, strings as
defined in self.ind"""
if blk is None:
# Return all blocks
blk = np.arange(self.ind[start].size)
ind=np.array([])
for k,val in enumerate(blk):
ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))
return ind.astype(int)
def getscanind(self,blk=None,zarange=[0,90]):
"""Return indices of periods of stepping. Scanblock goes from 0 to
Nscans-1, and will return the indices of the scan blocks requested.
Default is to return all scan blocks and all zenith angles."""
ind=self.getind('ss','se',arr(blk))
ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]
return ind
def getcalind(self,blk=None):
"""Return indices of periods of calibrator staring. If blk is defined,
return all indices of cal stares, including leading and trailing for
each block."""
if blk is not None:
blk=arr(blk)
cblk=np.array([]).astype(int) # Initialize cal stare indices
cs=self.ind['cs']
ce=self.ind['ce']
for k,val in enumerate(blk):
ss=self.ind['ss'][val] # Scan start
se=self.ind['se'][val] # Scan stop
# Find leading cal stare
ind=np.where(ce<=ss)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[-1])
# Find trailing cal stare
ind=np.where(cs>=se)[0]
if ind.size>0:
# If it exists, append it
cblk=np.append(cblk,ind[0])
else:
cblk=None
return np.unique(self.getind('cs','ce',blk=cblk))
def calccalmean(self,blk):
"""Calculate mean of lead/trail cal stare for each scan block"""
calind=self.getcalind(blk)
x=self.spec[calind,:]
return np.nanmean(x,axis=0)
def getmask(self):
""" Get NtxNf mask"""
mask=np.ones(self.spec.shape)
# Right now just mask out DC and the clock freq
mask[:,1024]=0;
mask[:,0]=0;
return mask
def applymask(self,mask):
"""Set spec values to 0 where mask is zero"""
self.spec[mask==0]=np.nan
def removedrift(self,deg=10):
"""Fit and remove a polynomial from P(t) for each frequency channel for
a block of contiguous, constant elevation stares"""
# First remove a secular zeropoint drift over the entire scanset. Fit just the
# scans but remove from cal stares as well.
x=self.t
scanind=self.getscanind()
for k in range(self.nf):
y=self.spec[:,k]
if not np.any(np.isnan(y)):
p=np.polyfit(x[scanind],y[scanind],deg=deg)
# Don't remove mean
p[-1]=0
self.spec[:,k]=self.spec[:,k]-np.poly1d(p)(x)
return
for k in range(self.nscan):
# Each scan
ind=self.getscanind(k)
for j,val in | """Zenith angle in degrees to airmass"""
return 1/cosd(x) | identifier_body |
_ToolBar.ts | : (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafteropen" helpKeyword="WinJS.UI.ToolBar.onafteropen">
/// Occurs immediately after the control is opened.
/// </field>
onafteropen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeclose" helpKeyword="WinJS.UI.ToolBar.onbeforeclose">
/// Occurs immediately before the control is closed. Is cancelable.
/// </field>
onbeforeclose: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafterclose" helpKeyword="WinJS.UI.ToolBar.onafterclose">
/// Occurs immediately after the control is closed.
/// </field>
onafterclose: (ev: CustomEvent) => void;
open(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.open">
/// <summary locid="WinJS.UI.ToolBar.open">
/// Opens the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.open();
}
close(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.close">
/// <summary locid="WinJS.UI.ToolBar.close">
/// Closes the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.close();
}
dispose() {
/// <signature helpKeyword="WinJS.UI.ToolBar.dispose">
/// <summary locid="WinJS.UI.ToolBar.dispose">
/// Disposes this ToolBar.
/// </summary>
/// </signature>
if (this._disposed) {
return;
}
this._disposed = true;
_LightDismissService.hidden(this._dismissable);
// Disposing the _commandingSurface will trigger dispose on its OpenCloseMachine and synchronously complete any animations that might have been running.
this._commandingSurface.dispose();
// If page navigation is happening, we don't want the ToolBar left behind in the body.
// Synchronously close the ToolBar to force it out of the body and back into its parent element.
this._synchronousClose();
_ElementUtilities._inputPaneListener.removeEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
_Dispose.disposeSubTree(this.element);
}
forceLayout() {
/// <signature helpKeyword="WinJS.UI.ToolBar.forceLayout">
/// <summary locid="WinJS.UI.ToolBar.forceLayout">
/// Forces the ToolBar to update its layout. Use this function when the window did not change size, but the container of the ToolBar changed size.
/// </summary>
/// </signature>
this._commandingSurface.forceLayout();
}
getCommandById(id: string): _Command.ICommand {
/// <signature helpKeyword="WinJS.UI.ToolBar.getCommandById">
/// <summary locid="WinJS.UI.ToolBar.getCommandById">
/// Retrieves the command with the specified ID from this ToolBar.
/// If more than one command is found, this method returns the first command found.
/// </summary>
/// <param name="id" type="String" locid="WinJS.UI.ToolBar.getCommandById_p:id">Id of the command to return.</param>
/// <returns type="object" locid="WinJS.UI.ToolBar.getCommandById_returnValue">
/// The command found, or null if no command is found.
/// </returns>
/// </signature>
return this._commandingSurface.getCommandById(id);
}
showOnlyCommands(commands: Array<string|_Command.ICommand>): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.showOnlyCommands">
/// <summary locid="WinJS.UI.ToolBar.showOnlyCommands">
/// Show the specified commands, hiding all of the others in the ToolBar.
/// </summary>
/// <param name="commands" type="Array" locid="WinJS.UI.ToolBar.showOnlyCommands_p:commands">
/// An array of the commands to show. The array elements may be Command objects, or the string identifiers (IDs) of commands.
/// </param>
/// </signature>
return this._commandingSurface.showOnlyCommands(commands);
}
private _writeProfilerMark(text: string) {
_WriteProfilerMark("WinJS.UI.ToolBar:" + this._id + ":" + text);
}
private _initializeDom(root: HTMLElement): void {
this._writeProfilerMark("_intializeDom,info");
// Attaching JS control to DOM element
root["winControl"] = this;
this._id = root.id || _ElementUtilities._uniqueID(root);
_ElementUtilities.addClass(root, _Constants.ClassNames.controlCssClass);
_ElementUtilities.addClass(root, _Constants.ClassNames.disposableCssClass);
// Make sure we have an ARIA role
var role = root.getAttribute("role");
if (!role) {
root.setAttribute("role", "menubar");
}
var label = root.getAttribute("aria-label");
if (!label) {
root.setAttribute("aria-label", strings.ariaLabel);
}
// Create element for commandingSurface and reparent any declarative Commands.
// The CommandingSurface constructor will parse child elements as AppBarCommands.
var commandingSurfaceEl = document.createElement("DIV");
_ElementUtilities._reparentChildren(root, commandingSurfaceEl);
root.appendChild(commandingSurfaceEl);
// While the ToolBar is open, it will place itself in the <body> so it can become a light dismissible
// overlay. It leaves the placeHolder element behind as stand in at the ToolBar's original DOM location
// to avoid reflowing surrounding app content and create the illusion that the ToolBar hasn't moved along
// the x or y planes.
var placeHolder = _Global.document.createElement("DIV");
_ElementUtilities.addClass(placeHolder, _Constants.ClassNames.placeHolderCssClass);
// If the ToolBar's original HTML parent node is disposed while the ToolBar is open and repositioned as
// a temporary child of the <body>, make sure that calling dispose on the placeHolder element will trigger
// dispose on the ToolBar as well.
_Dispose.markDisposable(placeHolder, this.dispose.bind(this));
this._dom = {
root: root,
commandingSurfaceEl: commandingSurfaceEl,
placeHolder: placeHolder,
};
}
private _handleShowingKeyboard(event: { detail: { originalEvent: _WinRT.Windows.UI.ViewManagement.InputPaneVisibilityEventArgs } }) {
// Because the ToolBar takes up layout space and is not an overlay, it doesn't have the same expectation
// to move itself to get out of the way of a showing IHM. Instead we just close the ToolBar to avoid
// scenarios where the ToolBar is occluded, but the click-eating-div is still present since it may seem
// strange to end users that an occluded ToolBar (out of sight, out of mind) is still eating their first
// click.
// Mitigation:
// Because (1) custom content in a ToolBar can only be included as a 'content' type command, because (2)
// the ToolBar only supports closedDisplayModes 'compact' and 'full', and because (3) 'content' type
// commands in the overflowarea use a separate contentflyout to display their contents:
// Interactable custom content contained within the ToolBar actionarea or overflowarea, will remain
// visible and interactable even when showing the IHM closes the ToolBar.
this.close();
}
private _synchronousOpen(): void {
this._isOpenedMode = true;
this._updateDomImpl();
}
private _synchronousClose(): void {
this._isOpenedMode = false;
this._updateDomImpl();
}
// State private to the _updateDomImpl family of method. No other methods should make use of it.
//
// Nothing has been rendered yet so these are all initialized to undefined. Because
// they are undefined, the first time _updateDomImpl is called, they will all be
// rendered.
private _updateDomImpl_renderedState = {
isOpenedMode: <boolean>undefined,
closedDisplayMode: <string>undefined,
prevInlineWidth: <string>undefined,
};
private _updateDomImpl(): void {
var rendered = this._updateDomImpl_renderedState;
if (rendered.isOpenedMode !== this._isOpenedMode) {
if (this._isOpenedMode) {
this._updateDomImpl_renderOpened();
} else {
this._updateDomImpl_renderClosed();
}
rendered.isOpenedMode = this._isOpenedMode;
}
if (rendered.closedDisplayMode !== this.closedDisplayMode) {
removeClass(this._dom.root, closedDisplayModeClassMap[rendered.closedDisplayMode]);
addClass(this._dom.root, closedDisplayModeClassMap[this.closedDisplayMode]);
rendered.closedDisplayMode = this.closedDisplayMode;
}
this._commandingSurface.updateDom();
}
private _getClosedHeight(): number {
if (this._cachedClosedHeight === null) {
var wasOpen = this._isOpenedMode;
if (this._isOpenedMode) | {
this._synchronousClose();
} | conditional_block |
|
_ToolBar.ts | same stacking context pitfalls
// as any other light dismissible. https://github.com/winjs/winjs/wiki/Dismissables-and-Stacking-Contexts
// - Reposition the ToolBar element to be exactly overlaid on top of the placeholder element.
// - Render the ToolBar as opened, via the _CommandingSurface API, increasing the overall height of the ToolBar.
// - Closing the ToolBar is basically the same steps but in reverse.
// - One limitation to this implementation is that developers may not position the ToolBar element themselves directly via the CSS "position" or "float" properties.
// - This is because the ToolBar expects its element to be in the flow of the document when closed, and the placeholder element would not receive these same styles
// when inserted to replace the ToolBar element.
// - An easy workaround for developers is to wrap the ToolBar into another DIV element that they may style and position however they'd like.
//
// - Responding to the IHM:
// - If the ToolBar is opened when the IHM is shown, it will close itself. This is to avoid scenarios where the IHM totally occludes the opened ToolBar. If the ToolBar
// did not close itself, then the next mouse or touch input within the App wouldn't appear to do anything since it would just go to closing the light dismissible
// ToolBar anyway.
var strings = {
get ariaLabel() { return _Resources._getWinJSString("ui/toolbarAriaLabel").value; },
get overflowButtonAriaLabel() { return _Resources._getWinJSString("ui/toolbarOverflowButtonAriaLabel").value; },
get mustContainCommands() { return "The toolbar can only contain WinJS.UI.Command or WinJS.UI.AppBarCommand controls"; },
get duplicateConstruction() { return "Invalid argument: Controls may only be instantiated one time for each DOM element"; }
};
var ClosedDisplayMode = {
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.compact" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.compact">
/// When the ToolBar is closed, the height of the ToolBar is reduced such that button commands are still visible, but their labels are hidden.
/// </field>
compact: "compact",
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode.full" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode.full">
/// When the ToolBar is closed, the height of the ToolBar is always sized to content.
/// </field>
full: "full",
};
var closedDisplayModeClassMap = {};
closedDisplayModeClassMap[ClosedDisplayMode.compact] = _Constants.ClassNames.compactClass;
closedDisplayModeClassMap[ClosedDisplayMode.full] = _Constants.ClassNames.fullClass;
// Versions of add/removeClass that are no ops when called with falsy class names.
function addClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.addClass(element, className);
}
function removeClass(element: HTMLElement, className: string): void {
className && _ElementUtilities.removeClass(element, className);
}
/// <field>
/// <summary locid="WinJS.UI.ToolBar">
/// Displays ICommands within the flow of the app. Use the ToolBar around other statically positioned app content.
/// </summary>
/// </field>
/// <icon src="ui_winjs.ui.toolbar.12x12.png" width="12" height="12" />
/// <icon src="ui_winjs.ui.toolbar.16x16.png" width="16" height="16" />
/// <htmlSnippet supportsContent="true"><![CDATA[<div data-win-control="WinJS.UI.ToolBar">
/// <button data-win-control="WinJS.UI.Command" data-win-options="{id:'',label:'example',icon:'back',type:'button',onclick:null,section:'primary'}"></button>
/// </div>]]></htmlSnippet>
/// <part name="toolbar" class="win-toolbar" locid="WinJS.UI.ToolBar_part:toolbar">The entire ToolBar control.</part>
/// <part name="toolbar-overflowbutton" class="win-toolbar-overflowbutton" locid="WinJS.UI.ToolBar_part:ToolBar-overflowbutton">The toolbar overflow button.</part>
/// <part name="toolbar-overflowarea" class="win-toolbar-overflowarea" locid="WinJS.UI.ToolBar_part:ToolBar-overflowarea">The container for toolbar commands that overflow.</part>
/// <resource type="javascript" src="//$(TARGET_DESTINATION)/js/WinJS.js" shared="true" />
/// <resource type="css" src="//$(TARGET_DESTINATION)/css/ui-dark.css" shared="true" />
export class ToolBar {
private _id: string;
private _disposed: boolean;
private _commandingSurface: _ICommandingSurface._CommandingSurface;
private _isOpenedMode: boolean;
private _handleShowingKeyboardBound: (ev: any) => void;
private _dismissable: _LightDismissService.LightDismissableElement;
private _cachedClosedHeight: number;
private _dom: {
root: HTMLElement;
commandingSurfaceEl: HTMLElement;
placeHolder: HTMLElement;
}
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode">
/// Display options for the actionarea when the ToolBar is closed.
/// </field>
static ClosedDisplayMode = ClosedDisplayMode;
static supportedForProcessing: boolean = true;
/// <field type="HTMLElement" domElement="true" hidden="true" locid="WinJS.UI.ToolBar.element" helpKeyword="WinJS.UI.ToolBar.element">
/// Gets the DOM element that hosts the ToolBar.
/// </field>
get element() |
/// <field type="WinJS.Binding.List" locid="WinJS.UI.ToolBar.data" helpKeyword="WinJS.UI.ToolBar.data">
/// Gets or sets the Binding List of WinJS.UI.Command for the ToolBar.
/// </field>
get data() {
return this._commandingSurface.data;
}
set data(value: BindingList.List<_Command.ICommand>) {
this._commandingSurface.data = value;
}
/// <field type="String" locid="WinJS.UI.ToolBar.closedDisplayMode" helpKeyword="WinJS.UI.ToolBar.closedDisplayMode">
/// Gets or sets the closedDisplayMode for the ToolBar. Values are "compact" and "full".
/// </field>
get closedDisplayMode() {
return this._commandingSurface.closedDisplayMode;
}
set closedDisplayMode(value: string) {
if (ClosedDisplayMode[value]) {
this._commandingSurface.closedDisplayMode = value;
this._cachedClosedHeight = null;
}
}
/// <field type="Boolean" hidden="true" locid="WinJS.UI.ToolBar.opened" helpKeyword="WinJS.UI.ToolBar.opened">
/// Gets or sets whether the ToolBar is currently opened.
/// </field>
get opened(): boolean {
return this._commandingSurface.opened;
}
set opened(value: boolean) {
this._commandingSurface.opened = value;
}
constructor(element?: HTMLElement, options: any = {}) {
/// <signature helpKeyword="WinJS.UI.ToolBar.ToolBar">
/// <summary locid="WinJS.UI.ToolBar.constructor">
/// Creates a new ToolBar control.
/// </summary>
/// <param name="element" type="HTMLElement" domElement="true" locid="WinJS.UI.ToolBar.constructor_p:element">
/// The DOM element that will host the control.
/// </param>
/// <param name="options" type="Object" locid="WinJS.UI.ToolBar.constructor_p:options">
/// The set of properties and values to apply to the new ToolBar control.
/// </param>
/// <returns type="WinJS.UI.ToolBar" locid="WinJS.UI.ToolBar.constructor_returnValue">
/// The new ToolBar control.
/// </returns>
/// </signature>
this._writeProfilerMark("constructor,StartTM");
// Check to make sure we weren't duplicated
if (element && element["winControl"]) {
throw new _ErrorFromName("WinJS.UI.ToolBar.DuplicateConstruction", strings.duplicateConstruction);
}
this._initializeDom(element || _Global.document.createElement("div"));
var stateMachine = new _OpenCloseMachine.OpenCloseMachine({
eventElement: this.element,
onOpen: () => {
var openAnimation = this._commandingSurface.createOpenAnimation(this._getClosedHeight());
this._synchronousOpen();
return openAnimation.execute();
},
onClose: () => {
var closeAnimation = this._commandingSurface.createCloseAnimation(this._getClosedHeight());
return closeAnimation.execute().then(() => {
this._synchronousClose();
});
},
onUpdateDom: () => {
this._updateDomImpl();
},
onUpdateDomWithIsOpened: (isOpened: boolean) => {
this._isOpenedMode = isOpened;
this._updateDomImpl();
}
});
// Events
this._handleShowingKeyboardBound = this._handleShowingKeyboard.bind(this);
_ElementUtilities._inputPaneListener.addEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
// Initialize private state.
this._disposed = false | {
return this._dom.root;
} | identifier_body |
_ToolBar.ts | Surface.opened;
}
set opened(value: boolean) {
this._commandingSurface.opened = value;
}
constructor(element?: HTMLElement, options: any = {}) {
/// <signature helpKeyword="WinJS.UI.ToolBar.ToolBar">
/// <summary locid="WinJS.UI.ToolBar.constructor">
/// Creates a new ToolBar control.
/// </summary>
/// <param name="element" type="HTMLElement" domElement="true" locid="WinJS.UI.ToolBar.constructor_p:element">
/// The DOM element that will host the control.
/// </param>
/// <param name="options" type="Object" locid="WinJS.UI.ToolBar.constructor_p:options">
/// The set of properties and values to apply to the new ToolBar control.
/// </param>
/// <returns type="WinJS.UI.ToolBar" locid="WinJS.UI.ToolBar.constructor_returnValue">
/// The new ToolBar control.
/// </returns>
/// </signature>
this._writeProfilerMark("constructor,StartTM");
// Check to make sure we weren't duplicated
if (element && element["winControl"]) {
throw new _ErrorFromName("WinJS.UI.ToolBar.DuplicateConstruction", strings.duplicateConstruction);
}
this._initializeDom(element || _Global.document.createElement("div"));
var stateMachine = new _OpenCloseMachine.OpenCloseMachine({
eventElement: this.element,
onOpen: () => {
var openAnimation = this._commandingSurface.createOpenAnimation(this._getClosedHeight());
this._synchronousOpen();
return openAnimation.execute();
},
onClose: () => {
var closeAnimation = this._commandingSurface.createCloseAnimation(this._getClosedHeight());
return closeAnimation.execute().then(() => {
this._synchronousClose();
});
},
onUpdateDom: () => {
this._updateDomImpl();
},
onUpdateDomWithIsOpened: (isOpened: boolean) => {
this._isOpenedMode = isOpened;
this._updateDomImpl();
}
});
// Events
this._handleShowingKeyboardBound = this._handleShowingKeyboard.bind(this);
_ElementUtilities._inputPaneListener.addEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
// Initialize private state.
this._disposed = false;
this._cachedClosedHeight = null;
this._commandingSurface = new _CommandingSurface._CommandingSurface(this._dom.commandingSurfaceEl, { openCloseMachine: stateMachine });
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-actionarea"), _Constants.ClassNames.actionAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowarea"), _Constants.ClassNames.overflowAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowbutton"), _Constants.ClassNames.overflowButtonCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-ellipsis"), _Constants.ClassNames.ellipsisCssClass);
this._isOpenedMode = _Constants.defaultOpened;
this._dismissable = new _LightDismissService.LightDismissableElement({
element: this._dom.root,
tabIndex: this._dom.root.hasAttribute("tabIndex") ? this._dom.root.tabIndex : -1,
onLightDismiss: () => {
this.close();
},
onTakeFocus: (useSetActive) => {
this._dismissable.restoreFocus() ||
this._commandingSurface.takeFocus(useSetActive);
}
});
// Initialize public properties.
this.closedDisplayMode = _Constants.defaultClosedDisplayMode;
this.opened = this._isOpenedMode;
_Control.setOptions(this, options);
// Exit the Init state.
_ElementUtilities._inDom(this.element).then(() => {
return this._commandingSurface.initialized;
}).then(() => {
stateMachine.exitInit();
this._writeProfilerMark("constructor,StopTM");
});
}
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeopen" helpKeyword="WinJS.UI.ToolBar.onbeforeopen">
/// Occurs immediately before the control is opened. Is cancelable.
/// </field>
onbeforeopen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafteropen" helpKeyword="WinJS.UI.ToolBar.onafteropen">
/// Occurs immediately after the control is opened.
/// </field>
onafteropen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeclose" helpKeyword="WinJS.UI.ToolBar.onbeforeclose">
/// Occurs immediately before the control is closed. Is cancelable.
/// </field>
onbeforeclose: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafterclose" helpKeyword="WinJS.UI.ToolBar.onafterclose">
/// Occurs immediately after the control is closed.
/// </field>
onafterclose: (ev: CustomEvent) => void;
open(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.open">
/// <summary locid="WinJS.UI.ToolBar.open">
/// Opens the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.open();
}
close(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.close">
/// <summary locid="WinJS.UI.ToolBar.close">
/// Closes the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.close();
}
dispose() {
/// <signature helpKeyword="WinJS.UI.ToolBar.dispose">
/// <summary locid="WinJS.UI.ToolBar.dispose">
/// Disposes this ToolBar.
/// </summary>
/// </signature>
if (this._disposed) {
return;
}
this._disposed = true;
_LightDismissService.hidden(this._dismissable);
// Disposing the _commandingSurface will trigger dispose on its OpenCloseMachine and synchronously complete any animations that might have been running.
this._commandingSurface.dispose();
// If page navigation is happening, we don't want the ToolBar left behind in the body.
// Synchronously close the ToolBar to force it out of the body and back into its parent element.
this._synchronousClose();
_ElementUtilities._inputPaneListener.removeEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
_Dispose.disposeSubTree(this.element);
}
forceLayout() {
/// <signature helpKeyword="WinJS.UI.ToolBar.forceLayout">
/// <summary locid="WinJS.UI.ToolBar.forceLayout">
/// Forces the ToolBar to update its layout. Use this function when the window did not change size, but the container of the ToolBar changed size.
/// </summary>
/// </signature>
this._commandingSurface.forceLayout();
}
getCommandById(id: string): _Command.ICommand {
/// <signature helpKeyword="WinJS.UI.ToolBar.getCommandById">
/// <summary locid="WinJS.UI.ToolBar.getCommandById">
/// Retrieves the command with the specified ID from this ToolBar.
/// If more than one command is found, this method returns the first command found.
/// </summary>
/// <param name="id" type="String" locid="WinJS.UI.ToolBar.getCommandById_p:id">Id of the command to return.</param>
/// <returns type="object" locid="WinJS.UI.ToolBar.getCommandById_returnValue">
/// The command found, or null if no command is found.
/// </returns>
/// </signature>
return this._commandingSurface.getCommandById(id);
}
showOnlyCommands(commands: Array<string|_Command.ICommand>): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.showOnlyCommands">
/// <summary locid="WinJS.UI.ToolBar.showOnlyCommands">
/// Show the specified commands, hiding all of the others in the ToolBar.
/// </summary>
/// <param name="commands" type="Array" locid="WinJS.UI.ToolBar.showOnlyCommands_p:commands">
/// An array of the commands to show. The array elements may be Command objects, or the string identifiers (IDs) of commands.
/// </param>
/// </signature>
return this._commandingSurface.showOnlyCommands(commands);
}
private _writeProfilerMark(text: string) {
_WriteProfilerMark("WinJS.UI.ToolBar:" + this._id + ":" + text);
}
private _initializeDom(root: HTMLElement): void {
this._writeProfilerMark("_intializeDom,info");
// Attaching JS control to DOM element
root["winControl"] = this;
this._id = root.id || _ElementUtilities._uniqueID(root);
_ElementUtilities.addClass(root, _Constants.ClassNames.controlCssClass);
_ElementUtilities.addClass(root, _Constants.ClassNames.disposableCssClass);
// Make sure we have an ARIA role
var role = root.getAttribute("role");
if (!role) { | root.setAttribute("role", "menubar");
}
var label = root.getAttribute("aria-label"); | random_line_split |
|
_ToolBar.ts | private _isOpenedMode: boolean;
private _handleShowingKeyboardBound: (ev: any) => void;
private _dismissable: _LightDismissService.LightDismissableElement;
private _cachedClosedHeight: number;
private _dom: {
root: HTMLElement;
commandingSurfaceEl: HTMLElement;
placeHolder: HTMLElement;
}
/// <field locid="WinJS.UI.ToolBar.ClosedDisplayMode" helpKeyword="WinJS.UI.ToolBar.ClosedDisplayMode">
/// Display options for the actionarea when the ToolBar is closed.
/// </field>
static ClosedDisplayMode = ClosedDisplayMode;
static supportedForProcessing: boolean = true;
/// <field type="HTMLElement" domElement="true" hidden="true" locid="WinJS.UI.ToolBar.element" helpKeyword="WinJS.UI.ToolBar.element">
/// Gets the DOM element that hosts the ToolBar.
/// </field>
get element() {
return this._dom.root;
}
/// <field type="WinJS.Binding.List" locid="WinJS.UI.ToolBar.data" helpKeyword="WinJS.UI.ToolBar.data">
/// Gets or sets the Binding List of WinJS.UI.Command for the ToolBar.
/// </field>
get data() {
return this._commandingSurface.data;
}
set data(value: BindingList.List<_Command.ICommand>) {
this._commandingSurface.data = value;
}
/// <field type="String" locid="WinJS.UI.ToolBar.closedDisplayMode" helpKeyword="WinJS.UI.ToolBar.closedDisplayMode">
/// Gets or sets the closedDisplayMode for the ToolBar. Values are "compact" and "full".
/// </field>
get closedDisplayMode() {
return this._commandingSurface.closedDisplayMode;
}
set closedDisplayMode(value: string) {
if (ClosedDisplayMode[value]) {
this._commandingSurface.closedDisplayMode = value;
this._cachedClosedHeight = null;
}
}
/// <field type="Boolean" hidden="true" locid="WinJS.UI.ToolBar.opened" helpKeyword="WinJS.UI.ToolBar.opened">
/// Gets or sets whether the ToolBar is currently opened.
/// </field>
get opened(): boolean {
return this._commandingSurface.opened;
}
set opened(value: boolean) {
this._commandingSurface.opened = value;
}
constructor(element?: HTMLElement, options: any = {}) {
/// <signature helpKeyword="WinJS.UI.ToolBar.ToolBar">
/// <summary locid="WinJS.UI.ToolBar.constructor">
/// Creates a new ToolBar control.
/// </summary>
/// <param name="element" type="HTMLElement" domElement="true" locid="WinJS.UI.ToolBar.constructor_p:element">
/// The DOM element that will host the control.
/// </param>
/// <param name="options" type="Object" locid="WinJS.UI.ToolBar.constructor_p:options">
/// The set of properties and values to apply to the new ToolBar control.
/// </param>
/// <returns type="WinJS.UI.ToolBar" locid="WinJS.UI.ToolBar.constructor_returnValue">
/// The new ToolBar control.
/// </returns>
/// </signature>
this._writeProfilerMark("constructor,StartTM");
// Check to make sure we weren't duplicated
if (element && element["winControl"]) {
throw new _ErrorFromName("WinJS.UI.ToolBar.DuplicateConstruction", strings.duplicateConstruction);
}
this._initializeDom(element || _Global.document.createElement("div"));
var stateMachine = new _OpenCloseMachine.OpenCloseMachine({
eventElement: this.element,
onOpen: () => {
var openAnimation = this._commandingSurface.createOpenAnimation(this._getClosedHeight());
this._synchronousOpen();
return openAnimation.execute();
},
onClose: () => {
var closeAnimation = this._commandingSurface.createCloseAnimation(this._getClosedHeight());
return closeAnimation.execute().then(() => {
this._synchronousClose();
});
},
onUpdateDom: () => {
this._updateDomImpl();
},
onUpdateDomWithIsOpened: (isOpened: boolean) => {
this._isOpenedMode = isOpened;
this._updateDomImpl();
}
});
// Events
this._handleShowingKeyboardBound = this._handleShowingKeyboard.bind(this);
_ElementUtilities._inputPaneListener.addEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
// Initialize private state.
this._disposed = false;
this._cachedClosedHeight = null;
this._commandingSurface = new _CommandingSurface._CommandingSurface(this._dom.commandingSurfaceEl, { openCloseMachine: stateMachine });
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-actionarea"), _Constants.ClassNames.actionAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowarea"), _Constants.ClassNames.overflowAreaCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-overflowbutton"), _Constants.ClassNames.overflowButtonCssClass);
addClass(<HTMLElement>this._dom.commandingSurfaceEl.querySelector(".win-commandingsurface-ellipsis"), _Constants.ClassNames.ellipsisCssClass);
this._isOpenedMode = _Constants.defaultOpened;
this._dismissable = new _LightDismissService.LightDismissableElement({
element: this._dom.root,
tabIndex: this._dom.root.hasAttribute("tabIndex") ? this._dom.root.tabIndex : -1,
onLightDismiss: () => {
this.close();
},
onTakeFocus: (useSetActive) => {
this._dismissable.restoreFocus() ||
this._commandingSurface.takeFocus(useSetActive);
}
});
// Initialize public properties.
this.closedDisplayMode = _Constants.defaultClosedDisplayMode;
this.opened = this._isOpenedMode;
_Control.setOptions(this, options);
// Exit the Init state.
_ElementUtilities._inDom(this.element).then(() => {
return this._commandingSurface.initialized;
}).then(() => {
stateMachine.exitInit();
this._writeProfilerMark("constructor,StopTM");
});
}
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeopen" helpKeyword="WinJS.UI.ToolBar.onbeforeopen">
/// Occurs immediately before the control is opened. Is cancelable.
/// </field>
onbeforeopen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafteropen" helpKeyword="WinJS.UI.ToolBar.onafteropen">
/// Occurs immediately after the control is opened.
/// </field>
onafteropen: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onbeforeclose" helpKeyword="WinJS.UI.ToolBar.onbeforeclose">
/// Occurs immediately before the control is closed. Is cancelable.
/// </field>
onbeforeclose: (ev: CustomEvent) => void;
/// <field type="Function" locid="WinJS.UI.ToolBar.onafterclose" helpKeyword="WinJS.UI.ToolBar.onafterclose">
/// Occurs immediately after the control is closed.
/// </field>
onafterclose: (ev: CustomEvent) => void;
open(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.open">
/// <summary locid="WinJS.UI.ToolBar.open">
/// Opens the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.open();
}
close(): void {
/// <signature helpKeyword="WinJS.UI.ToolBar.close">
/// <summary locid="WinJS.UI.ToolBar.close">
/// Closes the ToolBar
/// </summary>
/// </signature>
this._commandingSurface.close();
}
dispose() {
/// <signature helpKeyword="WinJS.UI.ToolBar.dispose">
/// <summary locid="WinJS.UI.ToolBar.dispose">
/// Disposes this ToolBar.
/// </summary>
/// </signature>
if (this._disposed) {
return;
}
this._disposed = true;
_LightDismissService.hidden(this._dismissable);
// Disposing the _commandingSurface will trigger dispose on its OpenCloseMachine and synchronously complete any animations that might have been running.
this._commandingSurface.dispose();
// If page navigation is happening, we don't want the ToolBar left behind in the body.
// Synchronously close the ToolBar to force it out of the body and back into its parent element.
this._synchronousClose();
_ElementUtilities._inputPaneListener.removeEventListener(this._dom.root, "showing", this._handleShowingKeyboardBound);
_Dispose.disposeSubTree(this.element);
}
forceLayout() {
/// <signature helpKeyword="WinJS.UI.ToolBar.forceLayout">
/// <summary locid="WinJS.UI.ToolBar.forceLayout">
/// Forces the ToolBar to update its layout. Use this function when the window did not change size, but the container of the ToolBar changed size.
/// </summary>
/// </signature>
this._commandingSurface.forceLayout();
}
| getCommandById | identifier_name |
|
update.py | pragma: no cover
toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
# from the current cruft state commit of the cookiecutter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
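# Illustrative call sketch (not part of the original module): driving this
# update() directly for a project generated from a cookiecutter template.
# The project path below is a made-up example.
#
# from pathlib import Path
# ok = update(project_dir=Path("my_project"), skip_apply_ask=False)
# if not ok:
#     print("update aborted, failed, or declined")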
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
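# Example configuration read by _get_skip_paths (illustrative file names): the
# skip list can come from the "skip" key in the cruft state file or from a
# [tool.cruft] table in pyproject.toml, e.g.
#
# [tool.cruft]
# skip = ["tests/", "docs/conf.py"]
#
# Both sources are merged into one set of Paths that the update leaves alone.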
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
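# Worked example of the path rewriting above (illustrative paths): if the two
# generated trees live under /tmp/cmp/old_output/proj and
# /tmp/cmp/new_output/proj, every occurrence of those absolute prefixes is
# stripped from the diff text, so a header that mentioned
# /tmp/cmp/old_output/proj/setup.py is left as /setup.py. The cleaned diff can
# then be applied inside the user's own project directory.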
def _view_diff(old_main_directory: Path, new_main_directory: Path):
run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)])
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def | (diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
# Git 3-way merge is our best bet
# at applying patches. But it only works
# with git repos. If the repo is not a git dir
# we fall back to git apply --reject which applies
# diffs cleanly where applicable otherwise creates
# *.rej files where there are conflicts
if _is_git_repo(expanded_dir_path):
_apply_three_way_patch | _apply_three_way_patch | identifier_name |
update.py | pragma: no cover
toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
# from the current cruft state commit of the cookiecutter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
def _view_diff(old_main_directory: Path, new_main_directory: Path):
|
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def _apply_three_way_patch(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
    # Git 3-way merge is our best bet at applying patches,
    # but it only works with git repos. If the directory is
    # not a git repo, we fall back to `git apply --reject`,
    # which applies hunks cleanly where possible and otherwise
    # creates *.rej files where there are conflicts.
if _is_git_repo(expanded_dir_path):
_apply_three_way_patch | run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)]) | identifier_body |
update.py | toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
        # from the current cruft state commit of the cookiecutter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
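# A chdir-free sketch of the same comparison (illustrative only, not what the
# function above does):
#
#   template_paths = {p.relative_to(template_dir) for p in template_dir.glob("**/*")}
#   deleted_paths = {p for p in template_paths if not (project_dir / p).exists()}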
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
def _view_diff(old_main_directory: Path, new_main_directory: Path):
run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)])
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def _apply_three_way_patch(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
    # Git 3-way merge is our best bet at applying patches,
    # but it only works with git repos. If the directory is
    # not a git repo, we fall back to `git apply --reject`,
    # which applies hunks cleanly where possible and otherwise
    # creates *.rej files where there are conflicts.
if _is_git_repo(expanded_dir_path):
| _apply_three_way_patch(diff, expanded_dir_path) | conditional_block |
|
update.py | json_dumps,
)
try:
import toml # type: ignore
except ImportError: # pragma: no cover
toml = None # type: ignore
CruftState = Dict[str, Any]
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
pyproject_file = project_dir / "pyproject.toml"
cruft_file = get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as compare_directory_str:
# Initial setup
compare_directory = Path(compare_directory_str)
template_dir = compare_directory / "template"
repo = get_cookiecutter_repo(cruft_state["template"], template_dir, checkout)
directory = cruft_state.get("directory", None)
if directory:
template_dir = template_dir / directory
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
        # from the current cruft state commit of the cookiecutter and the updated
# cookiecutter.
old_main_directory, new_main_directory, new_context = _generate_project_updates(
compare_directory, cruft_state, template_dir, cookiecutter_input, repo
)
# Get all paths that we are supposed to skip before generating the diff and applying updates
skip_paths = _get_skip_paths(cruft_state, pyproject_file)
# We also get the list of paths that were deleted from the project
# directory but were present in the template that the project is linked against
# This is to avoid introducing changes that won't apply cleanly to the current project.
deleted_paths = _get_deleted_files(old_main_directory, project_dir)
# We now remove both the skipped and deleted paths from the new and old project
_remove_paths(old_main_directory, new_main_directory, skip_paths | deleted_paths)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
old_main_directory, new_main_directory, project_dir, skip_update, skip_apply_ask
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_state["directory"] = directory
cruft_file.write_text(json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#####################################
# Generating clean outputs for diff #
#####################################
def _generate_output(
cruft_state: CruftState, template_dir: Path, cookiecutter_input: bool, new_output_dir: Path
):
new_context = generate_cookiecutter_context(
cruft_state["template"],
template_dir,
extra_context=cruft_state["context"]["cookiecutter"],
no_input=not cookiecutter_input,
)
project_dir = generate_files(
repo_dir=template_dir,
context=new_context,
overwrite_if_exists=True,
output_dir=new_output_dir,
)
return new_context, Path(project_dir)
def _generate_project_updates(
compare_directory: Path,
cruft_state: CruftState,
template_dir: Path,
cookiecutter_input: bool,
repo: Repo,
):
new_output_dir = compare_directory / "new_output"
new_context, new_main_directory = _generate_output(
cruft_state, template_dir, cookiecutter_input, new_output_dir
)
repo.head.reset(commit=cruft_state["commit"], working_tree=True)
old_output_dir = compare_directory / "old_output"
# We should not prompt for the cookiecutter input for the current
# project state
_, old_main_directory = _generate_output(cruft_state, template_dir, False, old_output_dir)
return old_main_directory, new_main_directory, new_context
##############################
# Removing unnecessary files #
##############################
def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]:
skip_cruft = cruft_state.get("skip", [])
if toml and pyproject_file.is_file():
pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {})
skip_cruft.extend(pyproject_cruft.get("skip", []))
return set(map(Path, skip_cruft))
def _get_deleted_files(template_dir: Path, project_dir: Path):
cwd = Path.cwd()
os.chdir(template_dir)
template_paths = set(Path(".").glob("**/*"))
os.chdir(cwd)
os.chdir(project_dir)
deleted_paths = set(filter(lambda path: not path.exists(), template_paths))
os.chdir(cwd)
return deleted_paths
def _remove_paths(old_main_directory: Path, new_main_directory: Path, paths_to_remove: Set[Path]):
for path_to_remove in paths_to_remove:
old_path = old_main_directory / path_to_remove
new_path = new_main_directory / path_to_remove
for path in (old_path, new_path):
if path.is_dir():
rmtree(path)
elif path.is_file():
path.unlink()
#################################################
# Calculating project diff and applying updates #
#################################################
def _get_diff(old_main_directory: Path, new_main_directory: Path):
diff = run(
[
"git",
"diff",
"--no-index",
"--no-ext-diff",
"--no-color",
str(old_main_directory),
str(new_main_directory),
],
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
diff = diff.replace(str(old_main_directory), "").replace(str(new_main_directory), "")
return diff
def _view_diff(old_main_directory: Path, new_main_directory: Path):
run(["git", "diff", "--no-index", str(old_main_directory), str(new_main_directory)])
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "--reject"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def _apply_three_way_patch(diff: str, expanded_dir_path: Path):
try:
run(
["git", "apply", "-3"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path)
def _apply_patch(diff: str, expanded_dir_path: Path):
    # Git 3-way merge is our best bet
# at applying patches. But it only works
# with git repos. If the repo is not a git dir
| generate_cookiecutter_context,
get_cookiecutter_repo,
get_cruft_file,
is_project_updated, | random_line_split |
|
main.rs | if reader.read_u32() != 0 {
println!("Failed to connect to igbp");
return Ok(());
}
qbo
};
println!("Allocate framebuffers");
let mut mem : Vec<BufferMemory> = Vec::with_capacity(3);
unsafe { mem.set_len(3); }
// Disables caching when talking to the gpu.
unsafe { svc::set_memory_attribute(mem.as_mut_ptr() as _, mem.len() * std::mem::size_of::<BufferMemory>(), 0x8, 0x8).expect("Failed to set memory attribute"); }
let gpu_buffer = {
let mut create = NvMapIocCreateArgs {
size: (mem.len() * std::mem::size_of::<BufferMemory>()) as u32,
handle: 0
};
println!("NVMAP_IOC_CREATE {:?} ({:?})", create, unsafe { std::mem::transmute::<&NvMapIocCreateArgs, &[u8; std::mem::size_of::<NvMapIocCreateArgs>()]>(&create) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_CREATE,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&create as *const NvMapIocCreateArgs as *const u8, std::mem::size_of::<NvMapIocCreateArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut create as *mut NvMapIocCreateArgs as *mut u8, std::mem::size_of::<NvMapIocCreateArgs>()) })?;
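        // Note on the aliasing TODO above: a cleaner shape (not implemented
        // here) would be to copy the input struct into a scratch byte buffer
        // for the read-only argument, or to derive a plain-old-data byte view
        // (e.g. via a crate like `bytemuck`), so the ioctl never holds a
        // shared and a mutable view of the same `create` value at once.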
if ret != 0 |
GpuBuffer {
nvmap_handle: create.handle,
size: mem.len() * std::mem::size_of::<BufferMemory>(),
alignment: 0x1000,
kind: 0
}
};
let buffers = {
let mut alloc = NvMapIocAllocArgs {
handle: gpu_buffer.nvmap_handle,
heapmask: 0,
flags: 0,
align: gpu_buffer.alignment,
kind: gpu_buffer.kind,
pad: [0; 7],
addr: mem.as_mut_ptr() as u64
};
println!("NVMAP_IOC_ALLOC {:?} ({:?})", alloc, unsafe { std::mem::transmute::<&NvMapIocAllocArgs, &[u8; std::mem::size_of::<NvMapIocAllocArgs>()]>(&alloc) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_ALLOC,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&alloc as *const NvMapIocAllocArgs as *const u8, std::mem::size_of::<NvMapIocAllocArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut alloc as *mut NvMapIocAllocArgs as *mut u8, std::mem::size_of::<NvMapIocAllocArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
let mut buffers = Vec::with_capacity(3);
for i in 0..3 {
buffers.push(GraphicBuffer {
width: queue_buffer_output.width,
height: queue_buffer_output.height,
stride: queue_buffer_output.width,
format: 1, // RGBA_8888
usage: 0xb00, // TODO: Wat?
gpu_buffer: &gpu_buffer,
index: i,
offset_gpu_buffer: 0x3c0000 * i
});
}
buffers
};
println!("Tell IGBP about the buffers");
for (i, buf) in buffers.iter().enumerate() {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(i as u32); // slot
parcel.write_u32(1); // Unknown
parcel.write_u32(0x16c); // Flattened GraphicsBuffer length
parcel.write_u32(0); // Unknown
buf.write_to_parcel(&mut parcel);
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, SET_PREALLOCATED_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
}
println!("Set scaling mode");
disp_svc.set_layer_scaling_mode(2, layer_id)?;
println!("Add layer to stack");
for stack in [0x0, 0x2, 0x4, 0x5, 0xA].iter() {
manager_disp_svc.add_to_layer_stack(*stack, layer_id)?;
}
println!("Set Z layer");
system_disp_svc.set_layer_z(layer_id, 2)?;
println!("Loading image from FERRIS");
let image = BMPDecoder::new(Cursor::new(&FERRIS[..]));
println!("Getting frame");
let frame = image.into_frames()?.next().unwrap().into_buffer();
//println!("Resizing FERRIS");
//let frame = image::imageops::resize(&image.into_frames()?.next().unwrap().into_buffer(), 1280, 760, image::FilterType::Lanczos3);
let vevent = unsafe { Event::from_kobject(disp_svc.get_display_vsync_event(display_id)?) };
for _ in 0..60 {
println!("Dequeue buffer");
let slot = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(1); // Pixel format
parcel.write_u32(1280); // width
parcel.write_u32(720); // height
parcel.write_u32(0); // get_frame_timestamp
parcel.write_u32(0xb00); // usage
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, DEQUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
let mut parcel_out = parcel_out.into_parcel_reader();
let slot = parcel_out.read_u32();
// Read fence
parcel_out.0.seek(SeekFrom::Current(44));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
slot
};
// Request buffer if it hasn't been requested already.
println!("Request buffer {}", slot);
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, REQUEST_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
let non_null = parcel_out.read_u32() != 0;
if non_null {
let len = parcel_out.read_u32();
if len != 0x16c {
println!("Invalid length: {}", len);
return Ok(())
}
let unk = parcel_out.read_u32();
// TODO: Get graphicbuffer.
parcel_out.0.seek(SeekFrom::Current(0x16c));
}
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
// Blit
println!("Blit");
{
fn pdep(mask: u32, mut value: u32) -> u32 {
let mut out = 0;
for shift in 0..32 {
let bit = 1 << shift;
if mask & bit != 0 {
if value & 1 != 0 {
out |= bit
}
value >>= 1;
}
}
out
}
fn swizzle_x(v: u32) -> u32 { pdep(!0x7B4, v) }
fn swizzle_y(v: u32) -> u32 { pdep(0x7B4, v) }
let x0 = 0;
let y0 = | {
return Err(MyError::IoctlError(ret));
} | conditional_block |
main.rs | .write_u32(1); // Pixel format
parcel.write_u32(1280); // width
parcel.write_u32(720); // height
parcel.write_u32(0); // get_frame_timestamp
parcel.write_u32(0xb00); // usage
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, DEQUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
let mut parcel_out = parcel_out.into_parcel_reader();
let slot = parcel_out.read_u32();
// Read fence
parcel_out.0.seek(SeekFrom::Current(44));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
slot
};
// Request buffer if it hasn't been requested already.
println!("Request buffer {}", slot);
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, REQUEST_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
let non_null = parcel_out.read_u32() != 0;
if non_null {
let len = parcel_out.read_u32();
if len != 0x16c {
println!("Invalid length: {}", len);
return Ok(())
}
let unk = parcel_out.read_u32();
// TODO: Get graphicbuffer.
parcel_out.0.seek(SeekFrom::Current(0x16c));
}
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
// Blit
println!("Blit");
{
fn pdep(mask: u32, mut value: u32) -> u32 {
let mut out = 0;
for shift in 0..32 {
let bit = 1 << shift;
if mask & bit != 0 {
if value & 1 != 0 {
out |= bit
}
value >>= 1;
}
}
out
}
fn swizzle_x(v: u32) -> u32 { pdep(!0x7B4, v) }
fn swizzle_y(v: u32) -> u32 { pdep(0x7B4, v) }
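            // pdep scatters the low bits of `value` into the set bit positions
            // of `mask` (a software PDEP). For example, pdep(0b0110, 0b11)
            // deposits the two low bits of `value` into bits 1 and 2 of the
            // result, giving 0b0110. Here the mask 0x7B4 holds the
            // y-interleaved bit positions of the GPU's block-linear layout, so
            // swizzle_x/swizzle_y split a linear coordinate into the x and y
            // parts of the tiled offset.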
let x0 = 0;
let y0 = 0;
let mut offs_x0 = swizzle_x(x0);
let mut offs_y = swizzle_y(y0);
let x_mask = swizzle_x(!0);
let y_mask = swizzle_y(!0);
let incr_y = swizzle_x(128 * 10);
let tile_height = 128;
offs_x0 += incr_y * (y0 / tile_height);
// TODO: Add clipping.
for y in 0..frame.height() {
let mut offs_x = offs_x0;
for x in 0..frame.width() {
let pixel = frame.get_pixel(x, y);
mem[slot as usize][offs_y as usize + offs_x as usize] = LE::read_u32(pixel.channels());
offs_x = offs_x.wrapping_sub(x_mask) & x_mask;
}
offs_y = offs_y.wrapping_sub(y_mask) & y_mask;
if offs_y == 0 {
offs_x0 += incr_y; // wrap into next tile row
}
}
}
// Enqueue buffer
println!("Enqueue buffer");
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
parcel.write_u32(0x54); parcel.write_u32(0); // unknown, but always those values
parcel.write_u32(0x588bbba9); parcel.write_u32(0); // Timestamp, u64
parcel.write_u32(1); // unknown, but always those values
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // sometimes zero
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(0); // Also seen 2
parcel.write_u32(0);
parcel.write_u32(0);
parcel.write_u32(1); // fence?
parcel.write_u32(1);
parcel.write_u32(0xa3);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
parcel.write_u32(-1i32 as u32);
parcel.write_u32(0);
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, QUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
println!("{:?}", QueueBufferOutput::from_parcel(&mut parcel_out));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
vevent.wait()?;
vevent.reset()?;
}
Ok(())
}
//static FERRIS : &'static [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
//static FERRIS: &'static [u8; 33061] = include_bytes!("../img/ferris.png");
static FERRIS: &'static [u8; 153718] = include_bytes!("../img/ferris.bmp");
// Graphic buffer stuff
//struct IGraphicBufferProducer(Arc<IHOSBinderDriver>, u32);
//
//impl IGraphicBufferProducer {
// pub fn dequeue_buffer(&self) {
//
// }
//}
//struct Display(Arc<IManagerDisplayService>, u64);
//
//impl Drop for Display {
// fn drop(&mut self) {
// self.0.close_display(self.1);
// }
//}
// TODO: Layer trait?
//struct ManagedLayer(Arc<IManagerDisplayService>, u64);
//
//impl Drop for ManagedLayer {
// fn drop(&mut self) {
// self.0.destroy_managed_layer(self.1);
// }
//}
/// Binder object in a parcel
#[repr(C)]
#[derive(Debug)]
struct FlatBinderObject {
ty: u32,
flags: u32,
inner: usize, // Can either be a void *binder or a u32 handle
cookie: usize
}
impl FlatBinderObject {
fn from_parcel(parcel: &mut ParcelReader) -> FlatBinderObject {
FlatBinderObject {
ty: parcel.read_u32(),
flags: parcel.read_u32(),
inner: parcel.read_u64() as usize,
cookie: parcel.read_u64() as usize
}
}
}
// Returned by igbp_connect
#[repr(C)]
#[derive(Debug)]
struct QueueBufferOutput {
width: u32,
height: u32,
transform_hint: u32,
num_pending_buffers: u32
}
impl QueueBufferOutput {
fn from_parcel(parcel: &mut ParcelReader) -> QueueBufferOutput {
let width = parcel.read_u32();
let height = parcel.read_u32();
let transform_hint = parcel.read_u32();
let num_pending_buffers = parcel.read_u32();
QueueBufferOutput {
width, height, transform_hint, num_pending_buffers
}
}
}
#[repr(C)]
struct GraphicBuffer<'a> {
width: u32,
height: u32,
stride: u32,
format: u32,
usage: u32,
gpu_buffer: &'a GpuBuffer,
index: u32,
offset_gpu_buffer: u32,
}
impl<'a> GraphicBuffer<'a> {
fn write_to_parcel(&self, parcel: &mut OwnedParcel) {
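        // Left unimplemented in this snippet: the flattened GraphicBuffer
        // (the 0x16c-byte blob referenced where SET_PREALLOCATED_BUFFER is
        // built) would be serialized into `parcel` here.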
}
}
#[repr(C)]
struct GpuBuffer {
nvmap_handle: u32,
size: usize,
alignment: u32,
kind: u8
}
// nvmap stuff
#[repr(C, align(4096))]
struct BufferMemory([u32; 0x3c0000/4]);
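// 0x3c0000 bytes = 1280 * 768 * 4: one 1280x720 RGBA framebuffer with the
// height rounded up to a multiple of the 128-line block-linear tile height
// used in the blit loop above (hence 768 rather than 720).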
impl std::ops::Deref for BufferMemory {
type Target = [u32];
fn | deref | identifier_name |
|
main.rs | , which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&create as *const NvMapIocCreateArgs as *const u8, std::mem::size_of::<NvMapIocCreateArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut create as *mut NvMapIocCreateArgs as *mut u8, std::mem::size_of::<NvMapIocCreateArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
GpuBuffer {
nvmap_handle: create.handle,
size: mem.len() * std::mem::size_of::<BufferMemory>(),
alignment: 0x1000,
kind: 0
}
};
let buffers = {
let mut alloc = NvMapIocAllocArgs {
handle: gpu_buffer.nvmap_handle,
heapmask: 0,
flags: 0,
align: gpu_buffer.alignment,
kind: gpu_buffer.kind,
pad: [0; 7],
addr: mem.as_mut_ptr() as u64
};
println!("NVMAP_IOC_ALLOC {:?} ({:?})", alloc, unsafe { std::mem::transmute::<&NvMapIocAllocArgs, &[u8; std::mem::size_of::<NvMapIocAllocArgs>()]>(&alloc) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_ALLOC,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&alloc as *const NvMapIocAllocArgs as *const u8, std::mem::size_of::<NvMapIocAllocArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut alloc as *mut NvMapIocAllocArgs as *mut u8, std::mem::size_of::<NvMapIocAllocArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
let mut buffers = Vec::with_capacity(3);
for i in 0..3 {
buffers.push(GraphicBuffer {
width: queue_buffer_output.width,
height: queue_buffer_output.height,
stride: queue_buffer_output.width,
format: 1, // RGBA_8888
usage: 0xb00, // TODO: Wat?
gpu_buffer: &gpu_buffer,
index: i,
offset_gpu_buffer: 0x3c0000 * i
});
}
buffers
};
println!("Tell IGBP about the buffers");
for (i, buf) in buffers.iter().enumerate() {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(i as u32); // slot
parcel.write_u32(1); // Unknown
parcel.write_u32(0x16c); // Flattened GraphicsBuffer length
parcel.write_u32(0); // Unknown
buf.write_to_parcel(&mut parcel);
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, SET_PREALLOCATED_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
}
println!("Set scaling mode");
disp_svc.set_layer_scaling_mode(2, layer_id)?;
println!("Add layer to stack");
for stack in [0x0, 0x2, 0x4, 0x5, 0xA].iter() {
manager_disp_svc.add_to_layer_stack(*stack, layer_id)?;
}
println!("Set Z layer");
system_disp_svc.set_layer_z(layer_id, 2)?;
println!("Loading image from FERRIS");
let image = BMPDecoder::new(Cursor::new(&FERRIS[..]));
println!("Getting frame");
let frame = image.into_frames()?.next().unwrap().into_buffer();
//println!("Resizing FERRIS");
//let frame = image::imageops::resize(&image.into_frames()?.next().unwrap().into_buffer(), 1280, 760, image::FilterType::Lanczos3);
let vevent = unsafe { Event::from_kobject(disp_svc.get_display_vsync_event(display_id)?) };
for _ in 0..60 {
println!("Dequeue buffer");
let slot = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(1); // Pixel format
parcel.write_u32(1280); // width
parcel.write_u32(720); // height
parcel.write_u32(0); // get_frame_timestamp
parcel.write_u32(0xb00); // usage
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, DEQUEUE_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{:?}", parcel_out);
let mut parcel_out = parcel_out.into_parcel_reader();
let slot = parcel_out.read_u32();
// Read fence
parcel_out.0.seek(SeekFrom::Current(44));
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
slot
};
// Request buffer if it hasn't been requested already.
println!("Request buffer {}", slot);
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot
let mut parcel_out = RawParcel::default();
let res = relay_svc.transact_parcel(binder_id as i32, REQUEST_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut parcel_out = parcel_out.into_parcel_reader();
let non_null = parcel_out.read_u32() != 0;
if non_null {
let len = parcel_out.read_u32();
if len != 0x16c {
println!("Invalid length: {}", len);
return Ok(())
}
let unk = parcel_out.read_u32();
// TODO: Get graphicbuffer.
parcel_out.0.seek(SeekFrom::Current(0x16c));
}
let status = parcel_out.read_u32();
if status != 0 {
println!("WTF: {}", status);
return Err(MyError::ParcelError(status));
}
}
// Blit
println!("Blit");
{
fn pdep(mask: u32, mut value: u32) -> u32 {
let mut out = 0;
for shift in 0..32 {
let bit = 1 << shift;
if mask & bit != 0 {
if value & 1 != 0 {
out |= bit
}
value >>= 1;
}
}
out
}
fn swizzle_x(v: u32) -> u32 { pdep(!0x7B4, v) }
fn swizzle_y(v: u32) -> u32 { pdep(0x7B4, v) }
let x0 = 0;
let y0 = 0;
let mut offs_x0 = swizzle_x(x0);
let mut offs_y = swizzle_y(y0);
let x_mask = swizzle_x(!0);
let y_mask = swizzle_y(!0);
let incr_y = swizzle_x(128 * 10);
let tile_height = 128;
offs_x0 += incr_y * (y0 / tile_height);
// TODO: Add clipping.
for y in 0..frame.height() {
let mut offs_x = offs_x0;
for x in 0..frame.width() {
let pixel = frame.get_pixel(x, y);
mem[slot as usize][offs_y as usize + offs_x as usize] = LE::read_u32(pixel.channels());
offs_x = offs_x.wrapping_sub(x_mask) & x_mask;
}
offs_y = offs_y.wrapping_sub(y_mask) & y_mask;
if offs_y == 0 {
offs_x0 += incr_y; // wrap into next tile row
}
}
}
// Enqueue buffer
println!("Enqueue buffer");
{
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(slot); // Slot | random_line_split |
||
main.rs |
}
impl From<megaton_hammer::error::Error> for MyError {
fn from(err: megaton_hammer::error::Error) -> MyError {
MyError::MegatonError(err)
}
}
fn main() -> std::result::Result<(), MyError> {
// Let's get ferris to show up on my switch.
println!("Initialize NV");
let nvdrv = nns::nvdrv::INvDrvServices::new_nvdrv_a(|cb| {
println!("Create transfer memory");
let transfer_mem = TransferMemory::new(0x30000)?;
// TODO: Find a better way.
let temporary_process = unsafe { KObject::new(megaton_hammer::kernel::svc::CURRENT_PROCESS) };
let ret = cb(0x30000, &temporary_process, transfer_mem.as_ref());
unsafe { std::mem::forget(temporary_process); }
ret
})?;
println!("Open /dev/nvhost-as-gpu");
let (nvasgpu, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvhost-as-gpu"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Open /dev/nvmap");
let (nvmap, err) = nvdrv.open(u8_slice_to_i8_slice(&b"/dev/nvmap"[..]))?;
if err != 0 {
panic!("Failed to open");
}
println!("Initialize vi");
let vi_m = nn::visrv::sf::IManagerRootService::new()?;
println!("get_display_service");
let disp_svc = vi_m.get_display_service(1)?;
println!("get_relay_service");
let relay_svc = disp_svc.get_relay_service()?;
println!("get_system_display_service");
let system_disp_svc = disp_svc.get_system_display_service()?;
println!("get_manager_display_service");
let manager_disp_svc = disp_svc.get_manager_display_service()?;
println!("Open display");
let display_id = {
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
disp_svc.open_display(display)?
};
println!("Open a layer");
let layer_id = manager_disp_svc.create_managed_layer(1, display_id, 0)?;
let binder_id = {
let mut parcel = RawParcel::default();
let mut display = [0u8; 64];
display[..b"Default".len()].copy_from_slice(b"Default");
let _window_size = disp_svc.open_layer(display, layer_id, 0, parcel.as_bytes_mut())?;
let mut reader = parcel.into_parcel_reader();
let fbo = FlatBinderObject::from_parcel(&mut reader);
let binder = fbo.inner as i32;
relay_svc.adjust_refcount(binder, 1, 0)?;
relay_svc.adjust_refcount(binder, 1, 1)?;
binder
};
// Connect to the IGBP. Take a look at the following link for reference.
// https://android.googlesource.com/platform/frameworks/native/+/e2786ea5aec3a12d948feb85ffbb535fc89c0fe6/libs/gui/IGraphicBufferProducer.cpp#297
println!("Connect to the IGBP");
let queue_buffer_output = {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(0); // IProducerListener is null because we don't support it in MegatonHammer (nor in libt) yet.
parcel.write_u32(2); // API
parcel.write_u32(0); // ProducerControlledByApp.
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, CONNECT, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
let mut reader = parcel_out.into_parcel_reader();
let qbo = QueueBufferOutput::from_parcel(&mut reader);
if reader.read_u32() != 0 {
println!("Failed to connect to igbp");
return Ok(());
}
qbo
};
println!("Allocate framebuffers");
let mut mem : Vec<BufferMemory> = Vec::with_capacity(3);
unsafe { mem.set_len(3); }
// Disables caching when talking to the gpu.
unsafe { svc::set_memory_attribute(mem.as_mut_ptr() as _, mem.len() * std::mem::size_of::<BufferMemory>(), 0x8, 0x8).expect("Failed to set memory attribute"); }
let gpu_buffer = {
let mut create = NvMapIocCreateArgs {
size: (mem.len() * std::mem::size_of::<BufferMemory>()) as u32,
handle: 0
};
println!("NVMAP_IOC_CREATE {:?} ({:?})", create, unsafe { std::mem::transmute::<&NvMapIocCreateArgs, &[u8; std::mem::size_of::<NvMapIocCreateArgs>()]>(&create) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_CREATE,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&create as *const NvMapIocCreateArgs as *const u8, std::mem::size_of::<NvMapIocCreateArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut create as *mut NvMapIocCreateArgs as *mut u8, std::mem::size_of::<NvMapIocCreateArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
GpuBuffer {
nvmap_handle: create.handle,
size: mem.len() * std::mem::size_of::<BufferMemory>(),
alignment: 0x1000,
kind: 0
}
};
let buffers = {
let mut alloc = NvMapIocAllocArgs {
handle: gpu_buffer.nvmap_handle,
heapmask: 0,
flags: 0,
align: gpu_buffer.alignment,
kind: gpu_buffer.kind,
pad: [0; 7],
addr: mem.as_mut_ptr() as u64
};
println!("NVMAP_IOC_ALLOC {:?} ({:?})", alloc, unsafe { std::mem::transmute::<&NvMapIocAllocArgs, &[u8; std::mem::size_of::<NvMapIocAllocArgs>()]>(&alloc) });
let ret = nvdrv.ioctl(nvmap, NVMAP_IOC_ALLOC,
// TODO: This is unsafe. And illegal. Rust assumes aliasing
// doesn't happen with references, which is exactly what we're
// doing. In theory, because we never *read* the content of
// those, I believe this is, erm, "mostly OK" ? But I should
// find a better way to deal with it.
unsafe { std::slice::from_raw_parts(&alloc as *const NvMapIocAllocArgs as *const u8, std::mem::size_of::<NvMapIocAllocArgs>()) },
unsafe { std::slice::from_raw_parts_mut(&mut alloc as *mut NvMapIocAllocArgs as *mut u8, std::mem::size_of::<NvMapIocAllocArgs>()) })?;
if ret != 0 {
return Err(MyError::IoctlError(ret));
}
let mut buffers = Vec::with_capacity(3);
for i in 0..3 {
buffers.push(GraphicBuffer {
width: queue_buffer_output.width,
height: queue_buffer_output.height,
stride: queue_buffer_output.width,
format: 1, // RGBA_8888
usage: 0xb00, // TODO: Wat?
gpu_buffer: &gpu_buffer,
index: i,
offset_gpu_buffer: 0x3c0000 * i
});
}
buffers
};
println!("Tell IGBP about the buffers");
for (i, buf) in buffers.iter().enumerate() {
let mut parcel = OwnedParcel::new();
parcel.write_interface_token("android.gui.IGraphicBufferProducer");
parcel.write_u32(i as u32); // slot
parcel.write_u32(1); // Unknown
parcel.write_u32(0x16c); // Flattened GraphicsBuffer length
parcel.write_u32(0); // Unknown
buf.write_to_parcel(&mut parcel);
let mut parcel_out = RawParcel::default();
relay_svc.transact_parcel(binder_id as i32, SET_PREALLOCATED_BUFFER, 0, parcel.build().as_bytes(), parcel_out.as_bytes_mut())?;
println!("{ | {
MyError::ImageError(err)
} | identifier_body |
|
multi.rs | storage worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser, | _storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
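// A minimal wiring sketch (hypothetical; the real queue construction and
// thread spawning live outside this file):
//
//     let builder = MultiWorkerBuilder::new(&config, parser)?;
//     let waker = builder.waker();
//     let mut worker = builder.build(signal_queue, session_queue, storage_queue);
//     std::thread::spawn(move || worker.run());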
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
                    session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
);
| poll: Poll,
timeout: Duration, | random_line_split |
multi.rs | worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() |
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
                    session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for sesion: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
| {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
} | conditional_block |
multi.rs | worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> |
}
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for session: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn handle_new_sessions(&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
| {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
} | identifier_body |
multi.rs | storage worker. Responses from the storage worker are
//! then serialized onto the session buffer.
use super::*;
use crate::poll::Poll;
use crate::QUEUE_RETRIES;
use common::signal::Signal;
use config::WorkerConfig;
use core::marker::PhantomData;
use core::time::Duration;
use entrystore::EntryStore;
use mio::event::Event;
use mio::{Events, Token, Waker};
use protocol_common::{Compose, Execute, Parse, ParseError};
use queues::TrackedItem;
use session::Session;
use std::io::{BufRead, Write};
use std::sync::Arc;
const WAKER_TOKEN: Token = Token(usize::MAX);
const STORAGE_THREAD_ID: usize = 0;
/// A builder for the request/response worker which communicates to the storage
/// thread over a queue.
pub struct MultiWorkerBuilder<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
_storage: PhantomData<Storage>,
_request: PhantomData<Request>,
_response: PhantomData<Response>,
}
impl<Storage, Parser, Request, Response> MultiWorkerBuilder<Storage, Parser, Request, Response> {
/// Create a new builder from the provided config and parser.
pub fn new<T: WorkerConfig>(config: &T, parser: Parser) -> Result<Self, std::io::Error> {
let poll = Poll::new().map_err(|e| {
error!("{}", e);
std::io::Error::new(std::io::ErrorKind::Other, "Failed to create epoll instance")
})?;
Ok(Self {
poll,
nevent: config.worker().nevent(),
timeout: Duration::from_millis(config.worker().timeout() as u64),
_request: PhantomData,
_response: PhantomData,
_storage: PhantomData,
parser,
})
}
/// Get the waker that is registered to the epoll instance.
pub(crate) fn waker(&self) -> Arc<Waker> {
self.poll.waker()
}
/// Converts the builder into a `MultiWorker` by providing the queues that
/// are necessary for communication between components.
pub fn build(
self,
signal_queue: Queues<(), Signal>,
session_queue: Queues<(), Session>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
) -> MultiWorker<Storage, Parser, Request, Response> {
MultiWorker {
nevent: self.nevent,
parser: self.parser,
poll: self.poll,
timeout: self.timeout,
signal_queue,
_storage: PhantomData,
storage_queue,
session_queue,
}
}
}
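// Illustrative sketch (not part of the original source): how server bootstrap code
// might wire this builder up. The `config`, `parser`, and queue handles are assumed
// to come from elsewhere, so the usage is shown as hedged, commented-out code:
//
//     let builder = MultiWorkerBuilder::new(&config, parser)?;
//     let waker = builder.waker(); // registered on this worker's poll instance
//     let mut worker = builder.build(signal_queue, session_queue, storage_queue);
//     std::thread::spawn(move || worker.run());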
/// Represents a finalized request/response worker which is ready to be run.
pub struct MultiWorker<Storage, Parser, Request, Response> {
nevent: usize,
parser: Parser,
poll: Poll,
timeout: Duration,
session_queue: Queues<(), Session>,
signal_queue: Queues<(), Signal>,
_storage: PhantomData<Storage>,
storage_queue: Queues<TokenWrapper<Request>, WrappedResult<Request, Response>>,
}
impl<Storage, Parser, Request, Response> MultiWorker<Storage, Parser, Request, Response>
where
Parser: Parse<Request>,
Response: Compose,
Storage: Execute<Request, Response> + EntryStore,
{
/// Run the worker in a loop, handling new events.
pub fn run(&mut self) {
// these are buffers which are re-used in each loop iteration to receive
// events and queue messages
let mut events = Events::with_capacity(self.nevent);
let mut responses = Vec::with_capacity(QUEUE_CAPACITY);
let mut sessions = Vec::with_capacity(QUEUE_CAPACITY);
loop {
WORKER_EVENT_LOOP.increment();
// get events with timeout
if self.poll.poll(&mut events, self.timeout).is_err() {
error!("Error polling");
}
let timestamp = Instant::now();
let count = events.iter().count();
WORKER_EVENT_TOTAL.add(count as _);
if count == self.nevent {
WORKER_EVENT_MAX_REACHED.increment();
} else {
WORKER_EVENT_DEPTH.increment(timestamp, count as _, 1);
}
// process all events
for event in events.iter() {
match event.token() {
WAKER_TOKEN => {
self.handle_new_sessions(&mut sessions);
self.handle_storage_queue(&mut responses);
// check if we received any signals from the admin thread
while let Some(signal) =
self.signal_queue.try_recv().map(|v| v.into_inner())
{
match signal {
Signal::FlushAll => {}
Signal::Shutdown => {
// if we received a shutdown, we can return
// and stop processing events
return;
}
}
}
}
_ => {
self.handle_event(event, timestamp);
}
}
}
// wakes the storage thread if necessary
let _ = self.storage_queue.wake();
}
}
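// Sketch (assumption, not in the original file): how another thread would request a
// shutdown. The sender-side names below are hypothetical; the point is that a queued
// signal is only noticed after the waker fires the WAKER_TOKEN event handled above:
//
//     signal_sender.try_send_to(worker_id, Signal::Shutdown)?; // hypothetical handle
//     worker_waker.wake()?; // interrupt poll() so the signal queue gets drained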
fn handle_event(&mut self, event: &Event, timestamp: Instant) {
let token = event.token();
// handle error events first
if event.is_error() {
WORKER_EVENT_ERROR.increment();
self.handle_error(token);
}
// handle write events before read events to reduce write buffer
// growth if there is also a readable event
if event.is_writable() {
WORKER_EVENT_WRITE.increment();
self.do_write(token);
}
// read events are handled last
if event.is_readable() {
WORKER_EVENT_READ.increment();
if let Ok(session) = self.poll.get_mut_session(token) {
session.set_timestamp(timestamp);
}
let _ = self.do_read(token);
}
if let Ok(session) = self.poll.get_mut_session(token) {
if session.read_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in read buffer",
session,
session.read_pending()
);
}
if session.write_pending() > 0 {
trace!(
"session: {:?} has {} bytes pending in write buffer",
session,
session.write_pending()
);
}
}
}
fn handle_session_read(&mut self, token: Token) -> Result<(), std::io::Error> {
let session = self.poll.get_mut_session(token)?;
match self.parser.parse(session.buffer()) {
Ok(request) => {
let consumed = request.consumed();
let request = request.into_inner();
trace!("parsed request for session: {:?}", session);
session.consume(consumed);
let mut message = TokenWrapper::new(request, token);
for retry in 0..QUEUE_RETRIES {
if let Err(m) = self.storage_queue.try_send_to(STORAGE_THREAD_ID, message) {
if (retry + 1) == QUEUE_RETRIES {
error!("queue full trying to send message to storage thread");
let _ = self.poll.close_session(token);
}
// try to wake storage thread
let _ = self.storage_queue.wake();
message = m;
} else {
break;
}
}
Ok(())
}
Err(ParseError::Incomplete) => {
trace!("incomplete request for session: {:?}", session);
Err(std::io::Error::new(
std::io::ErrorKind::WouldBlock,
"incomplete request",
))
}
Err(_) => {
debug!("bad request for session: {:?}", session);
trace!("session: {:?} read buffer: {:?}", session, session.buffer());
let _ = self.poll.close_session(token);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"bad request",
))
}
}
}
fn handle_storage_queue(
&mut self,
responses: &mut Vec<TrackedItem<WrappedResult<Request, Response>>>,
) {
trace!("handling event for storage queue");
// process all storage queue responses
self.storage_queue.try_recv_all(responses);
for message in responses.drain(..).map(|v| v.into_inner()) {
let token = message.token();
let mut reregister = false;
if let Ok(session) = self.poll.get_mut_session(token) {
let result = message.into_inner();
trace!("composing response for session: {:?}", session);
result.compose(session);
session.finalize_response();
// if we have pending writes, we should attempt to flush the session
// now. if we still have pending bytes, we should re-register to
// remove the read interest.
if session.write_pending() > 0 {
let _ = session.flush();
if session.write_pending() > 0 {
reregister = true;
}
}
if session.read_pending() > 0 && self.handle_session_read(token).is_ok() {
let _ = self.storage_queue.wake();
}
}
if reregister {
self.poll.reregister(token);
}
}
let _ = self.storage_queue.wake();
}
fn | (&mut self, sessions: &mut Vec<TrackedItem<Session>>) {
self.session_queue.try_recv_all(sessions);
for session in sessions.drain(..).map(|v| v.into_inner()) {
let pending = session.read_pending();
trace!(
"new session: {:?} with {} bytes pending in read buffer",
session,
pending
| handle_new_sessions | identifier_name |
scanner.go | used if a match is returned
AddDescription(desc ServiceDescription) string
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
Scan() map[string][]string
// By default, after an IP is found with Scan it is ignored in future searches.
// Unlock instructs the scanner to include responses for that IP address in future scans.
Unlock(ip net.IP)
}
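// Illustrative sketch (not part of the original file): one possible caller-side
// lifecycle for an IScanner. The function name, the ServiceDescription literal, and
// its OpenPorts value are assumptions used only for illustration.
func exampleScannerLifecycle() {
	s := NewScanner()
	// Describe what a matching service looks like; keep the ID to recognize it later.
	id := s.AddDescription(ServiceDescription{OpenPorts: []uint16{80}})
	for ip, matchedIDs := range s.Scan() {
		for _, m := range matchedIDs {
			if m == id {
				// ... talk to the service that answered at this IP ...
			}
		}
		// Scan locks every matched IP; release it once it is no longer in use.
		s.Unlock(net.ParseIP(ip))
	}
}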
// A ServiceFoundNotification indicates that a service was found at ip IP which
// matched all of MatchingDescriptionIDs.
type ServiceFoundNotification struct {
IP net.IP
MatchingDescriptionIDs []string
}
// Scanner implements IScanner. It searches the IPv4 network for services
// matching given descriptions. Once services are found, they are locked and
// returned to the caller. It is up to the caller to unlock IPs (via Unlock())
// if they are no longer in use.
type Scanner struct {
interfaces map[string]net.Interface
ilock sync.RWMutex // protects interfaces
descriptionsByID map[string]ServiceDescription
dlock sync.RWMutex // protects descriptionsByID
activeServicesByIP map[string]struct{}
slock *sync.RWMutex // protects activeServicesByIP
log log.Logger
}
// NewScanner properly instantiates a Scanner.
func NewScanner() *Scanner {
s := &Scanner{
interfaces: make(map[string]net.Interface),
ilock: sync.RWMutex{},
descriptionsByID: make(map[string]ServiceDescription),
dlock: sync.RWMutex{},
activeServicesByIP: make(map[string]struct{}),
slock: &sync.RWMutex{},
log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
}
err := s.refreshInterfaces()
if err != nil {
panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
}
return s
}
// AddDescription adds a ServiceDescription to the Scanner. On following scans,
// the Scanner will find services which match the description.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
s.dlock.Lock()
defer s.dlock.Unlock()
id := uuid.New()
s.descriptionsByID[id] = desc
return id
}
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
func (s *Scanner) Scan() map[string][]string | for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shouldn't happen?", "interface", intf.Name, "*net.IPAddr", v)
case *net.IPNet:
if v.IP.DefaultMask() != nil { // ignore IPs without default mask (IPv6?)
ip := v.IP
for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
wg.Add(1)
numChecked++
// To save space, try and only use 4 bytes
if x := ip.To4(); x != nil {
ip = x
}
dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
copy(dup, ip)
go func() {
defer wg.Done()
s.slock.RLock()
_, ok := s.activeServicesByIP[dup.String()] // ignore IPs already in use
s.slock.RUnlock()
if ok { // ignore IPs already in use
s.log.Debug("scanner ignoring IP that is already in use", "ip", dup.String())
numAlreadyInUse++
} else {
ids := s.getMatchingDescriptions(dup)
if len(ids) > 0 { // At least one service matches
s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
numFound++
flock.Lock()
foundServices[dup.String()] = ids
flock.Unlock()
s.slock.Lock()
s.activeServicesByIP[dup.String()] = struct{}{} // mark IP as in use
s.slock.Unlock()
}
}
}()
}
}
default:
s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
}
}
}
s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
wg.Wait()
s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
return foundServices
}
// Unlock unlocks the provided IP, such that it will no longer be ignored in
// future scans.
func (s *Scanner) Unlock(ip net.IP) {
s.slock.Lock()
defer s.slock.Unlock()
delete(s.activeServicesByIP, ip.String())
s.log.Debug("ipv4 scanner unlocked IP", "ip", ip.String())
}
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
var matchedDrivers []string
matchedPorts := make(map[uint16]bool) // true if found open, false if not, nil key if untested
s.dlock.RLock()
defer s.dlock.RUnlock()
for id, desc := range s.descriptionsByID {
match := true
for _, port := range desc.OpenPorts {
// Try the cache first
if portIsOpen, ok := matchedPorts[port]; ok {
if !portIsOpen {
match = false
s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
break
}
} else {
// No cached entry, try dialing
timeout := 1 * time.Second
url := ip.String() + ":" + strconv.Itoa(int(port))
conn, err := net.DialTimeout("tcp", url, timeout)
if err != nil {
match = false
matchedPorts[port] = false
s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
break
}
conn.Close()
}
}
if match {
matchedDrivers = append(matchedDrivers, id) // add
s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
}
}
return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network interfaces
func (s *Scanner) refreshInterfaces() error {
s.ilock.Lock() // Lock the interfaces for writing
defer s.ilock.Unlock()
ifaces, err := net.Interfaces()
if err != nil {
return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
}
foundInterfaces := make(map[string]net.Interface)
newInterfaces := make(map[string]struct{})
for _, iface := range ifaces {
name := iface.Name
if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
foundInterfaces[name] = iface
if _, ok := s.interfaces[name]; !ok {
newInterfaces[name] = struct{}{}
}
delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
}
}
if len(s.interfaces) > 0 {
names := make([]string, len(s.interfaces))
i := 0
for name := range s.interfaces {
names[i] = name
i++
}
s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
}
s.interfaces = foundInterfaces
if len(newInterfaces) > 0 {
names := make([]string, len(newInterfaces))
i := 0
for name := range newInterfaces {
names[i] = name | {
if len(s.descriptionsByID) == 0 {
s.log.Debug("scanner has no descriptions, ignoring scan")
return map[string][]string{}
}
s.log.Debug("ipv4 scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
foundServices := make(map[string][]string)
flock := sync.Mutex{} // protects foundServices
var wg sync.WaitGroup
var numChecked, numFound, numAlreadyInUse int
s.ilock.RLock()
defer s.ilock.RUnlock()
for name, intf := range s.interfaces {
addrs, err := intf.Addrs()
if err != nil {
panic("could not get addresses from " + name + ": " + err.Error())
}
s.log.Debug("ip4v scanner scanning interface", "interface", name, "num_addrs", len(addrs)) | identifier_body |
scanner.go | used if a match is returned
AddDescription(desc ServiceDescription) string
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
Scan() map[string][]string
// By default, after an IP is found with Scan it is ignored in future searches.
// Unlock instructs the scanner to include responses for that IP address in future scans.
Unlock(ip net.IP)
}
// A ServiceFoundNotification indicates that a service was found at ip IP which
// matched all of MatchingDescriptionIDs.
type ServiceFoundNotification struct {
IP net.IP
MatchingDescriptionIDs []string
}
// Scanner implements IScanner. It searches the IPv4 network for services
// matching given descriptions. Once services are found, they are locked and
// returned to the caller. It is up to the caller to unlock IPs (via Unlock())
// if they are no longer in use.
type Scanner struct {
interfaces map[string]net.Interface
ilock sync.RWMutex // protects interfaces
descriptionsByID map[string]ServiceDescription
dlock sync.RWMutex // protects descriptionsByID
activeServicesByIP map[string]struct{}
slock *sync.RWMutex // protects activeServicesByIP
log log.Logger
}
// NewScanner properly instantiates a Scanner.
func NewScanner() *Scanner {
s := &Scanner{
interfaces: make(map[string]net.Interface),
ilock: sync.RWMutex{},
descriptionsByID: make(map[string]ServiceDescription),
dlock: sync.RWMutex{},
activeServicesByIP: make(map[string]struct{}),
slock: &sync.RWMutex{},
log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
}
err := s.refreshInterfaces()
if err != nil {
panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
}
return s
}
// AddDescription adds a ServiceDescription to the Scanner. On following scans,
// the Scanner will find services which match the description.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
s.dlock.Lock()
defer s.dlock.Unlock()
id := uuid.New()
s.descriptionsByID[id] = desc
return id
}
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
func (s *Scanner) Scan() map[string][]string {
if len(s.descriptionsByID) == 0 {
s.log.Debug("scanner has no descriptions, ignoring scan")
return map[string][]string{}
}
s.log.Debug("ipv4 scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
foundServices := make(map[string][]string)
flock := sync.Mutex{} // protects foundServices
var wg sync.WaitGroup
var numChecked, numFound, numAlreadyInUse int
s.ilock.RLock()
defer s.ilock.RUnlock()
for name, intf := range s.interfaces {
addrs, err := intf.Addrs()
if err != nil {
panic("could not get addresses from " + name + ": " + err.Error())
}
s.log.Debug("ipv4 scanner scanning interface", "interface", name, "num_addrs", len(addrs))
for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shouldn't happen?", "interface", intf.Name, "*net.IPAddr", v)
case *net.IPNet:
if v.IP.DefaultMask() != nil { // ignore IPs without default mask (IPv6?)
ip := v.IP
for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
wg.Add(1)
numChecked++
// To save space, try and only use 4 bytes
if x := ip.To4(); x != nil {
ip = x
}
dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
copy(dup, ip)
go func() {
defer wg.Done()
s.slock.RLock()
_, ok := s.activeServicesByIP[dup.String()] // ignore IPs already in use
s.slock.RUnlock()
if ok { // ignore IPs already in use
s.log.Debug("scanner ignoring IP that is already in use", "ip", dup.String())
numAlreadyInUse++
} else {
ids := s.getMatchingDescriptions(dup)
if len(ids) > 0 { // At least one service matches
s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
numFound++
flock.Lock()
foundServices[dup.String()] = ids
flock.Unlock()
s.slock.Lock()
s.activeServicesByIP[dup.String()] = struct{}{} // mark IP as in use
s.slock.Unlock()
}
}
}()
}
}
default:
s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
}
}
}
s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
wg.Wait()
s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
return foundServices
}
// Unlock unlocks the provided IP, such that it will no longer be ignored in
// future scans.
func (s *Scanner) Unlock(ip net.IP) {
s.slock.Lock()
defer s.slock.Unlock()
delete(s.activeServicesByIP, ip.String())
s.log.Debug("ipv4 scanner unlocked IP", "ip", ip.String())
}
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
var matchedDrivers []string
matchedPorts := make(map[uint16]bool) // true if found open, false if not, nil key if untested
s.dlock.RLock()
defer s.dlock.RUnlock()
for id, desc := range s.descriptionsByID {
match := true
for _, port := range desc.OpenPorts {
// Try the cache first
if portIsOpen, ok := matchedPorts[port]; ok {
if !portIsOpen {
match = false
s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
break
}
} else {
// No cached entry, try dialing
timeout := 1 * time.Second
url := ip.String() + ":" + strconv.Itoa(int(port))
conn, err := net.DialTimeout("tcp", url, timeout)
if err != nil {
match = false
matchedPorts[port] = false
s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
break
}
conn.Close()
}
}
if match {
matchedDrivers = append(matchedDrivers, id) // add
s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
}
}
return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network interfaces
func (s *Scanner) refreshInterfaces() error {
s.ilock.Lock() // Lock the interfaces for writing
defer s.ilock.Unlock()
ifaces, err := net.Interfaces()
if err != nil {
return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
}
foundInterfaces := make(map[string]net.Interface)
newInterfaces := make(map[string]struct{})
for _, iface := range ifaces {
name := iface.Name
if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
foundInterfaces[name] = iface
if _, ok := s.interfaces[name]; !ok {
newInterfaces[name] = struct{}{}
}
delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
}
}
if len(s.interfaces) > 0 |
s.interfaces = foundInterfaces
if len(newInterfaces) > 0 {
names := make([]string, len(newInterfaces))
i := 0
for name := range newInterfaces {
names[i] = | {
names := make([]string, len(s.interfaces))
i := 0
for name := range s.interfaces {
names[i] = name
i++
}
s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
} | conditional_block |
scanner.go | // Unlock instructs the scanner to include responses for that IP address in future scans.
Unlock(ip net.IP)
}
// A ServiceFoundNotification indicates that a service was found at ip IP which
// matched all of MatchingDescriptionIDs.
type ServiceFoundNotification struct {
IP net.IP
MatchingDescriptionIDs []string
}
// Scanner implements IScanner. It searches the IPv4 network for services
// matching given descriptions. Once services are found, they are locked and
// returned to the caller. It is up to the caller to unlock IPs (via Unlock())
// if they are no longer in use.
type Scanner struct {
interfaces map[string]net.Interface
ilock sync.RWMutex // protects interfaces
descriptionsByID map[string]ServiceDescription
dlock sync.RWMutex // protects descriptionsByID
activeServicesByIP map[string]struct{}
slock *sync.RWMutex // protects activeServicesByIP
log log.Logger
}
// NewScanner properly instantiates a Scanner.
func NewScanner() *Scanner {
s := &Scanner{
interfaces: make(map[string]net.Interface),
ilock: sync.RWMutex{},
descriptionsByID: make(map[string]ServiceDescription),
dlock: sync.RWMutex{},
activeServicesByIP: make(map[string]struct{}),
slock: &sync.RWMutex{},
log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
}
err := s.refreshInterfaces()
if err != nil {
panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
}
return s
}
// AddDescription adds a ServiceDescription to the Scanner. On following scans,
// the Scanner will find services which match the description.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
s.dlock.Lock()
defer s.dlock.Unlock()
id := uuid.New()
s.descriptionsByID[id] = desc
return id
}
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
func (s *Scanner) Scan() map[string][]string {
if len(s.descriptionsByID) == 0 {
s.log.Debug("scanner has no descriptions, ignoring scan")
return map[string][]string{}
}
s.log.Debug("ipv4 scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
foundServices := make(map[string][]string)
flock := sync.Mutex{} // protects foundServices
var wg sync.WaitGroup
var numChecked, numFound, numAlreadyInUse int
s.ilock.RLock()
defer s.ilock.RUnlock()
for name, intf := range s.interfaces {
addrs, err := intf.Addrs()
if err != nil {
panic("could not get addresses from " + name + ": " + err.Error())
}
s.log.Debug("ipv4 scanner scanning interface", "interface", name, "num_addrs", len(addrs))
for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shouldn't happen?", "interface", intf.Name, "*net.IPAddr", v)
case *net.IPNet:
if v.IP.DefaultMask() != nil { // ignore IPs without default mask (IPv6?)
ip := v.IP
for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
wg.Add(1)
numChecked++
// To save space, try and only use 4 bytes
if x := ip.To4(); x != nil {
ip = x
}
dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
copy(dup, ip)
go func() {
defer wg.Done()
s.slock.RLock()
_, ok := s.activeServicesByIP[dup.String()] // ignore IPs already in use
s.slock.RUnlock()
if ok { // ignore IPs already in use
s.log.Debug("scanner ignoring IP that is already in use", "ip", dup.String())
numAlreadyInUse++
} else {
ids := s.getMatchingDescriptions(dup)
if len(ids) > 0 { // At least one service matches
s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
numFound++
flock.Lock()
foundServices[dup.String()] = ids
flock.Unlock()
s.slock.Lock()
s.activeServicesByIP[dup.String()] = struct{}{} // mark IP as in use
s.slock.Unlock()
}
}
}()
}
}
default:
s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
}
}
}
s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
wg.Wait()
s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
return foundServices
}
// Unlock unlocks the provided IP, such that it will no longer be ignored in
// future scans.
func (s *Scanner) Unlock(ip net.IP) {
s.slock.Lock()
defer s.slock.Unlock()
delete(s.activeServicesByIP, ip.String())
s.log.Debug("ipv4 scanner unlocked IP", "ip", ip.String())
}
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
var matchedDrivers []string
matchedPorts := make(map[uint16]bool) // true if found open, false if not, nil key if untested
s.dlock.RLock()
defer s.dlock.RUnlock()
for id, desc := range s.descriptionsByID {
match := true
for _, port := range desc.OpenPorts {
// Try the cache first
if portIsOpen, ok := matchedPorts[port]; ok {
if !portIsOpen {
match = false
s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
break
}
} else {
// No cached entry, try dialing
timeout := 1 * time.Second
url := ip.String() + ":" + strconv.Itoa(int(port))
conn, err := net.DialTimeout("tcp", url, timeout)
if err != nil {
match = false
matchedPorts[port] = false
s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
break
}
conn.Close()
}
}
if match {
matchedDrivers = append(matchedDrivers, id) // add
s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
}
}
return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network interfaces
func (s *Scanner) refreshInterfaces() error {
s.ilock.Lock() // Lock the interfaces for writing
defer s.ilock.Unlock()
ifaces, err := net.Interfaces()
if err != nil {
return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
}
foundInterfaces := make(map[string]net.Interface)
newInterfaces := make(map[string]struct{})
for _, iface := range ifaces {
name := iface.Name
if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
foundInterfaces[name] = iface
if _, ok := s.interfaces[name]; !ok {
newInterfaces[name] = struct{}{}
}
delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
}
}
if len(s.interfaces) > 0 {
names := make([]string, len(s.interfaces))
i := 0
for name := range s.interfaces {
names[i] = name
i++
}
s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
}
s.interfaces = foundInterfaces
if len(newInterfaces) > 0 {
names := make([]string, len(newInterfaces))
i := 0
for name := range newInterfaces {
names[i] = name
i++
}
s.log.Debug("new IPv4 interfaces found", "interfaces", names)
}
return nil
}
// incrementIP increments an IPv4 address | func incrementIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 { | random_line_split |
|
scanner.go | [string]net.Interface),
ilock: sync.RWMutex{},
descriptionsByID: make(map[string]ServiceDescription),
dlock: sync.RWMutex{},
activeServicesByIP: make(map[string]struct{}),
slock: &sync.RWMutex{},
log: Log.New("obj", "ipv4.scanner", "id", logext.RandId(8)),
}
err := s.refreshInterfaces()
if err != nil {
panic("ipv4.NewScanner(): scanner could not refresh interfaces: " + err.Error())
}
return s
}
// AddDescription adds a ServiceDescription to the Scanner. On following scans,
// the Scanner will find services which match the description.
func (s *Scanner) AddDescription(desc ServiceDescription) string {
s.dlock.Lock()
defer s.dlock.Unlock()
id := uuid.New()
s.descriptionsByID[id] = desc
return id
}
// Scan the IPv4 network for services matching given descriptions.
// Returns a map of IPs (string encoded) pointing to the IDs of those descriptions which matched
func (s *Scanner) Scan() map[string][]string {
if len(s.descriptionsByID) == 0 {
s.log.Debug("scanner has no descriptions, ignoring scan")
return map[string][]string{}
}
s.log.Debug("ipv4 scanner beginning scan", "interfaces", s.interfaces, "target_descriptions", s.descriptionsByID)
foundServices := make(map[string][]string)
flock := sync.Mutex{} // protects foundServices
var wg sync.WaitGroup
var numChecked, numFound, numAlreadyInUse int
s.ilock.RLock()
defer s.ilock.RUnlock()
for name, intf := range s.interfaces {
addrs, err := intf.Addrs()
if err != nil {
panic("could not get addresses from " + name + ": " + err.Error())
}
s.log.Debug("ipv4 scanner scanning interface", "interface", name, "num_addrs", len(addrs))
for _, a := range addrs {
switch v := a.(type) {
case *net.IPAddr:
s.log.Warn("ipv4 scanner got a *net.IPAddr, which isn't useful and maybe shouldn't happen?", "interface", intf.Name, "*net.IPAddr", v)
case *net.IPNet:
if v.IP.DefaultMask() != nil { // ignore IPs without default mask (IPv6?)
ip := v.IP
for ip := ip.Mask(v.Mask); v.Contains(ip); incrementIP(ip) {
wg.Add(1)
numChecked++
// To save space, try and only use 4 bytes
if x := ip.To4(); x != nil {
ip = x
}
dup := make(net.IP, len(ip)) // make a copy of the IP ([]byte)
copy(dup, ip)
go func() {
defer wg.Done()
s.slock.RLock()
_, ok := s.activeServicesByIP[dup.String()] // ignore IPs already in use
s.slock.RUnlock()
if ok { // ignore IPs already in use
s.log.Debug("scanner ignoring IP that is already in use", "ip", dup.String())
numAlreadyInUse++
} else {
ids := s.getMatchingDescriptions(dup)
if len(ids) > 0 { // At least one service matches
s.log.Debug("found possible matches for ipv4 service", "num_matches", len(ids), "matching_ids", ids)
numFound++
flock.Lock()
foundServices[dup.String()] = ids
flock.Unlock()
s.slock.Lock()
s.activeServicesByIP[dup.String()] = struct{}{} // mark IP as in use
s.slock.Unlock()
}
}
}()
}
}
default:
s.log.Warn("ipv4 scanner encountered address of unknown type", "type", fmt.Sprintf("%T", a))
}
}
}
s.log.Debug("ipv4 scanner waiting for waitgroup to finish")
wg.Wait()
s.log.Debug("ipv4 scanner done waiting (all waitgroup items completed)")
s.log.Info("ipv4 scan complete", "ips_checked", numChecked, "possibilities_found", numFound, "ips_already_in_use", numAlreadyInUse)
return foundServices
}
// Unlock unlocks the provided IP, such that it will no longer be ignored in
// future scans.
func (s *Scanner) Unlock(ip net.IP) {
s.slock.Lock()
defer s.slock.Unlock()
delete(s.activeServicesByIP, ip.String())
s.log.Debug("ipv4 scanner unlocked IP", "ip", ip.String())
}
func (s *Scanner) getMatchingDescriptions(ip net.IP) []string {
var matchedDrivers []string
matchedPorts := make(map[uint16]bool) // true if found open, false if not, nil key if untested
s.dlock.RLock()
defer s.dlock.RUnlock()
for id, desc := range s.descriptionsByID {
match := true
for _, port := range desc.OpenPorts {
// Try the cache first
if portIsOpen, ok := matchedPorts[port]; ok {
if !portIsOpen {
match = false
s.log.Debug("ipv4 scanner found a service description which is not a match for services available at the target IP, because a port which is expected to be open is closed (according to cache)", "ip", ip.String(), "description_id", id, "port", port, "desc_ports", desc.OpenPorts)
break
}
} else {
// No cached entry, try dialing
timeout := 1 * time.Second
url := ip.String() + ":" + strconv.Itoa(int(port))
conn, err := net.DialTimeout("tcp", url, timeout)
if err != nil {
match = false
matchedPorts[port] = false
s.log.Debug("ipv4 scanner found a service description which is not a match for the services available at the target IP, because a port which is expected to be open is closed (timed out trying)", "ip", ip.String(), "description_id", id, "port", port, "url", url, "timeout", timeout, "err", err, "desc_ports", desc.OpenPorts)
break
}
conn.Close()
}
}
if match {
matchedDrivers = append(matchedDrivers, id) // add
s.log.Debug("ipv4 scanner found a service description match", "ip", ip.String(), "service_desc", id)
}
}
return matchedDrivers
}
// refreshInterfaces searches the device for any new or removed network interfaces
func (s *Scanner) refreshInterfaces() error {
s.ilock.Lock() // Lock the interfaces for writing
defer s.ilock.Unlock()
ifaces, err := net.Interfaces()
if err != nil {
return fmt.Errorf("error while refreshing ipv4 interfaces: %v", err)
}
foundInterfaces := make(map[string]net.Interface)
newInterfaces := make(map[string]struct{})
for _, iface := range ifaces {
name := iface.Name
if _, ok := ignoredIPv4Interfaces[name]; !ok { // ignore certain interfaces
foundInterfaces[name] = iface
if _, ok := s.interfaces[name]; !ok {
newInterfaces[name] = struct{}{}
}
delete(s.interfaces, name) // unmark the interface - anything left once we're done has disappeared
}
}
if len(s.interfaces) > 0 {
names := make([]string, len(s.interfaces))
i := 0
for name := range s.interfaces {
names[i] = name
i++
}
s.log.Warn("STUB: IPv4 interfaces have disappeared, but handling logic is unimplemented! Services on the missing interfaces may still be active", "deleted_interfaces", names)
}
s.interfaces = foundInterfaces
if len(newInterfaces) > 0 {
names := make([]string, len(newInterfaces))
i := 0
for name := range newInterfaces {
names[i] = name
i++
}
s.log.Debug("new IPv4 interfaces found", "interfaces", names)
}
return nil
}
// incrementIP increments an IPv4 address
func incrementIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
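// Illustrative sketch (not part of the original file): incrementIP advances an address
// in place, carrying into the next octet when a byte wraps. The function name below is
// an assumption used only for this example.
func exampleIncrementIP() {
	ip := net.ParseIP("192.168.0.255").To4()
	incrementIP(ip)
	fmt.Println(ip) // prints 192.168.1.0
}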
// An IContinuousScanner is a Scanner that scans continuously. Scan results are
// passed to the channel provided by FoundServices()
type IContinuousScanner interface {
suture.Service
IScanner
FoundServices() chan ServiceFoundNotification
}
// ContinuousScanner implements IContinuousScanner. It scans continuously,
// putting results into the channel provided by FoundServices().
type ContinuousScanner struct {
*Scanner
foundIPChan chan ServiceFoundNotification
period time.Duration
stop chan struct{}
}
// NewContinousScanner properly instantiates a ContinuousScanner. The new
// Scanner will wait between scans for a time defined by `period`.
func NewContinousScanner(period time.Duration) *ContinuousScanner {
return &ContinuousScanner{
Scanner: NewScanner(),
foundIPChan: make(chan ServiceFoundNotification),
period: period,
stop: make(chan struct{}),
}
}
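// Sketch (assumptions, not in the original file): a consumer would normally run the
// continuous scanner in the background and drain FoundServices(). How the scanner is
// started (e.g. a suture supervisor invoking its Serve method) is outside this excerpt,
// so only the channel-draining side is hinted at here:
//
//	cs := NewContinousScanner(30 * time.Second)
//	cs.AddDescription(ServiceDescription{OpenPorts: []uint16{80}})
//	// start cs under a supervisor, then:
//	go func() {
//		for n := range cs.FoundServices() {
//			fmt.Printf("service at %s matched %v\n", n.IP, n.MatchingDescriptionIDs)
//		}
//	}()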
// FoundServices returns a channel which will be populated with services found
// by the ContinuousScanner
func (s *ContinuousScanner) | FoundServices | identifier_name |
|
menus.py | CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def open(self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu",
"The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self):
self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+E", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu("&Edit")
self.editMenu.addAction(self.undoAct)
self.editMenu.addAction(self.redoAct)
self.editMenu.addSeparator()
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.editMenu.addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.formatMenu = self.editMenu.addMenu("&Format")
self.formatMenu.addAction(self.boldAct)
self.formatMenu.addAction(self.italicAct)
self.formatMenu.addSeparator().setText("Alignment")
self.formatMenu.addAction(self.leftAlignAct)
self.formatMenu.addAction(self.rightAlignAct)
self.formatMenu.addAction(self.justifyAct)
self.formatMenu.addAction(self.centerAct)
self.formatMenu.addSeparator()
self.formatMenu.addAction(self.setLineSpacingAct)
self.formatMenu.addAction(self.setParagraphSpacingAct)
if __name__ == '__main__':
| import sys
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_()) | conditional_block |
|
menus.py | , are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def open(self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu", | self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu | "The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self): | random_line_split |
menus.py | , are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel)
vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def | (self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu",
"The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self):
self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().add | open | identifier_name |
menus.py | , are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QFrame,
QLabel, QMainWindow, QMenu, QMessageBox, QSizePolicy, QVBoxLayout,
QWidget)
class MainWindow(QMainWindow):
def __init__(self):
| vbox.addWidget(bottomFiller)
widget.setLayout(vbox)
self.createActions()
self.createMenus()
message = "A context menu is available by right-clicking"
self.statusBar().showMessage(message)
self.setWindowTitle("Menus")
self.setMinimumSize(160,160)
self.resize(480,320)
def contextMenuEvent(self, event):
menu = QMenu(self)
menu.addAction(self.cutAct)
menu.addAction(self.copyAct)
menu.addAction(self.pasteAct)
menu.exec_(event.globalPos())
def newFile(self):
self.infoLabel.setText("Invoked <b>File|New</b>")
def open(self):
self.infoLabel.setText("Invoked <b>File|Open</b>")
def save(self):
self.infoLabel.setText("Invoked <b>File|Save</b>")
def print_(self):
self.infoLabel.setText("Invoked <b>File|Print</b>")
def undo(self):
self.infoLabel.setText("Invoked <b>Edit|Undo</b>")
def redo(self):
self.infoLabel.setText("Invoked <b>Edit|Redo</b>")
def cut(self):
self.infoLabel.setText("Invoked <b>Edit|Cut</b>")
def copy(self):
self.infoLabel.setText("Invoked <b>Edit|Copy</b>")
def paste(self):
self.infoLabel.setText("Invoked <b>Edit|Paste</b>")
def bold(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Bold</b>")
def italic(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Italic</b>")
def leftAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Left Align</b>")
def rightAlign(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Right Align</b>")
def justify(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Justify</b>")
def center(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Center</b>")
def setLineSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Line Spacing</b>")
def setParagraphSpacing(self):
self.infoLabel.setText("Invoked <b>Edit|Format|Set Paragraph Spacing</b>")
def about(self):
self.infoLabel.setText("Invoked <b>Help|About</b>")
QMessageBox.about(self, "About Menu",
"The <b>Menu</b> example shows how to create menu-bar menus "
"and context menus.")
def aboutQt(self):
self.infoLabel.setText("Invoked <b>Help|About Qt</b>")
def createActions(self):
self.newAct = QAction("&New", self, shortcut=QKeySequence.New,
statusTip="Create a new file", triggered=self.newFile)
self.openAct = QAction("&Open...", self, shortcut=QKeySequence.Open,
statusTip="Open an existing file", triggered=self.open)
self.saveAct = QAction("&Save", self, shortcut=QKeySequence.Save,
statusTip="Save the document to disk", triggered=self.save)
self.printAct = QAction("&Print...", self, shortcut=QKeySequence.Print,
statusTip="Print the document", triggered=self.print_)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
statusTip="Exit the application", triggered=self.close)
self.undoAct = QAction("&Undo", self, shortcut=QKeySequence.Undo,
statusTip="Undo the last operation", triggered=self.undo)
self.redoAct = QAction("&Redo", self, shortcut=QKeySequence.Redo,
statusTip="Redo the last operation", triggered=self.redo)
self.cutAct = QAction("Cu&t", self, shortcut=QKeySequence.Cut,
statusTip="Cut the current selection's contents to the clipboard",
triggered=self.cut)
self.copyAct = QAction("&Copy", self, shortcut=QKeySequence.Copy,
statusTip="Copy the current selection's contents to the clipboard",
triggered=self.copy)
self.pasteAct = QAction("&Paste", self, shortcut=QKeySequence.Paste,
statusTip="Paste the clipboard's contents into the current selection",
triggered=self.paste)
self.boldAct = QAction("&Bold", self, checkable=True,
shortcut="Ctrl+B", statusTip="Make the text bold",
triggered=self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction("&Italic", self, checkable=True,
shortcut="Ctrl+I", statusTip="Make the text italic",
triggered=self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction("Set &Line Spacing...", self,
statusTip="Change the gap between the lines of a paragraph",
triggered=self.setLineSpacing)
self.setParagraphSpacingAct = QAction("Set &Paragraph Spacing...",
self, statusTip="Change the gap between paragraphs",
triggered=self.setParagraphSpacing)
self.aboutAct = QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
statusTip="Show the Qt library's About box",
triggered=self.aboutQt)
self.aboutQtAct.triggered.connect(QApplication.instance().aboutQt)
self.leftAlignAct = QAction("&Left Align", self, checkable=True,
shortcut="Ctrl+L", statusTip="Left align the selected text",
triggered=self.leftAlign)
self.rightAlignAct = QAction("&Right Align", self, checkable=True,
shortcut="Ctrl+R", statusTip="Right align the selected text",
triggered=self.rightAlign)
self.justifyAct = QAction("&Justify", self, checkable=True,
shortcut="Ctrl+J", statusTip="Justify the selected text",
triggered=self.justify)
self.centerAct = QAction("&Center", self, checkable=True,
shortcut="Ctrl+C", statusTip="Center the selected text",
triggered=self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.justifyAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu | super(MainWindow, self).__init__()
widget = QWidget()
self.setCentralWidget(widget)
topFiller = QWidget()
topFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.infoLabel = QLabel(
"<i>Choose a menu option, or right-click to invoke a context menu</i>",
alignment=Qt.AlignCenter)
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
bottomFiller = QWidget()
bottomFiller.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
vbox = QVBoxLayout()
vbox.setContentsMargins(5, 5, 5, 5)
vbox.addWidget(topFiller)
vbox.addWidget(self.infoLabel) | identifier_body |
wasm.rs | }
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
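// Hedged note (not part of the original source): the three levels above correspond to the
// `-O0`, `-O` and `-O3` flags of the standalone `wasm-opt` tool from Binaryen, so the release
// mapping is roughly equivalent to running (file names here are placeholders only):
//
//     wasm-opt -O3 input.wasm -o input_opt.wasm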
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WASM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true);
info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true);
if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
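// Hedged usage sketch (illustration only, not from the original file): `wasm_size_limit` is a
// `byte_unit::Byte`, so a limit can be parsed from a human-readable string before being stored
// in a `BuildInput`; the concrete value below is a made-up example.
//
//     let limit: byte_unit::Byte = "4 MiB".parse().expect("invalid size literal");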
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> | log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust the locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run the first build of wasm manually rather than through cargo-watch.
// Once it completes, cargo-watch is spawned and this method yields the watcher.
// This forces the esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, a much neater workaround should be possible if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing | {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level, | identifier_body |
wasm.rs | }
pub fn extra_rust_options(self) -> Vec<String> {
match self {
// Profile::Production => ["-Clto=fat", "-Ccodegen-units=1", "-Cincremental=false"]
// .into_iter()
// .map(ToString::to_string)
// .collect(),
Profile::Dev | Profile::Profile | Profile::Release => vec![],
}
}
pub fn optimization_level(self) -> wasm_opt::OptimizationLevel {
match self {
Profile::Dev => wasm_opt::OptimizationLevel::O0,
Profile::Profile => wasm_opt::OptimizationLevel::O,
Profile::Release => wasm_opt::OptimizationLevel::O3,
}
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BuildInput {
/// Path to the crate to be compiled to WASM. Relative to the repository root.
pub crate_path: PathBuf,
pub wasm_opt_options: Vec<String>,
pub skip_wasm_opt: bool,
pub extra_cargo_options: Vec<String>,
pub profile: Profile,
pub profiling_level: Option<ProfilingLevel>,
pub log_level: LogLevel,
pub uncollapsed_log_level: LogLevel,
pub wasm_size_limit: Option<byte_unit::Byte>,
pub system_shader_tools: bool,
}
impl BuildInput {
pub async fn perhaps_check_size(&self, wasm_path: impl AsRef<Path>) -> Result {
let compressed_size = compressed_size(&wasm_path).await?.get_appropriate_unit(true); | if !self.profile.should_check_size() {
warn!("Skipping size check because profile is '{}'.", self.profile,);
} else if self.profiling_level.unwrap_or_default() != ProfilingLevel::Objective {
// TODO? additional leeway as sanity check
warn!(
"Skipping size check because profiling level is {:?} rather than {}.",
self.profiling_level,
ProfilingLevel::Objective
);
} else {
ensure!(
compressed_size < wasm_size_limit,
"Compressed WASM size ~{} ({} bytes) exceeds the limit of {} ({} bytes).",
compressed_size,
compressed_size.get_byte(),
wasm_size_limit,
wasm_size_limit.get_byte(),
)
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Wasm;
#[async_trait]
impl IsTarget for Wasm {
type BuildInput = BuildInput;
type Artifact = Artifact;
fn artifact_name(&self) -> String {
WASM_ARTIFACT_NAME.into()
}
fn adapt_artifact(self, path: impl AsRef<Path>) -> BoxFuture<'static, Result<Self::Artifact>> {
ready(Ok(Artifact::new(path.as_ref()))).boxed()
}
fn build_internal(
&self,
context: Context,
job: BuildTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Artifact>> {
let Context { octocrab: _, cache, upload_artifacts: _, repo_root } = context;
let WithDestination { inner, destination } = job;
let span = info_span!("Building WASM.",
repo = %repo_root.display(),
crate = %inner.crate_path.display(),
cargo_opts = ?inner.extra_cargo_options
);
async move {
// Old wasm-pack does not pass trailing `build` command arguments to the Cargo.
// We want to be able to pass --profile this way.
WasmPack.require_present_that(VersionReq::parse(">=0.10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust the locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run the first build of wasm manually rather than through cargo-watch.
// Once it completes, cargo-watch is spawned and this method yields the watcher.
// This forces the esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, a much neater workaround should be possible if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/g | info!("Compressed size of {} is {}.", wasm_path.as_ref().display(), compressed_size);
if let Some(wasm_size_limit) = self.wasm_size_limit {
let wasm_size_limit = wasm_size_limit.get_appropriate_unit(true); | random_line_split |
wasm.rs | .10.1")?).await?;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit: _wasm_size_limit,
system_shader_tools,
} = &inner;
// NOTE: We cannot trust the locally installed version of shader tools to be correct.
// Those binaries have no reliable versioning, and existing common distributions (e.g.
// Vulkan SDK) contain old builds with bugs that impact our shaders. By default, we have
// to force usage of our own distribution built on our CI.
if *system_shader_tools {
ShaderTools.install_if_missing(&cache).await?;
} else {
ShaderTools.install(&cache).await?;
}
cache::goodie::binaryen::Binaryen { version: BINARYEN_VERSION_TO_INSTALL }
.install_if_missing(&cache)
.await?;
info!("Building wasm.");
let temp_dir = tempdir()?;
let temp_dist = RepoRootDistWasm::new_root(temp_dir.path());
ensogl_pack::build(
ensogl_pack::WasmPackOutputs {
out_dir: temp_dist.path.clone(),
out_name: OUTPUT_NAME.into(),
},
|args| {
let mut command = WasmPack.cmd()?;
command
.current_dir(&repo_root)
.kill_on_drop(true)
.env_remove(ide_ci::programs::rustup::env::RUSTUP_TOOLCHAIN.name())
.build()
.arg(wasm_pack::Profile::from(*profile))
.target(wasm_pack::Target::Web)
.output_directory(args.out_dir)
.output_name(args.out_name)
.arg(crate_path)
.arg("--")
.apply(&cargo::Color::Always)
.args(extra_cargo_options);
if let Some(profiling_level) = profiling_level {
command.set_env(env::ENSO_MAX_PROFILING_LEVEL, &profiling_level)?;
}
command.set_env(env::ENSO_MAX_LOG_LEVEL, &log_level)?;
command.set_env(env::ENSO_MAX_UNCOLLAPSED_LOG_LEVEL, &uncollapsed_log_level)?;
Ok(command)
},
)
.await?;
Self::finalize_wasm(wasm_opt_options, *skip_wasm_opt, *profile, &temp_dist).await?;
ide_ci::fs::create_dir_if_missing(&destination)?;
let ret = RepoRootDistWasm::new_root(&destination);
ide_ci::fs::copy(&temp_dist, &ret)?;
inner.perhaps_check_size(&ret.pkg_opt_wasm).await?;
Ok(Artifact(ret))
}
.instrument(span)
.boxed()
}
}
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct WatchInput {
pub cargo_watch_options: Vec<String>,
}
impl IsWatchable for Wasm {
type Watcher = crate::project::Watcher<Self, Child>;
type WatchInput = WatchInput;
fn watch(
&self,
context: Context,
job: WatchTargetJob<Self>,
) -> BoxFuture<'static, Result<Self::Watcher>> {
let span = debug_span!("Watching WASM.", ?job).entered();
// The esbuild watcher must succeed in its first build, or it will prematurely exit.
// See the issue: https://github.com/evanw/esbuild/issues/1063
//
// Because of this, we run the first build of wasm manually rather than through cargo-watch.
// Once it completes, cargo-watch is spawned and this method yields the watcher.
// This forces the esbuild watcher (whose setup requires the watcher artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, a much neater workaround should be possible if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn | check | identifier_name |
|
wasm.rs | artifacts) to wait until
// all wasm build outputs are in place, so the build won't crash.
//
// In general, much neater workaround should be possible, if we stop relying on cargo-watch
// and do the WASM watch directly in the build script.
let first_build_job = self
.build(context.clone(), job.build.clone())
.instrument(debug_span!("Initial single build of WASM before setting up cargo-watch."));
async move {
let first_build_output = first_build_job.await?;
let WatchTargetJob {
watch_input: WatchInput { cargo_watch_options: cargo_watch_flags },
build: WithDestination { inner, destination },
} = job;
let BuildInput {
crate_path,
wasm_opt_options,
skip_wasm_opt,
extra_cargo_options,
profile,
profiling_level,
log_level,
uncollapsed_log_level,
wasm_size_limit,
system_shader_tools: _,
} = inner;
let current_exe = std::env::current_exe()?;
// cargo-watch apparently cannot handle verbatim path prefix. We remove it and hope for
// the best.
let current_exe = current_exe.without_verbatim_prefix();
let mut watch_cmd = Cargo.cmd()?;
let (watch_cmd_name, mut watch_cmd_opts) = match std::env::var("USE_CARGO_WATCH_PLUS") {
Ok(_) => ("watch-plus", vec!["--why"]),
Err(_) => ("watch", vec![]),
};
watch_cmd_opts.push("--ignore");
watch_cmd_opts.push("README.md");
watch_cmd
.kill_on_drop(true)
.current_dir(&context.repo_root)
.arg(watch_cmd_name)
.args(watch_cmd_opts)
.args(cargo_watch_flags)
.arg("--");
// === Build Script top-level options ===
watch_cmd
// TODO [mwu]
// This is not nice, as this module should not be aware of the CLI
// parsing/generation. Rather than using `cargo watch` this should
// be implemented directly in Rust.
.arg(current_exe)
.arg("--skip-version-check") // We already checked in the parent process.
.args(["--cache-path", context.cache.path().as_str()])
.args(["--upload-artifacts", context.upload_artifacts.to_string().as_str()])
.args(["--repo-path", context.repo_root.as_str()]);
// === Build Script command and its options ===
watch_cmd
.arg("wasm")
.arg("build")
.args(["--crate-path", crate_path.as_str()])
.args(["--wasm-output-path", destination.as_str()])
.args(["--wasm-profile", profile.as_ref()]);
if let Some(profiling_level) = profiling_level {
watch_cmd.args(["--profiling-level", profiling_level.to_string().as_str()]);
}
watch_cmd.args(["--wasm-log-level", log_level.to_string().as_str()]);
watch_cmd
.args(["--wasm-uncollapsed-log-level", uncollapsed_log_level.to_string().as_str()]);
for wasm_opt_option in wasm_opt_options {
watch_cmd.args(["--wasm-opt-option", &wasm_opt_option]);
}
if skip_wasm_opt {
watch_cmd.args(["--skip-wasm-opt"]);
}
if let Some(wasm_size_limit) = wasm_size_limit {
watch_cmd.args(["--wasm-size-limit", wasm_size_limit.to_string().as_str()]);
} else {
watch_cmd.args(["--wasm-size-limit", "0"]);
}
// === cargo-watch options ===
watch_cmd.arg("--").args(extra_cargo_options);
let watch_process = watch_cmd.spawn_intercepting()?;
let artifact = Artifact(RepoRootDistWasm::new_root(&destination));
ensure!(
artifact == first_build_output,
"First build output does not match general watch build output. First build output: \
{first_build_output:?}, general watch build output: {artifact:?}",
);
Ok(Self::Watcher { artifact, watch_process })
}
.instrument(span.exit())
.boxed()
}
}
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub struct Artifact(pub RepoRootDistWasm);
impl Artifact {
pub fn new(path: impl Into<PathBuf>) -> Self {
Self(RepoRootDistWasm::new_root(path))
}
/// The main JS bundle to load WASM and JS wasm-pack bundles.
pub fn ensogl_app(&self) -> &Path {
&self.0.index_js
}
/// Files that should be shipped in the Gui bundle.
pub fn files_to_ship(&self) -> Vec<&Path> {
// We explicitly deconstruct object, so when new fields are added, we will be forced to
// consider whether they should be shipped or not.
let RepoRootDistWasm {
path: _,
dynamic_assets,
index_js: _,
index_d_ts: _,
index_js_map: _,
pkg_js,
pkg_js_map,
pkg_wasm: _,
pkg_opt_wasm,
} = &self.0;
vec![
dynamic_assets.as_path(),
pkg_js.as_path(),
pkg_js_map.as_path(),
pkg_opt_wasm.as_path(),
]
}
pub fn symlink_ensogl_dist(&self, linked_dist: &RepoRootTargetEnsoglPackLinkedDist) -> Result {
ide_ci::fs::remove_symlink_dir_if_exists(linked_dist)?;
ide_ci::fs::symlink_auto(self, linked_dist)
}
}
impl AsRef<Path> for Artifact {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl IsArtifact for Artifact {}
impl Wasm {
pub async fn check(&self) -> Result {
Cargo
.cmd()?
.apply(&cargo::Command::Check)
.apply(&cargo::Options::Workspace)
.apply(&cargo::Options::Package(INTEGRATION_TESTS_CRATE_NAME.into()))
.apply(&cargo::Options::AllTargets)
.run_ok()
.await
}
pub async fn test(&self, repo_root: PathBuf, wasm: &[test::Browser], native: bool) -> Result {
async fn maybe_run<Fut: Future<Output = Result>>(
name: &str,
enabled: bool,
f: impl (FnOnce() -> Fut),
) -> Result {
if enabled {
info!("Will run {name} tests.");
f().await.context(format!("Running {name} tests."))
} else {
info!("Skipping {name} tests.");
Ok(())
}
}
maybe_run("native", native, async || {
Cargo
.cmd()?
.current_dir(repo_root.clone())
.apply(&cargo::Command::Test)
.apply(&cargo::Options::Workspace)
// Color needs to be passed to tests themselves separately.
// See: https://github.com/rust-lang/cargo/issues/1983
.arg("--")
.apply(&cargo::Color::Always)
.run_ok()
.await
})
.await?;
maybe_run("wasm", !wasm.is_empty(), || test::test_all(repo_root.clone(), wasm)).await?;
Ok(())
}
pub async fn integration_test(
&self,
source_root: PathBuf,
_project_manager: Option<Child>,
headless: bool,
additional_options: Vec<String>,
wasm_timeout: Option<Duration>,
) -> Result {
info!("Running Rust WASM test suite.");
use wasm_pack::TestFlags::*;
WasmPack
.cmd()?
.current_dir(source_root)
.set_env_opt(
env::WASM_BINDGEN_TEST_TIMEOUT,
wasm_timeout.map(|d| d.as_secs()).as_ref(),
)?
.test()
.apply_opt(headless.then_some(&Headless))
.apply(&test::BROWSER_FOR_WASM_TESTS)
.arg("integration-test")
.arg("--profile=integration-test")
.args(additional_options)
.run_ok()
.await
// PM will be automatically killed by dropping the handle.
}
/// Process "raw" WASM (as compiled) by optionally invoking wasm-opt.
pub async fn finalize_wasm(
wasm_opt_options: &[String],
skip_wasm_opt: bool,
profile: Profile,
temp_dist: &RepoRootDistWasm,
) -> Result {
let should_call_wasm_opt = {
if profile == Profile::Dev {
debug!("Skipping wasm-opt invocation, as it is not part of profile {profile}.");
false
} else if skip_wasm_opt {
debug!("Skipping wasm-opt invocation, as it was explicitly requested.");
false
} else {
true
}
};
if should_call_wasm_opt | {
let mut wasm_opt_command = WasmOpt.cmd()?;
let has_custom_opt_level = wasm_opt_options.iter().any(|opt| {
wasm_opt::OptimizationLevel::from_str(opt.trim_start_matches('-')).is_ok()
});
if !has_custom_opt_level {
wasm_opt_command.apply(&profile.optimization_level());
}
wasm_opt_command
.args(wasm_opt_options)
.arg(&temp_dist.pkg_wasm)
.apply(&wasm_opt::Output(&temp_dist.pkg_opt_wasm))
.run_ok()
.await?;
} | conditional_block |
|
server.rs | state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
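// Hedged usage sketch (not part of the original crate): `MyStore` and `MyStateMachine` are
// hypothetical placeholders for application-provided `Store` and `StateMachine`
// implementations, and the addresses are made up.
//
//     let addr: SocketAddr = "127.0.0.1:7000".parse().unwrap();
//     let peers: HashSet<SocketAddr> = ["127.0.0.1:7001", "127.0.0.1:7002"]
//         .iter()
//         .map(|s| s.parse().unwrap())
//         .collect();
//     Server::spawn(addr, peers, MyStore::new(), MyStateMachine::new());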
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::spawn()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The addresses of all peers in the Raft cluster.
/// * `store` - The persistent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Set up the socket and make it non-blocking.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and needs
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh its authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
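// Hedged note (illustration only): re-arming the election timer with a fresh random value in
// `[ELECTION_MIN, ELECTION_MAX)` on every expiry is the standard Raft trick for avoiding
// repeated split votes, while the heartbeat timer stays at the fixed HEARTBEAT_DURATION. The
// re-arming pattern in isolation (same old two-argument `gen_range` API as used above):
//
//     let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
//     event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();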
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn | <S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// This is a buffer flush. WOULDBLOCK
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one.
fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e | writable | identifier_name |
server.rs | state (which can be safely lost) and persistent
/// state (which must be carefully stored and kept safe).
///
/// Currently, the `Server` API is not well defined. **We are looking for feedback and suggestions.**
pub struct Server<S, M> where S: Store, M: StateMachine {
replica: Replica<S, M>,
// Channels and Sockets
listener: NonBlock<TcpListener>,
connections: Slab<Connection>,
}
/// The implementation of the Server. In most use cases, creating a `Server` should just be
/// done via `::spawn()`.
impl<S, M> Server<S, M> where S: Store, M: StateMachine {
/// Creates a new Raft node with the cluster members specified.
///
/// # Arguments
///
/// * `addr` - The address of the new node.
/// * `peers` - The addresses of all peers in the Raft cluster.
/// * `store` - The persistent log store.
/// * `state_machine` - The client state machine to which client commands will be applied.
pub fn spawn(addr: SocketAddr,
peers: HashSet<SocketAddr>,
store: S,
state_machine: M) {
debug!("Spawning Server");
// Create an event loop
let mut event_loop = EventLoop::<Server<S, M>>::new().unwrap();
// Setup the socket, make it not block.
let listener = listen(&addr).unwrap();
listener.set_reuseaddr(true).unwrap();
event_loop.register(&listener, LISTENER).unwrap();
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
event_loop.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
event_loop.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
let replica = Replica::new(addr, peers, store, state_machine);
// Fire up the thread.
thread::Builder::new().name(format!("Server {}", addr)).spawn(move || {
let mut raft_node = Server {
listener: listener,
replica: replica,
connections: Slab::new_starting_at(Token(2), 128),
};
event_loop.run(&mut raft_node).unwrap();
}).unwrap();
}
}
impl<S, M> Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and is going
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh its authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn writable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// This is a buffer flush. WOULDBLOCK
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one. | -> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => | fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>) | random_line_split |
server.rs | Handler for Server<S, M> where S: Store, M: StateMachine {
type Message = RingBuf;
type Timeout = Token;
/// A registered IoHandle has available writing space.
fn writable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Writeable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => unreachable!(),
tok => {
self.connections[tok].writable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered IoHandle has available data to read
fn readable(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token, _hint: ReadHint) {
debug!("Readable");
match token {
ELECTION_TIMEOUT => unreachable!(),
HEARTBEAT_TIMEOUT => unreachable!(),
LISTENER => {
let stream = match self.listener.accept().unwrap() {
Some(s) => s,
None => return, // Socket isn't quite ready.
}; // Result<Option<_>,_>
let conn = Connection::new(stream);
let tok = self.connections.insert(conn)
.ok().expect("Could not add connection to slab.");
// Register the connection
self.connections[tok].token = tok;
reactor.register_opt(&self.connections[tok].stream, tok, Interest::readable(), PollOpt::edge() | PollOpt::oneshot())
.ok().expect("Could not register socket with event loop.");
},
tok => {
self.connections[tok].readable(reactor, &mut self.replica).unwrap();
}
}
}
/// A registered timer has expired. This is either:
///
/// * An election timeout, when a `Follower` node has waited too long for a heartbeat and is going
/// to become a `Candidate`.
/// * A heartbeat timeout, when the `Leader` node needs to refresh its authority over the
/// followers. Initializes and sends an `AppendEntries` request to all followers.
fn timeout(&mut self, reactor: &mut EventLoop<Server<S, M>>, token: Token) {
debug!("Timeout");
let mut message = MallocMessageBuilder::new_default();
let mut send_message = None;
match token {
ELECTION_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.election_timeout(request.init_request_vote());
// Set timeout.
let timeout = rand::thread_rng().gen_range::<u64>(ELECTION_MIN, ELECTION_MAX);
reactor.timeout_ms(ELECTION_TIMEOUT, timeout).unwrap();
},
HEARTBEAT_TIMEOUT => {
let request = message.init_root::<rpc_request::Builder>();
send_message = self.replica.heartbeat_timeout(request.init_append_entries());
// Set Timeout
reactor.timeout_ms(HEARTBEAT_TIMEOUT, HEARTBEAT_DURATION).unwrap();
},
_ => unreachable!(),
}
// Send if necessary.
match send_message {
Some(Broadcast) => {
let mut buf = RingBuf::new(RINGBUF_SIZE);
serialize_packed::write_message(
&mut buf,
&mut message
).unwrap();
for connection in self.connections.iter_mut() {
connection.add_write(buf.clone());
}
},
None => (),
}
}
}
struct Connection {
stream: NonBlock<TcpStream>,
token: Token,
interest: Interest,
current_read: BufReader<RingBuf>,
current_write: BufReader<RingBuf>,
next_write: VecDeque<RingBuf>,
}
impl Connection {
/// Note: The caller must manually assign `token` to what is desired.
fn new(sock: NonBlock<TcpStream>) -> Connection {
Connection {
stream: sock,
token: Token(0), // Effectively a `null`. This needs to be assigned by the caller.
interest: Interest::hup(),
current_read: BufReader::new(RingBuf::new(4096)),
current_write: BufReader::new(RingBuf::new(4096)),
next_write: VecDeque::with_capacity(10),
}
}
/// A registered IoHandle has available writing space.
fn writable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
// Attempt to write data.
// The `current_write` buffer will be advanced based on how much we wrote.
match self.stream.write(self.current_write.get_mut()) {
Ok(None) => {
// This is a buffer flush. WOULDBLOCK
self.interest.insert(Interest::writable());
},
Ok(Some(r)) => {
// We managed to write data!
match (self.current_write.get_ref().has_remaining(), self.next_write.is_empty()) {
// Need to write more of what we have.
(true, _) => (),
// Need to roll over.
(false, false) => self.current_write = BufReader::new(self.next_write.pop_front().unwrap()),
// We're done writing for now.
(false, true) => self.interest.remove(Interest::writable()),
}
},
Err(e) => return Err(Error::from(e)),
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// A registered IoHandle has available data to read.
/// This does not necessarily mean that there is an entire packed item on the stream. We could
/// get some, all of it, or none. We'll use the buffer to read in until we can find one.
fn readable<S, M>(&mut self, event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
-> Result<()>
where S: Store, M: StateMachine {
let mut read = 0;
match self.stream.read(self.current_read.get_mut()) {
Ok(Some(r)) => {
// Just read `r` bytes.
read = r;
},
Ok(None) => panic!("We just got readable, but were unable to read from the socket?"),
Err(e) => return Err(Error::from(e)),
};
if read > 0 {
match serialize_packed::read_message(&mut self.current_read, ReaderOptions::new()) {
// We have something reasonably interesting in the buffer!
Ok(reader) => {
self.handle_reader(reader, event_loop, replica);
},
// It's not read entirely yet.
// Should roll back, pending changes to bytes upstream.
// TODO: This was fixed.
Err(_) => unimplemented!(),
}
}
match event_loop.reregister(&self.stream, self.token, self.interest, PollOpt::edge()) {
Ok(()) => Ok(()),
Err(e) => Err(Error::from(e)),
}
}
/// This is called when there is a full reader available in the buffer.
/// It handles what to do with the data.
fn handle_reader<S, M>(&mut self, reader: OwnedSpaceMessageReader,
event_loop: &mut EventLoop<Server<S, M>>, replica: &mut Replica<S,M>)
where S: Store, M: StateMachine {
let mut builder_message = MallocMessageBuilder::new_default();
let from = self.stream.peer_addr().unwrap();
if let Ok(request) = reader.get_root::<rpc_request::Reader>() {
match request.which().unwrap() {
// TODO: Move these into replica?
rpc_request::Which::AppendEntries(Ok(call)) => {
let builder = builder_message.init_root::<append_entries_response::Builder>();
match replica.append_entries_request(from, call, builder) {
Some(Emit) => {
// Special Circumstance Detection
unimplemented!();
},
None => (),
}
},
rpc_request::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<request_vote_response::Builder>();
replica.request_vote_request(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
_ => unimplemented!(),
};
} else if let Ok(response) = reader.get_root::<rpc_response::Reader>() {
// We won't be responding. This is already a response.
match response.which().unwrap() {
rpc_response::Which::AppendEntries(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.append_entries_response(from, call, builder)
};
match respond {
Some(Emit) => {
// TODO
self.emit(builder_message);
},
None => (),
}
},
rpc_response::Which::RequestVote(Ok(call)) => {
let respond = {
let builder = builder_message.init_root::<append_entries_request::Builder>();
replica.request_vote_response(from, call, builder)
};
match respond {
Some(Broadcast) => | {
// Won an election!
self.broadcast(builder_message);
} | conditional_block |
|
group.rs | 't contain central inversion.
fn quaternions(self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self |
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut | {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
} | identifier_body |
group.rs | mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<R: Dim, C: Dim> PartialEq for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn eq(&self, other: &Self) -> bool {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
}
impl<R: Dim, C: Dim> Eq for OrdMatrixMN<R, C> where VecStorage<f64, R, C>: Storage<f64, R, C> {}
impl<R: Dim, C: Dim> PartialOrd for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
let mut other = other.iter();
for x in self.iter() {
let y = other.next().unwrap();
if abs_diff_ne!(x, y, epsilon = EPS) {
return x.partial_cmp(y);
}
}
Some(std::cmp::Ordering::Equal)
}
}
impl<R: Dim, C: Dim> Ord for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.partial_cmp(other).unwrap()
}
}
impl<R: Dim, C: Dim> OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
pub fn new(mat: MatrixMN<R, C>) -> Self {
Self(mat)
}
}
type OrdMatrix = OrdMatrixMN<Dynamic, Dynamic>;
type OrdPoint = OrdMatrixMN<Dynamic, U1>;
/// An iterator for a `Group` [generated](https://en.wikipedia.org/wiki/Generator_(mathematics))
/// by a set of floating point matrices. Its elements are built in a BFS order.
/// It contains a lookup table, used to figure out whether an element has
/// already been found or not, as well as a queue to store the next elements.
#[derive(Clone)]
pub struct GenIter {
/// The number of dimensions the group acts on.
pub dim: usize,
/// The generators for the group.
pub gens: Vec<Matrix<f64>>,
/// Stores the elements that have been generated and that can still be
/// generated again. Is integral for the algorithm to work, as without it,
/// duplicate group elements will just keep generating forever.
elements: BTreeMap<OrdMatrix, usize>,
/// Stores the elements that haven't yet been processed.
queue: VecDeque<OrdMatrix>,
/// Stores the index in [`generators`](GenGroup.generators) of the generator
/// that's being checked. All previous ones will have already been
/// multiplied to the right of the current element. Quirk of the current
/// data structure, subject to change.
gen_idx: usize,
}
impl Iterator for GenIter {
type Item = Matrix<f64>;
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.try_next() {
GroupNext::None => return None,
GroupNext::Repeat => {}
GroupNext::New(el) => return Some(el),
};
}
}
}
/// Determines whether two matrices are "approximately equal" elementwise.
fn matrix_approx(mat1: &Matrix<f64>, mat2: &Matrix<f64>) -> bool {
const EPS: f64 = 1e-4;
let mat1 = mat1.iter();
let mut mat2 = mat2.iter();
for x in mat1 {
let y = mat2.next().expect("Matrices don't have the same size!");
if abs_diff_ne!(x, y, epsilon = EPS) {
return false;
}
}
true
}
/// Builds a reflection matrix from a given vector.
pub fn refl_mat(n: Vector<f64>) -> Matrix<f64> {
let dim = n.nrows(); | random_line_split |
||
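(Aside, not part of the dataset row above: `refl_mat` is cut off at this split, but the reflection matrix it presumably builds for a normal vector $n$ is the standard Householder form

$$R \;=\; I \;-\; 2\,\frac{n\,n^{\mathsf T}}{n^{\mathsf T} n},$$

which fixes the hyperplane orthogonal to $n$ and negates the component along $n$.)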
group.rs | don't contain central inversion.
fn | (self, left: bool) -> Box<dyn GroupIter> {
if self.dim != 3 {
panic!("Quaternions can only be generated from 3D matrices.");
}
Box::new(
self.rotations()
.map(move |el| quat_to_mat(mat_to_quat(el), left)),
)
}
/// Returns the swirl symmetry group of two 3D groups.
pub fn swirl(g: Self, h: Self) -> Self {
if g.dim != 3 {
panic!("g must be a group of 3D matrices.");
}
if h.dim != 3 {
panic!("h must be a group of 3D matrices.");
}
Self {
dim: 4,
iter: Box::new(
itertools::iproduct!(g.quaternions(true), h.quaternions(false))
.map(|(mat1, mat2)| {
let mat = mat1 * mat2;
std::iter::once(mat.clone()).chain(std::iter::once(-mat))
})
.flatten(),
),
}
}
/// Returns a new group whose elements have all been generated already, so
/// that they can be used multiple times quickly.
pub fn cache(self) -> Self {
self.elements().into()
}
/// Returns the exact same group, but now asserts that each generated
/// element has the appropriate dimension. Used for debugging purposes.
pub fn debug(self) -> Self {
let dim = self.dim;
Self {
dim,
iter: Box::new(self.map(move |x| {
let msg = "Size of matrix does not match expected dimension.";
assert_eq!(x.nrows(), dim, "{}", msg);
assert_eq!(x.ncols(), dim, "{}", msg);
x
})),
}
}
/// Generates the trivial group of a certain dimension.
pub fn trivial(dim: usize) -> Self {
Self {
dim,
iter: Box::new(std::iter::once(Matrix::identity(dim, dim))),
}
}
/// Generates the group with the identity and a central inversion of a
/// certain dimension.
pub fn central_inv(dim: usize) -> Self {
Self {
dim,
iter: Box::new(
vec![Matrix::identity(dim, dim), -Matrix::identity(dim, dim)].into_iter(),
),
}
}
/// Generates a step prism group from a base group and a homomorphism into
/// another group.
pub fn step(g: Self, f: impl Fn(Matrix<f64>) -> Matrix<f64> + Clone + 'static) -> Self {
let dim = g.dim * 2;
Self {
dim,
iter: Box::new(g.map(move |mat| {
let clone = mat.clone();
direct_sum(clone, f(mat))
})),
}
}
/// Generates a Coxeter group from its [`CoxMatrix`], or returns `None` if
/// the group doesn't fit as a matrix group in spherical space.
pub fn cox_group(cox: CoxMatrix) -> Option<Self> {
Some(Self {
dim: cox.nrows(),
iter: Box::new(GenIter::from_cox_mat(cox)?),
})
}
/// Generates the direct product of two groups. Uses the specified function
/// to uniquely map the ordered pairs of matrices into other matrices.
pub fn fn_product(
g: Self,
h: Self,
dim: usize,
product: (impl Fn((Matrix<f64>, Matrix<f64>)) -> Matrix<f64> + Clone + 'static),
) -> Self {
Self {
dim,
iter: Box::new(itertools::iproduct!(g, h).map(product)),
}
}
/// Returns the group determined by all products between elements of the
/// first and the second group. **Is meant only for groups that commute with
/// one another.**
pub fn matrix_product(g: Self, h: Self) -> Option<Self> {
// The two matrices must have the same size.
if g.dim != h.dim {
return None;
}
let dim = g.dim;
Some(Self::fn_product(g, h, dim, |(mat1, mat2)| mat1 * mat2))
}
/// Calculates the direct product of two groups. Pairs of matrices are then
/// mapped to their direct sum.
pub fn direct_product(g: Self, h: Self) -> Self {
let dim = g.dim + h.dim;
Self::fn_product(g, h, dim, |(mat1, mat2)| direct_sum(mat1, mat2))
}
/// Generates the [wreath product](https://en.wikipedia.org/wiki/Wreath_product)
/// of two symmetry groups.
pub fn wreath(g: Self, h: Self) -> Self {
let h = h.elements();
let h_len = h.len();
let g_dim = g.dim;
let dim = g_dim * h_len;
// Indexes each element in h.
let mut h_indices = BTreeMap::new();
for (i, h_el) in h.iter().enumerate() {
h_indices.insert(OrdMatrix::new(h_el.clone()), i);
}
// Converts h into a permutation group.
let mut permutations = Vec::with_capacity(h_len);
for h_el_1 in &h {
let mut perm = Vec::with_capacity(h.len());
for h_el_2 in &h {
perm.push(
*h_indices
.get(&OrdMatrix::new(h_el_1 * h_el_2))
.expect("h is not a valid group!"),
);
}
permutations.push(perm);
}
// Computes the direct product of g with itself |h| times.
let g_prod = vec![&g; h_len - 1]
.into_iter()
.cloned()
.fold(g.clone(), |acc, g| Group::direct_product(g, acc));
Self {
dim,
iter: Box::new(
g_prod
.map(move |g_el| {
let mut matrices = Vec::new();
for perm in &permutations {
let mut new_el = Matrix::zeros(dim, dim);
// Permutes the blocks on the diagonal of g_el.
for (i, &j) in perm.iter().enumerate() {
for x in 0..g_dim {
for y in 0..g_dim {
new_el[(i * g_dim + x, j * g_dim + y)] =
g_el[(i * g_dim + x, i * g_dim + y)];
}
}
}
matrices.push(new_el);
}
matrices.into_iter()
})
.flatten(),
),
}
}
/// Generates the orbit of a point under a given symmetry group.
pub fn orbit(self, p: Point) -> Vec<Point> {
let mut points = BTreeSet::new();
for m in self {
points.insert(OrdPoint::new(m * &p));
}
points.into_iter().map(|x| x.0).collect()
}
/// Generates a polytope as the convex hull of the orbit of a point under a
/// given symmetry group.
pub fn into_polytope(self, p: Point) -> Concrete {
convex::convex_hull(self.orbit(p))
}
}
impl From<Vec<Matrix<f64>>> for Group {
fn from(elements: Vec<Matrix<f64>>) -> Self {
Self {
dim: elements
.get(0)
.expect("Group must have at least one element.")
.nrows(),
iter: Box::new(elements.into_iter()),
}
}
}
/// The result of trying to get the next element in a group.
pub enum GroupNext {
/// We've already found all elements of the group.
None,
/// We found an element we had found previously.
Repeat,
/// We found a new element.
New(Matrix<f64>),
}
#[allow(clippy::upper_case_acronyms)]
type MatrixMN<R, C> = nalgebra::Matrix<f64, R, C, VecStorage<f64, R, C>>;
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
/// A matrix ordered by fuzzy lexicographic ordering. Used to quickly
/// determine whether an element in a [`GenIter`](GenIter) is a
/// duplicate.
pub struct OrdMatrixMN<R: Dim, C: Dim>(pub MatrixMN<R, C>)
where
VecStorage<f64, R, C>: Storage<f64, R, C>;
impl<R: Dim, C: Dim> std::ops::Deref for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
type Target = MatrixMN<R, C>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<R: Dim, C: Dim> std::ops::DerefMut for OrdMatrixMN<R, C>
where
VecStorage<f64, R, C>: Storage<f64, R, C>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut | quaternions | identifier_name |
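(Aside, not from the source files: `direct_sum`, which `direct_product` maps pairs of elements through, is assumed to be the usual block-diagonal construction

$$A \oplus B \;=\; \begin{pmatrix} A & 0 \\ 0 & B \end{pmatrix},$$

so a group acting on $\mathbb{R}^m$ and one acting on $\mathbb{R}^n$ give a product acting block-wise on $\mathbb{R}^{m+n}$; the wreath product shown above then additionally permutes those diagonal blocks.)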
connection.py | API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
}
self._websession = websession
self._authenticated = False
self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def reset_session(self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)
async def _authenticate(self, vin=None) -> bool:
| self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.")
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning | """Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]] | identifier_body |
connection.py | API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
}
self._websession = websession
self._authenticated = False
self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def reset_session(self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
|
async def _authenticate(self, vin=None) -> bool:
"""Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]]
self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.")
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning | return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data) | conditional_block |
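(Illustration only, not taken from the dataset: a minimal sketch of how a caller might drive the `Connection` class shown in these rows. The credential values, the `"vin"` key on the returned vehicle dicts, and the choice of contact method are assumptions.)

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as websession:
        conn = Connection(
            websession,
            username="user@example.com",      # hypothetical account
            password="example-password",      # hypothetical password
            device_id="1234567890",           # hypothetical device id
            device_name="subarulink example",
            country="USA",
        )
        # connect() authenticates and returns the account's vehicles.
        vehicles = await conn.connect()
        if not conn.device_registered:
            # 2FA flow: request a code via the first offered contact method, then submit it.
            method = next(iter(conn.auth_contact_methods))
            await conn.request_auth_code(method)
            await conn.submit_auth_code(input("2FA code: "))
        # Set the server-side vehicle context before issuing queries.
        vin = vehicles[0]["vin"]  # assumed key; connect() returns a list of vehicle dicts
        await conn.validate_session(vin)

asyncio.run(main())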
connection.py | API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
}
self._websession = websession
self._authenticated = False
self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def | (self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)
async def _authenticate(self, vin=None) -> bool:
"""Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]]
self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.")
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning | reset_session | identifier_name |
connection.py | app API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
country,
) -> None:
"""
Initialize connection object.
Args:
websession (aiohttp.ClientSession): An instance of aiohttp.ClientSession.
username (str): Username used for the MySubaru mobile app.
password (str): Password used for the MySubaru mobile app.
device_id (str): Alphanumeric designator that Subaru API uses to track individual device authorization.
device_name (str): Human friendly name that is associated with `device_id` (shows on mysubaru.com profile "devices").
country (str): Country of MySubaru Account [CAN, USA].
"""
self._username = username
self._password = password
self._device_id = device_id
self._country = country
self._lock = asyncio.Lock()
self._device_name = device_name
self._vehicles = []
self._head = {
"User-Agent": "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36",
"Origin": "file://",
"X-Requested-With": API_MOBILE_APP[self._country],
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate", | self._registered = False
self._current_vin = None
self._list_of_vins = []
self._session_login_time = None
self._auth_contact_options = None
async def connect(self):
"""
Connect to and establish session with Subaru Starlink mobile app API.
Returns:
List: A list of dicts containing information about each vehicle registered in the Subaru account.
Raises:
InvalidCredentials: If login credentials are incorrect.
IncompleteCredentials: If login credentials were not provided.
SubaruException: If login fails for any other reason.
"""
await self._authenticate()
await self._get_vehicle_data()
if not self.device_registered:
await self._get_contact_methods()
return self._vehicles
@property
def device_registered(self):
"""Device is registered."""
return self._registered
@property
def auth_contact_methods(self):
"""Contact methods for 2FA."""
return self._auth_contact_options
async def request_auth_code(self, contact_method):
"""Request 2FA code be sent via specified contact method."""
if contact_method not in self.auth_contact_methods:
_LOGGER.error("Invalid 2FA contact method requested")
return False
_LOGGER.debug("Requesting 2FA code")
post_data = {
"contactMethod": contact_method,
"languagePreference": "EN",
}
js_resp = await self.__open(
API_2FA_SEND_VERIFICATION,
POST,
params=post_data,
)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
return True
async def submit_auth_code(self, code, make_permanent=True):
"""Submit received 2FA code for validation."""
if not code.isdecimal() or len(code) != 6:
_LOGGER.error("2FA code must be 6 digits")
return False
_LOGGER.info("Validating 2FA response")
post_data = {
"deviceId": self._device_id,
"deviceName": self._device_name,
"verificationCode": code,
}
if make_permanent:
post_data["rememberDevice"] = "on"
js_resp = await self.__open(API_2FA_AUTH_VERIFY, POST, params=post_data)
if js_resp:
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
_LOGGER.info("Device successfully authorized")
while not self._registered:
# Device registration does not always immediately take effect
await asyncio.sleep(3)
await self._authenticate()
# Current server side vin context is ambiguous (even for single vehicle account??)
self._current_vin = None
return True
async def validate_session(self, vin):
"""
Validate if current session is ready for an API command/query.
Verifies session cookie is still valid and re-authenticates if necessary.
Sets server-side vehicle context as needed.
Args:
vin (str): VIN of desired server-side vehicle context.
Returns:
bool: `True` if session is ready to send a command or query to the Subaru API with the desired `vin` context.
Raises:
SubaruException: If validation fails and a new session fails to be established.
"""
result = False
js_resp = await self.__open(API_VALIDATE_SESSION, GET)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self._current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
if result is False:
await self._authenticate(vin)
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
return result
def get_session_age(self):
"""Return number of minutes since last authentication."""
return (time.time() - self._session_login_time) // 60
def reset_session(self):
"""Clear session cookies."""
self._websession.cookie_jar.clear()
async def get(self, url, params=None):
"""
Send HTTPS GET request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP GET request parameters
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=GET, headers=self._head, params=params)
async def post(self, url, params=None, json_data=None):
"""
Send HTTPS POST request to Subaru Remote Services API.
Args:
url (str): URL path that will be concatenated after `subarulink.const.MOBILE_API_BASE_URL`
params (Dict, optional): HTTP POST request parameters
json_data (Dict, optional): HTTP POST request JSON data as a Dict
Returns:
Dict: JSON response as a Dict
Raises:
SubaruException: If request fails.
"""
if self._authenticated:
return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)
async def _authenticate(self, vin=None) -> bool:
"""Authenticate to Subaru Remote Services API."""
if self._username and self._password and self._device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self._username,
"password": self._password,
"deviceId": self._device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
js_resp = await self.__open(API_LOGIN, POST, data=post_data, headers=self._head)
if js_resp.get("success"):
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(js_resp))
self._authenticated = True
self._session_login_time = time.time()
self._registered = js_resp["data"]["deviceRegistered"]
self._list_of_vins = [v["vin"] for v in js_resp["data"]["vehicles"]]
self._current_vin = None
return True
if js_resp.get("errorCode"):
_LOGGER.debug(pprint.pformat(js_resp))
error = js_resp.get("errorCode")
if error == API_ERROR_INVALID_ACCOUNT:
_LOGGER.error("Invalid account")
raise InvalidCredentials(error)
if error == API_ERROR_INVALID_CREDENTIALS:
_LOGGER.error("Client authentication failed")
raise InvalidCredentials(error)
if error == API_ERROR_PASSWORD_WARNING:
_LOGGER.error("Multiple Password Failures.")
raise InvalidCredentials(error)
raise SubaruException(error)
raise IncompleteCredentials("Connection requires email and password and device id.")
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {"vin": vin, "_": int(time.time())}
js_resp = await self.get(API_SELECT_VEHICLE, params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp.get("success"):
self._current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
if not js_resp.get("success") and js_resp.get("errorCode") == API_ERROR_VEHICLE_SETUP:
# Occasionally happens every few hours. Resetting the session seems to deal with it.
_LOGGER.warning(" | "Accept": "*/*",
}
self._websession = websession
self._authenticated = False | random_line_split |
main.go | os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor := queue.NewPushEventProcessor(
changeFetcher,
emitter,
clk,
traceClient,
)
signatureChecker := queue.NewSignatureCheck(crypto.NewRSAVerifier(publicKey), emitter, pushEventProcessor)
members = append(members, grouper.Member{
Name: "github-hint-handler",
Runner: queue.NewPubSubSubscriber(logger, subscription, signatureChecker, emitter),
})
if cfg.GitHub.AccessToken != "" {
repoDiscoverer := revok.NewRepoDiscoverer(
logger,
workdir,
cloneMsgCh,
ghClient,
clk,
cfg.RepositoryDiscovery.Interval,
cfg.RepositoryDiscovery.Organizations,
cfg.RepositoryDiscovery.Users,
repositoryRepository,
)
members = append(members, grouper.Member{
Name: "repo-discoverer",
Runner: repoDiscoverer,
})
}
startupTasks := []grouper.Member{
{
Name: "schedule-fetches",
Runner: changeScheduler,
},
}
system := []grouper.Member{
{
Name: "servers",
Runner: grouper.NewParallel(os.Interrupt, members),
},
{
Name: "startup-tasks",
Runner: grouper.NewParallel(os.Interrupt, startupTasks),
},
}
runner := sigmon.New(grouper.NewOrdered(os.Interrupt, system))
err = <-ifrit.Invoke(runner).Wait()
if err != nil {
log.Fatalf("failed-to-start: %s", err)
}
}
func loadCerts(certificatePath, privateKeyPath, privateKeyPassphrase, caCertificatePath string) (tls.Certificate, *x509.CertPool) {
certificate, err := config.LoadCertificateFromFiles(
certificatePath,
privateKeyPath,
privateKeyPassphrase,
)
if err != nil {
log.Fatalln(err)
}
caCertPool, err := config.LoadCertificatePoolFromFiles(caCertificatePath)
if err != nil {
log.Fatalln(err)
}
return certificate, caCertPool
}
func | keepAliveDial | identifier_name |
|
main.go | "github.com/tedsuo/ifrit/sigmon"
"cred-alert/config"
"cred-alert/crypto"
"cred-alert/db"
"cred-alert/db/migrations"
"cred-alert/gitclient"
"cred-alert/metrics"
"cred-alert/notifications"
"cred-alert/queue"
"cred-alert/revok"
"cred-alert/revok/api"
"cred-alert/revok/stats"
"cred-alert/revokpb"
"cred-alert/search"
"cred-alert/sniff"
"rolodex/rolodexpb"
)
var info = admin.ServiceInfo{
Name: "revok",
Description: "A service which fetches new Git commits and scans them for credentials.",
Team: "PCF Security Enablement",
}
func main() {
var cfg *config.WorkerConfig
var flagOpts config.WorkerOpts
var ghClient *revok.GitHubClient
logger := lager.NewLogger("revok-worker")
logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
logger.Info("starting")
_, err := flags.Parse(&flagOpts)
if err != nil |
bs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))
if err != nil {
logger.Error("failed-to-open-config-file", err)
os.Exit(1)
}
cfg, err = config.LoadWorkerConfig(bs)
if err != nil {
logger.Error("failed-to-load-config-file", err)
os.Exit(1)
}
errs := cfg.Validate()
if errs != nil {
for _, err := range errs {
fmt.Println(err.Error())
}
os.Exit(1)
}
if cfg.Metrics.SentryDSN != "" {
logger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))
}
if cfg.Metrics.HoneycombWriteKey != "" && cfg.Metrics.Environment != "" {
s := honeylager.NewSink(cfg.Metrics.HoneycombWriteKey, cfg.Metrics.Environment, lager.DEBUG)
defer s.Close()
logger.RegisterSink(s)
}
workdir := cfg.WorkDir
_, err = os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor | {
os.Exit(1)
} | conditional_block |
main.go | "github.com/tedsuo/ifrit/sigmon"
"cred-alert/config"
"cred-alert/crypto"
"cred-alert/db"
"cred-alert/db/migrations"
"cred-alert/gitclient"
"cred-alert/metrics"
"cred-alert/notifications"
"cred-alert/queue"
"cred-alert/revok"
"cred-alert/revok/api"
"cred-alert/revok/stats"
"cred-alert/revokpb"
"cred-alert/search"
"cred-alert/sniff"
"rolodex/rolodexpb"
)
var info = admin.ServiceInfo{
Name: "revok",
Description: "A service which fetches new Git commits and scans them for credentials.",
Team: "PCF Security Enablement",
}
func main() |
cfg, err = config.LoadWorkerConfig(bs)
if err != nil {
logger.Error("failed-to-load-config-file", err)
os.Exit(1)
}
errs := cfg.Validate()
if errs != nil {
for _, err := range errs {
fmt.Println(err.Error())
}
os.Exit(1)
}
if cfg.Metrics.SentryDSN != "" {
logger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))
}
if cfg.Metrics.HoneycombWriteKey != "" && cfg.Metrics.Environment != "" {
s := honeylager.NewSink(cfg.Metrics.HoneycombWriteKey, cfg.Metrics.Environment, lager.DEBUG)
defer s.Close()
logger.RegisterSink(s)
}
workdir := cfg.WorkDir
_, err = os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor | {
var cfg *config.WorkerConfig
var flagOpts config.WorkerOpts
var ghClient *revok.GitHubClient
logger := lager.NewLogger("revok-worker")
logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
logger.Info("starting")
_, err := flags.Parse(&flagOpts)
if err != nil {
os.Exit(1)
}
bs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))
if err != nil {
logger.Error("failed-to-open-config-file", err)
os.Exit(1)
} | identifier_body |
main.go | "cred-alert/db"
"cred-alert/db/migrations"
"cred-alert/gitclient"
"cred-alert/metrics"
"cred-alert/notifications"
"cred-alert/queue"
"cred-alert/revok"
"cred-alert/revok/api"
"cred-alert/revok/stats"
"cred-alert/revokpb"
"cred-alert/search"
"cred-alert/sniff"
"rolodex/rolodexpb"
)
var info = admin.ServiceInfo{
Name: "revok",
Description: "A service which fetches new Git commits and scans them for credentials.",
Team: "PCF Security Enablement",
}
func main() {
var cfg *config.WorkerConfig
var flagOpts config.WorkerOpts
var ghClient *revok.GitHubClient
logger := lager.NewLogger("revok-worker")
logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))
logger.Info("starting")
_, err := flags.Parse(&flagOpts)
if err != nil {
os.Exit(1)
}
bs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))
if err != nil {
logger.Error("failed-to-open-config-file", err)
os.Exit(1)
}
cfg, err = config.LoadWorkerConfig(bs)
if err != nil {
logger.Error("failed-to-load-config-file", err)
os.Exit(1)
}
errs := cfg.Validate()
if errs != nil {
for _, err := range errs {
fmt.Println(err.Error())
}
os.Exit(1)
}
if cfg.Metrics.SentryDSN != "" {
logger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))
}
if cfg.Metrics.HoneycombWriteKey != "" && cfg.Metrics.Environment != "" {
s := honeylager.NewSink(cfg.Metrics.HoneycombWriteKey, cfg.Metrics.Environment, lager.DEBUG)
defer s.Close()
logger.RegisterSink(s)
}
workdir := cfg.WorkDir
_, err = os.Lstat(workdir)
if err != nil {
log.Fatalf("workdir error: %s", err)
}
dbCertificate, dbCaCertPool := loadCerts(
cfg.MySQL.CertificatePath,
cfg.MySQL.PrivateKeyPath,
cfg.MySQL.PrivateKeyPassphrase,
cfg.MySQL.CACertificatePath,
)
dbURI := db.NewDSN(
cfg.MySQL.Username,
cfg.MySQL.Password,
cfg.MySQL.DBName,
cfg.MySQL.Hostname,
int(cfg.MySQL.Port),
cfg.MySQL.ServerName,
dbCertificate,
dbCaCertPool,
)
database, err := migrations.LockDBAndMigrate(logger, "mysql", dbURI)
if err != nil {
log.Fatalf("db error: %s", err)
}
database.LogMode(false)
clk := clock.NewClock()
cloneMsgCh := make(chan revok.CloneMsg)
scanRepository := db.NewScanRepository(database, clk)
repositoryRepository := db.NewRepositoryRepository(database)
fetchRepository := db.NewFetchRepository(database)
credentialRepository := db.NewCredentialRepository(database)
branchRepository := db.NewBranchRepository(database)
emitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)
gitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath, cfg.GitPath)
repoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)
formatter := notifications.NewSlackNotificationFormatter()
traceClient, err := trace.NewClient(context.Background(), cfg.Trace.ProjectName)
if err != nil {
logger.Error("failed-to-create-trace-client", err)
}
slackHTTPClient := &http.Client{
Timeout: 5 * time.Second,
}
notifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)
certificate, caCertPool := loadCerts(
cfg.Identity.CertificatePath,
cfg.Identity.PrivateKeyPath,
cfg.Identity.PrivateKeyPassphrase,
cfg.Identity.CACertificatePath,
)
rolodexServerAddr := fmt.Sprintf("%s:%d", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)
tlsConfig := tlsconfig.Build(
tlsconfig.WithInternalServiceDefaults(),
tlsconfig.WithIdentity(certificate),
)
transportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))
conn, err := grpc.Dial(
rolodexServerAddr,
grpc.WithDialer(keepAliveDial),
grpc.WithTransportCredentials(transportCreds),
grpc.WithUnaryInterceptor(traceClient.GRPCClientInterceptor()),
)
if err != nil {
log.Fatalf("failed to connect to rolodex: %s", err)
}
rolodexClient := rolodexpb.NewRolodexClient(conn)
teamURLs := notifications.NewTeamURLs(
cfg.Slack.DefaultURL,
cfg.Slack.DefaultPublicChannel,
cfg.Slack.DefaultPrivateChannel,
cfg.Slack.TeamURLs,
)
addressBook := notifications.NewRolodex(
rolodexClient,
teamURLs,
)
router := notifications.NewRouter(
notifier,
addressBook,
repoWhitelist,
)
sniffer := sniff.NewDefaultSniffer()
scanner := revok.NewScanner(
gitClient,
repositoryRepository,
scanRepository,
credentialRepository,
sniffer,
)
notificationComposer := revok.NewNotificationComposer(
repositoryRepository,
router,
scanner,
)
if cfg.GitHub.AccessToken != "" {
githubHTTPClient := &http.Client{
Timeout: 30 * time.Second,
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},
),
Base: &http.Transport{
DisableKeepAlives: true,
},
},
}
gh := github.NewClient(githubHTTPClient)
ghClient = revok.NewGitHubClient(gh.Repositories)
}
changeFetcher := revok.NewChangeFetcher(
logger,
gitClient,
ghClient,
notificationComposer,
repositoryRepository,
fetchRepository,
emitter,
)
changeScheduleRunner := revok.NewScheduleRunner(logger)
changeScheduler := revok.NewChangeScheduler(
logger,
repositoryRepository,
changeScheduleRunner,
changeFetcher,
)
cloner := revok.NewCloner(
logger,
workdir,
cloneMsgCh,
gitClient,
repositoryRepository,
notificationComposer,
emitter,
changeScheduler,
)
dirscanUpdater := revok.NewRescanner(
logger,
scanRepository,
credentialRepository,
scanner,
router,
emitter,
)
statsReporter := stats.NewReporter(
logger,
clk,
60*time.Second,
db.NewStatsRepository(database),
emitter,
)
headCredentialCounter := revok.NewHeadCredentialCounter(
logger,
branchRepository,
repositoryRepository,
clk,
cfg.CredentialCounterInterval,
gitClient,
sniffer,
)
gitGCRunner := revok.NewGitGCRunner(logger, clk, repositoryRepository, gitClient, 24*time.Hour)
debug := admin.Runner(
"6060",
admin.WithInfo(info),
admin.WithUptime(),
)
members := []grouper.Member{
{Name: "cloner", Runner: cloner},
{Name: "dirscan-updater", Runner: dirscanUpdater},
{Name: "stats-reporter", Runner: statsReporter},
{Name: "head-credential-counter", Runner: headCredentialCounter},
{Name: "change-schedule-runner", Runner: changeScheduleRunner},
{Name: "git-gc-runner", Runner: gitGCRunner},
{Name: "debug", Runner: debug},
}
looper := gitclient.NewLooper()
searcher := search.NewSearcher(repositoryRepository, looper)
fileLookup := gitclient.NewFileLookup()
blobSearcher := search.NewBlobSearcher(repositoryRepository, fileLookup)
handler := api.NewSearchServer(logger, searcher, blobSearcher)
serverTLS := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))
grpcServer := grpcrunner.New(
logger,
fmt.Sprintf("127.0.0.1:%d", cfg.API.BindPort),
func(server *grpc.Server) {
revokpb.RegisterRevokSearchServer(server, handler)
},
grpc.Creds(credentials.NewTLS(serverTLS)),
)
members = append(members, grouper.Member{
Name: "grpc-server",
Runner: grpcServer,
})
pubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
subscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)
publicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)
if err != nil {
logger.Fatal("failed", err)
os.Exit(1)
}
pushEventProcessor := queue.NewPushEventProcessor(
changeFetcher,
emitter,
clk,
traceClient,
) | random_line_split |
||
task.rs | currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmute a u64 to a Task
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmute a u64 to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
// Attempt to transition from Running -> Idle, if successful,
// then the task does not need to be scheduled again. If the CAS
// fails, then the task has been unparked concurrent to running,
// in which case it transitions immediately back to scheduled
// and we return `true`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn | (&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {}
// ===== impl Inner =====
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture | fmt | identifier_name |
task.rs | drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
// Attempt to transition from Running -> Idle, if successful,
// then the task does not need to be scheduled again. If the CAS
// fails, then the task has been unparked concurrent to running,
// in which case it transitions immediately back to scheduled
// and we return `true`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {}
// ===== impl Inner =====
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture::Futures1(executor::spawn(Box::new(future::empty())))),
}
}
fn drop_future(&mut self) {
let _ = self.future.take();
}
}
impl Drop for Inner {
fn drop(&mut self) {
self.drop_future();
}
}
impl fmt::Debug for Inner {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Inner")
.field("next", &self.next)
.field("state", &self.state)
.field("ref_count", &self.ref_count)
.field("future", &"Spawn<BoxFuture>")
.finish()
}
}
// ===== impl Queue =====
impl Queue {
pub fn new() -> Queue {
let stub = Box::new(Inner::stub());
let ptr = &*stub as *const _ as *mut _;
Queue {
head: AtomicPtr::new(ptr),
tail: Cell::new(ptr),
stub: stub,
}
}
pub fn push(&self, handle: Task) {
unsafe {
self.push2(handle.ptr);
// Forgetting the handle is necessary to avoid the ref dec
mem::forget(handle);
}
}
unsafe fn push2(&self, handle: *mut Inner) {
// Set the next pointer. This does not require an atomic operation as
// this node is not accessible. The write will be flushed with the next
// operation
(*handle).next = AtomicPtr::new(ptr::null_mut());
// Update the head to point to the new node. We need to see the previous
// node in order to update the next pointer as well as release `handle`
// to any other threads calling `push`.
let prev = self.head.swap(handle, AcqRel);
// Release `handle` to the consume end.
(*prev).next.store(handle, Release);
}
pub unsafe fn poll(&self) -> Poll {
let mut tail = self.tail.get();
let mut next = (*tail).next.load(Acquire);
let stub = &*self.stub as *const _ as *mut _;
if tail == stub {
if next.is_null() {
return Poll::Empty;
}
self.tail.set(next);
tail = next;
next = (*next).next.load(Acquire);
}
if !next.is_null() {
self.tail.set(next);
// No ref_count inc is necessary here as this poll is paired
// with a `push` which "forgets" the handle.
return Poll::Data(Task {
ptr: tail,
});
}
if self.head.load(Acquire) != tail {
return Poll::Inconsistent;
}
self.push2(stub);
next = (*tail).next.load(Acquire);
if !next.is_null() {
self.tail.set(next);
return Poll::Data(Task {
ptr: tail,
});
}
Poll::Inconsistent
}
}
// ===== impl State =====
impl State {
/// Returns the initial task state.
///
/// Tasks start in the scheduled state as they are immediately scheduled on
/// creation.
fn new() -> State {
State::Scheduled
}
fn stub() -> State | {
State::Idle
} | identifier_body |
|
task.rs | currently running, but has been notified that it must run again.
Notified,
/// Task has been scheduled
Scheduled,
/// Task is complete
Complete,
}
// ===== impl Task =====
impl Task {
/// Create a new task handle
pub fn new(future: BoxFuture) -> Task {
let task_fut = TaskFuture::Futures1(executor::spawn(future));
let inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: Some(task_fut),
});
Task { ptr: Box::into_raw(inner) }
}
/// Create a new task handle for a futures 0.2 future
#[cfg(feature = "unstable-futures")]
pub fn new2<F>(fut: BoxFuture2, make_waker: F) -> Task
where F: FnOnce(usize) -> futures2::task::Waker
{
let mut inner = Box::new(Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::new().into()),
ref_count: AtomicUsize::new(1),
future: None,
});
let waker = make_waker((&*inner) as *const _ as usize);
let tls = futures2::task::LocalMap::new();
inner.future = Some(TaskFuture::Futures2 { waker, tls, fut });
Task { ptr: Box::into_raw(inner) }
}
/// Transmute a u64 to a Task
pub unsafe fn from_notify_id(unpark_id: usize) -> Task {
mem::transmute(unpark_id)
}
/// Transmute a u64 to a task ref
pub unsafe fn from_notify_id_ref<'a>(unpark_id: &'a usize) -> &'a Task {
mem::transmute(unpark_id)
}
/// Execute the task returning `Run::Schedule` if the task needs to be
/// scheduled again.
pub fn run(&self, unpark: &Arc<Notifier>, exec: &mut Sender) -> Run {
use self::State::*;
// Transition task to running state. At this point, the task must be
// scheduled.
let actual: State = self.inner().state.compare_and_swap(
Scheduled.into(), Running.into(), AcqRel).into();
trace!("running; state={:?}", actual);
match actual {
Scheduled => {},
_ => panic!("unexpected task state; {:?}", actual),
}
trace!("Task::run; state={:?}", State::from(self.inner().state.load(Relaxed)));
let fut = &mut self.inner_mut().future;
// This block deals with the future panicking while being polled.
//
// If the future panics, then the drop handler must be called such that
// `thread::panicking() -> true`. To do this, the future is dropped from
// within the catch_unwind block.
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
struct Guard<'a>(&'a mut Option<TaskFuture>, bool);
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
// This drops the future
if self.1 {
let _ = self.0.take();
}
}
}
let mut g = Guard(fut, true);
let ret = g.0.as_mut().unwrap()
.poll(unpark, self.ptr as usize, exec);
g.1 = false;
ret
}));
match res {
Ok(Ok(Async::Ready(_))) | Ok(Err(_)) | Err(_) => {
trace!(" -> task complete");
// Drop the future
self.inner_mut().drop_future();
// Transition to the completed state
self.inner().state.store(State::Complete.into(), Release);
Run::Complete
}
Ok(Ok(Async::NotReady)) => {
trace!(" -> not ready");
// Attempt to transition from Running -> Idle, if successful,
// then the task does not need to be scheduled again. If the CAS
// fails, then the task has been unparked concurrent to running,
// in which case it transitions immediately back to scheduled
// and we return `true`.
let prev: State = self.inner().state.compare_and_swap(
Running.into(), Idle.into(), AcqRel).into();
match prev {
Running => Run::Idle,
Notified => {
self.inner().state.store(Scheduled.into(), Release);
Run::Schedule
}
_ => unreachable!(),
}
}
}
}
/// Transition the task state to scheduled.
///
/// Returns `true` if the caller is permitted to schedule the task.
pub fn schedule(&self) -> bool {
use self::State::*;
loop {
let actual = self.inner().state.compare_and_swap(
Idle.into(),
Scheduled.into(),
AcqRel).into();
match actual {
Idle => return true,
Running => {
let actual = self.inner().state.compare_and_swap(
Running.into(), Notified.into(), AcqRel).into();
match actual {
Idle => continue,
_ => return false,
}
}
Complete | Notified | Scheduled => return false,
}
}
}
#[inline]
fn inner(&self) -> &Inner {
unsafe { &*self.ptr }
}
#[inline]
fn inner_mut(&self) -> &mut Inner {
unsafe { &mut *self.ptr }
}
}
impl fmt::Debug for Task {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Task")
.field("inner", self.inner())
.finish()
}
}
impl Clone for Task {
fn clone(&self) -> Task {
use std::isize;
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
// Using a relaxed ordering is alright here, as knowledge of the
// original reference prevents other threads from erroneously deleting
// the object.
//
// As explained in the [Boost documentation][1], Increasing the
// reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing
// reference, and passing an existing reference from one thread to
// another must already provide any required synchronization.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
let old_size = self.inner().ref_count.fetch_add(1, Relaxed);
// However we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this the count can overflow
// and users will use-after free. We racily saturate to `isize::MAX` on
// the assumption that there aren't ~2 billion threads incrementing
// the reference count at once. This branch will never be taken in
// any realistic program.
//
// We abort because such a program is incredibly degenerate, and we
// don't care to support it.
if old_size > MAX_REFCOUNT {
// TODO: abort
panic!();
}
Task { ptr: self.ptr }
}
}
impl Drop for Task {
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().ref_count.fetch_sub(1, Release) != 1 {
return;
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
// deletion of the data.
//
// As explained in the [Boost documentation][1],
//
// > It is important to enforce any possible access to the object in one
// > thread (through an existing reference) to *happen before* deleting
// > the object in a different thread. This is achieved by a "release"
// > operation after dropping a reference (any access to the object
// > through this reference must obviously happened before), and an
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
atomic::fence(Acquire);
unsafe {
let _ = Box::from_raw(self.ptr);
}
}
}
unsafe impl Send for Task {} |
impl Inner {
fn stub() -> Inner {
Inner {
next: AtomicPtr::new(ptr::null_mut()),
state: AtomicUsize::new(State::stub().into()),
ref_count: AtomicUsize::new(0),
future: Some(TaskFuture |
// ===== impl Inner ===== | random_line_split |
process_test.py | az) * np.sin(az) * np.cos(elev))
G = sum(np.sin(az) * np.sin(elev))
D = sum(vel * np.cos(az))
E = sum(np.sin(az) * np.cos(az) * np.cos(elev))
F = sum(np.cos(az) ** 2 * np.cos(elev))
H = sum(np.cos(az) * np.sin(elev))
W = sum(vel)
X = sum(np.sin(az) * np.cos(elev))
Y = sum(np.cos(az) * np.cos(elev))
Z = sum(az * np.sin(elev))
        # solve A = uB + vC + wG, D = uE + vF + wH, and W = uX + vY + wZ
y = np.array([[B, E, X], [C, F, Y], [G, H, Z]])
z = np.array([A, D, W])
# print y
# print z
try:
sol = np.linalg.solve(y, z)
# print sol
u = sol[0]
v = sol[1]
w = sol[2]
return u, v, w
        except np.linalg.LinAlgError:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
else:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
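# The VAD writer below stores wind speed ("ws") and direction ("wd") derived from the
# (u, v) components returned above. The conversion used by the caller is not shown in
# this file; the helper below is only a sketch of the standard meteorological
# convention (direction the wind blows from, degrees clockwise from north) and is an
# assumption, not part of the original processing chain.
def uv_to_spd_dir(u, v):
    """Convert u/v wind components (m/s) to speed (m/s) and meteorological direction (deg)."""
    ws = np.sqrt(u ** 2 + v ** 2)
    wd = (270. - np.degrees(np.arctan2(v, u))) % 360.
    return ws, wd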
def decode_header(header):
"""
Takes in a list of lines from the raw hpl file. Separates them by
tab and removes unnecessary text
"""
new_header = {}
for item in header:
split = item.split('\t')
new_header[split[0].replace(':', '')] = split[1].replace("\r\n", "")
return new_header
def _to_epoch(dt):
return (dt - datetime(1970, 1, 1)).total_seconds()
"""
process_file(in_file, out_dir, prefix):
Processes a raw halo hpl file and turns it into a netcdf
:param in_file:
:param out_dir:
:return:
"""
def writeVAD_to_nc(filename, date, elev, u, v, w, ws, wd, hgt, rmse, r_sq,up_flag,intensity):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'a', format="NETCDF4")
dim = nc.dimensions['t'].size
u_var = nc.variables['u']
v_var = nc.variables['v']
w_var = nc.variables['w']
ws_var = nc.variables['ws']
wd_var = nc.variables['wd']
rms_var = nc.variables['rms']
r_sq_var = nc.variables['r_sq']
time_var = nc.variables['time']
hgt_var = nc.variables['hgt']
up_flag_var = nc.variables['up_flag']
u_var[dim, :] = u
v_var[dim, :] = v
w_var[dim, :] = w
ws_var[dim, :] = ws
wd_var[dim, :] = wd
rms_var[dim, :] = rmse
r_sq_var[dim, :] = r_sq
up_flag_var[dim] = up_flag
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("elev", elev)
nc.setncattr("date", date.isoformat())
# Create the variables
u_var = nc.createVariable('u', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
v_var = nc.createVariable('v', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
w_var = nc.createVariable('w', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
ws_var = nc.createVariable('ws', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
wd_var = nc.createVariable('wd', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rms_var = nc.createVariable('rms', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
r_sq_var = nc.createVariable('r_sq', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
dim = nc.dimensions['t'].size
u_var[dim, :] = np.where(np.isnan(u), FILL_VALUE, u)
v_var[dim, :] = np.where(np.isnan(v), FILL_VALUE, v)
w_var[dim, :] = np.where(np.isnan(w), FILL_VALUE, w)
ws_var[dim, :] = np.where(np.isnan(ws), FILL_VALUE, ws)
wd_var[dim, :] = np.where(np.isnan(wd), FILL_VALUE, wd)
hgt_var[dim, :] = np.where(np.isnan(hgt), FILL_VALUE, hgt)
rms_var[dim, :] = np.where(np.isnan(rmse), FILL_VALUE, rmse)
r_sq_var[dim, :] = np.where(np.isnan(r_sq), FILL_VALUE, r_sq)
time_var[dim] = (date - datetime(1970, 1, 1)).total_seconds()
up_flag_var[dim]=up_flag
intensity_var[dim] = intensity
# Close the netcdf
nc.close()
def writeSTARE_to_nc(filename, date, w, hgt, intensity):
|
hgt_var[:] = hgt
time_var[:] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
w_var[:, :] = w
intensity_var[:] = intensity
# Close the netcdf
nc.close()
def writeRHI_to_nc(filename, date, vel, rng, elev, az, intensity,up_flag):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'r+', format="NETCDF4")
dim = nc.dimensions['t'].size
vel_var = nc.variables['velocity']
rng_var = nc.variables['range']
elev_var = nc.variables['elevation']
az_var = nc.variables['azimuth']
intensity_var = nc.variables['intensity']
time_var = nc.variables['time']
up_flag_var = nc.variables['up_flag']
# vel_var[dim, :] = vel
# rng_var[:] = rng
# elev_var[dim] = elev
# az_var[dim] = az
# intensity_var[dim, :] = intensity
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(rng))
nc.createDimension('t', None)
# Create the variables
vel_var = nc.createVariable('velocity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rng_var = nc.createVariable('range', 'f8', ('height'), fill_value=FILL_VALUE)
elev_var = nc.createVariable('elevation', 'f8', ('t'), fill_value=FILL_VALUE)
az_var = nc.createVariable('azimuth', 'f8', ('t'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-0 | logging.debug(filename)
logging.debug(date)
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('hgt', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("date", date[0].isoformat())
# Create the variables
w_var = nc.createVariable('w', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC') | identifier_body |
process_test.py | attr('units', 'seconds since 1970-01-01 00:00:00 UTC')
hgt_var[:] = hgt
time_var[:] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
w_var[:, :] = w
intensity_var[:] = intensity
# Close the netcdf
nc.close()
def writeRHI_to_nc(filename, date, vel, rng, elev, az, intensity,up_flag):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'r+', format="NETCDF4")
dim = nc.dimensions['t'].size
vel_var = nc.variables['velocity']
rng_var = nc.variables['range']
elev_var = nc.variables['elevation']
az_var = nc.variables['azimuth']
intensity_var = nc.variables['intensity']
time_var = nc.variables['time']
up_flag_var = nc.variables['up_flag']
# vel_var[dim, :] = vel
# rng_var[:] = rng
# elev_var[dim] = elev
# az_var[dim] = az
# intensity_var[dim, :] = intensity
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(rng))
nc.createDimension('t', None)
# Create the variables
vel_var = nc.createVariable('velocity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rng_var = nc.createVariable('range', 'f8', ('height'), fill_value=FILL_VALUE)
elev_var = nc.createVariable('elevation', 'f8', ('t'), fill_value=FILL_VALUE)
az_var = nc.createVariable('azimuth', 'f8', ('t'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
dim = nc.dimensions['t'].size
dim2 = dim+len(date)
vel_var[dim:dim2, :] = vel
rng_var[:] = np.where(np.isnan(rng), FILL_VALUE, rng)
elev_var[dim:dim2] = np.where(np.isnan(elev), FILL_VALUE, elev)
az_var[dim:dim2] = np.where(np.isnan(az), FILL_VALUE, az)
intensity_var[dim:dim2, :] = np.where(np.isnan(intensity), FILL_VALUE, intensity)
time_var[dim:dim2] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
#print [up_flag for i in range(len(date))]
up_flag_var[dim:dim2] = [float(up_flag) for i in range(len(date))]
# Close the netcdf
nc.close()
#########PROCESS CODE####################
#######deployment logging
# reads config file in TORUS_DL/logs/config.js
# writes out changes/events to deployment log in TORUS_DL/logs/log_MMDDYY.txt
# grab current timestamp for run
now = datetime.utcnow()
#now=datetime(2019,05,17,23,58)
log_time = now.strftime("%m%d%y_%H%M")
today = now.strftime("%Y%m%d") # - commented out for test date below.
# today_l = datetime(2019,05,07)
# today=today_l.strftime("%Y%m%d")
# now = datetime(year=2019, month=5, day=7, hour=16, minute=15)
# open and read in config file info
config = open('/Users/elizabethsmith/TORUS_DL/logs/config.js')
logdata = json.load(config)
config.close()
if logdata["status"]=='up':
print "we're up :)"
up_flag=1
if logdata["status"]=='down':
print "we're down :( "
up_flag=0
# TB - Running into an error here when starting a new log file.
# Added some automation to make your life easier
if os.path.exists('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"]):
# open and read most recent logfile entry
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "r+")
lines = current_logfile.readlines()
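# The fixed slice offsets below assume the 8-line entry block written further down:
# lines[-6]..[-2] are the "status: ", "heading: ", "lat: ", "lon: " and "note: " lines,
# and the character offsets strip those label prefixes plus the trailing newline.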
prev_status = lines[-6][8:-1] # reading logfile previous status (skipping text)
prev_heading = lines[-5][9:-1] # reading logfile previous heading (skipping text)
prev_lat = lines[-4][5:-1] # reading logfile previous lat (skipping text)
prev_lon = lines[-3][5:-1] # reading logfile previous lat (skipping text)
prev_note = lines[-2][6:-1] # reading logfile previous note (skipping text)
# check if the previous log entry matches the data in the config file..
print logdata["note"], prev_note
if (str(logdata["status"]) != prev_status or str(logdata["heading"]) != prev_heading or
str(logdata["lat"]) != prev_lat or str(logdata["lon"]) != prev_lon or
str(logdata["note"]) != prev_note):
print '**CONFIG FILE HAS BEEN UPDATED!** generating log entry...'
# generate writeout for new log entry based on config file.
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
else:
print "--no config changes"
current_logfile.close()
else:
current_logfile = open('/Users/elizabethsmith/TORUS_DL/logs/' + logdata["logfile"], "w")
writeout = ["*********ENTRY**************\n",
"timestamp: %s\n" % log_time,
"status: %s\n" % logdata["status"],
"heading: %s\n" % logdata["heading"],
"lat: %s\n" % logdata["lat"],
"lon: %s\n" % logdata["lon"],
"note: %s\n" % logdata["note"],
"*********END***************\n"]
current_logfile.writelines(writeout)
print "**Logfile updated -- see /Users/elizabethsmith/TORUS_DL/logs/%s" % logdata["logfile"]
# get list of existing processed scans
path_proc = '/Users/elizabethsmith/TORUS_DL/data/nonQA_proc/dl/2019/201905/' + today + '/'
# check to make sure the output dir exists
try:
os.makedirs(path_proc)
except OSError:
logging.debug("Output path already exists...")
# Check to make sure the processed_files.txt exists
if not os.path.exists(path_proc + 'processed_files.txt'):
os.system('touch {}'.format(path_proc + 'processed_files.txt'))
# Open the processed files list and read it in
proc_list = open(path_proc + 'processed_files.txt', "r+")
proc_files = proc_list.readlines()
proc_list.close()
# Be sure to add the files that are processed to the running list
# get list of existing raw scans - always do the stare
# TB - I changed some things here so only the scans from the current hour are even looked at.
# - This cuts down on processing for the stare files!
path_raw = now.strftime('/Users/elizabethsmith/TORUS_DL/data/raw/dl/%Y/%Y%m/%Y%m%d/*%Y%m%d_*.hpl')
#print path_raw
raw_files = [f for f in glob(path_raw)]
#print raw_files
raw_files=sorted(raw_files)
# Process the scans
for in_file in raw_files:
# TB - I changed your logic for finding the files to process. This is a little easier and less prone to bugs
# Check to see if the file is in the already processed files. If it is, skip it.
if in_file+'\n' in proc_files:
logging.debug("{} already processed".format(in_file))
continue
else:
logging.info("Processing {}".format(in_file))
# read in new scan
out_dir = path_proc
prefix = 'nonQA'
# Read in the text file
lines = []
with open(in_file) as f:
for line in f:
| lines.append(line) | conditional_block |
|
process_test.py | scans - always do the stare
# TB - I changed some things here so only the scans from the current hour are even looked at.
# - This cuts down on processing for the stare files!
path_raw = now.strftime('/Users/elizabethsmith/TORUS_DL/data/raw/dl/%Y/%Y%m/%Y%m%d/*%Y%m%d_*.hpl')
#print path_raw
raw_files = [f for f in glob(path_raw)]
#print raw_files
raw_files=sorted(raw_files)
# Process the scans
for in_file in raw_files:
# TB - I changed your logic for finding the files to process. This is a little easier and less prone to bugs
# Check to see if the file is in the already processed files. If it is, skip it.
if in_file+'\n' in proc_files:
logging.debug("{} already processed".format(in_file))
continue
else:
logging.info("Processing {}".format(in_file))
# read in new scan
out_dir = path_proc
prefix = 'nonQA'
# Read in the text file
lines = []
with open(in_file) as f:
for line in f:
lines.append(line)
logging.debug("Decoding header")
# Read in the header info
header = decode_header(lines[0:11])
ngates = int(header['Number of gates'])
# nrays = int(header['No. of rays in file']) # Can't do this apparently. Not always correct (wtf)
len_data = len(lines[17:])
nrays = len_data / (ngates + 1)
gate_length = float(header['Range gate length (m)'])
start_time = datetime.strptime(header['Start time'], '%Y%m%d %H:%M:%S.%f')
scan_type = None
logging.debug("Reading data")
# Read in the actual data
az = np.zeros(nrays)
hour = np.zeros(nrays)
elev = np.zeros(nrays)
pitch = np.zeros(nrays)
roll = np.zeros(nrays)
rng = np.asarray([(gate + .5) * gate_length for gate in range(ngates)])
vel = np.zeros((ngates, nrays))
intensity = np.zeros((ngates, nrays))
beta = np.zeros((ngates, nrays))
try:
for ray in range(nrays):
# Get the scan info
info = lines[ray * (ngates + 1) + 17].split()
hour[ray] = float(info[0])
az[ray] = float(info[1])
elev[ray] = float(info[2])
pitch[ray] = float(info[3])
roll[ray] = float(info[4])
for gate in range(ngates):
data = lines[ray * (ngates + 1) + 17 + gate + 1].split()
vel[gate, ray] = float(data[1])
intensity[gate, ray] = float(data[2])
beta[gate, ray] = float(data[3])
except IndexError:
logging.warning("Something went wrong with the indexing here...")
# correction for some rounding/hysteresis in scanner azimuths... setting all vals==360. to 0.
az[np.where(az == 360.)] = 0.
# dynamic identification of lidar scan type (fp,ppi,rhi)
# TB - I added the round here. Was getting a fp file ID'd as a rhi file
# - Also had an issue with an RHI file where az[0] was 0.01 and az[2] was 0
try:
if np.round(az[0], 1) == np.round(az[2], 1): # const azimuth could be RHI or stare
if np.round(elev[0], 1) == np.round(elev[2], 1): # const azimuth and constant elev = stare
scan_type = 'fp'
else: # const azimuth and non-constant elev = RHI
scan_type = 'rhi'
elif np.round(elev[0], 1) == np.round(elev[2], 1): # changing azimuth, const elev = PPI
scan_type = 'ppi'
if scan_type == None:
raise IndexError
logging.info("Scan Type: " + scan_type)
except IndexError:
logging.warning("Something went wrong with scan type IDing...")
if scan_type == 'ppi':
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
hgt = []
u = []
v = []
w = []
rmse = []
r_sq = []
for i, rng in enumerate(rng):
# Get the required stuff for this range ring
cnr = intensity[i, :] # range,azimuth
Vel = vel[i, :] # range,azimuth
Az = az # 8-terms of az
Elev = elev # 8-terms of az
# Filter out the bad values based on CNR - default was 1.015
Az = np.where(cnr <= 1.01, FILL_VALUE, Az)
Vel = np.where(cnr <= 1.01, FILL_VALUE, Vel)
Az = list_to_masked_array(Az, FILL_VALUE)
Vel = list_to_masked_array(Vel, FILL_VALUE)
# Calculate the vad and height for this range ring
tmp_u, tmp_v, tmp_w = calc_vad_3d(Az, Elev, Vel) # grab this to it can point to it!!!
# Calculate the RMSE
N = float(Vel.size)
az_rad = np.deg2rad(Az)
elev_rad = np.deg2rad(Elev)
derived_vr = (np.sin(az_rad) * np.cos(elev_rad) * tmp_u) + \
(np.cos(az_rad) * np.cos(elev_rad) * tmp_v) + \
(np.sin(elev_rad) * tmp_w)
tmp_E = Vel - derived_vr
# Calculate rms error
tmp_RMSE = np.sqrt(1 / N * np.sum(tmp_E ** 2))
tmp_r_sq = calc_homogeneity(Vel, derived_vr)
# Append to the lists for plotting
u.append(tmp_u)
v.append(tmp_v)
w.append(tmp_w)
hgt.append(ray_height(rng, Elev[0]))
rmse.append(tmp_RMSE)
r_sq.append(tmp_r_sq)
vector_wind = rotate(u, v, w, logdata["heading"], 0, 0)
vector_wind = vector_wind.squeeze()
u = vector_wind[:, 0]
v = vector_wind[:, 1]
w = vector_wind[:, 2]
ws = wind_uv_to_spd(u, v)
wd = wind_uv_to_dir(u, v)
writeVAD_to_nc(path_proc + prefix + date.strftime('%Y%m%d') + '_VAD.nc', date, elev, u, v, w, ws, wd, hgt,
rmse, r_sq,up_flag)
# add newly processed file to list of processed files
proc_list = open(path_proc + 'processed_files.txt', "a")
proc_list.writelines(in_file+'\n')
proc_list.close()
if scan_type == 'fp':
# TB - I decided that it is best to just process the entire stare file every time
# instead of trying to append to the netcdf. This shouldn't hinder processing time that much
# since I changed things to only grab files from the same hour you're processing
date = datetime.strptime(start_time.strftime('%Y-%m-%dT%H:%M:%S'), "%Y-%m-%dT%H:%M:%S")
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
# Filter out the bad values based on CNR
Vel = np.where(intensity <= 1.01, FILL_VALUE, vel)
# Get the rng into a 2d array
rng = np.array([rng for i in range(len(rng))])
logging.debug("Writing stare file")
writeSTARE_to_nc(path_proc+prefix+date.strftime('%Y%m%d_%H_STARE.nc'), times, vel.transpose(), rng, up_flag)
if scan_type=='rhi':
# TB - A quick tip: Don't do an RHI at az=0. It bounces between 0 and 360 and is a pain in the ass to process
# - Just do it at like 1 deg. or even .1
# TB - Note to self - need to do a heading correction on this one.
date = start_time
times = np.asarray([datetime(year=date.year, month=date.month, day=date.day) + timedelta(hours=h) for h in hour])
filename = path_proc + prefix + date.strftime('%Y%m%d_%H') + '_RHI.nc'
# break | writeRHI_to_nc(filename, times, vel.transpose(), rng, elev, az, intensity.transpose(), up_flag) | random_line_split |
|
process_test.py | az) * np.sin(az) * np.cos(elev))
G = sum(np.sin(az) * np.sin(elev))
D = sum(vel * np.cos(az))
E = sum(np.sin(az) * np.cos(az) * np.cos(elev))
F = sum(np.cos(az) ** 2 * np.cos(elev))
H = sum(np.cos(az) * np.sin(elev))
W = sum(vel)
X = sum(np.sin(az) * np.cos(elev))
Y = sum(np.cos(az) * np.cos(elev))
Z = sum(np.sin(elev))
# solve A = uB + vC + wG , D = uE + vF + wH and W = uX + vY+ wZ
y = np.array([[B, E, X], [C, F, Y], [G, H, Z]])
z = np.array([A, D, W])
# print y
# print z
try:
sol = np.linalg.solve(y, z)
# print sol
u = sol[0]
v = sol[1]
w = sol[2]
return u, v, w
except np.linalg.linalg.LinAlgError:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
else:
return FILL_VALUE, FILL_VALUE, FILL_VALUE
def | (header):
"""
Takes in a list of lines from the raw hpl file. Separates them by
tab and removes unnecessary text
"""
new_header = {}
for item in header:
split = item.split('\t')
new_header[split[0].replace(':', '')] = split[1].replace("\r\n", "")
return new_header
def _to_epoch(dt):
return (dt - datetime(1970, 1, 1)).total_seconds()
"""
process_file(in_file, out_dir, prefix):
Processes a raw halo hpl file and turns it into a netcdf
:param in_file:
:param out_dir:
:return:
"""
def writeVAD_to_nc(filename, date, elev, u, v, w, ws, wd, hgt, rmse, r_sq,up_flag,intensity):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'a', format="NETCDF4")
dim = nc.dimensions['t'].size
u_var = nc.variables['u']
v_var = nc.variables['v']
w_var = nc.variables['w']
ws_var = nc.variables['ws']
wd_var = nc.variables['wd']
rms_var = nc.variables['rms']
r_sq_var = nc.variables['r_sq']
time_var = nc.variables['time']
hgt_var = nc.variables['hgt']
up_flag_var = nc.variables['up_flag']
u_var[dim, :] = u
v_var[dim, :] = v
w_var[dim, :] = w
ws_var[dim, :] = ws
wd_var[dim, :] = wd
rms_var[dim, :] = rmse
r_sq_var[dim, :] = r_sq
up_flag_var[dim] = up_flag
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("elev", elev)
nc.setncattr("date", date.isoformat())
# Create the variables
u_var = nc.createVariable('u', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
v_var = nc.createVariable('v', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
w_var = nc.createVariable('w', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
ws_var = nc.createVariable('ws', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
wd_var = nc.createVariable('wd', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rms_var = nc.createVariable('rms', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
r_sq_var = nc.createVariable('r_sq', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
dim = nc.dimensions['t'].size
u_var[dim, :] = np.where(np.isnan(u), FILL_VALUE, u)
v_var[dim, :] = np.where(np.isnan(v), FILL_VALUE, v)
w_var[dim, :] = np.where(np.isnan(w), FILL_VALUE, w)
ws_var[dim, :] = np.where(np.isnan(ws), FILL_VALUE, ws)
wd_var[dim, :] = np.where(np.isnan(wd), FILL_VALUE, wd)
hgt_var[dim, :] = np.where(np.isnan(hgt), FILL_VALUE, hgt)
rms_var[dim, :] = np.where(np.isnan(rmse), FILL_VALUE, rmse)
r_sq_var[dim, :] = np.where(np.isnan(r_sq), FILL_VALUE, r_sq)
time_var[dim] = (date - datetime(1970, 1, 1)).total_seconds()
up_flag_var[dim]=up_flag
intensity_var[dim] = intensity
# Close the netcdf
nc.close()
def writeSTARE_to_nc(filename, date, w, hgt, intensity):
logging.debug(filename)
logging.debug(date)
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('hgt', len(hgt))
nc.createDimension('t', None)
# Add the attributes
nc.setncattr("date", date[0].isoformat())
# Create the variables
w_var = nc.createVariable('w', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
hgt_var = nc.createVariable('hgt', 'f8', ('t', 'hgt'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t','hgt'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-01-01 00:00:00 UTC')
hgt_var[:] = hgt
time_var[:] = [(d - datetime(1970, 1, 1)).total_seconds() for d in date]
w_var[:, :] = w
intensity_var[:] = intensity
# Close the netcdf
nc.close()
def writeRHI_to_nc(filename, date, vel, rng, elev, az, intensity,up_flag):
if os.path.exists(filename):
# open the netcdf
nc = netCDF4.Dataset(filename, 'r+', format="NETCDF4")
dim = nc.dimensions['t'].size
vel_var = nc.variables['velocity']
rng_var = nc.variables['range']
elev_var = nc.variables['elevation']
az_var = nc.variables['azimuth']
intensity_var = nc.variables['intensity']
time_var = nc.variables['time']
up_flag_var = nc.variables['up_flag']
# vel_var[dim, :] = vel
# rng_var[:] = rng
# elev_var[dim] = elev
# az_var[dim] = az
# intensity_var[dim, :] = intensity
else:
# Create the netcdf
nc = netCDF4.Dataset(filename, 'w', format="NETCDF4")
# Create the height dimension
nc.createDimension('height', len(rng))
nc.createDimension('t', None)
# Create the variables
vel_var = nc.createVariable('velocity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
rng_var = nc.createVariable('range', 'f8', ('height'), fill_value=FILL_VALUE)
elev_var = nc.createVariable('elevation', 'f8', ('t'), fill_value=FILL_VALUE)
az_var = nc.createVariable('azimuth', 'f8', ('t'), fill_value=FILL_VALUE)
intensity_var = nc.createVariable('intensity', 'f8', ('t', 'height'), fill_value=FILL_VALUE)
up_flag_var = nc.createVariable('up_flag', 'f8', ('t'))
time_var = nc.createVariable('time', 'i8', ('t'))
time_var.setncattr('units', 'seconds since 1970-0 | decode_header | identifier_name |
sender.go | Logic
type metricPair struct {
attributes pcommon.Map
metric pmetric.Metric
}
type sender struct {
logBuffer []plog.LogRecord
metricBuffer []metricPair
config *Config
client *http.Client
filter filter
sources sourceFormats
compressor compressor
prometheusFormatter prometheusFormatter
graphiteFormatter graphiteFormatter
}
const (
logKey string = "log"
// maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil |
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line and returns it
func (s *sender) logToText(record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// sendMetrics sends metrics in the right format based on s.config.MetricFormat
func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
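// If appending this line would push the accumulated body past MaxRequestBodySize,
// flush the current body first and mark the response as sent so the caller can
// reset its bookkeeping of which records are buffered in the request.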
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
| {
return err
} | conditional_block |
sender.go | Logic
type metricPair struct {
attributes pcommon.Map
metric pmetric.Metric
}
type sender struct {
logBuffer []plog.LogRecord
metricBuffer []metricPair
config *Config
client *http.Client
filter filter
sources sourceFormats
compressor compressor
prometheusFormatter prometheusFormatter
graphiteFormatter graphiteFormatter
}
const (
logKey string = "log"
// maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil {
return err
}
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line and returns it
func (s *sender) | (record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// sendMetrics sends metrics in the right format based on s.config.MetricFormat
func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
| logToText | identifier_name |
sender.go | o Logic
type metricPair struct {
attributes pcommon.Map
metric pmetric.Metric
}
type sender struct {
logBuffer []plog.LogRecord
metricBuffer []metricPair
config *Config
client *http.Client
filter filter
sources sourceFormats
compressor compressor
prometheusFormatter prometheusFormatter
graphiteFormatter graphiteFormatter
}
const (
logKey string = "log"
// maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil {
return err
}
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line and returns it
func (s *sender) logToText(record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
} | func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
|
return droppedRecords, errs
}
// sendMetrics sends metrics in right format basing on the s.config.MetricFormat | random_line_split |
sender.go | BufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries)
maxBufferSize int = 1024 * 1024
headerContentType string = "Content-Type"
headerContentEncoding string = "Content-Encoding"
headerClient string = "X-Sumo-Client"
headerHost string = "X-Sumo-Host"
headerName string = "X-Sumo-Name"
headerCategory string = "X-Sumo-Category"
headerFields string = "X-Sumo-Fields"
contentTypeLogs string = "application/x-www-form-urlencoded"
contentTypePrometheus string = "application/vnd.sumologic.prometheus"
contentTypeCarbon2 string = "application/vnd.sumologic.carbon2"
contentTypeGraphite string = "application/vnd.sumologic.graphite"
contentEncodingGzip string = "gzip"
contentEncodingDeflate string = "deflate"
)
func newAppendResponse() appendResponse {
return appendResponse{
appended: true,
}
}
func newSender(
cfg *Config,
cl *http.Client,
f filter,
s sourceFormats,
c compressor,
pf prometheusFormatter,
gf graphiteFormatter,
) *sender {
return &sender{
config: cfg,
client: cl,
filter: f,
sources: s,
compressor: c,
prometheusFormatter: pf,
graphiteFormatter: gf,
}
}
// send sends data to sumologic
func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader, flds fields) error {
data, err := s.compressor.compress(body)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, s.config.HTTPClientSettings.Endpoint, data)
if err != nil {
return err
}
// Add headers
switch s.config.CompressEncoding {
case GZIPCompression:
req.Header.Set(headerContentEncoding, contentEncodingGzip)
case DeflateCompression:
req.Header.Set(headerContentEncoding, contentEncodingDeflate)
case NoCompression:
default:
return fmt.Errorf("invalid content encoding: %s", s.config.CompressEncoding)
}
req.Header.Add(headerClient, s.config.Client)
if s.sources.host.isSet() {
req.Header.Add(headerHost, s.sources.host.format(flds))
}
if s.sources.name.isSet() {
req.Header.Add(headerName, s.sources.name.format(flds))
}
if s.sources.category.isSet() {
req.Header.Add(headerCategory, s.sources.category.format(flds))
}
switch pipeline {
case LogsPipeline:
req.Header.Add(headerContentType, contentTypeLogs)
req.Header.Add(headerFields, flds.string())
case MetricsPipeline:
switch s.config.MetricFormat {
case PrometheusFormat:
req.Header.Add(headerContentType, contentTypePrometheus)
case Carbon2Format:
req.Header.Add(headerContentType, contentTypeCarbon2)
case GraphiteFormat:
req.Header.Add(headerContentType, contentTypeGraphite)
default:
return fmt.Errorf("unsupported metrics format: %s", s.config.MetricFormat)
}
default:
return errors.New("unexpected pipeline")
}
resp, err := s.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return fmt.Errorf("error during sending data: %s", resp.Status)
}
return nil
}
// logToText converts LogRecord to a plain text line and returns it
func (s *sender) logToText(record plog.LogRecord) string {
return record.Body().AsString()
}
// logToJSON converts LogRecord to a json line, returns it and error eventually
func (s *sender) logToJSON(record plog.LogRecord) (string, error) {
data := s.filter.filterOut(record.Attributes())
record.Body().CopyTo(data.orig.PutEmpty(logKey))
nextLine, err := json.Marshal(data.orig.AsRaw())
if err != nil {
return "", err
}
return bytes.NewBuffer(nextLine).String(), nil
}
// sendLogs sends log records from the logBuffer formatted according
// to configured LogFormat and as the result of execution
// returns array of records which has not been sent correctly and error
func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) {
var (
body strings.Builder
errs error
droppedRecords []plog.LogRecord
currentRecords []plog.LogRecord
)
for _, record := range s.logBuffer {
var formattedLine string
var err error
switch s.config.LogFormat {
case TextFormat:
formattedLine = s.logToText(record)
case JSONFormat:
formattedLine, err = s.logToJSON(record)
default:
err = errors.New("unexpected log format")
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, LogsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, LogsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// sendMetrics sends metrics in the right format based on s.config.MetricFormat
func (s *sender) sendMetrics(ctx context.Context, flds fields) ([]metricPair, error) {
var (
body strings.Builder
errs error
droppedRecords []metricPair
currentRecords []metricPair
)
for _, record := range s.metricBuffer {
var formattedLine string
var err error
switch s.config.MetricFormat {
case PrometheusFormat:
formattedLine = s.prometheusFormatter.metric2String(record)
case Carbon2Format:
formattedLine = carbon2Metric2String(record)
case GraphiteFormat:
formattedLine = s.graphiteFormatter.metric2String(record)
default:
err = fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)
}
if err != nil {
droppedRecords = append(droppedRecords, record)
errs = multierr.Append(errs, err)
continue
}
ar, err := s.appendAndSend(ctx, formattedLine, MetricsPipeline, &body, flds)
if err != nil {
errs = multierr.Append(errs, err)
if ar.sent {
droppedRecords = append(droppedRecords, currentRecords...)
}
if !ar.appended {
droppedRecords = append(droppedRecords, record)
}
}
// If data was sent, cleanup the currentTimeSeries counter
if ar.sent {
currentRecords = currentRecords[:0]
}
// If log has been appended to body, increment the currentTimeSeries
if ar.appended {
currentRecords = append(currentRecords, record)
}
}
if body.Len() > 0 {
if err := s.send(ctx, MetricsPipeline, strings.NewReader(body.String()), flds); err != nil {
errs = multierr.Append(errs, err)
droppedRecords = append(droppedRecords, currentRecords...)
}
}
return droppedRecords, errs
}
// appendAndSend appends line to the request body that will be sent and sends
// the accumulated data if the internal logBuffer has been filled (with maxBufferSize elements).
// It returns appendResponse
func (s *sender) appendAndSend(
ctx context.Context,
line string,
pipeline PipelineType,
body *strings.Builder,
flds fields,
) (appendResponse, error) {
var errs error
ar := newAppendResponse()
if body.Len() > 0 && body.Len()+len(line) >= s.config.MaxRequestBodySize {
ar.sent = true
errs = multierr.Append(errs, s.send(ctx, pipeline, strings.NewReader(body.String()), flds))
body.Reset()
}
if body.Len() > 0 {
// Do not add newline if the body is empty
if _, err := body.WriteString("\n"); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
if ar.appended {
// Do not append new line if separator was not appended
if _, err := body.WriteString(line); err != nil {
errs = multierr.Append(errs, err)
ar.appended = false
}
}
return ar, errs
}
// cleanLogsBuffer zeroes logBuffer
func (s *sender) cleanLogsBuffer() | {
s.logBuffer = (s.logBuffer)[:0]
} | identifier_body |
|
marketplace.js | (n) {
return !isNaN(parseFloat(n)) && isFinite(n);
}
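// The result templates further down rely on a String.prototype.format helper that
// substitutes "{0}", "{1}", ... with its arguments. That helper is presumably defined
// elsewhere in the site's scripts; a minimal guarded sketch of the assumed behaviour:
if (!String.prototype.format) {
    String.prototype.format = function () {
        var args = arguments;
        return this.replace(/\{(\d+)\}/g, function (match, index) {
            return typeof args[index] !== 'undefined' ? args[index] : match;
        });
    };
}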
function filterSearch() {
var fs = {};
fs.CompanyName = '';
fs.Products = [];
fs.ProductCategories = [];
fs.ProducingCountries = [];
fs.ProducingRegions = [];
fs.StandardsCertified = [];
fs.StandardsAssessed = [];
fs.Compliance = {};
fs.Compliance.Overall = max(-1, $('#overallPercentage').val());
fs.Compliance.Environment = max(-1, $('#environmentPercentage').val());
fs.Compliance.Social = max(-1, $('#sociallPercentage').val());
fs.Compliance.Economic = max(-1, $('#economicPercentage').val());
fs.Compliance.Quality = max(-1, $('#qualityPercentage').val());
fs.Compliance.BusinessIntegrity = max(-1, $('#businessIntegrityPercentage').val());
var sd;
sd = $('#CompanyName').val();
if (sd && 0 < sd.length) {
fs.CompanyName = sd;
}
sd = $('#DropDownProduct').val();
if (sd && 0 < sd.length) {
sd.forEach(function (o, i) {
var p = o.split(':')[2];
if (p && 0 < p) fs.Products.push(p);
else fs.ProductCategories.push(o.split(':')[0]);
});
}
sd = $('#DropDownProducingCountry').val();
if (sd && 0 < sd.length) {
sd.forEach(function (o, i) {
var p = o.split(':')[1];
if (p && 0 < p) fs.ProducingCountries.push(p);
else fs.ProducingRegions.push(o.split(':')[0]);
});
}
sd = $('#StandardsCertifiedList').val();
if (sd && 0 < sd.length) {
sd.forEach(function (o, i) {
fs.StandardsCertified.push(o);
});
}
sd = $('#StandardsAssessedList').val();
if (sd && 0 < sd.length) {
sd.forEach(function (o, i) {
fs.StandardsAssessed.push(o);
});
}
searchReports(fs, buildResultsTable);
}
var buildResultsTable = function (d) {
var results_container = $('#ResultsTable tbody');
var standard_template = '<span class="standard" data-id="{0}" data-name="{1}" data-logolarge="{3}"><a href="marketplace.aspx#"><img src="http://search.standardsmap.org:8080/{2}" title="{1}" width="45" height="45" /></a></span>';
var row_template = '<tr class="report_result" data-id="{0}"><td class="company"><a href="marketplace.aspx#">{1}</a></td><td>{2}</td><td>{3}</td><td>{4}</td><td>{5}</td><td><a href="http://search.standardsmap.org:8080/reportfinal.aspx?report={0}">View</a></td></tr>';
var report_template = '<tr class="standard_result" data-id="{0}" id="{3}-{0}"><td colspan="6"><span class="standard_title"><img src="http://search.standardsmap.org:8080/{2}" width="90" height="90" /><h4>{1}</h4></span><span id="data-{3}-{0}"></span></td></tr>';
var company_template = '<tr class="company_result" data-id="{0}" id="{1}-{0}"><td colspan="6"><span id="company-{1}-{0}"></span></td></tr>';
results_container.empty();
if (d) {
var rpt_list = [];
d.forEach(function (r, i) {
rpt_list.push(r.ReportID);
var standards = '';
if (r.Standards) {
r.Standards.forEach(function (s, i) { standards += standard_template.format(s.Id, s.Name, s.LogoSmall, s.LogoLarge); });
}
var row = $(row_template.format(r.ReportID, r.CompanyName, r.Product, r.Country, standards, formatDate(fixJsonDate(r.Modified))));
row.find('td.company a').on('click', function (ev) {
ev.preventDefault();
var result = $(ev.currentTarget).closest('.report_result');
var cmp_row = results_container.find('#' + r.ReportID + '-' + r.CompanyID + '.company_result');
// does the row exist? if so toggle visibility
if (cmp_row.length) {
cmp_row.toggle();
}
else {
cmp_row = result.after(company_template.format(r.CompanyID, r.ReportID));
getCompanyDetails(r.CompanyID, 0, 1, function (d) {
showCompanyDetails(d, '#company-' + r.ReportID + '-' + r.CompanyID);
});
}
});
results_container.append(row);
});
results_container.find('.standard a').on('click', function (ev) {
ev.preventDefault();
var std = $(ev.currentTarget).closest('.standard');
var result = std.closest('.report_result');
var id = std.attr('data-id');
var name = std.attr('data-name');
var logo = std.attr('data-logolarge');
var rid = result.attr('data-id');
var std_row = results_container.find('#' + rid + '-' + id + '.standard_result');
// does the row exist? if so toggle visibility
if (std_row.length) {
std_row.toggle();
}
else {
var cmp_row = result.next('.company_result');
if (cmp_row.length) {
std_row = cmp_row.after(report_template.format(id, name, logo, rid));
}
else {
std_row = result.after(report_template.format(id, name, logo, rid));
}
getStandardResults(rid, id, 0, function (d) {
showStandardResults(d.Data, '#data-' + rid + '-' + id);
});
}
});
$('#ReportIDs').val(rpt_list.join(','));
}
}
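// NOTE (editorial sketch, not part of the original file): the '{0}'/'{1}' templates above rely on
// a String.prototype.format helper that is presumably defined elsewhere on the page. A minimal
// implementation with the same placeholder behaviour could look like this:
if (!String.prototype.format) {
    String.prototype.format = function () {
        var args = arguments;
        return this.replace(/\{(\d+)\}/g, function (match, index) {
            return typeof args[index] !== 'undefined' ? args[index] : match;
        });
    };
}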
var fixJsonDate = function (dt) {
var date = new Date(parseInt(dt.replace(/[^0-9 +]/g, '')));
return date;
}
var formatDate = function (d) {
var dd = d.getDate();
if ( dd < 10 ) dd = '0' + dd;
var mm = d.getMonth()+1;
if ( mm < 10 ) mm = '0' + mm;
var yy = d.getFullYear() % 100;
if ( yy < 10 ) yy = '0' + yy;
return dd+'/'+mm+'/'+yy;
}
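// NOTE (editorial example, not part of the original file): fixJsonDate parses WCF-style JSON
// dates such as "/Date(1408036800000)/" by stripping everything except digits, spaces and '+'
// and building a Date from the leading millisecond value; formatDate then renders it as
// zero-padded dd/mm/yy in local time, e.g.:
// formatDate(fixJsonDate('/Date(1408036800000)/'))   // "14/08/14" in a UTC or European timezone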
var showCompanyDetails = function (d, el) {
var template = '<span class="standard_title"><img src="http://search.standardsmap.org:8080/{5}" width="90" height="90"><h4>{1}</h4></span><span class="company_details"><p class="lead">{2}</p><p>Products/Services: {6}<br />Destination markets: {7}</p><p>Contact: <a href="mailto:{4}">{4}</a><br />Website: <a href="http://search.standardsmap.org:8080/{3}" target="_blank">{3}</a></p></span>{8}';
var standards = '';
var standard_template = '<span class="standard" data-id="{0}" data-name="{1}" data-logolarge="{3}"><img src="http://search.standardsmap.org:8080/{2}" title="{1}" alt="{1}" width="45" height="45" /></span>';
if (d.Standards) {
standards = '<span class="standards_certified">';
d.Standards.forEach(function (s, i) { standards += standard_template.format(s.Id, s.Name, s.LogoSmall, s.LogoLarge); });
standards += '</span>';
}
$(el).append(template.format(d.Id, d.Name, d.Description, d.Website, d.ContactEmail, d.Logo, d.Products, d.DestinationMarkets, standards));
}
var showStandardResults = function (d, el) {
//var overview_template = '<table class="table overview_table"><thead><tr><th>Overall</th><th>Environment</th><th>Social</th><th>Economic</th><th>Quality management</th><th>Ethics</th></tr></thead><tbody><tr><td>{0}</td><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}</td><td>{5}</td></tr></tbody></table>';
var overview_template = '<table class="overview_table"><caption>Latest compliance assessment {6}</caption><thead><tr class="hotspot-background-colors"><th class=""><i>∑</i>Overall</th><th class="hotspot-environment "><i class="icon-sm-environment"></i> Environment</th><th class="hotspot-social "><i class="icon-sm-social"></i> Social</th><th class="hotspot-economic "><i class="icon-sm-economic"></i> Economic</th><th class="hotspot-quality-management"><i class="icon-sm-quality"></i> Quality</th><th class="hotspot-ethics"><i class=" | isNumber | identifier_name |